HDDS-825. Code cleanup based on messages from ErrorProne.
Contributed by Anu Engineer.
Commit: a16aa2f60b
Parent: fcd94eeab8
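Most of the changes below follow a handful of recurring ErrorProne patterns rather than file-specific logic: platform-default charset calls get an explicit UTF-8, obsolete JDK types (LinkedList, StringBuffer) are swapped for ArrayList and StringBuilder, missing @Override annotations are added, fields are made private or final, and JUnit assertEquals arguments are put in (expected, actual) order. The class below is a minimal, illustrative sketch of those idioms; the class name and values are hypothetical and not part of the commit itself.

import static java.nio.charset.StandardCharsets.UTF_8;

import java.util.ArrayList;
import java.util.List;

/** Illustrative sketch of the ErrorProne fixes applied throughout this commit. */
public final class ErrorProneExamples {

  private ErrorProneExamples() {
  }

  public static void main(String[] args) {
    // DefaultCharset: "value".getBytes() depends on the platform charset;
    // the commit switches such calls to an explicit UTF-8.
    byte[] bytes = "value".getBytes(UTF_8);

    // JdkObsolete: LinkedList is replaced with ArrayList where only
    // appends and iteration are needed.
    List<byte[]> keys = new ArrayList<>();
    keys.add(bytes);

    // StringBuffer (synchronized) is replaced with StringBuilder for
    // single-threaded string assembly.
    String value = new StringBuilder().append('a').append("-value").toString();
    System.out.println(value + " / " + keys.size());
  }
}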
@@ -296,6 +296,7 @@ public class XceiverClientGrpc extends XceiverClientSpi {
     // For stand alone pipeline, there is no notion called setup pipeline.
   }

+  @Override
   public void destroyPipeline() {
     // For stand alone pipeline, there is no notion called destroy pipeline.
   }
@@ -170,6 +170,7 @@ public class XceiverClientManager implements Closeable {
   /**
    * Close and remove all the cached clients.
    */
+  @Override
   public void close() {
     //closing is done through RemovalListener
     clientCache.invalidateAll();
@@ -100,6 +100,7 @@ public final class XceiverClientRatis extends XceiverClientSpi {
   /**
    * {@inheritDoc}
    */
+  @Override
   public void createPipeline() throws IOException {
     final RaftGroup group = RatisHelper.newRaftGroup(pipeline);
     LOG.debug("creating pipeline:{} with {}", pipeline.getId(), group);
@@ -110,6 +111,7 @@ public final class XceiverClientRatis extends XceiverClientSpi {
   /**
    * {@inheritDoc}
    */
+  @Override
   public void destroyPipeline() throws IOException {
     final RaftGroup group = RatisHelper.newRaftGroup(pipeline);
     LOG.debug("destroying pipeline:{} with {}", pipeline.getId(), group);
@@ -83,7 +83,7 @@ public class ChunkInputStream extends InputStream implements Seekable {
   }

   private void initializeChunkOffset() {
-    int tempOffset = 0;
+    long tempOffset = 0;
     for (int i = 0; i < chunks.size(); i++) {
       chunkOffset[i] = tempOffset;
       tempOffset += chunks.get(i).getLen();
@@ -196,6 +196,17 @@ public final class Pipeline {
     return new Builder(pipeline);
   }

+  @Override
+  public String toString() {
+    return "Pipeline{" +
+        "id=" + id +
+        ", type=" + type +
+        ", factor=" + factor +
+        ", state=" + state +
+        ", nodeStatus=" + nodeStatus +
+        '}';
+  }
+
   /**
    * Builder class for Pipeline.
    */
@@ -68,7 +68,6 @@ public class StorageInfo {
       throws IOException {
     Preconditions.checkNotNull(type);
     Preconditions.checkNotNull(cid);
     Preconditions.checkNotNull(cT);
     properties.setProperty(NODE_TYPE, type.name());
     properties.setProperty(CLUSTER_ID, cid);
     properties.setProperty(CREATION_TIME, String.valueOf(cT));
@@ -24,11 +24,9 @@ import org.apache.hadoop.hdds.HddsUtils;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.utils.RocksDBStoreMBean;
-import org.apache.ratis.thirdparty.com.google.common.annotations.
-    VisibleForTesting;
+import org.apache.ratis.thirdparty.com.google.common.annotations.VisibleForTesting;
 import org.rocksdb.ColumnFamilyDescriptor;
 import org.rocksdb.ColumnFamilyHandle;
-
 import org.rocksdb.DBOptions;
 import org.rocksdb.RocksDB;
 import org.rocksdb.RocksDBException;
@@ -192,7 +190,6 @@ public class RDBStore implements DBStore {
     }
   }

-
   @Override
   public void move(byte[] key, byte[] value, Table source,
       Table dest) throws IOException {
@@ -226,7 +223,7 @@ public class RDBStore implements DBStore {
     } catch (RocksDBException rockdbException) {
       LOG.error("Move of key failed. Key:{}", DFSUtil.bytes2String(sourceKey));
       throw toIOException("Unable to move key: " +
-              DFSUtil.bytes2String(sourceKey), rockdbException);
+          DFSUtil.bytes2String(sourceKey), rockdbException);
     }
   }
@@ -37,7 +37,7 @@ public enum DummyAction implements AuditAction {
   SET_OWNER("SET_OWNER"),
   SET_QUOTA("SET_QUOTA");

-  private String action;
+  private final String action;

   DummyAction(String action) {
     this.action = action;
@@ -41,7 +41,7 @@ public class TestLeaseManager {
   /**
    * Dummy resource on which leases can be acquired.
    */
-  private final class DummyResource {
+  private static final class DummyResource {

     private final String name;
@@ -61,6 +61,21 @@ public class TestLeaseManager {
       }
       return false;
     }

+    /**
+     * Adding to String method to fix the ErrorProne warning that this method
+     * is later used in String functions, which would print out (e.g.
+     * `org.apache.hadoop.ozone.lease.TestLeaseManager.DummyResource@
+     * 4488aabb`) instead of useful information.
+     *
+     * @return Name of the Dummy object.
+     */
+    @Override
+    public String toString() {
+      return "DummyResource{" +
+          "name='" + name + '\'' +
+          '}';
+    }
   }

   @Test
@@ -55,7 +55,7 @@ public class TestHddsIdFactory {
     List<Future<Integer>> result = executor.invokeAll(tasks);
     assertEquals(IDS_PER_THREAD * NUM_OF_THREADS, ID_SET.size());
     for (Future<Integer> r : result) {
-      assertEquals(r.get().intValue(), IDS_PER_THREAD);
+      assertEquals(IDS_PER_THREAD, r.get().intValue());
     }
   }
@@ -1,24 +1,21 @@
 /**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
  * <p>
  * http://www.apache.org/licenses/LICENSE-2.0
  * <p>
  * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
 */
 package org.apache.hadoop.utils;

-import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows;
-
 import com.google.common.collect.Lists;
 import org.apache.commons.io.FileUtils;
 import org.apache.commons.lang3.tuple.ImmutablePair;
@@ -28,9 +25,9 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.utils.MetadataStore.KeyValue;
 import org.apache.hadoop.utils.MetadataKeyFilters.KeyPrefixFilter;
 import org.apache.hadoop.utils.MetadataKeyFilters.MetadataKeyFilter;
+import org.apache.hadoop.utils.MetadataStore.KeyValue;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Rule;
@@ -50,14 +47,14 @@ import java.util.List;
 import java.util.Map;
 import java.util.NoSuchElementException;
 import java.util.UUID;

 import java.util.concurrent.atomic.AtomicInteger;

+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;

 import static org.junit.runners.Parameterized.Parameters;
@@ -66,27 +63,24 @@ import static org.junit.runners.Parameterized.Parameters;
 @RunWith(Parameterized.class)
 public class TestMetadataStore {

+  private final static int MAX_GETRANGE_LENGTH = 100;
+  private final String storeImpl;
+
+  @Rule
+  public ExpectedException expectedException = ExpectedException.none();
+  private MetadataStore store;
+  private File testDir;
+
   public TestMetadataStore(String metadataImpl) {
     this.storeImpl = metadataImpl;
   }

   @Parameters
   public static Collection<Object[]> data() {
-    return Arrays.asList(new Object[][] {
+    return Arrays.asList(new Object[][]{
         {OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB},
         {OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB}
     });
   }

-  private MetadataStore store;
-  private File testDir;
-  private final static int MAX_GETRANGE_LENGTH = 100;
-
-  @Rule
-  public ExpectedException expectedException = ExpectedException.none();
-
   @Before
   public void init() throws IOException {
     if (OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB.equals(storeImpl)) {
@@ -109,7 +103,7 @@ public class TestMetadataStore {
     // Add 20 entries.
     // {a0 : a-value0} to {a9 : a-value9}
     // {b0 : b-value0} to {b9 : b-value9}
-    for (int i=0; i<10; i++) {
+    for (int i = 0; i < 10; i++) {
       store.put(getBytes("a" + i), getBytes("a-value" + i));
       store.put(getBytes("b" + i), getBytes("b-value" + i));
     }
@@ -178,7 +172,7 @@ public class TestMetadataStore {
     GenericTestUtils.setLogLevel(MetadataStoreBuilder.LOG, Level.DEBUG);
     GenericTestUtils.LogCapturer logCapturer =
         GenericTestUtils.LogCapturer.captureLogs(MetadataStoreBuilder.LOG);
-    if(storeImpl.equals(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB)) {
+    if (storeImpl.equals(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB)) {
       dbType = "RocksDB";
     } else {
       dbType = "LevelDB";
@@ -241,7 +235,7 @@ public class TestMetadataStore {

   @Test
   public void testGetDelete() throws IOException {
-    for (int i=0; i<10; i++) {
+    for (int i = 0; i < 10; i++) {
       byte[] va = store.get(getBytes("a" + i));
       assertEquals("a-value" + i, getString(va));
@@ -273,7 +267,7 @@ public class TestMetadataStore {
       return null;
     }
     char[] arr = key.toCharArray();
-    return new StringBuffer().append(arr[0]).append("-value")
+    return new StringBuilder().append(arr[0]).append("-value")
         .append(arr[arr.length - 1]).toString();
   }
@@ -326,14 +320,14 @@ public class TestMetadataStore {
       char num = value.charAt(value.length() - 1);
       // each value adds 1
       int i = Character.getNumericValue(num) + 1;
-      value = value.substring(0, value.length() - 1) + i;
+      value = value.substring(0, value.length() - 1) + i;
       result.add(value);
       return true;
     });

     assertFalse(result.isEmpty());
-    for (int i=0; i<result.size(); i++) {
-      assertEquals("b-value" + (i+1), result.get(i));
+    for (int i = 0; i < result.size(); i++) {
+      assertEquals("b-value" + (i + 1), result.get(i));
     }

     // iterate from a non exist key
@@ -388,7 +382,7 @@ public class TestMetadataStore {
     result = store.getRangeKVs(null, 100, filter1);
     assertEquals(10, result.size());
     assertTrue(result.stream().allMatch(entry ->
-        new String(entry.getKey()).startsWith("b")
+        new String(entry.getKey(), UTF_8).startsWith("b")
     ));
     assertEquals(20, filter1.getKeysScannedNum());
     assertEquals(10, filter1.getKeysHintedNum());
@@ -416,7 +410,7 @@ public class TestMetadataStore {
     assertEquals("b-value2", getString(result.get(0).getValue()));

     // If filter is null, no effect.
-    result = store.getRangeKVs(null, 1, null);
+    result = store.getRangeKVs(null, 1, (MetadataKeyFilter[]) null);
     assertEquals(1, result.size());
     assertEquals("a0", getString(result.get(0).getKey()));
   }
@@ -461,7 +455,7 @@ public class TestMetadataStore {
     // If startKey is invalid, the returned list should be empty.
     List<Map.Entry<byte[], byte[]>> kvs =
         store.getRangeKVs(getBytes("unknownKey"), MAX_GETRANGE_LENGTH);
-    assertEquals(kvs.size(), 0);
+    assertEquals(0, kvs.size());
   }

   @Test
@@ -504,7 +498,7 @@ public class TestMetadataStore {
         .build();

     List<String> expectedResult = Lists.newArrayList();
-    for (int i = 0; i<10; i++) {
+    for (int i = 0; i < 10; i++) {
       dbStore.put(getBytes("batch-" + i), getBytes("batch-value-" + i));
       expectedResult.add("batch-" + i);
     }
@@ -541,43 +535,44 @@ public class TestMetadataStore {
       new KeyPrefixFilter().addFilter("b0", true).addFilter("b");
     } catch (IllegalArgumentException e) {
       exception = e;
-      assertTrue(exception.getMessage().contains("KeyPrefix: b already " +
-          "rejected"));
     }
+    assertTrue(exception.getMessage().contains("KeyPrefix: b already " +
+        "rejected"));

     try {
       new KeyPrefixFilter().addFilter("b0").addFilter("b", true);
     } catch (IllegalArgumentException e) {
       exception = e;
-      assertTrue(exception.getMessage().contains("KeyPrefix: b already " +
-          "accepted"));
     }
+    assertTrue(exception.getMessage().contains("KeyPrefix: b already " +
+        "accepted"));

     try {
       new KeyPrefixFilter().addFilter("b", true).addFilter("b0");
     } catch (IllegalArgumentException e) {
       exception = e;
-      assertTrue(exception.getMessage().contains("KeyPrefix: b0 already " +
-          "rejected"));
     }
+    assertTrue(exception.getMessage().contains("KeyPrefix: b0 already " +
+        "rejected"));

     try {
       new KeyPrefixFilter().addFilter("b").addFilter("b0", true);
     } catch (IllegalArgumentException e) {
       exception = e;
-      assertTrue(exception.getMessage().contains("KeyPrefix: b0 already " +
-          "accepted"));
     }
+    assertTrue(exception.getMessage().contains("KeyPrefix: b0 already " +
+        "accepted"));

     MetadataKeyFilter filter1 = new KeyPrefixFilter(true)
-            .addFilter("a0")
-            .addFilter("a1")
-            .addFilter("b", true);
+        .addFilter("a0")
+        .addFilter("a1")
+        .addFilter("b", true);
     result = store.getRangeKVs(null, 100, filter1);
     assertEquals(2, result.size());
-    assertTrue(result.stream().anyMatch(entry -> new String(entry.getKey())
+    assertTrue(result.stream().anyMatch(entry -> new String(entry.getKey(),
+        UTF_8)
         .startsWith("a0")) && result.stream().anyMatch(entry -> new String(
-        entry.getKey()).startsWith("a1")));
+        entry.getKey(), UTF_8).startsWith("a1")));

     filter1 = new KeyPrefixFilter(true).addFilter("b", true);
     result = store.getRangeKVs(null, 100, filter1);
@@ -586,7 +581,8 @@ public class TestMetadataStore {
     filter1 = new KeyPrefixFilter().addFilter("b", true);
     result = store.getRangeKVs(null, 100, filter1);
     assertEquals(10, result.size());
-    assertTrue(result.stream().allMatch(entry -> new String(entry.getKey())
+    assertTrue(result.stream().allMatch(entry -> new String(entry.getKey(),
+        UTF_8)
         .startsWith("a")));
   }
 }
@@ -29,12 +29,14 @@ import javax.management.MBeanServer;
 import java.io.File;
 import java.lang.management.ManagementFactory;

+import static java.nio.charset.StandardCharsets.UTF_8;
+
 /**
  * Test the JMX interface for the rocksdb metastore implementation.
  */
 public class TestRocksDBStoreMBean {

-  Configuration conf;
+  private Configuration conf;

   @Before
   public void init() throws Exception {
@@ -57,7 +59,7 @@ public class TestRocksDBStoreMBean {
         .setCreateIfMissing(true).setDbFile(testDir).build();

     for (int i = 0; i < 10; i++) {
-      metadataStore.put("key".getBytes(), "value".getBytes());
+      metadataStore.put("key".getBytes(UTF_8), "value".getBytes(UTF_8));
     }

     MBeanServer platformMBeanServer =
@@ -131,7 +131,7 @@ public class TestDBStoreBuilder {
           RandomStringUtils.random(9).getBytes(StandardCharsets.UTF_8);
       firstTable.put(key, value);
       byte[] temp = firstTable.get(key);
-      Arrays.equals(value, temp);
+      Assert.assertTrue(Arrays.equals(value, temp));
     }

     try (Table secondTable = dbStore.getTable("Second")) {
@@ -161,7 +161,7 @@ public class TestDBStoreBuilder {
           RandomStringUtils.random(9).getBytes(StandardCharsets.UTF_8);
       firstTable.put(key, value);
       byte[] temp = firstTable.get(key);
-      Arrays.equals(value, temp);
+      Assert.assertTrue(Arrays.equals(value, temp));
     }

     try (Table secondTable = dbStore.getTable("Second")) {
@@ -35,9 +35,9 @@ import org.rocksdb.StatsLevel;
 import org.rocksdb.WriteBatch;

 import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashSet;
-import java.util.LinkedList;
 import java.util.List;
 import java.util.Set;
@@ -112,8 +112,8 @@ public class TestRDBTableStore {

   @Test
   public void delete() throws Exception {
-    List<byte[]> deletedKeys = new LinkedList<>();
-    List<byte[]> validKeys = new LinkedList<>();
+    List<byte[]> deletedKeys = new ArrayList<>();
+    List<byte[]> validKeys = new ArrayList<>();
     byte[] value =
         RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8);
     for (int x = 0; x < 100; x++) {
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+/**
+ * DB test Utils.
+ */
+package org.apache.hadoop.utils;
@@ -141,8 +141,8 @@ public class BlockManagerImpl implements BlockManager {
     long bcsId = blockID.getBlockCommitSequenceId();
     Preconditions.checkNotNull(blockID,
         "BlockID cannot be null in GetBlock request");
-    Preconditions.checkNotNull(blockID.getContainerID(),
-        "Container name cannot be null");
+    Preconditions.checkNotNull(container,
+        "Container cannot be null");

     KeyValueContainerData containerData = (KeyValueContainerData) container
         .getContainerData();
@@ -114,7 +114,7 @@ public class GrpcReplicationClient {
     this.containerId = containerId;
     this.outputPath = outputPath;
     try {
-      outputPath = Preconditions.checkNotNull(outputPath);
+      Preconditions.checkNotNull(outputPath, "Output path cannot be null");
       Path parentPath = Preconditions.checkNotNull(outputPath.getParent());
       Files.createDirectories(parentPath);
       stream =
@@ -71,8 +71,8 @@ public class ScmTestMock implements StorageContainerDatanodeProtocol {
       new HashMap<>();
   private Map<DatanodeDetails, NodeReportProto> nodeReports = new HashMap<>();
   private AtomicInteger commandStatusReport = new AtomicInteger(0);
-  private List<CommandStatus> cmdStatusList = new LinkedList<>();
-  private List<SCMCommandProto> scmCommandRequests = new LinkedList<>();
+  private List<CommandStatus> cmdStatusList = new ArrayList<>();
+  private List<SCMCommandProto> scmCommandRequests = new ArrayList<>();
   /**
    * Returns the number of heartbeats made to this class.
    *
@@ -50,7 +50,7 @@ import java.io.File;
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.nio.file.Paths;
-import java.util.LinkedList;
+import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
 import java.util.UUID;
@@ -86,9 +86,9 @@ public class TestDatanodeStateMachine {
     conf.setTimeDuration(OZONE_SCM_HEARTBEAT_RPC_TIMEOUT, 500,
         TimeUnit.MILLISECONDS);
     conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, true);
-    serverAddresses = new LinkedList<>();
-    scmServers = new LinkedList<>();
-    mockServers = new LinkedList<>();
+    serverAddresses = new ArrayList<>();
+    scmServers = new ArrayList<>();
+    mockServers = new ArrayList<>();
     for (int x = 0; x < scmServerCount; x++) {
       int port = SCMTestUtils.getReuseableAddress().getPort();
       String address = "127.0.0.1";
@@ -361,8 +361,8 @@ public class TestDatanodeStateMachine {
   @Test
   public void testDatanodeStateMachineWithInvalidConfiguration()
       throws Exception {
-    LinkedList<Map.Entry<String, String>> confList =
-        new LinkedList<Map.Entry<String, String>>();
+    List<Map.Entry<String, String>> confList =
+        new ArrayList<>();
     confList.add(Maps.immutableEntry(ScmConfigKeys.OZONE_SCM_NAMES, ""));

     // Invalid ozone.scm.names
@@ -49,6 +49,7 @@ import java.io.File;
 import java.io.IOException;
 import java.util.UUID;

+import static java.nio.charset.StandardCharsets.UTF_8;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
@@ -160,7 +161,7 @@ public class TestHddsDispatcher {
       String datanodeId, Long containerId, Long localId) {

     ByteString data = ByteString.copyFrom(
-        UUID.randomUUID().toString().getBytes());
+        UUID.randomUUID().toString().getBytes(UTF_8));
     ContainerProtos.ChunkInfo chunk = ContainerProtos.ChunkInfo
         .newBuilder()
         .setChunkName(
@@ -61,7 +61,7 @@ public class TestReportPublisher {
   /**
    * Dummy report publisher for testing.
    */
-  private class DummyReportPublisher extends ReportPublisher {
+  private static class DummyReportPublisher extends ReportPublisher {

     private final long frequency;
     private int getReportCount = 0;
@@ -1,19 +1,18 @@
 /**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
 * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
 */

 package org.apache.hadoop.ozone.container.common.volume;
@@ -23,7 +22,6 @@ import org.apache.hadoop.fs.GetSpaceUsed;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.ozone.container.common.helpers.DatanodeVersionFile;
 import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil;
-import static org.junit.Assert.*;
 import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
@@ -35,19 +33,22 @@ import java.io.IOException;
 import java.util.Properties;
 import java.util.UUID;

+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
 /**
  * Unit tests for {@link HddsVolume}.
  */
 public class TestHddsVolume {

-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
-
   private static final String DATANODE_UUID = UUID.randomUUID().toString();
   private static final String CLUSTER_ID = UUID.randomUUID().toString();
   private static final Configuration CONF = new Configuration();
   private static final String DU_CACHE_FILE = "scmUsed";

+  @Rule
+  public TemporaryFolder folder = new TemporaryFolder();
   private File rootDir;
   private HddsVolume volume;
   private File versionFile;
@@ -69,9 +70,9 @@ public class TestHddsVolume {
     // clusterID is not specified and the version file should not be written
     // to disk.
     assertTrue(volume.getClusterID() == null);
-    assertEquals(volume.getStorageType(), StorageType.DEFAULT);
-    assertEquals(volume.getStorageState(),
-        HddsVolume.VolumeState.NOT_FORMATTED);
+    assertEquals(StorageType.DEFAULT, volume.getStorageType());
+    assertEquals(HddsVolume.VolumeState.NOT_FORMATTED,
+        volume.getStorageState());
     assertFalse("Version file should not be created when clusterID is not " +
         "known.", versionFile.exists());
@@ -84,7 +85,7 @@ public class TestHddsVolume {
     assertTrue("Volume format should create Version file",
         versionFile.exists());
     assertEquals(volume.getClusterID(), CLUSTER_ID);
-    assertEquals(volume.getStorageState(), HddsVolume.VolumeState.NORMAL);
+    assertEquals(HddsVolume.VolumeState.NORMAL, volume.getStorageState());
   }

   @Test
@@ -111,7 +112,7 @@ public class TestHddsVolume {
   }

   @Test
-  public void testShutdown() throws Exception{
+  public void testShutdown() throws Exception {
     // Return dummy value > 0 for scmUsage so that scm cache file is written
     // during shutdown.
     GetSpaceUsed scmUsageMock = Mockito.mock(GetSpaceUsed.class);
@@ -125,8 +126,7 @@ public class TestHddsVolume {
     volume.shutdown();

     // Volume state should be "NON_EXISTENT" when volume is shutdown.
-    assertEquals(volume.getStorageState(),
-        HddsVolume.VolumeState.NON_EXISTENT);
+    assertEquals(HddsVolume.VolumeState.NON_EXISTENT, volume.getStorageState());

     // Volume should save scmUsed cache file once volume is shutdown
     File scmUsedFile = new File(folder.getRoot(), DU_CACHE_FILE);
@@ -139,7 +139,7 @@ public class TestHddsVolume {
       // as usage thread is shutdown.
       volume.getAvailable();
       fail("HddsVolume#shutdown test failed");
-    } catch (Exception ex){
+    } catch (Exception ex) {
       assertTrue(ex instanceof IOException);
       assertTrue(ex.getMessage().contains(
           "Volume Usage thread is not running."));
@@ -22,7 +22,6 @@ import java.io.IOException;
 import org.apache.commons.io.FileUtils;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -69,7 +68,7 @@ public class TestVolumeSet {
   }

   @Rule
-  public Timeout testTimeout = new Timeout(300_000);
+  public Timeout testTimeout = new Timeout(300000);

   @Before
   public void setup() throws Exception {
@@ -153,8 +152,7 @@ public class TestVolumeSet {
     assertTrue(volumeSet.getFailedVolumesList().get(0).isFailed());

     // Failed volume should not exist in VolumeMap
-    Path volume1Path = new Path(volume1);
-    assertFalse(volumeSet.getVolumeMap().containsKey(volume1Path));
+    assertFalse(volumeSet.getVolumeMap().containsKey(volume1));
   }

   @Test
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+/**
+ * Tests for Container Volumes.
+ */
+package org.apache.hadoop.ozone.container.common.volume;
@@ -25,10 +25,9 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
 import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
-import org.apache.hadoop.ozone.container.common.volume
-    .RoundRobinVolumeChoosingPolicy;
-import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
+import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy;
+import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
 import org.apache.hadoop.ozone.container.keyvalue.impl.BlockManagerImpl;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Before;
@@ -37,12 +36,14 @@ import org.junit.Test;
 import org.junit.rules.TemporaryFolder;
 import org.mockito.Mockito;

 import java.io.IOException;
-import java.util.LinkedList;
+import java.util.ArrayList;
 import java.util.List;
 import java.util.UUID;

-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 import static org.mockito.ArgumentMatchers.anyList;
 import static org.mockito.ArgumentMatchers.anyLong;
 import static org.mockito.Mockito.mock;
@@ -52,6 +53,8 @@ import static org.mockito.Mockito.mock;
  */
 public class TestBlockManagerImpl {

+  @Rule
+  public TemporaryFolder folder = new TemporaryFolder();
   private OzoneConfiguration config;
   private String scmId = UUID.randomUUID().toString();
   private VolumeSet volumeSet;
@@ -62,10 +65,6 @@ public class TestBlockManagerImpl {
   private BlockManagerImpl blockManager;
   private BlockID blockID;

-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
-
-
   @Before
   public void setUp() throws Exception {
     config = new OzoneConfiguration();
@@ -93,7 +92,7 @@ public class TestBlockManagerImpl {
     blockData = new BlockData(blockID);
     blockData.addMetadata("VOLUME", "ozone");
     blockData.addMetadata("OWNER", "hdfs");
-    List<ContainerProtos.ChunkInfo> chunkList = new LinkedList<>();
+    List<ContainerProtos.ChunkInfo> chunkList = new ArrayList<>();
     ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", blockID
         .getLocalID(), 0), 0, 1024);
     chunkList.add(info.getProtoBufMessage());
@@ -124,88 +123,74 @@ public class TestBlockManagerImpl {

   }

   @Test
   public void testDeleteBlock() throws Exception {
+    assertEquals(0,
+        keyValueContainer.getContainerData().getKeyCount());
+    //Put Block
+    blockManager.putBlock(keyValueContainer, blockData);
+    assertEquals(1,
+        keyValueContainer.getContainerData().getKeyCount());
+    //Delete Block
+    blockManager.deleteBlock(keyValueContainer, blockID);
+    assertEquals(0,
+        keyValueContainer.getContainerData().getKeyCount());
     try {
-      assertEquals(0,
-          keyValueContainer.getContainerData().getKeyCount());
-      //Put Block
-      blockManager.putBlock(keyValueContainer, blockData);
-      assertEquals(1,
-          keyValueContainer.getContainerData().getKeyCount());
-      //Delete Block
-      blockManager.deleteBlock(keyValueContainer, blockID);
-      assertEquals(0,
-          keyValueContainer.getContainerData().getKeyCount());
-      try {
-        blockManager.getBlock(keyValueContainer, blockID);
-        fail("testDeleteBlock");
-      } catch (StorageContainerException ex) {
-        GenericTestUtils.assertExceptionContains(
-            "Unable to find the block", ex);
-      }
-    } catch (IOException ex) {
-      fail("testDeleteBlock failed");
+      blockManager.getBlock(keyValueContainer, blockID);
+      fail("testDeleteBlock");
+    } catch (StorageContainerException ex) {
+      GenericTestUtils.assertExceptionContains(
+          "Unable to find the block", ex);
     }
   }

   @Test
   public void testListBlock() throws Exception {
-    try {
-      blockManager.putBlock(keyValueContainer, blockData);
-      List<BlockData> listBlockData = blockManager.listBlock(
-          keyValueContainer, 1, 10);
-      assertNotNull(listBlockData);
-      assertTrue(listBlockData.size() == 1);
-
-      for (long i = 2; i <= 10; i++) {
-        blockID = new BlockID(1L, i);
-        blockData = new BlockData(blockID);
-        blockData.addMetadata("VOLUME", "ozone");
-        blockData.addMetadata("OWNER", "hdfs");
-        List<ContainerProtos.ChunkInfo> chunkList = new LinkedList<>();
-        ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", blockID
-            .getLocalID(), 0), 0, 1024);
-        chunkList.add(info.getProtoBufMessage());
-        blockData.setChunks(chunkList);
-        blockManager.putBlock(keyValueContainer, blockData);
-      }
-
-      listBlockData = blockManager.listBlock(
-          keyValueContainer, 1, 10);
-      assertNotNull(listBlockData);
-      assertTrue(listBlockData.size() == 10);
-
-    } catch (IOException ex) {
-      fail("testListBlock failed");
-    }
+    blockManager.putBlock(keyValueContainer, blockData);
+    List<BlockData> listBlockData = blockManager.listBlock(
+        keyValueContainer, 1, 10);
+    assertNotNull(listBlockData);
+    assertTrue(listBlockData.size() == 1);
+
+    for (long i = 2; i <= 10; i++) {
+      blockID = new BlockID(1L, i);
+      blockData = new BlockData(blockID);
+      blockData.addMetadata("VOLUME", "ozone");
+      blockData.addMetadata("OWNER", "hdfs");
+      List<ContainerProtos.ChunkInfo> chunkList = new ArrayList<>();
+      ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", blockID
+          .getLocalID(), 0), 0, 1024);
+      chunkList.add(info.getProtoBufMessage());
+      blockData.setChunks(chunkList);
+      blockManager.putBlock(keyValueContainer, blockData);
+    }
+
+    listBlockData = blockManager.listBlock(
+        keyValueContainer, 1, 10);
+    assertNotNull(listBlockData);
+    assertTrue(listBlockData.size() == 10);
   }

   @Test
   public void testGetNoSuchBlock() throws Exception {
+    assertEquals(0,
+        keyValueContainer.getContainerData().getKeyCount());
+    //Put Block
+    blockManager.putBlock(keyValueContainer, blockData);
+    assertEquals(1,
+        keyValueContainer.getContainerData().getKeyCount());
+    //Delete Block
+    blockManager.deleteBlock(keyValueContainer, blockID);
+    assertEquals(0,
+        keyValueContainer.getContainerData().getKeyCount());
     try {
-      assertEquals(0,
-          keyValueContainer.getContainerData().getKeyCount());
-      //Put Block
-      blockManager.putBlock(keyValueContainer, blockData);
-      assertEquals(1,
-          keyValueContainer.getContainerData().getKeyCount());
-      //Delete Block
-      blockManager.deleteBlock(keyValueContainer, blockID);
-      assertEquals(0,
-          keyValueContainer.getContainerData().getKeyCount());
-      try {
-        //Since the block has been deleted, we should not be able to find it
-        blockManager.getBlock(keyValueContainer, blockID);
-        fail("testGetNoSuchBlock failed");
-      } catch (StorageContainerException ex) {
-        GenericTestUtils.assertExceptionContains(
-            "Unable to find the block", ex);
-        assertEquals(ContainerProtos.Result.NO_SUCH_BLOCK, ex.getResult());
-      }
-    } catch (IOException ex) {
+      //Since the block has been deleted, we should not be able to find it
+      blockManager.getBlock(keyValueContainer, blockID);
+      fail("testGetNoSuchBlock failed");
+    } catch (StorageContainerException ex) {
+      GenericTestUtils.assertExceptionContains(
+          "Unable to find the block", ex);
+      assertEquals(ContainerProtos.Result.NO_SUCH_BLOCK, ex.getResult());
     }
   }
 }
@@ -43,6 +43,7 @@ import java.nio.ByteBuffer;
 import java.util.Arrays;
 import java.util.UUID;

+import static java.nio.charset.StandardCharsets.UTF_8;
 import static org.junit.Assert.*;
 import static org.mockito.ArgumentMatchers.anyList;
 import static org.mockito.ArgumentMatchers.anyLong;
@@ -88,7 +89,7 @@ public class TestChunkManagerImpl {

     keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);

-    data = "testing write chunks".getBytes();
+    data = "testing write chunks".getBytes(UTF_8);
     // Creating BlockData
     blockID = new BlockID(1L, 1L);
     chunkInfo = new ChunkInfo(String.format("%d.data.%d", blockID
@@ -42,9 +42,9 @@ import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;

 import java.io.File;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
-import java.util.LinkedList;
 import java.util.List;
 import java.util.NoSuchElementException;
 import java.util.UUID;
@@ -252,7 +252,7 @@ public class TestKeyValueBlockIterator {
         .randomUUID().toString());
     MetadataStore metadataStore = BlockUtils.getDB(containerData, conf);

-    List<ContainerProtos.ChunkInfo> chunkList = new LinkedList<>();
+    List<ContainerProtos.ChunkInfo> chunkList = new ArrayList<>();
     ChunkInfo info = new ChunkInfo("chunkfile", 0, 1024);
     chunkList.add(info.getProtoBufMessage());
@@ -51,12 +51,13 @@ import java.io.File;
 import java.io.FileInputStream;
 import java.io.FileOutputStream;
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.List;
-import java.util.LinkedList;
 import java.util.UUID;

+import static java.nio.charset.StandardCharsets.UTF_8;
 import static org.apache.ratis.util.Preconditions.assertTrue;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
@@ -135,7 +136,7 @@ public class TestKeyValueContainer {
     BlockData blockData = new BlockData(blockID);
     blockData.addMetadata("VOLUME", "ozone");
     blockData.addMetadata("OWNER", "hdfs");
-    List<ContainerProtos.ChunkInfo> chunkList = new LinkedList<>();
+    List<ContainerProtos.ChunkInfo> chunkList = new ArrayList<>();
     ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", blockID
         .getLocalID(), 0), 0, 1024);
     chunkList.add(info.getProtoBufMessage());
@@ -163,8 +164,6 @@ public class TestKeyValueContainer {
     // Check whether containerMetaDataPath and chunksPath exists or not.
     assertTrue(containerMetaDataPath != null);
     assertTrue(chunksPath != null);
-    File containerMetaDataLoc = new File(containerMetaDataPath);
-
     //Check whether container file and container db file exists or not.
     assertTrue(keyValueContainer.getContainerFile().exists(),
         ".Container File does not exist");
@@ -190,7 +189,7 @@ public class TestKeyValueContainer {
     //write one few keys to check the key count after import
     MetadataStore metadataStore = BlockUtils.getDB(keyValueContainerData, conf);
     for (int i = 0; i < numberOfKeysToWrite; i++) {
-      metadataStore.put(("test" + i).getBytes(), "test".getBytes());
+      metadataStore.put(("test" + i).getBytes(UTF_8), "test".getBytes(UTF_8));
     }
     metadataStore.close();
@@ -247,7 +246,7 @@ public class TestKeyValueContainer {
         container.importContainerData(fis, packer);
       }
       fail("Container is imported twice. Previous files are overwritten");
-    } catch (Exception ex) {
+    } catch (IOException ex) {
       //all good
     }
@@ -226,9 +226,10 @@ public class TestKeyValueHandler {
     VolumeSet volumeSet = new VolumeSet(UUID.randomUUID().toString(), conf);
     KeyValueHandler keyValueHandler = new KeyValueHandler(conf, cset,
         volumeSet, metrics);
-    assertEquals(keyValueHandler.getVolumeChoosingPolicyForTesting()
-        .getClass().getName(), "org.apache.hadoop.ozone.container.common" +
-        ".volume.RoundRobinVolumeChoosingPolicy");
+    assertEquals("org.apache.hadoop.ozone.container.common" +
+        ".volume.RoundRobinVolumeChoosingPolicy",
+        keyValueHandler.getVolumeChoosingPolicyForTesting()
+            .getClass().getName());

     //Set a class which is not of sub class of VolumeChoosingPolicy
     conf.set(HDDS_DATANODE_VOLUME_CHOOSING_POLICY,
@@ -23,7 +23,6 @@ import java.io.FileOutputStream;
 import java.io.FileWriter;
 import java.io.IOException;
 import java.nio.charset.Charset;
-import java.nio.charset.StandardCharsets;
 import java.nio.file.Files;
 import java.nio.file.Path;
 import java.nio.file.Paths;
@@ -45,6 +44,8 @@ import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;

+import static java.nio.charset.StandardCharsets.UTF_8;
+
 /**
  * Test the tar/untar for a given container.
  */
@@ -161,7 +162,7 @@ public class TestTarContainerPacker {
     //read the container descriptor only
     try (FileInputStream input = new FileInputStream(targetFile.toFile())) {
       String containerYaml = new String(packer.unpackContainerDescriptor(input),
-          Charset.forName(StandardCharsets.UTF_8.name()));
+          Charset.forName(UTF_8.name()));
       Assert.assertEquals(TEST_DESCRIPTOR_FILE_CONTENT, containerYaml);
     }
@@ -177,7 +178,7 @@ public class TestTarContainerPacker {
     try (FileInputStream input = new FileInputStream(targetFile.toFile())) {
       descriptor =
           new String(packer.unpackContainerData(destinationContainer, input),
-              Charset.forName(StandardCharsets.UTF_8.name()));
+              Charset.forName(UTF_8.name()));
     }

     assertExampleMetadataDbIsGood(
@@ -204,7 +205,7 @@ public class TestTarContainerPacker {

     try (FileInputStream testFile = new FileInputStream(dbFile.toFile())) {
       List<String> strings = IOUtils
-          .readLines(testFile, Charset.forName(StandardCharsets.UTF_8.name()));
+          .readLines(testFile, Charset.forName(UTF_8.name()));
       Assert.assertEquals(1, strings.size());
       Assert.assertEquals(TEST_DB_FILE_CONTENT, strings.get(0));
     }
@@ -222,7 +223,7 @@ public class TestTarContainerPacker {

     try (FileInputStream testFile = new FileInputStream(chunkFile.toFile())) {
       List<String> strings = IOUtils
-          .readLines(testFile, Charset.forName(StandardCharsets.UTF_8.name()));
+          .readLines(testFile, Charset.forName(UTF_8.name()));
       Assert.assertEquals(1, strings.size());
       Assert.assertEquals(TEST_CHUNK_FILE_CONTENT, strings.get(0));
     }
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+/**
+ * Chunk Manager Checks.
+ */
+package org.apache.hadoop.ozone.container.keyvalue;
@@ -139,6 +139,7 @@ public class EventQueue implements EventPublisher, AutoCloseable {
    * @throws IllegalArgumentException If there is no EventHandler for
    *                                  the specific event.
    */
+  @Override
   public <PAYLOAD, EVENT_TYPE extends Event<PAYLOAD>> void fireEvent(
       EVENT_TYPE event, PAYLOAD payload) {
@@ -219,7 +220,9 @@ public class EventQueue implements EventPublisher, AutoCloseable {
       try {
         Thread.sleep(100);
       } catch (InterruptedException e) {
-        e.printStackTrace();
+        LOG.warn("Interrupted exception while sleeping.", e);
+        // We ignore this exception for time being. Review? should we
+        // propogate it back to caller?
       }

       if (Time.now() > currentTime + timeout) {
@@ -229,7 +232,7 @@ public class EventQueue implements EventPublisher, AutoCloseable {
       }
     }
   }

+  @Override
   public void close() {

     isRunning = false;
@@ -1,24 +1,21 @@
 /**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
  * <p>
  * http://www.apache.org/licenses/LICENSE-2.0
  * <p>
  * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
 */
 package org.apache.hadoop.hdds.server.events;

-import java.util.List;
-import java.util.Objects;
 import org.apache.hadoop.hdds.HddsIdFactory;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.ozone.lease.LeaseManager;
@@ -27,6 +24,9 @@ import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;

+import java.util.List;
+import java.util.Objects;
+
 /**
  * Test the basic functionality of event watcher.
  */
@@ -41,7 +41,7 @@ public class TestEventWatcher {
   private static final TypedEvent<ReplicationCompletedEvent>
       REPLICATION_COMPLETED = new TypedEvent<>(ReplicationCompletedEvent.class);

-  LeaseManager<Long> leaseManager;
+  private LeaseManager<Long> leaseManager;

   @Before
   public void startLeaseManager() {
@@ -56,7 +56,6 @@ public class TestEventWatcher {
     DefaultMetricsSystem.shutdown();
   }

-
   @Test
   public void testEventHandling() throws InterruptedException {
     EventQueue queue = new EventQueue();
@@ -180,7 +179,7 @@ public class TestEventWatcher {

     queue.fireEvent(REPLICATION_COMPLETED, event1Completed);

-    Thread.sleep(2200l);
+    Thread.sleep(2200L);

     //until now: 3 in-progress activities are tracked with three
     // UnderreplicatedEvents. The first one is completed, the remaining two
@@ -201,27 +200,29 @@ public class TestEventWatcher {
   }

   private EventWatcher<UnderreplicatedEvent, ReplicationCompletedEvent>
-      createEventWatcher() {
+        createEventWatcher() {
     return new CommandWatcherExample(WATCH_UNDER_REPLICATED,
         REPLICATION_COMPLETED, leaseManager);
   }

-  private class CommandWatcherExample
+  private static class CommandWatcherExample
       extends EventWatcher<UnderreplicatedEvent, ReplicationCompletedEvent> {

-    public CommandWatcherExample(Event<UnderreplicatedEvent> startEvent,
+    CommandWatcherExample(Event<UnderreplicatedEvent> startEvent,
         Event<ReplicationCompletedEvent> completionEvent,
         LeaseManager<Long> leaseManager) {
       super("TestCommandWatcher", startEvent, completionEvent, leaseManager);
     }

     @Override
-    protected void onTimeout(EventPublisher publisher, UnderreplicatedEvent payload) {
+    protected void onTimeout(EventPublisher publisher,
+        UnderreplicatedEvent payload) {
       publisher.fireEvent(UNDER_REPLICATED, payload);
     }

     @Override
-    protected void onFinished(EventPublisher publisher, UnderreplicatedEvent payload) {
+    protected void onFinished(EventPublisher publisher,
+        UnderreplicatedEvent payload) {
       //Good job. We did it.
     }
|
|||
|
||||
private final String datanodeId;
|
||||
|
||||
public ReplicationCompletedEvent(long id, String containerId,
|
||||
ReplicationCompletedEvent(long id, String containerId,
|
||||
String datanodeId) {
|
||||
this.id = id;
|
||||
this.containerId = containerId;
|
||||
this.datanodeId = datanodeId;
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getId() {
|
||||
return id;
|
||||
}
|
||||
|
@@ -279,11 +281,12 @@ public class TestEventWatcher {

     private final String containerId;

-    public UnderreplicatedEvent(long id, String containerId) {
+    UnderreplicatedEvent(long id, String containerId) {
       this.containerId = containerId;
       this.id = id;
     }

     @Override
     public long getId() {
       return id;
     }
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+/**
+ * Tests for Event Watcher.
+ */
+package org.apache.hadoop.hdds.server.events;
@@ -19,9 +19,12 @@ package org.apache.hadoop.hdds.scm.block;

 import org.apache.hadoop.hdds.protocol.DatanodeDetails;

-import java.util.LinkedList;
+import java.util.ArrayList;
 import java.util.List;

+/**
+ * Pending Deletes in the block space.
+ */
 public class PendingDeleteStatusList {

   private List<PendingDeleteStatus> pendingDeleteStatuses;
@@ -29,7 +32,7 @@ public class PendingDeleteStatusList {

   public PendingDeleteStatusList(DatanodeDetails datanodeDetails) {
     this.datanodeDetails = datanodeDetails;
-    pendingDeleteStatuses = new LinkedList<>();
+    pendingDeleteStatuses = new ArrayList<>();
   }

   public void addPendingDeleteStatus(long dnDeleteTransactionId,
@@ -39,6 +42,9 @@ public class PendingDeleteStatusList {
         containerId));
   }

+  /**
+   * Status of pending deletes.
+   */
   public static class PendingDeleteStatus {
     private long dnDeleteTransactionId;
     private long scmDeleteTransactionId;
@@ -44,6 +44,7 @@ public class ChillModePrecheck implements Precheck<ScmOps> {
     }
   }

+  @Override
   public boolean check(ScmOps op) throws SCMException {
     if (inChillMode.get() && ChillModeRestrictedOps
         .isRestrictedInChillMode(op)) {
@@ -134,6 +134,16 @@ public final class ContainerReplica implements Comparable<ContainerReplica> {
     return new ContainerReplicaBuilder();
   }

+  @Override
+  public String toString() {
+    return "ContainerReplica{" +
+        "containerID=" + containerID +
+        ", datanodeDetails=" + datanodeDetails +
+        ", placeOfBirth=" + placeOfBirth +
+        ", sequenceId=" + sequenceId +
+        '}';
+  }
+
   /**
    * Used for building ContainerReplica instance.
    */
@ -148,12 +158,12 @@ public final class ContainerReplica implements Comparable<ContainerReplica> {
|
|||
/**
|
||||
* Set Container Id.
|
||||
*
|
||||
* @param containerId ContainerID
|
||||
* @param cID ContainerID
|
||||
* @return ContainerReplicaBuilder
|
||||
*/
|
||||
public ContainerReplicaBuilder setContainerID(
|
||||
final ContainerID containerId) {
|
||||
containerID = containerId;
|
||||
final ContainerID cID) {
|
||||
this.containerID = cID;
|
||||
return this;
|
||||
}
|
||||
|
||||
|
|
|
@ -27,7 +27,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
|
|||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.util.LinkedList;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.Random;
|
||||
import java.util.stream.Collectors;
|
||||
|
@ -102,7 +102,7 @@ public abstract class SCMCommonPolicy implements ContainerPlacementPolicy {
|
|||
* @return list of datanodes chosen.
|
||||
* @throws SCMException SCM exception.
|
||||
*/
|
||||
|
||||
@Override
|
||||
public List<DatanodeDetails> chooseDatanodes(
|
||||
List<DatanodeDetails> excludedNodes,
|
||||
int nodesRequired, final long sizeRequired) throws SCMException {
|
||||
|
@ -167,7 +167,7 @@ public abstract class SCMCommonPolicy implements ContainerPlacementPolicy {
|
|||
public List<DatanodeDetails> getResultSet(
|
||||
int nodesRequired, List<DatanodeDetails> healthyNodes)
|
||||
throws SCMException {
|
||||
List<DatanodeDetails> results = new LinkedList<>();
|
||||
List<DatanodeDetails> results = new ArrayList<>();
|
||||
for (int x = 0; x < nodesRequired; x++) {
|
||||
// invoke the choose function defined in the derived classes.
|
||||
DatanodeDetails nodeId = chooseNode(healthyNodes);
|
||||
|
|
|
@ -83,6 +83,7 @@ public final class SCMContainerPlacementRandom extends SCMCommonPolicy
|
|||
* @param healthyNodes - all healthy datanodes.
|
||||
* @return one randomly chosen datanode that from two randomly chosen datanode
|
||||
*/
|
||||
@Override
|
||||
public DatanodeDetails chooseNode(final List<DatanodeDetails> healthyNodes) {
|
||||
DatanodeDetails selectedNode =
|
||||
healthyNodes.get(getRand().nextInt(healthyNodes.size()));
|
||||
|
|
|
@ -23,7 +23,7 @@ import org.apache.hadoop.hdds.scm.exceptions.SCMException;
|
|||
* DatanodeMetric acts as the basis for all the metric that is used in
|
||||
* comparing 2 datanodes.
|
||||
*/
|
||||
public interface DatanodeMetric<T, S> extends Comparable<T> {
|
||||
public interface DatanodeMetric<T, S> {
|
||||
|
||||
/**
|
||||
* Some syntactic sugar over Comparable interface. This makes code easier to
|
||||
|
@ -87,5 +87,4 @@ public interface DatanodeMetric<T, S> extends Comparable<T> {
|
|||
*/
|
||||
void subtract(T value);
|
||||
|
||||
|
||||
}
|
||||
|
|
|
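Reviewer note: dropping extends Comparable<T> from DatanodeMetric looks like a response to ErrorProne's ComparableType check, which flags types declared comparable to something other than themselves; such declarations break the compareTo/equals contract and misbehave in sorted collections. That reading is an inference, the commit message does not spell it out. A hypothetical sketch of the safer shape:

// Flagged shape: a metric comparable to Long rather than to itself.
// interface Metric extends Comparable<Long> { ... }

// Safer: expose the ordering as an ordinary method instead.
interface Metric {
  long value();

  // Same information, without claiming Comparable's contract.
  default int compareToValue(long other) {
    return Long.compare(value(), other);
  }
}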
@@ -136,7 +136,6 @@ public class LongMetric implements DatanodeMetric<Long, Long> {
* @throws ClassCastException if the specified object's type prevents it
* from being compared to this object.
*/
@Override
public int compareTo(Long o) {
return Long.compare(this.value, o);
}

@@ -23,7 +23,7 @@ import com.google.common.base.Preconditions;
/**
* SCM Node Metric that is used in the placement classes.
*/
public class SCMNodeMetric implements DatanodeMetric<SCMNodeStat, Long> {
public class SCMNodeMetric implements DatanodeMetric<SCMNodeStat, Long> {
private SCMNodeStat stat;

/**

@@ -191,7 +191,7 @@ public class SCMNodeMetric implements DatanodeMetric<SCMNodeStat, Long> {
* @throws ClassCastException if the specified object's type prevents it
* from being compared to this object.
*/
@Override
//@Override
public int compareTo(SCMNodeStat o) {
if (isEqual(o)) {
return 0;

@@ -52,6 +52,7 @@ public class SCMNodeStat implements NodeStat {
/**
* @return the total configured capacity of the node.
*/
@Override
public LongMetric getCapacity() {
return capacity;
}

@@ -59,6 +60,7 @@ public class SCMNodeStat implements NodeStat {
/**
* @return the total SCM used space on the node.
*/
@Override
public LongMetric getScmUsed() {
return scmUsed;
}

@@ -66,6 +68,7 @@ public class SCMNodeStat implements NodeStat {
/**
* @return the total remaining space available on the node.
*/
@Override
public LongMetric getRemaining() {
return remaining;
}

@@ -77,12 +80,9 @@ public class SCMNodeStat implements NodeStat {
* @param newUsed in bytes
* @param newRemaining in bytes
*/
@Override
@VisibleForTesting
public void set(long newCapacity, long newUsed, long newRemaining) {
Preconditions.checkNotNull(newCapacity, "Capacity cannot be null");
Preconditions.checkNotNull(newUsed, "used cannot be null");
Preconditions.checkNotNull(newRemaining, "remaining cannot be null");

Preconditions.checkArgument(newCapacity >= 0, "Capacity cannot be " +
"negative.");
Preconditions.checkArgument(newUsed >= 0, "used space cannot be " +

@@ -101,6 +101,7 @@ public class SCMNodeStat implements NodeStat {
* @param stat Nodestat.
* @return SCMNodeStat
*/
@Override
public SCMNodeStat add(NodeStat stat) {
this.capacity.set(this.getCapacity().get() + stat.getCapacity().get());
this.scmUsed.set(this.getScmUsed().get() + stat.getScmUsed().get());

@@ -114,6 +115,7 @@ public class SCMNodeStat implements NodeStat {
* @param stat SCMNodeStat.
* @return Modified SCMNodeStat
*/
@Override
public SCMNodeStat subtract(NodeStat stat) {
this.capacity.set(this.getCapacity().get() - stat.getCapacity().get());
this.scmUsed.set(this.getScmUsed().get() - stat.getScmUsed().get());
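Reviewer note: the dropped Preconditions.checkNotNull calls in SCMNodeStat.set were no-ops; the parameters are primitive longs, so autoboxing let the calls compile even though the values can never be null. The checkArgument range checks that remain are the real validation. A hypothetical sketch, assuming Guava on the classpath:

import com.google.common.base.Preconditions;

class Stat {
  private long capacity;

  void set(long newCapacity) {
    // Pointless: the primitive is boxed here and is never null.
    // Preconditions.checkNotNull(newCapacity, "Capacity cannot be null");

    // Useful: validate the value's range instead.
    Preconditions.checkArgument(newCapacity >= 0,
        "Capacity cannot be negative.");
    this.capacity = newCapacity;
  }
}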
@@ -48,12 +48,13 @@ public class ReplicationActivityStatus implements
replicationStatusListener = new ReplicationStatusListener();
chillModeStatusListener = new ChillModeStatusListener();
}

@Override
public boolean isReplicationEnabled() {
return replicationEnabled.get();
}

@VisibleForTesting
@Override
public void setReplicationEnabled(boolean enabled) {
replicationEnabled.set(enabled);
}

@@ -98,6 +98,7 @@ public class ReplicationManager implements Runnable {
threadFactory.newThread(this).start();
}

@Override
public void run() {

while (running) {

@@ -168,6 +169,7 @@ public class ReplicationManager implements Runnable {

} else if (deficit < 0) {
//TODO: too many replicas. Not handled yet.
LOG.debug("Too many replicas is not handled yet.");
}

} catch (Exception e) {

@@ -22,8 +22,8 @@ import com.google.common.base.Preconditions;
import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
import org.apache.hadoop.util.Time;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.UUID;

@@ -39,7 +39,7 @@ import java.util.concurrent.locks.ReentrantLock;
*/
public class CommandQueue {
// This list is used as default return value.
private static final List<SCMCommand> DEFAULT_LIST = new LinkedList<>();
private static final List<SCMCommand> DEFAULT_LIST = new ArrayList<>();
private final Map<UUID, Commands> commandMap;
private final Lock lock;
private long commandsInQueue;

@@ -136,7 +136,7 @@ public class CommandQueue {
* Constructs a Commands class.
*/
Commands() {
commands = new LinkedList<>();
commands = new ArrayList<>();
updateTime = 0;
readTime = 0;
}

@@ -182,7 +182,7 @@ public class CommandQueue {
*/
public List<SCMCommand> getCommands() {
List<SCMCommand> temp = this.commands;
this.commands = new LinkedList<>();
this.commands = new ArrayList<>();
readTime = Time.monotonicNow();
return temp;
}
@@ -144,6 +144,8 @@ public class NodeStateManager implements Runnable, Closeable {
executorService = HadoopExecutors.newScheduledThreadPool(1,
new ThreadFactoryBuilder().setDaemon(true)
.setNameFormat("SCM Heartbeat Processing Thread - %d").build());
//BUGBUG TODO: The return value is ignored; if an exception is thrown in
// the executing function, it will be ignored.
executorService.schedule(this, heartbeatCheckerIntervalMs,
TimeUnit.MILLISECONDS);
}

@@ -331,7 +333,7 @@ public class NodeStateManager implements Runnable, Closeable {
* @return list of nodes
*/
public List<DatanodeDetails> getNodes(NodeState state) {
List<DatanodeDetails> nodes = new LinkedList<>();
List<DatanodeDetails> nodes = new ArrayList<>();
nodeStateMap.getNodes(state).forEach(
uuid -> {
try {

@@ -352,7 +354,7 @@ public class NodeStateManager implements Runnable, Closeable {
* @return all the managed nodes
*/
public List<DatanodeDetails> getAllNodes() {
List<DatanodeDetails> nodes = new LinkedList<>();
List<DatanodeDetails> nodes = new ArrayList<>();
nodeStateMap.getAllNodes().forEach(
uuid -> {
try {

@@ -613,6 +615,8 @@ public class NodeStateManager implements Runnable, Closeable {

if (!Thread.currentThread().isInterrupted() &&
!executorService.isShutdown()) {
//BUGBUG: The returned future needs to be checked here to make sure the
// exceptions are handled correctly.
executorService.schedule(this, heartbeatCheckerIntervalMs,
TimeUnit.MILLISECONDS);
} else {
@@ -59,6 +59,7 @@ public class Node2ContainerMap extends Node2ObjectsMap<ContainerID> {
* @param datanodeID -- Datanode UUID
* @param containerIDs - List of ContainerIDs.
*/
@Override
public void insertNewDatanode(UUID datanodeID, Set<ContainerID> containerIDs)
throws SCMException {
super.insertNewDatanode(datanodeID, containerIDs);

@@ -84,6 +85,7 @@ public class Node2ContainerMap extends Node2ObjectsMap<ContainerID> {
}

@VisibleForTesting
@Override
public int size() {
return dn2ObjectMap.size();
}

@@ -172,7 +172,7 @@ public class NodeStateMap {
public List<UUID> getNodes(NodeState state) {
lock.readLock().lock();
try {
return new LinkedList<>(stateMap.get(state));
return new ArrayList<>(stateMap.get(state));
} finally {
lock.readLock().unlock();
}

@@ -186,7 +186,7 @@ public class NodeStateMap {
public List<UUID> getAllNodes() {
lock.readLock().lock();
try {
return new LinkedList<>(nodeMap.keySet());
return new ArrayList<>(nodeMap.keySet());
} finally {
lock.readLock().unlock();
}

@@ -30,6 +30,7 @@ import org.apache.hadoop.hdds.scm.node.NodeManager;
import org.apache.hadoop.hdds.server.ServerUtils;
import org.apache.hadoop.hdds.server.events.EventPublisher;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.utils.MetadataKeyFilters;
import org.apache.hadoop.utils.MetadataStore;
import org.apache.hadoop.utils.MetadataStoreBuilder;
import org.slf4j.Logger;

@@ -94,7 +95,8 @@ public class SCMPipelineManager implements PipelineManager {
return;
}
List<Map.Entry<byte[], byte[]>> pipelines =
pipelineStore.getSequentialRangeKVs(null, Integer.MAX_VALUE, null);
pipelineStore.getSequentialRangeKVs(null, Integer.MAX_VALUE,
(MetadataKeyFilters.MetadataKeyFilter[])null);

for (Map.Entry<byte[], byte[]> entry : pipelines) {
Pipeline pipeline = Pipeline.getFromProtobuf(
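Reviewer note: the SCMPipelineManager change casts the trailing null to MetadataKeyFilters.MetadataKeyFilter[] because a bare null passed to a varargs parameter is ambiguous: it could mean a null array or a single null element. The explicit cast records which is intended and silences the warning. A self-contained, hypothetical sketch:

class VarargsDemo {
  static int count(String... filters) {
    return filters == null ? 0 : filters.length;
  }

  public static void main(String[] args) {
    // count(null) compiles but draws a compiler warning: which is meant?
    System.out.println(count((String[]) null)); // 0 -- explicitly a null array
    System.out.println(count((String) null));   // 1 -- one null element
  }
}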
@@ -58,7 +58,6 @@ import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;
import java.util.Set;
import java.util.TreeSet;

@@ -354,7 +353,7 @@ public class SCMClientProtocolServer implements
*/
public List<DatanodeDetails> queryNode(HddsProtos.NodeState state) {
Preconditions.checkNotNull(state, "Node Query set cannot be null");
return new LinkedList<>(queryNodeState(state));
return new ArrayList<>(queryNodeState(state));
}

@VisibleForTesting

@@ -89,7 +89,7 @@ import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.LinkedList;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;

@@ -225,7 +225,7 @@ public class SCMDatanodeProtocolServer implements
@Override
public SCMHeartbeatResponseProto sendHeartbeat(
SCMHeartbeatRequestProto heartbeat) throws IOException {
List<SCMCommandProto> cmdResponses = new LinkedList<>();
List<SCMCommandProto> cmdResponses = new ArrayList<>();
for (SCMCommand cmd : heartbeatDispatcher.dispatch(heartbeat)) {
cmdResponses.add(getCommandResponse(cmd));
}

@@ -42,7 +42,7 @@ import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

/**
* Unit tests for {@link HddsServerUtil}
* Unit tests for {@link HddsServerUtil}.
*/
public class TestHddsServerUtils {
public static final Logger LOG = LoggerFactory.getLogger(

@@ -58,6 +58,7 @@ public class TestHddsServerUtils {
* Test getting OZONE_SCM_DATANODE_ADDRESS_KEY with port.
*/
@Test
@SuppressWarnings("StringSplitter")
public void testGetDatanodeAddressWithPort() {
final String scmHost = "host123:100";
final Configuration conf = new OzoneConfiguration();

@@ -78,8 +79,8 @@ public class TestHddsServerUtils {
conf.set(OZONE_SCM_DATANODE_ADDRESS_KEY, scmHost);
final InetSocketAddress address =
HddsServerUtil.getScmAddressForDataNodes(conf);
assertEquals(address.getHostName(), scmHost);
assertEquals(address.getPort(), OZONE_SCM_DATANODE_PORT_DEFAULT);
assertEquals(scmHost, address.getHostName());
assertEquals(OZONE_SCM_DATANODE_PORT_DEFAULT, address.getPort());
}

/**

@@ -93,8 +94,8 @@ public class TestHddsServerUtils {
conf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, scmHost);
final InetSocketAddress address =
HddsServerUtil.getScmAddressForDataNodes(conf);
assertEquals(address.getHostName(), scmHost);
assertEquals(address.getPort(), OZONE_SCM_DATANODE_PORT_DEFAULT);
assertEquals(scmHost, address.getHostName());
assertEquals(OZONE_SCM_DATANODE_PORT_DEFAULT, address.getPort());
}

/**

@@ -103,6 +104,7 @@ public class TestHddsServerUtils {
* OZONE_SCM_CLIENT_ADDRESS_KEY should be ignored.
*/
@Test
@SuppressWarnings("StringSplitter")
public void testDatanodeAddressFallbackToClientWithPort() {
final String scmHost = "host123:100";
final Configuration conf = new OzoneConfiguration();

@@ -124,8 +126,8 @@ public class TestHddsServerUtils {
conf.set(OZONE_SCM_NAMES, scmHost);
final InetSocketAddress address =
HddsServerUtil.getScmAddressForDataNodes(conf);
assertEquals(address.getHostName(), scmHost);
assertEquals(address.getPort(), OZONE_SCM_DATANODE_PORT_DEFAULT);
assertEquals(scmHost, address.getHostName());
assertEquals(OZONE_SCM_DATANODE_PORT_DEFAULT, address.getPort());
}

/**

@@ -134,6 +136,7 @@ public class TestHddsServerUtils {
* defined by OZONE_SCM_NAMES should be ignored.
*/
@Test
@SuppressWarnings("StringSplitter")
public void testDatanodeAddressFallbackToScmNamesWithPort() {
final String scmHost = "host123:100";
final Configuration conf = new OzoneConfiguration();

@@ -141,7 +144,7 @@ public class TestHddsServerUtils {
final InetSocketAddress address =
HddsServerUtil.getScmAddressForDataNodes(conf);
assertEquals(address.getHostName(), scmHost.split(":")[0]);
assertEquals(address.getPort(), OZONE_SCM_DATANODE_PORT_DEFAULT);
assertEquals(OZONE_SCM_DATANODE_PORT_DEFAULT, address.getPort());
}

/**
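Reviewer note: the assertEquals flips across these tests follow JUnit's (expected, actual) parameter order. The assertion passes either way, but with the arguments swapped a failure message reports the two values backwards and sends the reader chasing the wrong side. A hypothetical sketch:

import static org.junit.Assert.assertEquals;

import org.junit.Test;

public class ArgumentOrderExample {
  @Test
  public void reportsFailuresTheRightWayAround() {
    int actualPort = 9876; // value produced by the code under test
    // Swapped arguments would, on a failure, print the actual value as
    // "expected" and vice versa:
    // assertEquals(actualPort, 9876);
    assertEquals(9876, actualPort); // expected first, actual second
  }
}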
@@ -48,7 +48,7 @@ import org.mockito.Mockito;

import java.io.File;
import java.io.IOException;
import java.util.LinkedList;
import java.util.ArrayList;
import java.util.List;
import java.util.UUID;
import java.util.concurrent.TimeUnit;

@@ -378,12 +378,9 @@ public class TestNodeManager {
* Check for NPE when datanodeDetails is passed null for sendHeartbeat.
*
* @throws IOException
* @throws InterruptedException
* @throws TimeoutException
*/
@Test
public void testScmCheckForErrorOnNullDatanodeDetails() throws IOException,
InterruptedException, TimeoutException {
public void testScmCheckForErrorOnNullDatanodeDetails() throws IOException {
try (SCMNodeManager nodeManager = createNodeManager(getConf())) {
nodeManager.processHeartbeat(null);
} catch (NullPointerException npe) {

@@ -588,7 +585,7 @@ public class TestNodeManager {
*/
private List<DatanodeDetails> createNodeSet(SCMNodeManager nodeManager, int
count) {
List<DatanodeDetails> list = new LinkedList<>();
List<DatanodeDetails> list = new ArrayList<>();
for (int x = 0; x < count; x++) {
DatanodeDetails datanodeDetails = TestUtils
.createRandomDatanodeAndRegister(nodeManager);

@@ -943,7 +940,7 @@ public class TestNodeManager {
}

@Test
public void testHandlingSCMCommandEvent() {
public void testHandlingSCMCommandEvent() throws IOException {
OzoneConfiguration conf = getConf();
conf.getTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL,
100, TimeUnit.MILLISECONDS);

@@ -974,6 +971,7 @@ public class TestNodeManager {
.assertEquals(command.get(0).getClass(), CloseContainerCommand.class);
} catch (IOException e) {
e.printStackTrace();
throw e;
}
}

@@ -150,22 +150,22 @@ public class TestSCMNodeStorageStatMap {
path, reportCapacity, reportScmUsed, reportRemaining, null);
StorageReportResult result =
map.processNodeReport(key, TestUtils.createNodeReport(storageReport));
Assert.assertEquals(result.getStatus(),
SCMNodeStorageStatMap.ReportStatus.ALL_IS_WELL);
Assert.assertEquals(SCMNodeStorageStatMap.ReportStatus.ALL_IS_WELL,
result.getStatus());
StorageContainerDatanodeProtocolProtos.NodeReportProto.Builder nrb =
NodeReportProto.newBuilder();
StorageReportProto srb = reportSet.iterator().next().getProtoBufMessage();
reportList.add(srb);
result = map.processNodeReport(key, TestUtils.createNodeReport(reportList));
Assert.assertEquals(result.getStatus(),
SCMNodeStorageStatMap.ReportStatus.ALL_IS_WELL);
Assert.assertEquals(SCMNodeStorageStatMap.ReportStatus.ALL_IS_WELL,
result.getStatus());

reportList.add(TestUtils
.createStorageReport(UUID.randomUUID(), path, reportCapacity,
reportCapacity, 0, null));
result = map.processNodeReport(key, TestUtils.createNodeReport(reportList));
Assert.assertEquals(result.getStatus(),
SCMNodeStorageStatMap.ReportStatus.STORAGE_OUT_OF_SPACE);
Assert.assertEquals(SCMNodeStorageStatMap.ReportStatus.STORAGE_OUT_OF_SPACE,
result.getStatus());
// Mark a disk failed
StorageReportProto srb2 = StorageReportProto.newBuilder()
.setStorageUuid(UUID.randomUUID().toString())

@@ -174,8 +174,8 @@ public class TestSCMNodeStorageStatMap {
reportList.add(srb2);
nrb.addAllStorageReport(reportList);
result = map.processNodeReport(key, nrb.addStorageReport(srb).build());
Assert.assertEquals(result.getStatus(),
SCMNodeStorageStatMap.ReportStatus.FAILED_AND_OUT_OF_SPACE_STORAGE);
Assert.assertEquals(SCMNodeStorageStatMap.ReportStatus
.FAILED_AND_OUT_OF_SPACE_STORAGE, result.getStatus());

}

@@ -51,7 +51,7 @@ public class TestNode2ContainerMap {
for (int dnIndex = 1; dnIndex <= DATANODE_COUNT; dnIndex++) {
TreeSet<ContainerID> currentSet = new TreeSet<>();
for (int cnIndex = 1; cnIndex <= CONTAINER_COUNT; cnIndex++) {
long currentCnIndex = (dnIndex * CONTAINER_COUNT) + cnIndex;
long currentCnIndex = (long) (dnIndex * CONTAINER_COUNT) + cnIndex;
currentSet.add(new ContainerID(currentCnIndex));
}
testData.put(UUID.randomUUID(), currentSet);

@@ -115,8 +115,8 @@ public class TestNode2ContainerMap {
map.insertNewDatanode(key, values);
Assert.assertTrue(map.isKnownDatanode(key));
ReportResult result = map.processReport(key, values);
Assert.assertEquals(result.getStatus(),
ReportResult.ReportStatus.ALL_IS_WELL);
Assert.assertEquals(ReportResult.ReportStatus.ALL_IS_WELL,
result.getStatus());
}

@Test
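Reviewer note: the (long) cast in TestNode2ContainerMap addresses an ErrorProne warning about computing a product in int arithmetic and only then widening it. Worth noting that casting the parenthesized product still multiplies in int; the robust form casts an operand so the multiplication itself runs in long. A hypothetical sketch:

public class OverflowDemo {
  public static void main(String[] args) {
    int dnIndex = 300_000;
    int containerCount = 100_000;

    // Product computed in int, then widened: already overflowed.
    long wrong = (long) (dnIndex * containerCount);

    // Cast an operand first so the multiply happens in long.
    long right = (long) dnIndex * containerCount;

    System.out.println(wrong); // an overflowed (negative) value
    System.out.println(right); // 30000000000
  }
}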
@@ -19,9 +19,8 @@ package org.apache.hadoop.ozone.container.placement;

import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

@@ -30,20 +29,18 @@ import static org.junit.Assert.assertTrue;
* Tests that test Metrics that support placement.
*/
public class TestDatanodeMetrics {
@Rule
public ExpectedException exception = ExpectedException.none();
@Test
public void testSCMNodeMetric() {
SCMNodeStat stat = new SCMNodeStat(100L, 10L, 90L);
assertEquals((long) stat.getCapacity().get(), 100L);
assertEquals((long) stat.getScmUsed().get(), 10L);
assertEquals((long) stat.getRemaining().get(), 90L);
assertEquals(10L, (long) stat.getScmUsed().get());
assertEquals(90L, (long) stat.getRemaining().get());
SCMNodeMetric metric = new SCMNodeMetric(stat);

SCMNodeStat newStat = new SCMNodeStat(100L, 10L, 90L);
assertEquals((long) stat.getCapacity().get(), 100L);
assertEquals((long) stat.getScmUsed().get(), 10L);
assertEquals((long) stat.getRemaining().get(), 90L);
assertEquals(100L, (long) stat.getCapacity().get());
assertEquals(10L, (long) stat.getScmUsed().get());
assertEquals(90L, (long) stat.getRemaining().get());

SCMNodeMetric newMetric = new SCMNodeMetric(newStat);
assertTrue(metric.isEqual(newMetric.get()));

@@ -124,6 +124,7 @@ public class ObjectStore {
* @return String - Ozone Volume name.
* @throws IOException - Throws if the s3Bucket does not exist.
*/
@SuppressWarnings("StringSplitter")
public String getOzoneVolumeName(String s3BucketName) throws IOException {
String mapping = getOzoneBucketMapping(s3BucketName);
return mapping.split("/")[0];

@@ -136,6 +137,7 @@ public class ObjectStore {
* @return String - Ozone bucket Name.
* @throws IOException - Throws if the s3bucket does not exist.
*/
@SuppressWarnings("StringSplitter")
public String getOzoneBucketName(String s3BucketName) throws IOException {
String mapping = getOzoneBucketMapping(s3BucketName);
return mapping.split("/")[1];
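Reviewer note: ErrorProne's StringSplitter check warns that String.split has surprising corner cases (trailing empty strings are silently dropped, and splitting "" yields one empty element) and recommends Guava's Splitter. The mappings split here are machine-generated "volume/bucket" strings, so the patch suppresses the warning rather than rewriting. A hypothetical sketch of the alternative the check suggests, assuming Guava on the classpath:

import com.google.common.base.Splitter;
import java.util.List;

class SplitterDemo {
  public static void main(String[] args) {
    String mapping = "volume1/bucket1"; // illustrative mapping value

    // String.split: fine for well-formed input like this.
    String volume = mapping.split("/")[0];

    // Guava Splitter: explicit, predictable semantics in the edge cases.
    List<String> parts = Splitter.on('/').splitToList(mapping);
    System.out.println(volume.equals(parts.get(0))); // true
  }
}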
@@ -610,6 +610,7 @@ public class RpcClient implements ClientProtocol {
}

@Override
@SuppressWarnings("StringSplitter")
public String getOzoneVolumeName(String s3BucketName) throws IOException {
String mapping = getOzoneBucketMapping(s3BucketName);
return mapping.split("/")[0];

@@ -617,6 +618,7 @@ public class RpcClient implements ClientProtocol {
}

@Override
@SuppressWarnings("StringSplitter")
public String getOzoneBucketName(String s3BucketName) throws IOException {
String mapping = getOzoneBucketMapping(s3BucketName);
return mapping.split("/")[1];

@@ -40,11 +40,12 @@ import static org.junit.Assert.assertThat;

/**
* This test class verifies the parsing of SCM endpoint config settings. The
* parsing logic is in {@link org.apache.hadoop.hdds.scm.client.HddsClientUtils}.
* parsing logic is in
* {@link org.apache.hadoop.hdds.scm.client.HddsClientUtils}.
*/
public class TestHddsClientUtils {
@Rule
public Timeout timeout = new Timeout(300_000);
public Timeout timeout = new Timeout(300000);

@Rule
public ExpectedException thrown= ExpectedException.none();

@@ -114,13 +115,14 @@ public class TestHddsClientUtils {
final String scmHost = "host123";
final Configuration conf = new OzoneConfiguration();
conf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, scmHost);
final InetSocketAddress address =HddsUtils.getScmAddressForBlockClients(
final InetSocketAddress address = HddsUtils.getScmAddressForBlockClients(
conf);
assertEquals(address.getHostName(), scmHost);
assertEquals(address.getPort(), OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT);
assertEquals(scmHost, address.getHostName());
assertEquals(OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT, address.getPort());
}

@Test
@SuppressWarnings("StringSplitter")
public void testBlockClientFallbackToClientWithPort() {
// When OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY is undefined it should
// fallback to OZONE_SCM_CLIENT_ADDRESS_KEY.

@@ -132,8 +134,8 @@ public class TestHddsClientUtils {
conf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, scmHost);
final InetSocketAddress address =HddsUtils.getScmAddressForBlockClients(
conf);
assertEquals(address.getHostName(), scmHost.split(":")[0]);
assertEquals(address.getPort(), OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT);
assertEquals(scmHost.split(":")[0], address.getHostName());
assertEquals(OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT, address.getPort());
}

@Test

@@ -143,13 +145,14 @@ public class TestHddsClientUtils {
final String scmHost = "host456";
final Configuration conf = new OzoneConfiguration();
conf.set(OZONE_SCM_NAMES, scmHost);
final InetSocketAddress address =HddsUtils.getScmAddressForBlockClients(
final InetSocketAddress address = HddsUtils.getScmAddressForBlockClients(
conf);
assertEquals(address.getHostName(), scmHost);
assertEquals(address.getPort(), OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT);
assertEquals(scmHost, address.getHostName());
assertEquals(OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT, address.getPort());
}

@Test
@SuppressWarnings("StringSplitter")
public void testBlockClientFallbackToScmNamesWithPort() {
// When OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY and OZONE_SCM_CLIENT_ADDRESS_KEY
// are undefined it should fallback to OZONE_SCM_NAMES.

@@ -159,10 +162,10 @@ public class TestHddsClientUtils {
final String scmHost = "host456:200";
final Configuration conf = new OzoneConfiguration();
conf.set(OZONE_SCM_NAMES, scmHost);
final InetSocketAddress address =HddsUtils.getScmAddressForBlockClients(
final InetSocketAddress address = HddsUtils.getScmAddressForBlockClients(
conf);
assertEquals(address.getHostName(), scmHost.split(":")[0]);
assertEquals(address.getPort(), OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT);
assertEquals(scmHost.split(":")[0], address.getHostName());
assertEquals(OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT, address.getPort());
}

@Test

@@ -172,12 +175,13 @@ public class TestHddsClientUtils {
final String scmHost = "host456";
final Configuration conf = new OzoneConfiguration();
conf.set(OZONE_SCM_NAMES, scmHost);
final InetSocketAddress address =HddsUtils.getScmAddressForClients(conf);
assertEquals(address.getHostName(), scmHost);
assertEquals(address.getPort(), OZONE_SCM_CLIENT_PORT_DEFAULT);
final InetSocketAddress address = HddsUtils.getScmAddressForClients(conf);
assertEquals(scmHost, address.getHostName());
assertEquals(OZONE_SCM_CLIENT_PORT_DEFAULT, address.getPort());
}

@Test
@SuppressWarnings("StringSplitter")
public void testClientFallbackToScmNamesWithPort() {
// When OZONE_SCM_CLIENT_ADDRESS_KEY is undefined, it should fallback
// to OZONE_SCM_NAMES.

@@ -187,9 +191,9 @@ public class TestHddsClientUtils {
final String scmHost = "host456:300";
final Configuration conf = new OzoneConfiguration();
conf.set(OZONE_SCM_NAMES, scmHost);
final InetSocketAddress address =HddsUtils.getScmAddressForClients(conf);
assertEquals(address.getHostName(), scmHost.split(":")[0]);
assertEquals(address.getPort(), OZONE_SCM_CLIENT_PORT_DEFAULT);
final InetSocketAddress address = HddsUtils.getScmAddressForClients(conf);
assertEquals(scmHost.split(":")[0], address.getHostName());
assertEquals(OZONE_SCM_CLIENT_PORT_DEFAULT, address.getPort());
}

@Test

@@ -34,6 +34,7 @@ import java.util.HashMap;
/**
* This helper class keeps a map of all user and their permissions.
*/
@SuppressWarnings("ProtocolBufferOrdinal")
public class OmOzoneAclMap {
// per Acl Type user:rights map
private ArrayList<Map<String, OzoneAclRights>> aclMaps;

@@ -18,7 +18,7 @@
package org.apache.hadoop.ozone.web.response;

import java.io.IOException;
import java.util.LinkedList;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hdds.protocol.StorageType;

@@ -85,7 +85,7 @@ public class BucketInfo implements Comparable<BucketInfo> {
* Default constructor for BucketInfo.
*/
public BucketInfo() {
acls = new LinkedList<OzoneAcl>();
acls = new ArrayList<>();
}

/**

@@ -318,7 +318,7 @@ public class BucketInfo implements Comparable<BucketInfo> {
* for the Json serialization.
*/
@JsonFilter(BUCKET_INFO)
class MixIn {
static class MixIn {

}
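Reviewer note: making MixIn static follows ErrorProne's ClassCanBeStatic check. An inner class that never uses its enclosing instance still carries a hidden reference to it, which wastes memory and can keep the outer object reachable long after it is needed. A hypothetical sketch:

class Outer {
  private final byte[] big = new byte[1 << 20]; // 1 MiB held by Outer

  // Non-static inner class: every instance silently pins its Outer,
  // and 'big' along with it.
  // class Helper { }

  // Static nested class: no hidden reference to the enclosing instance.
  static class Helper { }
}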
@@ -18,17 +18,19 @@

package org.apache.hadoop.ozone.web;

import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.hdds.protocol.StorageType;
import org.apache.hadoop.ozone.web.response.BucketInfo;
import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.web.response.BucketInfo;
import org.junit.Test;

import java.io.IOException;
import java.util.LinkedList;
import java.util.ArrayList;
import java.util.List;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;

/**
* Test Ozone Bucket Info operation.
*/

@@ -38,7 +40,7 @@ public class TestBucketInfo {
BucketInfo bucketInfo = new BucketInfo("volumeName", "bucketName");
String bucketInfoString = bucketInfo.toJsonString();
BucketInfo newBucketInfo = BucketInfo.parse(bucketInfoString);
assert(bucketInfo.equals(newBucketInfo));
assertEquals(bucketInfo, newBucketInfo);
}

@Test

@@ -46,7 +48,7 @@ public class TestBucketInfo {
BucketInfo bucketInfo = new BucketInfo("volumeName", "bucketName");
String bucketInfoString = bucketInfo.toDBString();
BucketInfo newBucketInfo = BucketInfo.parse(bucketInfoString);
assert(bucketInfo.equals(newBucketInfo));
assertEquals(bucketInfo, newBucketInfo);
}

@Test

@@ -54,18 +56,17 @@ public class TestBucketInfo {
BucketInfo bucketInfo = new BucketInfo("volumeName", "bucketName");
String bucketInfoString = bucketInfo.toDBString();
BucketInfo newBucketInfo = BucketInfo.parse(bucketInfoString);
assert(bucketInfo.equals(newBucketInfo));
List<OzoneAcl> aclList = new LinkedList<>();
assertEquals(bucketInfo, newBucketInfo);
List<OzoneAcl> aclList = new ArrayList<>();

aclList.add(OzoneAcl.parseAcl("user:bilbo:r"));
aclList.add(OzoneAcl.parseAcl("user:samwise:rw"));
newBucketInfo.setAcls(aclList);

assert(newBucketInfo.getAcls() != null);
assert(newBucketInfo.getAcls().size() == 2);
assertNotNull(newBucketInfo.getAcls());
assertEquals(2, newBucketInfo.getAcls().size());
}

@Test
public void testBucketInfoVersionAndType() throws IOException {
BucketInfo bucketInfo = new BucketInfo("volumeName", "bucketName");

@@ -75,7 +76,7 @@ public class TestBucketInfo {
String bucketInfoString = bucketInfo.toDBString();

BucketInfo newBucketInfo = BucketInfo.parse(bucketInfoString);
assert(bucketInfo.equals(newBucketInfo));
assertEquals(bucketInfo, newBucketInfo);
}

}

@@ -1,19 +1,18 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
*
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/

package org.apache.hadoop.ozone.web;

@@ -82,29 +81,29 @@ public class TestQuota {
@Test
public void testVerifyQuota() {
OzoneQuota qt = OzoneQuota.parseQuota("10TB");
assertEquals(qt.getSize(), 10);
assertEquals(qt.getUnit(), OzoneQuota.Units.TB);
assertEquals(qt.sizeInBytes(), 10L * (1024L * 1024L * 1024L * 1024L));
assertEquals(10, qt.getSize());
assertEquals(OzoneQuota.Units.TB, qt.getUnit());
assertEquals(10L * (1024L * 1024L * 1024L * 1024L), qt.sizeInBytes());

qt = OzoneQuota.parseQuota("10MB");
assertEquals(qt.getSize(), 10);
assertEquals(qt.getUnit(), OzoneQuota.Units.MB);
assertEquals(qt.sizeInBytes(), 10L * (1024L * 1024L));
assertEquals(10, qt.getSize());
assertEquals(OzoneQuota.Units.MB, qt.getUnit());
assertEquals(10L * (1024L * 1024L), qt.sizeInBytes());

qt = OzoneQuota.parseQuota("10GB");
assertEquals(qt.getSize(), 10);
assertEquals(qt.getUnit(), OzoneQuota.Units.GB);
assertEquals(qt.sizeInBytes(), 10L * (1024L * 1024L * 1024L));
assertEquals(10, qt.getSize());
assertEquals(OzoneQuota.Units.GB, qt.getUnit());
assertEquals(10L * (1024L * 1024L * 1024L), qt.sizeInBytes());

qt = OzoneQuota.parseQuota("10BYTES");
assertEquals(qt.getSize(), 10);
assertEquals(qt.getUnit(), OzoneQuota.Units.BYTES);
assertEquals(qt.sizeInBytes(), 10L);
assertEquals(10, qt.getSize());
assertEquals(OzoneQuota.Units.BYTES, qt.getUnit());
assertEquals(10L, qt.sizeInBytes());

OzoneQuota emptyQuota = new OzoneQuota();
assertEquals(emptyQuota.sizeInBytes(), -1L);
assertEquals(emptyQuota.getSize(), 0);
assertEquals(emptyQuota.getUnit(), OzoneQuota.Units.UNDEFINED);
assertEquals(-1L, emptyQuota.sizeInBytes());
assertEquals(0, emptyQuota.getSize());
assertEquals(OzoneQuota.Units.UNDEFINED, emptyQuota.getUnit());
}

@Test
@@ -23,8 +23,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.container.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.ContainerManager;
import org.apache.hadoop.hdds.scm.container.common.helpers
.ContainerWithPipeline;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.junit.AfterClass;

@@ -36,11 +35,12 @@ import java.io.IOException;
import java.util.Set;
import java.util.concurrent.TimeoutException;

import static org.apache.hadoop.hdds.protocol.proto.HddsProtos
.ReplicationFactor.THREE;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos
.ReplicationType.RATIS;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.RATIS;

/**
* Tests for Pipeline Closing.
*/
public class TestPipelineClose {

private static MiniOzoneCluster cluster;

@@ -88,7 +88,6 @@ public class TestPipelineClose {
}
}

@Test
public void testPipelineCloseWithClosedContainer() throws IOException {
Set<ContainerID> set = pipelineManager

@@ -112,8 +111,8 @@ public class TestPipelineClose {
pipelineManager.finalizePipeline(ratisContainer1.getPipeline().getId());
Pipeline pipeline1 = pipelineManager
.getPipeline(ratisContainer1.getPipeline().getId());
Assert.assertEquals(pipeline1.getPipelineState(),
Pipeline.PipelineState.CLOSED);
Assert.assertEquals(Pipeline.PipelineState.CLOSED,
pipeline1.getPipelineState());
pipelineManager.removePipeline(pipeline1.getId());
for (DatanodeDetails dn : ratisContainer1.getPipeline().getNodes()) {
// Assert that the pipeline has been removed from Node2PipelineMap as well

@@ -131,12 +130,12 @@ public class TestPipelineClose {

ContainerID cId2 = ratisContainer2.getContainerInfo().containerID();
pipelineManager.finalizePipeline(ratisContainer2.getPipeline().getId());
Assert.assertEquals(
pipelineManager.getPipeline(ratisContainer2.getPipeline().getId())
.getPipelineState(), Pipeline.PipelineState.CLOSED);
Assert.assertEquals(Pipeline.PipelineState.CLOSED,
pipelineManager.getPipeline(
ratisContainer2.getPipeline().getId()).getPipelineState());
Pipeline pipeline2 = pipelineManager
.getPipeline(ratisContainer2.getPipeline().getId());
Assert.assertEquals(pipeline2.getPipelineState(),
Pipeline.PipelineState.CLOSED);
Assert.assertEquals(Pipeline.PipelineState.CLOSED,
pipeline2.getPipelineState());
}
}

@@ -0,0 +1,22 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Package info tests.
*/
package org.apache.hadoop.hdds.scm.pipeline;
@@ -29,7 +29,15 @@ import java.io.IOException;
import java.util.List;
import java.util.function.Consumer;

public class OzoneTestUtils {
/**
* Helper class for Tests.
*/
public final class OzoneTestUtils {
/**
* Never Constructed.
*/
private OzoneTestUtils() {
}

/**
* Close containers which contain the blocks listed in

@@ -55,7 +63,7 @@ public class OzoneTestUtils {
.getContainer(ContainerID.valueof(
blockID.getContainerID())).isOpen());
} catch (IOException e) {
e.printStackTrace();
throw new AssertionError("Failed to close the container", e);
}
}, omKeyLocationInfoGroups);
}
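Reviewer note: OzoneTestUtils is now final with a private constructor, the standard shape for a static utility class, and the swallowed e.printStackTrace() in the test helper is replaced by rethrowing as an AssertionError so the failure actually fails the test. A hypothetical sketch of both patterns together:

public final class TestUtilsExample {
  private TestUtilsExample() {
    // never constructed
  }

  public static void requireOpen(boolean open) {
    try {
      if (!open) {
        throw new java.io.IOException("container not open");
      }
    } catch (java.io.IOException e) {
      // printStackTrace() would hide the problem; this fails the test.
      throw new AssertionError("Failed to close the container", e);
    }
  }
}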
@@ -40,12 +40,11 @@ import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
import org.apache.hadoop.ozone.web.utils.OzoneUtils;
import org.apache.hadoop.utils.MetadataKeyFilters;
import org.apache.hadoop.utils.MetadataKeyFilters.KeyPrefixFilter;
import org.apache.hadoop.utils.MetadataKeyFilters.MetadataKeyFilter;
import org.apache.hadoop.utils.MetadataStore;

import java.io.IOException;
import java.io.OutputStream;
import java.util.LinkedList;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Set;

@@ -83,8 +82,8 @@ public class TestStorageContainerManagerHelper {
storageHandler.createVolume(createVolumeArgs);

BucketArgs bucketArgs = new BucketArgs(bucket, createVolumeArgs);
bucketArgs.setAddAcls(new LinkedList<>());
bucketArgs.setRemoveAcls(new LinkedList<>());
bucketArgs.setAddAcls(new ArrayList<>());
bucketArgs.setRemoveAcls(new ArrayList<>());
bucketArgs.setStorageType(StorageType.DISK);
storageHandler.createBucket(bucketArgs);

@@ -144,9 +143,6 @@ public class TestStorageContainerManagerHelper {
public List<Long> getAllBlocks(Long containeID) throws IOException {
List<Long> allBlocks = Lists.newArrayList();
MetadataStore meta = getContainerMetadata(containeID);
MetadataKeyFilter filter =
(preKey, currentKey, nextKey) -> !DFSUtil.bytes2String(currentKey)
.startsWith(OzoneConsts.DELETING_KEY_PREFIX);
List<Map.Entry<byte[], byte[]>> kvs =
meta.getRangeKVs(null, Integer.MAX_VALUE,
MetadataKeyFilters.getNormalKeyFilter());

@@ -61,6 +61,8 @@ import java.util.concurrent.TimeUnit;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;

import static java.nio.charset.StandardCharsets.UTF_8;

/**
* Tests Close Container Exception handling by Ozone Client.
*/

@@ -121,7 +123,8 @@ public class TestCloseContainerHandlingByClient {
OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0);
// write data more than 1 chunk
byte[] data = ContainerTestHelper
.getFixedLengthString(keyString, chunkSize + chunkSize / 2).getBytes();
.getFixedLengthString(keyString, chunkSize + chunkSize / 2)
.getBytes(UTF_8);
key.write(data);

Assert.assertTrue(key.getOutputStream() instanceof ChunkGroupOutputStream);

@@ -141,9 +144,9 @@ public class TestCloseContainerHandlingByClient {
Assert.assertEquals(2 * data.length, keyInfo.getDataSize());

// Written the same data twice
String dataString = new String(data);
dataString.concat(dataString);
validateData(keyName, dataString.getBytes());
String dataString = new String(data, UTF_8);
dataString = dataString.concat(dataString);
validateData(keyName, dataString.getBytes(UTF_8));
}

@Test

@@ -152,7 +155,8 @@ public class TestCloseContainerHandlingByClient {
OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0);
// write data more than 1 chunk
byte[] data = ContainerTestHelper
.getFixedLengthString(keyString, chunkSize + chunkSize / 2).getBytes();
.getFixedLengthString(keyString, chunkSize + chunkSize / 2)
.getBytes(UTF_8);
key.write(data);

Assert.assertTrue(key.getOutputStream() instanceof ChunkGroupOutputStream);

@@ -184,7 +188,7 @@ public class TestCloseContainerHandlingByClient {
// write data more than 1 chunk
byte[] data =
ContainerTestHelper.getFixedLengthString(keyString, (3 * blockSize))
.getBytes();
.getBytes(UTF_8);
Assert.assertEquals(data.length, 3 * blockSize);
key.write(data);

@@ -199,7 +203,7 @@ public class TestCloseContainerHandlingByClient {
// write 1 more block worth of data. It will fail and new block will be
// allocated
key.write(ContainerTestHelper.getFixedLengthString(keyString, blockSize)
.getBytes());
.getBytes(UTF_8));

key.close();
// read the key from OM again and match the length.The length will still

@@ -232,13 +236,13 @@ public class TestCloseContainerHandlingByClient {
Assert.assertEquals(4, groupOutputStream.getStreamEntries().size());
String dataString =
ContainerTestHelper.getFixedLengthString(keyString, (2 * blockSize));
byte[] data = dataString.getBytes();
byte[] data = dataString.getBytes(UTF_8);
key.write(data);
// 3 block are completely written to the DataNode in 3 blocks.
// Data of length half of chunkSize resides in the chunkOutput stream buffer
String dataString2 =
ContainerTestHelper.getFixedLengthString(keyString, chunkSize * 1 / 2);
key.write(dataString2.getBytes());
key.write(dataString2.getBytes(UTF_8));
//get the name of a valid container
OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName)
.setBucketName(bucketName).setType(HddsProtos.ReplicationType.RATIS)

@@ -257,9 +261,9 @@ public class TestCloseContainerHandlingByClient {
// closeContainerException and remaining data in the chunkOutputStream
// buffer will be copied into a different allocated block and will be
// committed.
Assert.assertEquals(dataString.concat(dataString2).getBytes().length,
Assert.assertEquals(dataString.concat(dataString2).getBytes(UTF_8).length,
keyInfo.getDataSize());
validateData(keyName, dataString.concat(dataString2).getBytes());
validateData(keyName, dataString.concat(dataString2).getBytes(UTF_8));
}

@Test

@@ -274,7 +278,8 @@ public class TestCloseContainerHandlingByClient {
Assert.assertEquals(4, groupOutputStream.getStreamEntries().size());
// write data 3 blocks and one more chunk
byte[] writtenData =
ContainerTestHelper.getFixedLengthString(keyString, keyLen).getBytes();
ContainerTestHelper.getFixedLengthString(keyString, keyLen)
.getBytes(UTF_8);
byte[] data = Arrays.copyOfRange(writtenData, 0, 3 * blockSize + chunkSize);
Assert.assertEquals(data.length, 3 * blockSize + chunkSize);
key.write(data);

@@ -367,8 +372,8 @@ public class TestCloseContainerHandlingByClient {
.isContainerPresent(cluster, containerID, dn))) {
for (DatanodeDetails datanodeDetails : datanodes) {
GenericTestUtils.waitFor(() -> ContainerTestHelper
.isContainerClosed(cluster, containerID, datanodeDetails), 500,
15 * 1000);
.isContainerClosed(cluster, containerID, datanodeDetails),
500, 15 * 1000);
//double check if it's really closed
// (waitFor also throws an exception)
Assert.assertTrue(ContainerTestHelper

@@ -395,7 +400,7 @@ public class TestCloseContainerHandlingByClient {
Assert.assertEquals(2, groupOutputStream.getStreamEntries().size());
String dataString =
ContainerTestHelper.getFixedLengthString(keyString, (1 * blockSize));
byte[] data = dataString.getBytes();
byte[] data = dataString.getBytes(UTF_8);
key.write(data);
List<OmKeyLocationInfo> locationInfos =
new ArrayList<>(groupOutputStream.getLocationInfoList());

@@ -411,7 +416,7 @@ public class TestCloseContainerHandlingByClient {
waitForContainerClose(keyName, key, HddsProtos.ReplicationType.RATIS);
dataString =
ContainerTestHelper.getFixedLengthString(keyString, (1 * blockSize));
data = dataString.getBytes();
data = dataString.getBytes(UTF_8);
key.write(data);
Assert.assertEquals(2, groupOutputStream.getStreamEntries().size());

@@ -443,7 +448,8 @@ public class TestCloseContainerHandlingByClient {
String keyName = "ratis";
OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0);
byte[] data = ContainerTestHelper
.getFixedLengthString(keyString, chunkSize + chunkSize / 2).getBytes();
.getFixedLengthString(keyString, chunkSize + chunkSize / 2)
.getBytes(UTF_8);
key.write(data);

//get the name of a valid container

@@ -462,9 +468,9 @@ public class TestCloseContainerHandlingByClient {
// updated correctly in OzoneManager once the steam is closed
key.close();
OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
String dataString = new String(data);
dataString.concat(dataString);
String dataString = new String(data, UTF_8);
dataString = dataString.concat(dataString);
Assert.assertEquals(2 * data.length, keyInfo.getDataSize());
validateData(keyName, dataString.getBytes());
validateData(keyName, dataString.getBytes(UTF_8));
}
}
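Reviewer note: two distinct ErrorProne findings are fixed in this test. DefaultCharset: String.getBytes() and new String(byte[]) use the platform default charset, so the result can vary by machine; passing StandardCharsets.UTF_8 makes it deterministic. ReturnValueIgnored: String.concat returns a new string, so the original dataString.concat(dataString); silently did nothing until the result was assigned back. A hypothetical sketch of both:

import static java.nio.charset.StandardCharsets.UTF_8;

public class CharsetConcatDemo {
  public static void main(String[] args) {
    String s = "abc";

    // Strings are immutable: the result must be assigned back.
    // s.concat(s);          // no-op, flagged as ReturnValueIgnored
    s = s.concat(s);         // "abcabc"

    // Explicit charset: the same bytes on every platform.
    byte[] bytes = s.getBytes(UTF_8);
    System.out.println(new String(bytes, UTF_8));
  }
}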
@@ -0,0 +1,22 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Test utils for Ozone.
*/
package org.apache.hadoop.ozone;

@@ -23,6 +23,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.TestOzoneHelper;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.junit.Ignore;
import org.junit.Rule;
import org.junit.BeforeClass;
import org.junit.AfterClass;

@@ -158,6 +159,7 @@ public class TestOzoneVolumes extends TestOzoneHelper {
*
* @throws IOException
*/
@Ignore("Test is ignored for time being, to be enabled after security.")
public void testGetVolumesByUser() throws IOException {
testGetVolumesByUser(port);
}

@@ -167,6 +169,7 @@ public class TestOzoneVolumes extends TestOzoneHelper {
*
* @throws IOException
*/
@Ignore("Test is ignored for time being, to be enabled after security.")
public void testGetVolumesOfAnotherUser() throws IOException {
super.testGetVolumesOfAnotherUser(port);
}

@@ -177,6 +180,7 @@ public class TestOzoneVolumes extends TestOzoneHelper {
*
* @throws IOException
*/
@Ignore("Test is ignored for time being, to be enabled after security.")
public void testGetVolumesOfAnotherUserShouldFail() throws IOException {
super.testGetVolumesOfAnotherUserShouldFail(port);
}
@@ -0,0 +1,22 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* REST client tests.
*/
package org.apache.hadoop.ozone.web.client;

@@ -0,0 +1,22 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Rest Client Tests.
*/
package org.apache.hadoop.ozone.web;
@ -26,7 +26,7 @@ import java.io.IOException;
import java.nio.file.DirectoryNotEmptyException;
import java.nio.file.FileAlreadyExistsException;
import java.nio.file.NoSuchFileException;
import java.util.LinkedList;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hdds.protocol.StorageType;

@ -211,7 +211,7 @@ public abstract class BucketProcessTemplate {
args.getHeaders().getRequestHeader(Header.OZONE_ACLS);
List<String> filteredSet = null;
if (aclStrings != null) {
filteredSet = new LinkedList<>();
filteredSet = new ArrayList<>();
for (String s : aclStrings) {
if (s.startsWith(tag)) {
filteredSet.add(s.replaceFirst(tag, ""));

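Several hunks in this commit, starting with the one above, replace LinkedList with ArrayList. This follows ErrorProne's JdkObsolete advice: through the List interface the two are interchangeable, but ArrayList backs its elements with a single array, giving O(1) positional access and cache-friendly iteration, while LinkedList allocates a node per element. A minimal sketch of the trade-off, with hypothetical names, not part of the commit:

    import java.util.ArrayList;
    import java.util.LinkedList;
    import java.util.List;

    public final class ListChoiceSketch {
      public static void main(String[] args) {
        // LinkedList: one heap node per element, pointer-chasing iteration,
        // O(n) positional access.
        List<String> linked = new LinkedList<>();
        // ArrayList: one backing array, O(1) positional access, amortized
        // O(1) append -- the better default unless you splice mid-list.
        List<String> array = new ArrayList<>();
        for (int i = 0; i < 1000; i++) {
          linked.add("acl-" + i);
          array.add("acl-" + i);
        }
        // Same observable behavior through the List interface, which is
        // why swaps like the one above are safe.
        System.out.println(linked.get(500).equals(array.get(500))); // true
      }
    }
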
@ -312,5 +312,6 @@ public interface StorageHandler extends Closeable{
/**
* Closes all the opened resources.
*/
@Override
void close();
}

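The @Override added here documents that close() re-declares java.io.Closeable's method (narrowed to throw nothing) and turns any accidental signature mismatch into a compile error, which is what ErrorProne's MissingOverride check asks for. A small sketch under that assumption, with hypothetical names:

    import java.io.Closeable;

    interface HandlerSketch extends Closeable {
      // Re-declaring close() narrows the contract: Closeable.close()
      // throws IOException, this variant promises not to.
      @Override
      void close();
    }

    final class InMemoryHandler implements HandlerSketch {
      @Override
      public void close() {
        // With @Override, a typo such as close(boolean) would fail to
        // compile instead of silently becoming an overload.
        System.out.println("resources released");
      }

      public static void main(String[] args) {
        new InMemoryHandler().close();
      }
    }
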
@ -37,9 +37,9 @@ public class TestErrorCode {
OzoneException e = ErrorTable
.newError(ErrorTable.ACCESS_DENIED, getRequestID(), "/test/path",
"localhost");
assertEquals(e.getHostID(), "localhost");
assertEquals(e.getShortMessage(),
ErrorTable.ACCESS_DENIED.getShortMessage());
assertEquals("localhost", e.getHostID());
assertEquals(ErrorTable.ACCESS_DENIED.getShortMessage(),
e.getShortMessage());
}

@Test

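Both this hunk and the later one in TestFreonWithDatanodeFastRestart reorder assertEquals arguments. JUnit's signature is assertEquals(expected, actual); swapping them never changes whether a test passes, but a failure then reports the two values backwards. A minimal illustration (hypothetical test, not part of the commit):

    import static org.junit.Assert.assertEquals;

    import org.junit.Test;

    public class AssertOrderSketch {
      @Test
      public void expectedComesFirst() {
        String actual = lookupHostId(); // stand-in for the code under test
        // Backwards: a failure would print
        // "expected:<whatever-lookup-returned> but was:<localhost>".
        // assertEquals(actual, "localhost");
        // Correct: the literal expectation first, the computed value second.
        assertEquals("localhost", actual);
      }

      private String lookupHostId() {
        return "localhost";
      }
    }
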
@ -0,0 +1,22 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Tests the REST error codes.
*/
package org.apache.hadoop.ozone.web;

@ -257,6 +257,7 @@ public class BucketManagerImpl implements BucketManager {
* @param bucketName - Name of the bucket.
* @throws IOException - on Failure.
*/
@Override
public void deleteBucket(String volumeName, String bucketName)
throws IOException {
Preconditions.checkNotNull(volumeName);

@ -22,7 +22,6 @@ import com.google.common.collect.Lists;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.server.ServerUtils;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.ozone.OmUtils;
import org.apache.hadoop.ozone.OzoneConsts;

@ -117,7 +116,7 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
public OmMetadataManagerImpl(OzoneConfiguration conf) throws IOException {
File metaDir = OmUtils.getOmDbDir(conf);
this.lock = new OzoneManagerLock(conf);
this.openKeyExpireThresholdMS = 1000 * conf.getInt(
this.openKeyExpireThresholdMS = 1000L * conf.getInt(
OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS,
OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS_DEFAULT);

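The fix above matters because 1000 * conf.getInt(...) is evaluated entirely in 32-bit arithmetic; the product wraps before the assignment widens it to long. Writing 1000L promotes the whole expression to 64-bit math. The same reasoning applies to the later RandomKeyGenerator hunk. A self-contained demonstration:

    public final class OverflowSketch {
      public static void main(String[] args) {
        // A 30-day expiry threshold in seconds, as it might arrive from
        // configuration.
        int thresholdSeconds = 30 * 24 * 60 * 60; // 2,592,000

        // int * int wraps past 2^31 - 1 before the widening assignment.
        long wrong = 1000 * thresholdSeconds;  // -1702967296
        // The L suffix forces the multiplication itself into long math.
        long right = 1000L * thresholdSeconds; // 2592000000

        System.out.println(wrong + " vs " + right);
      }
    }
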
@ -989,6 +989,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
* @param bucket - Name of the bucket.
* @throws IOException
*/
@Override
public void deleteBucket(String volume, String bucket) throws IOException {
Map<String, String> auditMap = buildAuditMap(volume);
auditMap.put(OzoneConsts.BUCKET, bucket);

@ -66,6 +66,7 @@ public class ServiceListJSONServlet extends HttpServlet {

private transient OzoneManager om;

@Override
public void init() throws ServletException {
this.om = (OzoneManager) getServletContext()
.getAttribute(OzoneConsts.OM_CONTEXT_ATTRIBUTE);

@ -35,7 +35,7 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.util.LinkedList;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

@ -73,7 +73,7 @@ public class VolumeManagerImpl implements VolumeManager {
// Get the volume list
byte[] dbUserKey = metadataManager.getUserKey(owner);
byte[] volumeList = metadataManager.getUserTable().get(dbUserKey);
List<String> prevVolList = new LinkedList<>();
List<String> prevVolList = new ArrayList<>();
if (volumeList != null) {
VolumeList vlist = VolumeList.parseFrom(volumeList);
prevVolList.addAll(vlist.getVolumeNamesList());

@ -98,7 +98,7 @@ public class VolumeManagerImpl implements VolumeManager {
// Get the volume list
byte[] dbUserKey = metadataManager.getUserKey(owner);
byte[] volumeList = metadataManager.getUserTable().get(dbUserKey);
List<String> prevVolList = new LinkedList<>();
List<String> prevVolList = new ArrayList<>();
if (volumeList != null) {
VolumeList vlist = VolumeList.parseFrom(volumeList);
prevVolList.addAll(vlist.getVolumeNamesList());

@ -140,7 +140,7 @@ public class VolumeManagerImpl implements VolumeManager {

try(WriteBatch batch = new WriteBatch()) {
// Write the vol info
List<HddsProtos.KeyValue> metadataList = new LinkedList<>();
List<HddsProtos.KeyValue> metadataList = new ArrayList<>();
for (Map.Entry<String, String> entry :
args.getKeyValueMap().entrySet()) {
metadataList.add(HddsProtos.KeyValue.newBuilder()

@ -250,6 +250,7 @@ public class VolumeManagerImpl implements VolumeManager {
* @param quota - Quota in bytes.
* @throws IOException
*/
@Override
public void setQuota(String volume, long quota) throws IOException {
Preconditions.checkNotNull(volume);
metadataManager.getLock().acquireVolumeLock(volume);

@ -293,6 +294,7 @@ public class VolumeManagerImpl implements VolumeManager {
* @return VolumeArgs or exception is thrown.
* @throws IOException
*/
@Override
public OmVolumeArgs getVolumeInfo(String volume) throws IOException {
Preconditions.checkNotNull(volume);
metadataManager.getLock().acquireVolumeLock(volume);

@ -384,6 +386,7 @@ public class VolumeManagerImpl implements VolumeManager {
* @return true if the user has access for the volume, false otherwise
* @throws IOException
*/
@Override
public boolean checkVolumeAccess(String volume, OzoneAclInfo userAcl)
throws IOException {
Preconditions.checkNotNull(volume);

@ -30,6 +30,7 @@ import java.io.OutputStream;
import java.io.IOException;
import java.util.ArrayList;

import static java.nio.charset.StandardCharsets.UTF_8;
import static org.junit.Assert.assertEquals;

/**

@ -62,7 +63,7 @@ public class TestChunkStreams {
assertEquals(0, groupOutputStream.getByteOffset());

String dataString = RandomStringUtils.randomAscii(500);
byte[] data = dataString.getBytes();
byte[] data = dataString.getBytes(UTF_8);
groupOutputStream.write(data, 0, data.length);
assertEquals(500, groupOutputStream.getByteOffset());

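From here on, many hunks replace bare getBytes() and new String(byte[]) with UTF_8 overloads. The no-argument forms use the JVM's platform default charset, so the same test can encode different bytes on different machines; ErrorProne's DefaultCharset check flags them. A short demonstration of the difference:

    import static java.nio.charset.StandardCharsets.UTF_8;

    public final class CharsetSketch {
      public static void main(String[] args) {
        String s = "héllo";
        // Platform-dependent: under a windows-1252 default this is 5 bytes,
        // under UTF-8 it is 6.
        byte[] ambiguous = s.getBytes();
        // Deterministic on every machine: the charset travels with the call.
        byte[] pinned = s.getBytes(UTF_8);
        System.out.println(ambiguous.length + " vs " + pinned.length);
        // Decoding must pin the charset too, as the tests in this commit do.
        System.out.println(new String(pinned, UTF_8));
      }
    }
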
@ -95,7 +96,8 @@ public class TestChunkStreams {
assertEquals(0, groupOutputStream.getByteOffset());

// first writes of 100 bytes should succeed
groupOutputStream.write(RandomStringUtils.randomAscii(100).getBytes());
groupOutputStream.write(RandomStringUtils.randomAscii(100)
.getBytes(UTF_8));
assertEquals(100, groupOutputStream.getByteOffset());

// second writes of 500 bytes should fail, as there should be only 400

@ -104,7 +106,8 @@ public class TestChunkStreams {
// other add more informative error code rather than exception, need to
// change this part.
exception.expect(Exception.class);
groupOutputStream.write(RandomStringUtils.randomAscii(500).getBytes());
groupOutputStream.write(RandomStringUtils.randomAscii(500)
.getBytes(UTF_8));
assertEquals(100, groupOutputStream.getByteOffset());
}
}

@ -115,7 +118,7 @@ public class TestChunkStreams {
ArrayList<ChunkInputStream> inputStreams = new ArrayList<>();

String dataString = RandomStringUtils.randomAscii(500);
byte[] buf = dataString.getBytes();
byte[] buf = dataString.getBytes(UTF_8);
int offset = 0;
for (int i = 0; i < 5; i++) {
int tempOffset = offset;

@ -126,12 +129,12 @@ public class TestChunkStreams {
new ByteArrayInputStream(buf, tempOffset, 100);

@Override
public void seek(long pos) throws IOException {
public synchronized void seek(long pos) throws IOException {
throw new UnsupportedOperationException();
}

@Override
public long getPos() throws IOException {
public synchronized long getPos() throws IOException {
return pos;
}

@ -142,12 +145,13 @@ public class TestChunkStreams {
}

@Override
public int read() throws IOException {
public synchronized int read() throws IOException {
return in.read();
}

@Override
public int read(byte[] b, int off, int len) throws IOException {
public synchronized int read(byte[] b, int off, int len)
throws IOException {
int readLen = in.read(b, off, len);
pos += readLen;
return readLen;

@ -162,7 +166,7 @@ public class TestChunkStreams {
int len = groupInputStream.read(resBuf, 0, 500);

assertEquals(500, len);
assertEquals(dataString, new String(resBuf));
assertEquals(dataString, new String(resBuf, UTF_8));
}
}

@ -172,7 +176,7 @@ public class TestChunkStreams {
ArrayList<ChunkInputStream> inputStreams = new ArrayList<>();

String dataString = RandomStringUtils.randomAscii(500);
byte[] buf = dataString.getBytes();
byte[] buf = dataString.getBytes(UTF_8);
int offset = 0;
for (int i = 0; i < 5; i++) {
int tempOffset = offset;

@ -183,28 +187,29 @@ public class TestChunkStreams {
new ByteArrayInputStream(buf, tempOffset, 100);

@Override
public void seek(long pos) throws IOException {
public synchronized void seek(long pos) throws IOException {
throw new UnsupportedOperationException();
}

@Override
public long getPos() throws IOException {
public synchronized long getPos() throws IOException {
return pos;
}

@Override
public boolean seekToNewSource(long targetPos)
public synchronized boolean seekToNewSource(long targetPos)
throws IOException {
throw new UnsupportedOperationException();
}

@Override
public int read() throws IOException {
public synchronized int read() throws IOException {
return in.read();
}

@Override
public int read(byte[] b, int off, int len) throws IOException {
public synchronized int read(byte[] b, int off, int len)
throws IOException {
int readLen = in.read(b, off, len);
pos += readLen;
return readLen;

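The synchronized keyword added to these anonymous ChunkInputStream overrides addresses ErrorProne's UnsynchronizedOverridesSynchronized check: if the superclass method is synchronized, an override without the modifier silently drops the lock that callers may rely on. A sketch of the hazard, with hypothetical classes:

    public final class SyncOverrideSketch {
      static class Base {
        private int count;
        // Callers may rely on the monitor being held during the update.
        synchronized void bump() {
          count++;
        }
      }

      static class DropsLock extends Base {
        // Legal Java, but the base class's locking promise is gone;
        // concurrent callers of bump() can now interleave freely.
        @Override
        void bump() {
        }
      }

      static class KeepsLock extends Base {
        // Restating the modifier preserves the thread-safety contract,
        // which is what the commit does for the test streams above.
        @Override
        synchronized void bump() {
          super.bump();
        }
      }

      public static void main(String[] args) {
        new KeepsLock().bump();
      }
    }
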
@ -222,14 +227,14 @@ public class TestChunkStreams {
assertEquals(60, groupInputStream.getRemainingOfIndex(3));
assertEquals(340, len);
assertEquals(dataString.substring(0, 340),
new String(resBuf).substring(0, 340));
new String(resBuf, UTF_8).substring(0, 340));

// read following 300 bytes, but only 200 left
len = groupInputStream.read(resBuf, 340, 260);
assertEquals(4, groupInputStream.getCurrentStreamIndex());
assertEquals(0, groupInputStream.getRemainingOfIndex(4));
assertEquals(160, len);
assertEquals(dataString, new String(resBuf).substring(0, 500));
assertEquals(dataString, new String(resBuf, UTF_8).substring(0, 500));

// further read should get EOF
len = groupInputStream.read(resBuf, 0, 1);

@ -18,7 +18,6 @@

package org.apache.hadoop.ozone.om;

import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

@ -49,6 +48,8 @@ import org.rocksdb.RocksDB;
import org.rocksdb.Statistics;
import org.rocksdb.StatsLevel;

import static java.nio.charset.StandardCharsets.UTF_8;

/**
* Test class for @{@link KeyManagerImpl}.
* */

@ -93,11 +94,11 @@ public class TestKeyManagerImpl {
Mockito.when(metadataManager.getLock())
.thenReturn(new OzoneManagerLock(conf));
Mockito.when(metadataManager.getVolumeKey(VOLUME_NAME))
.thenReturn(VOLUME_NAME.getBytes());
.thenReturn(VOLUME_NAME.getBytes(UTF_8));
Mockito.when(metadataManager.getBucketKey(VOLUME_NAME, BUCKET_NAME))
.thenReturn(BUCKET_NAME.getBytes());
.thenReturn(BUCKET_NAME.getBytes(UTF_8));
Mockito.when(metadataManager.getOpenKeyBytes(VOLUME_NAME, BUCKET_NAME,
KEY_NAME, 1)).thenReturn(KEY_NAME.getBytes());
KEY_NAME, 1)).thenReturn(KEY_NAME.getBytes(UTF_8));
}

private void setupRocksDb() throws Exception {

@ -129,11 +130,11 @@ public class TestKeyManagerImpl {

rdbStore = new RDBStore(folder.newFolder(), options, configSet);
rdbTable = rdbStore.getTable("testTable");
rdbTable.put(VOLUME_NAME.getBytes(),
RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8));
rdbTable.put(BUCKET_NAME.getBytes(),
RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8));
rdbTable.put(KEY_NAME.getBytes(), keyData.toByteArray());
rdbTable.put(VOLUME_NAME.getBytes(UTF_8),
RandomStringUtils.random(10).getBytes(UTF_8));
rdbTable.put(BUCKET_NAME.getBytes(UTF_8),
RandomStringUtils.random(10).getBytes(UTF_8));
rdbTable.put(KEY_NAME.getBytes(UTF_8), keyData.toByteArray());
}

@Test

@ -252,6 +252,7 @@ public class OzoneFileSystem extends FileSystem {
LOG.trace("rename from:{} to:{}", srcKey, dstKey);
}

@Override
boolean processKey(String key) throws IOException {
String newKeyName = dstKey.concat(key.substring(srcKey.length()));
bucket.renameKey(key, newKeyName);

@ -370,6 +371,7 @@ public class OzoneFileSystem extends FileSystem {
}
}

@Override
boolean processKey(String key) throws IOException {
if (key.equals("")) {
LOG.trace("Skipping deleting root directory");

@ -496,6 +498,7 @@ public class OzoneFileSystem extends FileSystem {
* @return always returns true
* @throws IOException
*/
@Override
boolean processKey(String key) throws IOException {
Path keyPath = new Path(OZONE_URI_DELIMITER + key);
if (key.equals(getPathKey())) {

@ -51,6 +51,7 @@ import org.apache.hadoop.ozone.web.utils.OzoneUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Time;

import static java.nio.charset.StandardCharsets.UTF_8;
import static org.apache.hadoop.fs.ozone.Constants.OZONE_DEFAULT_USER;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

@ -176,7 +177,7 @@ public class TestOzoneFileInterfaces {
byte[] buffer = new byte[stringLen];
// This read will not change the offset inside the file
int readBytes = inputStream.read(0, buffer, 0, buffer.length);
String out = new String(buffer, 0, buffer.length);
String out = new String(buffer, 0, buffer.length, UTF_8);
assertEquals(data, out);
assertEquals(readBytes, buffer.length);
assertEquals(0, inputStream.getPos());

@ -0,0 +1,22 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Ozone FS Contract tests.
*/
package org.apache.hadoop.fs.ozone;

@ -49,6 +49,7 @@ public class AuthorizationHeaderV2 {
*
* @throws OS3Exception
*/
@SuppressWarnings("StringSplitter")
public void parseHeader() throws OS3Exception {
String[] split = authHeader.split(" ");
if (split.length != 2) {

@ -60,6 +60,7 @@ public class AuthorizationHeaderV4 {
* Signature=db81b057718d7c1b3b8dffa29933099551c51d787b3b13b9e0f9ebed45982bf2
* @throws OS3Exception
*/
@SuppressWarnings("StringSplitter")
public void parseAuthHeader() throws OS3Exception {
String[] split = authHeader.split(" ");

@ -54,6 +54,7 @@ public class Credential {
*
* @throws OS3Exception
*/
@SuppressWarnings("StringSplitter")
public void parseCredential() throws OS3Exception {
String[] split = credential.split("/");
if (split.length == 5) {

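The parse methods above get @SuppressWarnings("StringSplitter") rather than a rewrite. ErrorProne warns on String.split because its argument is a regular expression and because trailing empty strings are silently dropped; here the delimiters are literal and the length checks guard the result, so suppression is the pragmatic fix. The two surprises in isolation:

    public final class SplitSketch {
      public static void main(String[] args) {
        // Surprise 1: the argument is a regex. An unescaped "." matches
        // every character, and the all-empty result collapses to nothing.
        System.out.println("a.b.c".split(".").length);   // 0
        System.out.println("a.b.c".split("\\.").length); // 3

        // Surprise 2: trailing empty strings are removed by default.
        System.out.println("a//".split("/").length);     // 1, not 3
        // A negative limit keeps them.
        System.out.println("a//".split("/", -1).length); // 3
      }
    }
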
@ -209,6 +209,7 @@ public class ObjectStoreStub extends ObjectStore {
}

@Override
@SuppressWarnings("StringSplitter")
public String getOzoneVolumeName(String s3BucketName) throws IOException {
if (bucketVolumeMap.get(s3BucketName) == null) {
throw new IOException("S3_BUCKET_NOT_FOUND");

@ -217,6 +218,7 @@ public class ObjectStoreStub extends ObjectStore {
}

@Override
@SuppressWarnings("StringSplitter")
public String getOzoneBucketName(String s3BucketName) throws IOException {
if (bucketVolumeMap.get(s3BucketName) == null) {
throw new IOException("S3_BUCKET_NOT_FOUND");

@ -63,7 +63,7 @@ public class TestBucketDelete {
@Test
public void testBucketEndpoint() throws Exception {
Response response = bucketEndpoint.delete(bucketName);
assertEquals(response.getStatus(), HttpStatus.SC_NO_CONTENT);
assertEquals(HttpStatus.SC_NO_CONTENT, response.getStatus());

}

@ -23,6 +23,8 @@ import java.io.ByteArrayInputStream;
import java.io.IOException;

import org.junit.Assert;

import static java.nio.charset.StandardCharsets.UTF_8;
import static org.junit.Assert.*;
import org.junit.Test;

@ -40,7 +42,7 @@ public class TestMultiDeleteRequestUnmarshaller {
+ ".com/doc/2006-03-01/\"><Object>key1</Object><Object>key2"
+ "</Object><Object>key3"
+ "</Object></Delete>")
.getBytes());
.getBytes(UTF_8));

//WHEN
MultiDeleteRequest multiDeleteRequest =

@ -58,7 +60,7 @@ public class TestMultiDeleteRequestUnmarshaller {
("<Delete><Object>key1</Object><Object>key2"
+ "</Object><Object>key3"
+ "</Object></Delete>")
.getBytes());
.getBytes(UTF_8));

//WHEN
MultiDeleteRequest multiDeleteRequest =

@ -37,6 +37,8 @@ import org.junit.Assert;
import org.junit.Test;
import org.mockito.Mockito;

import static java.nio.charset.StandardCharsets.UTF_8;

/**
* Test get object.
*/

@ -54,15 +56,16 @@ public class TestObjectGet {
OzoneBucket bucket =
volume.getBucket("b1");
OzoneOutputStream keyStream =
bucket.createKey("key1", CONTENT.getBytes().length);
keyStream.write(CONTENT.getBytes());
bucket.createKey("key1", CONTENT.getBytes(UTF_8).length);
keyStream.write(CONTENT.getBytes(UTF_8));
keyStream.close();

ObjectEndpoint rest = new ObjectEndpoint();
rest.setClient(client);
HttpHeaders headers = Mockito.mock(HttpHeaders.class);
rest.setHeaders(headers);
ByteArrayInputStream body = new ByteArrayInputStream(CONTENT.getBytes());
ByteArrayInputStream body =
new ByteArrayInputStream(CONTENT.getBytes(UTF_8));

//WHEN
rest.get("b1", "key1", body);

@ -32,6 +32,7 @@ import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
import org.apache.hadoop.ozone.s3.exception.OS3Exception;

import static java.net.HttpURLConnection.HTTP_NOT_FOUND;
import static java.nio.charset.StandardCharsets.UTF_8;
import org.apache.commons.lang3.RandomStringUtils;
import org.junit.Assert;
import org.junit.Before;

@ -69,9 +70,9 @@ public class TestObjectHead {
//GIVEN
String value = RandomStringUtils.randomAlphanumeric(32);
OzoneOutputStream out = bucket.createKey("key1",
value.getBytes().length, ReplicationType.STAND_ALONE,
value.getBytes(UTF_8).length, ReplicationType.STAND_ALONE,
ReplicationFactor.ONE);
out.write(value.getBytes());
out.write(value.getBytes(UTF_8));
out.close();

//WHEN

@ -79,7 +80,7 @@ public class TestObjectHead {

//THEN
Assert.assertEquals(200, response.getStatus());
Assert.assertEquals(value.getBytes().length,
Assert.assertEquals(value.getBytes(UTF_8).length,
Long.parseLong(response.getHeaderString("Content-Length")));

DateTimeFormatter.RFC_1123_DATE_TIME

@ -91,7 +92,8 @@ public class TestObjectHead {
public void testHeadFailByBadName() throws Exception {
//Head an object that doesn't exist.
try {
keyEndpoint.head(bucketName, "badKeyName");
Response response = keyEndpoint.head(bucketName, "badKeyName");
Assert.assertEquals(404, response.getStatus());
} catch (OS3Exception ex) {
Assert.assertTrue(ex.getCode().contains("NoSuchObject"));
Assert.assertTrue(ex.getErrorMessage().contains("object does not exist"));

@ -286,7 +286,7 @@ public final class RandomKeyGenerator implements Callable<Void> {
long maxValue;

currentValue = () -> numberOfKeysAdded.get();
maxValue = numOfVolumes *
maxValue = (long) numOfVolumes *
numOfBuckets *
numOfKeys;

@ -88,8 +88,8 @@ public class TestFreonWithDatanodeFastRestart {
String expectedSnapFile =
storage.getSnapshotFile(termIndexBeforeRestart.getTerm(),
termIndexBeforeRestart.getIndex()).getAbsolutePath();
Assert.assertEquals(snapshotInfo.getFile().getPath().toString(),
expectedSnapFile);
Assert.assertEquals(expectedSnapFile,
snapshotInfo.getFile().getPath().toString());
Assert.assertEquals(termInSnapshot, termIndexBeforeRestart);

// After restart the term index might have progressed to apply pending

@ -0,0 +1,22 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Freon Ozone Load Generator.
*/
package org.apache.hadoop.ozone.freon;

@ -45,10 +45,10 @@ import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.UUID;

@ -182,7 +182,7 @@ public class TestOmSQLCli {
String sql = "SELECT * FROM volumeList";
ResultSet rs = executeQuery(conn, sql);
List<String> expectedValues =
new LinkedList<>(Arrays.asList(volumeName0, volumeName1));
new ArrayList<>(Arrays.asList(volumeName0, volumeName1));
while (rs.next()) {
String userNameRs = rs.getString("userName");
String volumeNameRs = rs.getString("volumeName");

@ -194,7 +194,7 @@ public class TestOmSQLCli {
sql = "SELECT * FROM volumeInfo";
rs = executeQuery(conn, sql);
expectedValues =
new LinkedList<>(Arrays.asList(volumeName0, volumeName1));
new ArrayList<>(Arrays.asList(volumeName0, volumeName1));
while (rs.next()) {
String adName = rs.getString("adminName");
String ownerName = rs.getString("ownerName");

@ -208,7 +208,7 @@ public class TestOmSQLCli {
sql = "SELECT * FROM aclInfo";
rs = executeQuery(conn, sql);
expectedValues =
new LinkedList<>(Arrays.asList(volumeName0, volumeName1));
new ArrayList<>(Arrays.asList(volumeName0, volumeName1));
while (rs.next()) {
String adName = rs.getString("adminName");
String ownerName = rs.getString("ownerName");

@ -0,0 +1,22 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* OM to SQL Converter. Currently broken.
*/
package org.apache.hadoop.ozone.om;