HDFS-16075. Use empty array constants present in StorageType and DatanodeInfo to avoid creating redundant objects (#3115)
Reviewed-by: Hui Fei <ferhui@apache.org>
parent bdc9c8809e
commit c488abbc79
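The change itself is mechanical: every "new DatanodeInfo[0]" and "new StorageType[0]" allocation is replaced with the shared constants DatanodeInfo.EMPTY_ARRAY and StorageType.EMPTY_ARRAY that those classes already expose, so call sites stop creating redundant zero-length arrays. Below is a minimal, self-contained sketch of the pattern (illustration only, not part of the patch; the Peer class and EmptyArrayDemo names are made up to stand in for the Hadoop types):

    // Sketch of the empty-array-constant pattern applied by this commit.
    // "Peer" plays the role of DatanodeInfo / StorageType; nothing here is Hadoop API.
    import java.util.ArrayList;
    import java.util.List;

    public class EmptyArrayDemo {

      static final class Peer {
        // Shared immutable constant, analogous to DatanodeInfo.EMPTY_ARRAY
        // and StorageType.EMPTY_ARRAY used in the hunks below.
        static final Peer[] EMPTY_ARRAY = {};
      }

      public static void main(String[] args) {
        // Before: a fresh zero-length array is allocated at every call site.
        Peer[] before = new Peer[0];

        // After: the shared constant is reused; an empty array carries no
        // state, so sharing one instance is safe and avoids redundant objects.
        Peer[] after = Peer.EMPTY_ARRAY;

        // The same constant also serves as the sizing argument of
        // Collection.toArray, as in the TestDatanodeManager hunks.
        List<Peer> peers = new ArrayList<>();
        Peer[] asArray = peers.toArray(Peer.EMPTY_ARRAY);

        System.out.println(before.length + " " + after.length + " " + asArray.length);
      }
    }

Sharing one empty array is safe because it has no elements to mutate, and passing an empty array to Collection.toArray is the commonly recommended form of that call on modern JVMs.
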
@@ -74,7 +74,7 @@ public class BlockLocation implements Serializable {
 
   private static final String[] EMPTY_STR_ARRAY = new String[0];
   private static final StorageType[] EMPTY_STORAGE_TYPE_ARRAY =
-      new StorageType[0];
+      StorageType.EMPTY_ARRAY;
 
   /**
    * Default Constructor.
@@ -27,7 +27,7 @@ public class TestBlockLocation {
 
   private static final String[] EMPTY_STR_ARRAY = new String[0];
   private static final StorageType[] EMPTY_STORAGE_TYPE_ARRAY =
-      new StorageType[0];
+      StorageType.EMPTY_ARRAY;
 
   private static void checkBlockLocation(final BlockLocation loc)
       throws Exception {
@@ -1683,7 +1683,7 @@ class DataStreamer extends Daemon {
 
   DatanodeInfo[] getExcludedNodes() {
     return excludedNodes.getAllPresent(excludedNodes.asMap().keySet())
-        .keySet().toArray(new DatanodeInfo[0]);
+        .keySet().toArray(DatanodeInfo.EMPTY_ARRAY);
   }
 
   /**
@@ -131,7 +131,7 @@ public class LocatedBlocks {
   public int findBlock(long offset) {
     // create fake block of size 0 as a key
     LocatedBlock key = new LocatedBlock(
-        new ExtendedBlock(), new DatanodeInfo[0]);
+        new ExtendedBlock(), DatanodeInfo.EMPTY_ARRAY);
     key.setStartOffset(offset);
     key.getBlock().setNumBytes(1);
     Comparator<LocatedBlock> comp =
@@ -535,7 +535,7 @@ public class MockNamenode {
    */
  private static LocatedBlock getMockLocatedBlock(final String nsId) {
    LocatedBlock lb = mock(LocatedBlock.class);
-    when(lb.getCachedLocations()).thenReturn(new DatanodeInfo[0]);
+    when(lb.getCachedLocations()).thenReturn(DatanodeInfo.EMPTY_ARRAY);
    DatanodeID nodeId = new DatanodeID("localhost", "localhost", "dn0",
        1111, 1112, 1113, 1114);
    DatanodeInfo dnInfo = new DatanodeDescriptor(nodeId);
@@ -1241,7 +1241,7 @@ public class TestRouterRpc {
        newRouterFile, clientName, null, null,
        status.getFileId(), null, null);
 
-    DatanodeInfo[] exclusions = new DatanodeInfo[0];
+    DatanodeInfo[] exclusions = DatanodeInfo.EMPTY_ARRAY;
    LocatedBlock newBlock = routerProtocol.getAdditionalDatanode(
        newRouterFile, status.getFileId(), block.getBlock(),
        block.getLocations(), block.getStorageIDs(), exclusions, 1, clientName);
@@ -314,7 +314,7 @@ public class TestRouterRpcMultiDestination extends TestRouterRpc {
    assertEquals(1, proxyNumAddBlock2 - proxyNumAddBlock);
 
    // Get additionalDatanode via router and block is not null.
-    DatanodeInfo[] exclusions = new DatanodeInfo[0];
+    DatanodeInfo[] exclusions = DatanodeInfo.EMPTY_ARRAY;
    LocatedBlock newBlock = clientProtocol.getAdditionalDatanode(
        testPath, status.getFileId(), blockTwo.getBlock(),
        blockTwo.getLocations(), blockTwo.getStorageIDs(), exclusions,
@@ -209,7 +209,7 @@ final class FSDirAppendOp {
      BlockInfo lastBlock = file.getLastBlock();
      if (lastBlock != null) {
        ExtendedBlock blk = new ExtendedBlock(fsn.getBlockPoolId(), lastBlock);
-        ret = new LocatedBlock(blk, new DatanodeInfo[0]);
+        ret = new LocatedBlock(blk, DatanodeInfo.EMPTY_ARRAY);
      }
    }
 
@@ -883,7 +883,7 @@ public class TestDFSClientRetries {
    DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
 
    ExtendedBlock b = new ExtendedBlock("fake-pool", new Block(12345L));
-    LocatedBlock fakeBlock = new LocatedBlock(b, new DatanodeInfo[0]);
+    LocatedBlock fakeBlock = new LocatedBlock(b, DatanodeInfo.EMPTY_ARRAY);
 
    ClientDatanodeProtocol proxy = null;
 
@@ -242,7 +242,7 @@ public class TestDFSInputStream {
      DFSInputStream dfsInputStream =
          (DFSInputStream) fs.open(filePath).getWrappedStream();
      LocatedBlock lb = mock(LocatedBlock.class);
-      when(lb.getCachedLocations()).thenReturn(new DatanodeInfo[0]);
+      when(lb.getCachedLocations()).thenReturn(DatanodeInfo.EMPTY_ARRAY);
      DatanodeID nodeId = new DatanodeID("localhost", "localhost", "dn0", 1111,
          1112, 1113, 1114);
      DatanodeInfo dnInfo = new DatanodeDescriptor(nodeId);
@@ -271,7 +271,7 @@ public class TestDFSInputStream {
      DFSInputStream dfsInputStream =
          (DFSInputStream) fs.open(filePath).getWrappedStream();
      LocatedBlock lb = mock(LocatedBlock.class);
-      when(lb.getCachedLocations()).thenReturn(new DatanodeInfo[0]);
+      when(lb.getCachedLocations()).thenReturn(DatanodeInfo.EMPTY_ARRAY);
      DatanodeID nodeId = new DatanodeID("localhost", "localhost", "dn0", 1111,
          1112, 1113, 1114);
      DatanodeInfo dnInfo = new DatanodeDescriptor(nodeId);
@@ -461,8 +461,8 @@ public class TestLeaseRecovery {
 
      // Add a block to the file
      ExtendedBlock block = client.getNamenode().addBlock(
-          file, client.clientName, null, new DatanodeInfo[0], stat.getFileId(),
-          new String[0], null).getBlock();
+          file, client.clientName, null, DatanodeInfo.EMPTY_ARRAY,
+          stat.getFileId(), new String[0], null).getBlock();
 
      // update the pipeline to get a new genstamp.
      ExtendedBlock updatedBlock = client.getNamenode()
@@ -578,7 +578,7 @@ public class TestLeaseRecovery {
    // Add a block to the file
    LocatedBlock blk = client.getNamenode()
        .addBlock(file, client.clientName, null,
-            new DatanodeInfo[0], stat.getFileId(), new String[0], null);
+            DatanodeInfo.EMPTY_ARRAY, stat.getFileId(), new String[0], null);
    ExtendedBlock finalBlock = blk.getBlock();
    if (bytesToWrite != null) {
      // Here we create a output stream and then abort it so the block gets
@@ -65,7 +65,7 @@ public class TestReplaceDatanodeOnFailure {
 
    final DatanodeInfo[] infos = new DatanodeInfo[5];
    final DatanodeInfo[][] datanodes = new DatanodeInfo[infos.length + 1][];
-    datanodes[0] = new DatanodeInfo[0];
+    datanodes[0] = DatanodeInfo.EMPTY_ARRAY;
    for(int i = 0; i < infos.length; ) {
      infos[i] = DFSTestUtil.getLocalDatanodeInfo(9867 + i);
      i++;
@@ -31,7 +31,7 @@ public class TestLocatedBlock {
 
  @Test(timeout = 10000)
  public void testAddCachedLocWhenEmpty() {
-    DatanodeInfo[] ds = new DatanodeInfo[0];
+    DatanodeInfo[] ds = DatanodeInfo.EMPTY_ARRAY;
    ExtendedBlock b1 = new ExtendedBlock("bpid", 1, 1, 1);
    LocatedBlock l1 = new LocatedBlock(b1, ds);
    DatanodeDescriptor dn = new DatanodeDescriptor(
@@ -391,7 +391,7 @@ public class TestBlockToken {
    DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
 
    ExtendedBlock b = new ExtendedBlock("fake-pool", new Block(12345L));
-    LocatedBlock fakeBlock = new LocatedBlock(b, new DatanodeInfo[0]);
+    LocatedBlock fakeBlock = new LocatedBlock(b, DatanodeInfo.EMPTY_ARRAY);
    fakeBlock.setBlockToken(token);
 
    // Create another RPC proxy with the same configuration - this will never
@@ -392,7 +392,8 @@ public class TestDatanodeManager {
      storageTypesList.add(StorageType.PROVIDED);
    }
 
-    StorageType[] storageTypes= storageTypesList.toArray(new StorageType[0]);
+    StorageType[] storageTypes = storageTypesList.toArray(
+        StorageType.EMPTY_ARRAY);
 
    for (int i = 0; i < totalDNs; i++) {
      // register new datanode
@@ -694,7 +695,8 @@ public class TestDatanodeManager {
    List<StorageType> storageTypesList =
        new ArrayList<>(Arrays.asList(StorageType.ARCHIVE, StorageType.DISK,
            StorageType.SSD, StorageType.DEFAULT, StorageType.SSD));
-    StorageType[] storageTypes = storageTypesList.toArray(new StorageType[0]);
+    StorageType[] storageTypes = storageTypesList.toArray(
+        StorageType.EMPTY_ARRAY);
 
    for (int i = 0; i < totalDNs; i++) {
      // Register new datanode.
@@ -779,7 +781,8 @@ public class TestDatanodeManager {
    List<StorageType> storageTypesList =
        new ArrayList<>(Arrays.asList(StorageType.DISK, StorageType.DISK,
            StorageType.DEFAULT, StorageType.SSD, StorageType.SSD));
-    StorageType[] storageTypes = storageTypesList.toArray(new StorageType[0]);
+    StorageType[] storageTypes = storageTypesList.toArray(
+        StorageType.EMPTY_ARRAY);
 
    for (int i = 0; i < totalDNs; i++) {
      // Register new datanode.
@@ -127,8 +127,8 @@ public class TestDataXceiverLazyPersistHint {
        StorageType.RAM_DISK,
        null,
        "Dummy-Client",
-        new DatanodeInfo[0],
-        new StorageType[0],
+        DatanodeInfo.EMPTY_ARRAY,
+        StorageType.EMPTY_ARRAY,
        mock(DatanodeInfo.class),
        BlockConstructionStage.PIPELINE_SETUP_CREATE,
        0, 0, 0, 0,
@@ -165,7 +165,7 @@ public class TestDiskError {
        DataChecksum.Type.CRC32, 512);
    new Sender(out).writeBlock(block.getBlock(), StorageType.DEFAULT,
        BlockTokenSecretManager.DUMMY_TOKEN, "",
-        new DatanodeInfo[0], new StorageType[0], null,
+        DatanodeInfo.EMPTY_ARRAY, StorageType.EMPTY_ARRAY, null,
        BlockConstructionStage.PIPELINE_SETUP_CREATE, 1, 0L, 0L, 0L,
        checksum, CachingStrategy.newDefaultStrategy(), false, false,
        null, null, new String[0]);
@@ -119,7 +119,7 @@ public class TestBlockPlacementPolicyRackFaultTolerant {
      LocatedBlock additionalLocatedBlock =
          nameNodeRpc.getAdditionalDatanode(src, fileStatus.getFileId(),
              locatedBlock.getBlock(), locatedBlock.getLocations(),
-              locatedBlock.getStorageIDs(), new DatanodeInfo[0],
+              locatedBlock.getStorageIDs(), DatanodeInfo.EMPTY_ARRAY,
              additionalReplication, clientMachine);
      doTestLocatedBlock(replication + additionalReplication, additionalLocatedBlock);
    }
@@ -159,7 +159,7 @@ public class TestBlockPlacementPolicyRackFaultTolerant {
      LocatedBlock additionalLocatedBlock =
          nameNodeRpc.getAdditionalDatanode(src, fileStatus.getFileId(),
              locatedBlock.getBlock(), partLocs,
-              partStorageIDs, new DatanodeInfo[0],
+              partStorageIDs, DatanodeInfo.EMPTY_ARRAY,
              j, clientMachine);
      doTestLocatedBlock(i + j, additionalLocatedBlock);
    }
@@ -859,7 +859,7 @@ public class TestHASafeMode {
        pathString,
        client.getClientName(),
        new ExtendedBlock(previousBlock),
-        new DatanodeInfo[0],
+        DatanodeInfo.EMPTY_ARRAY,
        DFSClientAdapter.getFileId((DFSOutputStream) create
            .getWrappedStream()), null, null);
    cluster.restartNameNode(0, true);