HDFS-16075. Use empty array constants present in StorageType and DatanodeInfo to avoid creating redundant objects (#3115)

Reviewed-by: Hui Fei <ferhui@apache.org>
Authored by Viraj Jasani on 2021-06-21 06:55:12 +05:30, committed by GitHub
parent bdc9c8809e
commit c488abbc79
19 changed files with 29 additions and 26 deletions
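
Every occurrence of new DatanodeInfo[0] or new StorageType[0] below allocates a fresh zero-length array even though empty arrays are immutable and can be shared. The commit switches those call sites to the shared constants DatanodeInfo.EMPTY_ARRAY and StorageType.EMPTY_ARRAY. A minimal, self-contained sketch of the idea follows; the EmptyArrayConstantSketch and Node names are made up for illustration and are not part of the Hadoop sources.

import java.util.Arrays;
import java.util.List;

/**
 * Minimal sketch of the empty-array-constant pattern applied by this commit.
 * The class and field names here are illustrative only; the real constants
 * live in StorageType and DatanodeInfo.
 */
public class EmptyArrayConstantSketch {

  static final class Node {
    // One shared zero-length array. Empty arrays carry no state, so a single
    // instance can safely be handed to every caller instead of allocating a
    // fresh "new Node[0]" at each call site.
    static final Node[] EMPTY_ARRAY = new Node[0];
  }

  public static void main(String[] args) {
    // Before: each occurrence creates a new, redundant zero-length array.
    Node[] before = new Node[0];

    // After: all call sites reuse the same immutable constant.
    Node[] after = Node.EMPTY_ARRAY;

    // The constant also works as the type argument for Collection.toArray:
    // for a non-empty collection, toArray allocates a correctly sized array
    // and never writes into the shared empty one.
    List<Node> nodes = Arrays.asList(new Node(), new Node());
    Node[] asArray = nodes.toArray(Node.EMPTY_ARRAY);

    System.out.println(before.length + " " + after.length + " " + asArray.length);
  }
}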


@@ -74,7 +74,7 @@ public class BlockLocation implements Serializable {
   private static final String[] EMPTY_STR_ARRAY = new String[0];
   private static final StorageType[] EMPTY_STORAGE_TYPE_ARRAY =
-      new StorageType[0];
+      StorageType.EMPTY_ARRAY;
   /**
    * Default Constructor.


@@ -27,7 +27,7 @@ public class TestBlockLocation {
   private static final String[] EMPTY_STR_ARRAY = new String[0];
   private static final StorageType[] EMPTY_STORAGE_TYPE_ARRAY =
-      new StorageType[0];
+      StorageType.EMPTY_ARRAY;
   private static void checkBlockLocation(final BlockLocation loc)
       throws Exception {


@@ -1683,7 +1683,7 @@ class DataStreamer extends Daemon {
   DatanodeInfo[] getExcludedNodes() {
     return excludedNodes.getAllPresent(excludedNodes.asMap().keySet())
-        .keySet().toArray(new DatanodeInfo[0]);
+        .keySet().toArray(DatanodeInfo.EMPTY_ARRAY);
   }
   /**


@@ -131,7 +131,7 @@ public class LocatedBlocks {
   public int findBlock(long offset) {
     // create fake block of size 0 as a key
     LocatedBlock key = new LocatedBlock(
-        new ExtendedBlock(), new DatanodeInfo[0]);
+        new ExtendedBlock(), DatanodeInfo.EMPTY_ARRAY);
     key.setStartOffset(offset);
     key.getBlock().setNumBytes(1);
     Comparator<LocatedBlock> comp =


@@ -535,7 +535,7 @@ public class MockNamenode {
    */
   private static LocatedBlock getMockLocatedBlock(final String nsId) {
     LocatedBlock lb = mock(LocatedBlock.class);
-    when(lb.getCachedLocations()).thenReturn(new DatanodeInfo[0]);
+    when(lb.getCachedLocations()).thenReturn(DatanodeInfo.EMPTY_ARRAY);
     DatanodeID nodeId = new DatanodeID("localhost", "localhost", "dn0",
         1111, 1112, 1113, 1114);
     DatanodeInfo dnInfo = new DatanodeDescriptor(nodeId);


@@ -1241,7 +1241,7 @@ public class TestRouterRpc {
         newRouterFile, clientName, null, null,
         status.getFileId(), null, null);
-    DatanodeInfo[] exclusions = new DatanodeInfo[0];
+    DatanodeInfo[] exclusions = DatanodeInfo.EMPTY_ARRAY;
     LocatedBlock newBlock = routerProtocol.getAdditionalDatanode(
         newRouterFile, status.getFileId(), block.getBlock(),
         block.getLocations(), block.getStorageIDs(), exclusions, 1, clientName);


@@ -314,7 +314,7 @@ public class TestRouterRpcMultiDestination extends TestRouterRpc {
     assertEquals(1, proxyNumAddBlock2 - proxyNumAddBlock);
     // Get additionalDatanode via router and block is not null.
-    DatanodeInfo[] exclusions = new DatanodeInfo[0];
+    DatanodeInfo[] exclusions = DatanodeInfo.EMPTY_ARRAY;
     LocatedBlock newBlock = clientProtocol.getAdditionalDatanode(
         testPath, status.getFileId(), blockTwo.getBlock(),
         blockTwo.getLocations(), blockTwo.getStorageIDs(), exclusions,


@@ -209,7 +209,7 @@ final class FSDirAppendOp {
       BlockInfo lastBlock = file.getLastBlock();
       if (lastBlock != null) {
         ExtendedBlock blk = new ExtendedBlock(fsn.getBlockPoolId(), lastBlock);
-        ret = new LocatedBlock(blk, new DatanodeInfo[0]);
+        ret = new LocatedBlock(blk, DatanodeInfo.EMPTY_ARRAY);
       }
     }


@@ -883,7 +883,7 @@ public class TestDFSClientRetries {
     DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
     ExtendedBlock b = new ExtendedBlock("fake-pool", new Block(12345L));
-    LocatedBlock fakeBlock = new LocatedBlock(b, new DatanodeInfo[0]);
+    LocatedBlock fakeBlock = new LocatedBlock(b, DatanodeInfo.EMPTY_ARRAY);
     ClientDatanodeProtocol proxy = null;


@@ -242,7 +242,7 @@ public class TestDFSInputStream {
     DFSInputStream dfsInputStream =
         (DFSInputStream) fs.open(filePath).getWrappedStream();
     LocatedBlock lb = mock(LocatedBlock.class);
-    when(lb.getCachedLocations()).thenReturn(new DatanodeInfo[0]);
+    when(lb.getCachedLocations()).thenReturn(DatanodeInfo.EMPTY_ARRAY);
     DatanodeID nodeId = new DatanodeID("localhost", "localhost", "dn0", 1111,
         1112, 1113, 1114);
     DatanodeInfo dnInfo = new DatanodeDescriptor(nodeId);
@@ -271,7 +271,7 @@ public class TestDFSInputStream {
     DFSInputStream dfsInputStream =
         (DFSInputStream) fs.open(filePath).getWrappedStream();
     LocatedBlock lb = mock(LocatedBlock.class);
-    when(lb.getCachedLocations()).thenReturn(new DatanodeInfo[0]);
+    when(lb.getCachedLocations()).thenReturn(DatanodeInfo.EMPTY_ARRAY);
     DatanodeID nodeId = new DatanodeID("localhost", "localhost", "dn0", 1111,
         1112, 1113, 1114);
     DatanodeInfo dnInfo = new DatanodeDescriptor(nodeId);


@@ -461,8 +461,8 @@ public class TestLeaseRecovery {
     // Add a block to the file
     ExtendedBlock block = client.getNamenode().addBlock(
-        file, client.clientName, null, new DatanodeInfo[0], stat.getFileId(),
-        new String[0], null).getBlock();
+        file, client.clientName, null, DatanodeInfo.EMPTY_ARRAY,
+        stat.getFileId(), new String[0], null).getBlock();
     // update the pipeline to get a new genstamp.
     ExtendedBlock updatedBlock = client.getNamenode()
@@ -578,7 +578,7 @@ public class TestLeaseRecovery {
     // Add a block to the file
     LocatedBlock blk = client.getNamenode()
         .addBlock(file, client.clientName, null,
-            new DatanodeInfo[0], stat.getFileId(), new String[0], null);
+            DatanodeInfo.EMPTY_ARRAY, stat.getFileId(), new String[0], null);
     ExtendedBlock finalBlock = blk.getBlock();
     if (bytesToWrite != null) {
       // Here we create a output stream and then abort it so the block gets


@@ -65,7 +65,7 @@ public class TestReplaceDatanodeOnFailure {
     final DatanodeInfo[] infos = new DatanodeInfo[5];
     final DatanodeInfo[][] datanodes = new DatanodeInfo[infos.length + 1][];
-    datanodes[0] = new DatanodeInfo[0];
+    datanodes[0] = DatanodeInfo.EMPTY_ARRAY;
     for(int i = 0; i < infos.length; ) {
       infos[i] = DFSTestUtil.getLocalDatanodeInfo(9867 + i);
       i++;


@@ -31,7 +31,7 @@ public class TestLocatedBlock {
   @Test(timeout = 10000)
   public void testAddCachedLocWhenEmpty() {
-    DatanodeInfo[] ds = new DatanodeInfo[0];
+    DatanodeInfo[] ds = DatanodeInfo.EMPTY_ARRAY;
     ExtendedBlock b1 = new ExtendedBlock("bpid", 1, 1, 1);
     LocatedBlock l1 = new LocatedBlock(b1, ds);
     DatanodeDescriptor dn = new DatanodeDescriptor(


@@ -391,7 +391,7 @@ public class TestBlockToken {
     DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
     ExtendedBlock b = new ExtendedBlock("fake-pool", new Block(12345L));
-    LocatedBlock fakeBlock = new LocatedBlock(b, new DatanodeInfo[0]);
+    LocatedBlock fakeBlock = new LocatedBlock(b, DatanodeInfo.EMPTY_ARRAY);
     fakeBlock.setBlockToken(token);
     // Create another RPC proxy with the same configuration - this will never


@@ -392,7 +392,8 @@ public class TestDatanodeManager {
       storageTypesList.add(StorageType.PROVIDED);
     }
-    StorageType[] storageTypes= storageTypesList.toArray(new StorageType[0]);
+    StorageType[] storageTypes = storageTypesList.toArray(
+        StorageType.EMPTY_ARRAY);
     for (int i = 0; i < totalDNs; i++) {
       // register new datanode
@@ -694,7 +695,8 @@ public class TestDatanodeManager {
     List<StorageType> storageTypesList =
         new ArrayList<>(Arrays.asList(StorageType.ARCHIVE, StorageType.DISK,
             StorageType.SSD, StorageType.DEFAULT, StorageType.SSD));
-    StorageType[] storageTypes = storageTypesList.toArray(new StorageType[0]);
+    StorageType[] storageTypes = storageTypesList.toArray(
+        StorageType.EMPTY_ARRAY);
     for (int i = 0; i < totalDNs; i++) {
       // Register new datanode.
@@ -779,7 +781,8 @@ public class TestDatanodeManager {
     List<StorageType> storageTypesList =
         new ArrayList<>(Arrays.asList(StorageType.DISK, StorageType.DISK,
             StorageType.DEFAULT, StorageType.SSD, StorageType.SSD));
-    StorageType[] storageTypes = storageTypesList.toArray(new StorageType[0]);
+    StorageType[] storageTypes = storageTypesList.toArray(
+        StorageType.EMPTY_ARRAY);
     for (int i = 0; i < totalDNs; i++) {
       // Register new datanode.


@@ -127,8 +127,8 @@ public class TestDataXceiverLazyPersistHint {
         StorageType.RAM_DISK,
         null,
         "Dummy-Client",
-        new DatanodeInfo[0],
-        new StorageType[0],
+        DatanodeInfo.EMPTY_ARRAY,
+        StorageType.EMPTY_ARRAY,
         mock(DatanodeInfo.class),
         BlockConstructionStage.PIPELINE_SETUP_CREATE,
         0, 0, 0, 0,


@@ -165,7 +165,7 @@ public class TestDiskError {
         DataChecksum.Type.CRC32, 512);
     new Sender(out).writeBlock(block.getBlock(), StorageType.DEFAULT,
         BlockTokenSecretManager.DUMMY_TOKEN, "",
-        new DatanodeInfo[0], new StorageType[0], null,
+        DatanodeInfo.EMPTY_ARRAY, StorageType.EMPTY_ARRAY, null,
         BlockConstructionStage.PIPELINE_SETUP_CREATE, 1, 0L, 0L, 0L,
         checksum, CachingStrategy.newDefaultStrategy(), false, false,
         null, null, new String[0]);


@@ -119,7 +119,7 @@ public class TestBlockPlacementPolicyRackFaultTolerant {
       LocatedBlock additionalLocatedBlock =
           nameNodeRpc.getAdditionalDatanode(src, fileStatus.getFileId(),
              locatedBlock.getBlock(), locatedBlock.getLocations(),
-             locatedBlock.getStorageIDs(), new DatanodeInfo[0],
+             locatedBlock.getStorageIDs(), DatanodeInfo.EMPTY_ARRAY,
              additionalReplication, clientMachine);
       doTestLocatedBlock(replication + additionalReplication, additionalLocatedBlock);
     }
@@ -159,7 +159,7 @@ public class TestBlockPlacementPolicyRackFaultTolerant {
         LocatedBlock additionalLocatedBlock =
             nameNodeRpc.getAdditionalDatanode(src, fileStatus.getFileId(),
                 locatedBlock.getBlock(), partLocs,
-                partStorageIDs, new DatanodeInfo[0],
+                partStorageIDs, DatanodeInfo.EMPTY_ARRAY,
                 j, clientMachine);
         doTestLocatedBlock(i + j, additionalLocatedBlock);
       }


@@ -859,7 +859,7 @@ public class TestHASafeMode {
         pathString,
         client.getClientName(),
         new ExtendedBlock(previousBlock),
-        new DatanodeInfo[0],
+        DatanodeInfo.EMPTY_ARRAY,
         DFSClientAdapter.getFileId((DFSOutputStream) create
             .getWrappedStream()), null, null);
     cluster.restartNameNode(0, true);