From c488abbc79cc1ad2596cbf509a0cde14acc5ad6b Mon Sep 17 00:00:00 2001
From: Viraj Jasani
Date: Mon, 21 Jun 2021 06:55:12 +0530
Subject: [PATCH] HDFS-16075. Use empty array constants present in StorageType
 and DatanodeInfo to avoid creating redundant objects (#3115)

Reviewed-by: Hui Fei
---
 .../main/java/org/apache/hadoop/fs/BlockLocation.java    | 2 +-
 .../java/org/apache/hadoop/fs/TestBlockLocation.java     | 2 +-
 .../main/java/org/apache/hadoop/hdfs/DataStreamer.java   | 2 +-
 .../org/apache/hadoop/hdfs/protocol/LocatedBlocks.java   | 2 +-
 .../hadoop/hdfs/server/federation/MockNamenode.java      | 2 +-
 .../hdfs/server/federation/router/TestRouterRpc.java     | 2 +-
 .../federation/router/TestRouterRpcMultiDestination.java | 2 +-
 .../hadoop/hdfs/server/namenode/FSDirAppendOp.java       | 2 +-
 .../org/apache/hadoop/hdfs/TestDFSClientRetries.java     | 2 +-
 .../java/org/apache/hadoop/hdfs/TestDFSInputStream.java  | 4 ++--
 .../java/org/apache/hadoop/hdfs/TestLeaseRecovery.java   | 6 +++---
 .../apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java | 2 +-
 .../apache/hadoop/hdfs/protocol/TestLocatedBlock.java    | 2 +-
 .../hadoop/hdfs/security/token/block/TestBlockToken.java | 2 +-
 .../hdfs/server/blockmanagement/TestDatanodeManager.java | 9 ++++++---
 .../server/datanode/TestDataXceiverLazyPersistHint.java  | 4 ++--
 .../hadoop/hdfs/server/datanode/TestDiskError.java       | 2 +-
 .../TestBlockPlacementPolicyRackFaultTolerant.java       | 4 ++--
 .../hadoop/hdfs/server/namenode/ha/TestHASafeMode.java   | 2 +-
 19 files changed, 29 insertions(+), 26 deletions(-)

diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java
index c6dde52d83d..29358dd7d10 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java
@@ -74,7 +74,7 @@ public class BlockLocation implements Serializable {
 
   private static final String[] EMPTY_STR_ARRAY = new String[0];
   private static final StorageType[] EMPTY_STORAGE_TYPE_ARRAY =
-      new StorageType[0];
+      StorageType.EMPTY_ARRAY;
 
   /**
    * Default Constructor.
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestBlockLocation.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestBlockLocation.java
index 8569ea7cf78..72e850b1313 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestBlockLocation.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestBlockLocation.java
@@ -27,7 +27,7 @@ public class TestBlockLocation {
 
   private static final String[] EMPTY_STR_ARRAY = new String[0];
   private static final StorageType[] EMPTY_STORAGE_TYPE_ARRAY =
-      new StorageType[0];
+      StorageType.EMPTY_ARRAY;
 
   private static void checkBlockLocation(final BlockLocation loc)
       throws Exception {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
index e04268eddce..4b5f3c33982 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
@@ -1683,7 +1683,7 @@ public void updatePipeline(long newGS) throws IOException {
 
   DatanodeInfo[] getExcludedNodes() {
     return excludedNodes.getAllPresent(excludedNodes.asMap().keySet())
-        .keySet().toArray(new DatanodeInfo[0]);
+        .keySet().toArray(DatanodeInfo.EMPTY_ARRAY);
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java
index baf59ce6136..1f5b85e315f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java
@@ -131,7 +131,7 @@ public ErasureCodingPolicy getErasureCodingPolicy() {
   public int findBlock(long offset) {
     // create fake block of size 0 as a key
     LocatedBlock key = new LocatedBlock(
-        new ExtendedBlock(), new DatanodeInfo[0]);
+        new ExtendedBlock(), DatanodeInfo.EMPTY_ARRAY);
     key.setStartOffset(offset);
     key.getBlock().setNumBytes(1);
     Comparator<LocatedBlock> comp =
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockNamenode.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockNamenode.java
index f9080653841..a4755c20fca 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockNamenode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockNamenode.java
@@ -535,7 +535,7 @@ private static HdfsFileStatus getMockHdfsFileStatus(
    */
  private static LocatedBlock getMockLocatedBlock(final String nsId) {
     LocatedBlock lb = mock(LocatedBlock.class);
-    when(lb.getCachedLocations()).thenReturn(new DatanodeInfo[0]);
+    when(lb.getCachedLocations()).thenReturn(DatanodeInfo.EMPTY_ARRAY);
     DatanodeID nodeId = new DatanodeID("localhost", "localhost", "dn0",
         1111, 1112, 1113, 1114);
     DatanodeInfo dnInfo = new DatanodeDescriptor(nodeId);
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java
index 4f772cc4dc8..21329c81423 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java
@@ -1241,7 +1241,7 @@ public void testProxyGetAdditionalDatanode()
         newRouterFile, clientName, null, null, status.getFileId(), null,
         null);
 
-    DatanodeInfo[] exclusions = new DatanodeInfo[0];
+    DatanodeInfo[] exclusions = DatanodeInfo.EMPTY_ARRAY;
     LocatedBlock newBlock = routerProtocol.getAdditionalDatanode(
         newRouterFile, status.getFileId(), block.getBlock(),
         block.getLocations(), block.getStorageIDs(), exclusions, 1, clientName);
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java
index 4f112ba9b72..e50464c0be7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java
@@ -314,7 +314,7 @@ testPath, new FsPermission("777"), clientName,
     assertEquals(1, proxyNumAddBlock2 - proxyNumAddBlock);
 
     // Get additionalDatanode via router and block is not null.
-    DatanodeInfo[] exclusions = new DatanodeInfo[0];
+    DatanodeInfo[] exclusions = DatanodeInfo.EMPTY_ARRAY;
     LocatedBlock newBlock = clientProtocol.getAdditionalDatanode(
         testPath, status.getFileId(), blockTwo.getBlock(),
         blockTwo.getLocations(), blockTwo.getStorageIDs(), exclusions,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java
index 2586a257bef..7e90d4bafa8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java
@@ -209,7 +209,7 @@ static LocatedBlock prepareFileForAppend(final FSNamesystem fsn,
       BlockInfo lastBlock = file.getLastBlock();
       if (lastBlock != null) {
         ExtendedBlock blk = new ExtendedBlock(fsn.getBlockPoolId(), lastBlock);
-        ret = new LocatedBlock(blk, new DatanodeInfo[0]);
+        ret = new LocatedBlock(blk, DatanodeInfo.EMPTY_ARRAY);
       }
     }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
index 30115efc384..970003b0e58 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
@@ -883,7 +883,7 @@ public void testClientDNProtocolTimeout() throws IOException {
     DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
 
     ExtendedBlock b = new ExtendedBlock("fake-pool", new Block(12345L));
-    LocatedBlock fakeBlock = new LocatedBlock(b, new DatanodeInfo[0]);
+    LocatedBlock fakeBlock = new LocatedBlock(b, DatanodeInfo.EMPTY_ARRAY);
 
     ClientDatanodeProtocol proxy = null;
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStream.java
index f2d580576cf..2f9e0d319cd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStream.java
@@ -242,7 +242,7 @@ public void testReadWithPreferredCachingReplica() throws IOException {
     DFSInputStream dfsInputStream =
         (DFSInputStream) fs.open(filePath).getWrappedStream();
     LocatedBlock lb = mock(LocatedBlock.class);
-    when(lb.getCachedLocations()).thenReturn(new DatanodeInfo[0]);
+    when(lb.getCachedLocations()).thenReturn(DatanodeInfo.EMPTY_ARRAY);
     DatanodeID nodeId = new DatanodeID("localhost", "localhost", "dn0", 1111,
         1112, 1113, 1114);
     DatanodeInfo dnInfo = new DatanodeDescriptor(nodeId);
@@ -271,7 +271,7 @@ public void testReadWithoutPreferredCachingReplica() throws IOException {
     DFSInputStream dfsInputStream =
         (DFSInputStream) fs.open(filePath).getWrappedStream();
     LocatedBlock lb = mock(LocatedBlock.class);
-    when(lb.getCachedLocations()).thenReturn(new DatanodeInfo[0]);
+    when(lb.getCachedLocations()).thenReturn(DatanodeInfo.EMPTY_ARRAY);
     DatanodeID nodeId = new DatanodeID("localhost", "localhost", "dn0", 1111,
         1112, 1113, 1114);
     DatanodeInfo dnInfo = new DatanodeDescriptor(nodeId);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
index ca3065088c4..e2c956ecd27 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
@@ -461,8 +461,8 @@ public void testAbortedRecovery() throws Exception {
 
       // Add a block to the file
       ExtendedBlock block = client.getNamenode().addBlock(
-          file, client.clientName, null, new DatanodeInfo[0], stat.getFileId(),
-          new String[0], null).getBlock();
+          file, client.clientName, null, DatanodeInfo.EMPTY_ARRAY,
+          stat.getFileId(), new String[0], null).getBlock();
 
       // update the pipeline to get a new genstamp.
       ExtendedBlock updatedBlock = client.getNamenode()
@@ -578,7 +578,7 @@ private void createCommittedNotCompleteFile(DFSClient client, String file,
       // Add a block to the file
       LocatedBlock blk = client.getNamenode()
           .addBlock(file, client.clientName, null,
-              new DatanodeInfo[0], stat.getFileId(), new String[0], null);
+              DatanodeInfo.EMPTY_ARRAY, stat.getFileId(), new String[0], null);
       ExtendedBlock finalBlock = blk.getBlock();
       if (bytesToWrite != null) {
         // Here we create a output stream and then abort it so the block gets
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java
index 925f93648d4..5015722c61f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java
@@ -65,7 +65,7 @@ public void testDefaultPolicy() throws Exception {
 
     final DatanodeInfo[] infos = new DatanodeInfo[5];
     final DatanodeInfo[][] datanodes = new DatanodeInfo[infos.length + 1][];
-    datanodes[0] = new DatanodeInfo[0];
+    datanodes[0] = DatanodeInfo.EMPTY_ARRAY;
     for(int i = 0; i < infos.length; ) {
       infos[i] = DFSTestUtil.getLocalDatanodeInfo(9867 + i);
       i++;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLocatedBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLocatedBlock.java
index 3546c89938f..33b5bd90b2a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLocatedBlock.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLocatedBlock.java
@@ -31,7 +31,7 @@ public class TestLocatedBlock {
 
   @Test(timeout = 10000)
   public void testAddCachedLocWhenEmpty() {
-    DatanodeInfo[] ds = new DatanodeInfo[0];
+    DatanodeInfo[] ds = DatanodeInfo.EMPTY_ARRAY;
     ExtendedBlock b1 = new ExtendedBlock("bpid", 1, 1, 1);
     LocatedBlock l1 = new LocatedBlock(b1, ds);
     DatanodeDescriptor dn = new DatanodeDescriptor(
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
index 524656aa763..d08276b0698 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
@@ -391,7 +391,7 @@ private void testBlockTokenRpcLeak(boolean enableProtobuf) throws Exception {
     DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
 
     ExtendedBlock b = new ExtendedBlock("fake-pool", new Block(12345L));
-    LocatedBlock fakeBlock = new LocatedBlock(b, new DatanodeInfo[0]);
+    LocatedBlock fakeBlock = new LocatedBlock(b, DatanodeInfo.EMPTY_ARRAY);
     fakeBlock.setBlockToken(token);
 
     // Create another RPC proxy with the same configuration - this will never
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java
index 4fbfbcfa729..5f5452ac16d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java
@@ -392,7 +392,8 @@ public void HelperFunction(String scriptFileName, int providedStorages)
       storageTypesList.add(StorageType.PROVIDED);
     }
 
-    StorageType[] storageTypes= storageTypesList.toArray(new StorageType[0]);
+    StorageType[] storageTypes = storageTypesList.toArray(
+        StorageType.EMPTY_ARRAY);
 
     for (int i = 0; i < totalDNs; i++) {
       // register new datanode
@@ -694,7 +695,8 @@ public void testGetBlockLocationConsiderStorageType()
     List<StorageType> storageTypesList =
         new ArrayList<>(Arrays.asList(StorageType.ARCHIVE, StorageType.DISK,
             StorageType.SSD, StorageType.DEFAULT, StorageType.SSD));
-    StorageType[] storageTypes = storageTypesList.toArray(new StorageType[0]);
+    StorageType[] storageTypes = storageTypesList.toArray(
+        StorageType.EMPTY_ARRAY);
 
     for (int i = 0; i < totalDNs; i++) {
       // Register new datanode.
@@ -779,7 +781,8 @@ public void testGetBlockLocationConsiderStorageTypeAndLoad()
     List<StorageType> storageTypesList =
         new ArrayList<>(Arrays.asList(StorageType.DISK, StorageType.DISK,
             StorageType.DEFAULT, StorageType.SSD, StorageType.SSD));
-    StorageType[] storageTypes = storageTypesList.toArray(new StorageType[0]);
+    StorageType[] storageTypes = storageTypesList.toArray(
+        StorageType.EMPTY_ARRAY);
 
     for (int i = 0; i < totalDNs; i++) {
       // Register new datanode.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataXceiverLazyPersistHint.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataXceiverLazyPersistHint.java
index dab33b2c037..611360d6cb1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataXceiverLazyPersistHint.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataXceiverLazyPersistHint.java
@@ -127,8 +127,8 @@ private void issueWriteBlockCall(DataXceiver xceiver, boolean lazyPersist)
         StorageType.RAM_DISK,
         null,
         "Dummy-Client",
-        new DatanodeInfo[0],
-        new StorageType[0],
+        DatanodeInfo.EMPTY_ARRAY,
+        StorageType.EMPTY_ARRAY,
         mock(DatanodeInfo.class),
         BlockConstructionStage.PIPELINE_SETUP_CREATE,
         0, 0, 0, 0,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
index 64c5a5b93aa..53be71f9a34 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
@@ -165,7 +165,7 @@ public void testReplicationError() throws Exception {
         DataChecksum.Type.CRC32, 512);
     new Sender(out).writeBlock(block.getBlock(), StorageType.DEFAULT,
         BlockTokenSecretManager.DUMMY_TOKEN, "",
-        new DatanodeInfo[0], new StorageType[0], null,
+        DatanodeInfo.EMPTY_ARRAY, StorageType.EMPTY_ARRAY, null,
         BlockConstructionStage.PIPELINE_SETUP_CREATE, 1, 0L, 0L, 0L,
         checksum, CachingStrategy.newDefaultStrategy(), false, false,
         null, null, new String[0]);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockPlacementPolicyRackFaultTolerant.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockPlacementPolicyRackFaultTolerant.java
index 9825f13cea5..88b7d2bf7f2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockPlacementPolicyRackFaultTolerant.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockPlacementPolicyRackFaultTolerant.java
@@ -119,7 +119,7 @@ private void doTestChooseTargetNormalCase() throws Exception {
       LocatedBlock additionalLocatedBlock =
           nameNodeRpc.getAdditionalDatanode(src, fileStatus.getFileId(),
               locatedBlock.getBlock(), locatedBlock.getLocations(),
-              locatedBlock.getStorageIDs(), new DatanodeInfo[0],
+              locatedBlock.getStorageIDs(), DatanodeInfo.EMPTY_ARRAY,
               additionalReplication, clientMachine);
       doTestLocatedBlock(replication + additionalReplication, additionalLocatedBlock);
     }
@@ -159,7 +159,7 @@ private void doTestChooseTargetSpecialCase() throws Exception {
       LocatedBlock additionalLocatedBlock =
           nameNodeRpc.getAdditionalDatanode(src, fileStatus.getFileId(),
               locatedBlock.getBlock(), partLocs,
-              partStorageIDs, new DatanodeInfo[0],
+              partStorageIDs, DatanodeInfo.EMPTY_ARRAY,
               j, clientMachine);
       doTestLocatedBlock(i + j, additionalLocatedBlock);
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java
index affa348c993..537e6a34bd0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java
@@ -859,7 +859,7 @@ public void testOpenFileWhenNNAndClientCrashAfterAddBlock() throws Exception {
           pathString,
           client.getClientName(),
           new ExtendedBlock(previousBlock),
-          new DatanodeInfo[0],
+          DatanodeInfo.EMPTY_ARRAY,
           DFSClientAdapter.getFileId((DFSOutputStream) create
               .getWrappedStream()), null, null);
       cluster.restartNameNode(0, true);
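
A note on the idiom this patch applies: list.toArray(new T[0]) allocates a
fresh zero-length array on every call purely as a type token, because toArray
builds a correctly sized result array itself whenever the collection is
non-empty. Since a zero-length array has no elements to mutate, a single
shared constant can safely replace all of those throwaway allocations. Below
is a minimal, self-contained sketch of the pattern; the Node class and its
EMPTY_ARRAY constant are hypothetical stand-ins for illustration, while the
patch itself relies only on the existing DatanodeInfo.EMPTY_ARRAY and
StorageType.EMPTY_ARRAY constants.

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

// Hypothetical stand-in for classes like DatanodeInfo and StorageType,
// which expose equivalent shared constants.
class Node {
  // Safe to share globally: a zero-length array is effectively immutable.
  static final Node[] EMPTY_ARRAY = new Node[0];

  private final String name;

  Node(String name) {
    this.name = name;
  }

  @Override
  public String toString() {
    return name;
  }
}

public class EmptyArrayDemo {
  public static void main(String[] args) {
    List<Node> nodes = Arrays.asList(new Node("dn0"), new Node("dn1"));

    // toArray(T[]) uses its argument only as a type token when the
    // collection does not fit into it, so passing the shared constant
    // avoids allocating a throwaway zero-length array per call.
    Node[] copy = nodes.toArray(Node.EMPTY_ARRAY);
    System.out.println(Arrays.toString(copy)); // prints [dn0, dn1]

    // When the collection is empty, toArray returns the argument itself,
    // so no array is allocated at all.
    List<Node> none = Collections.emptyList();
    System.out.println(none.toArray(Node.EMPTY_ARRAY) == Node.EMPTY_ARRAY);
  }
}

In short, HDFS-16075 centralizes these constants on the types themselves
rather than having each caller declare its own, which is why most hunks above
are one-line substitutions.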