From 01a073099601def157a2dd08d233bbd0923dc3d1 Mon Sep 17 00:00:00 2001 From: Arpit Agarwal Date: Wed, 18 May 2016 09:20:57 -0700 Subject: [PATCH] HDFS-9926. MiniDFSCluster leaks dependency Mockito via DataNodeTestUtils. (Contributed by Josh Elser) --- .../hadoop/hdfs/TestDataTransferProtocol.java | 4 +- .../hadoop/hdfs/TestFileCorruption.java | 6 +- .../apache/hadoop/hdfs/TestReplication.java | 3 +- .../blockmanagement/TestBlockManager.java | 4 +- .../TestHeartbeatHandling.java | 14 +- .../TestOverReplicatedBlocks.java | 8 +- .../server/datanode/BlockReportTestBase.java | 2 +- .../server/datanode/DataNodeTestUtils.java | 119 +------------ .../datanode/InternalDataNodeTestUtils.java | 157 ++++++++++++++++++ .../datanode/TestDataNodeHotSwapVolumes.java | 2 +- .../datanode/TestDataNodeMetricsLogger.java | 2 +- .../datanode/TestDataNodeReconfiguration.java | 2 +- ...stDnRespectsBlockReportSplitThreshold.java | 6 +- .../server/datanode/TestFsDatasetCache.java | 2 +- .../datanode/TestIncrementalBlockReports.java | 2 +- .../server/datanode/TestStorageReport.java | 2 +- .../datanode/TestTriggerBlockReport.java | 2 +- .../server/namenode/TestDeadDatanode.java | 12 +- .../hdfs/server/namenode/TestDeleteRace.java | 4 +- .../server/namenode/ha/TestDNFencing.java | 5 +- .../namenode/ha/TestPipelinesFailover.java | 6 +- 21 files changed, 206 insertions(+), 158 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/InternalDataNodeTestUtils.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java index cede2077497..2335b9a8c6b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java @@ -60,7 +60,7 @@ 
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumIn import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager; import org.apache.hadoop.hdfs.server.datanode.CachingStrategy; -import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; +import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.util.DataChecksum; @@ -217,7 +217,7 @@ public class TestDataTransferProtocol { try { cluster.waitActive(); String poolId = cluster.getNamesystem().getBlockPoolId(); - datanode = DataNodeTestUtils.getDNRegistrationForBP( + datanode = InternalDataNodeTestUtils.getDNRegistrationForBP( cluster.getDataNodes().get(0), poolId); dnAddr = NetUtils.createSocketAddr(datanode.getXferAddr()); FileSystem fileSys = cluster.getFileSystem(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java index c1a7ebbd99e..1ed860308ba 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java @@ -37,7 +37,7 @@ import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.server.datanode.DataNode; -import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; +import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; @@ -145,8 +145,8 @@ 
public class TestFileCorruption { DataNode dataNode = datanodes.get(2); // report corrupted block by the third datanode - DatanodeRegistration dnR = - DataNodeTestUtils.getDNRegistrationForBP(dataNode, blk.getBlockPoolId()); + DatanodeRegistration dnR = InternalDataNodeTestUtils. + getDNRegistrationForBP(dataNode, blk.getBlockPoolId()); FSNamesystem ns = cluster.getNamesystem(); ns.writeLock(); try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java index 357287df968..daa0ad05bb7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java @@ -56,6 +56,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; import org.apache.hadoop.hdfs.server.datanode.FsDatasetTestUtils.MaterializedReplica; +import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils; import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; @@ -580,7 +581,7 @@ public class TestReplication { NameNode nn = cluster.getNameNode(); DataNode dn = cluster.getDataNodes().get(0); DatanodeProtocolClientSideTranslatorPB spy = - DataNodeTestUtils.spyOnBposToNN(dn, nn); + InternalDataNodeTestUtils.spyOnBposToNN(dn, nn); DelayAnswer delayer = new GenericTestUtils.DelayAnswer(LOG); Mockito.doAnswer(delayer).when(spy).blockReceivedAndDeleted( Mockito.anyObject(), diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java index efe36c5aa56..ada75c884f2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java @@ -73,7 +73,7 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; -import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; +import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils; import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica; import org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; @@ -882,7 +882,7 @@ public class TestBlockManager { final FSNamesystem namesystem = cluster.getNamesystem(); final String poolId = namesystem.getBlockPoolId(); final DatanodeRegistration nodeReg = - DataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes(). + InternalDataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes(). 
get(0), poolId); final DatanodeDescriptor dd = NameNodeAdapter.getDatanode(namesystem, nodeReg); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java index b77c413f068..ab607eaa8d4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java @@ -32,7 +32,7 @@ import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.server.common.GenerationStamp; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState; -import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; +import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; import org.apache.hadoop.hdfs.server.namenode.Namesystem; @@ -66,7 +66,8 @@ public class TestHeartbeatHandling { ).getDatanodeManager().getHeartbeatManager(); final String poolId = namesystem.getBlockPoolId(); final DatanodeRegistration nodeReg = - DataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(0), poolId); + InternalDataNodeTestUtils. 
+ getDNRegistrationForBP(cluster.getDataNodes().get(0), poolId); final DatanodeDescriptor dd = NameNodeAdapter.getDatanode(namesystem, nodeReg); final String storageID = DatanodeStorage.generateUuid(); dd.updateStorage(new DatanodeStorage(storageID)); @@ -149,15 +150,18 @@ public class TestHeartbeatHandling { ).getDatanodeManager().getHeartbeatManager(); final String poolId = namesystem.getBlockPoolId(); final DatanodeRegistration nodeReg1 = - DataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(0), poolId); + InternalDataNodeTestUtils. + getDNRegistrationForBP(cluster.getDataNodes().get(0), poolId); final DatanodeDescriptor dd1 = NameNodeAdapter.getDatanode(namesystem, nodeReg1); dd1.updateStorage(new DatanodeStorage(DatanodeStorage.generateUuid())); final DatanodeRegistration nodeReg2 = - DataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(1), poolId); + InternalDataNodeTestUtils. + getDNRegistrationForBP(cluster.getDataNodes().get(1), poolId); final DatanodeDescriptor dd2 = NameNodeAdapter.getDatanode(namesystem, nodeReg2); dd2.updateStorage(new DatanodeStorage(DatanodeStorage.generateUuid())); final DatanodeRegistration nodeReg3 = - DataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(2), poolId); + InternalDataNodeTestUtils. 
+ getDNRegistrationForBP(cluster.getDataNodes().get(2), poolId); final DatanodeDescriptor dd3 = NameNodeAdapter.getDatanode(namesystem, nodeReg3); dd3.updateStorage(new DatanodeStorage(DatanodeStorage.generateUuid())); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java index b32312b37b5..3bdc882cb06 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java @@ -38,7 +38,7 @@ import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.server.datanode.DataNode; -import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; +import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; @@ -91,7 +91,7 @@ public class TestOverReplicatedBlocks { String blockPoolId = cluster.getNamesystem().getBlockPoolId(); final DatanodeID corruptDataNode = - DataNodeTestUtils.getDNRegistrationForBP( + InternalDataNodeTestUtils.getDNRegistrationForBP( cluster.getDataNodes().get(2), blockPoolId); final FSNamesystem namesystem = cluster.getNamesystem(); @@ -159,8 +159,8 @@ public class TestOverReplicatedBlocks { conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 300); cluster.startDataNodes(conf, 1, true, null, null, null); DataNode lastDN = cluster.getDataNodes().get(3); - DatanodeRegistration dnReg = DataNodeTestUtils.getDNRegistrationForBP( - lastDN, 
namesystem.getBlockPoolId()); + DatanodeRegistration dnReg = InternalDataNodeTestUtils. + getDNRegistrationForBP(lastDN, namesystem.getBlockPoolId()); String lastDNid = dnReg.getDatanodeUuid(); final Path fileName = new Path("/foo2"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java index 4279f96a001..53b92636941 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java @@ -614,7 +614,7 @@ public abstract class BlockReportTestBase { // from this node. DataNode dn = cluster.getDataNodes().get(0); DatanodeProtocolClientSideTranslatorPB spy = - DataNodeTestUtils.spyOnBposToNN(dn, nn); + InternalDataNodeTestUtils.spyOnBposToNN(dn, nn); Mockito.doAnswer(delayer) .when(spy).blockReport( diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java index ec45dac1de7..17d1b5bd93f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java @@ -19,44 +19,21 @@ package org.apache.hadoop.hdfs.server.datanode; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - import java.io.File; import java.io.IOException; -import java.net.InetSocketAddress; -import java.util.ArrayList; -import java.util.concurrent.ThreadLocalRandom; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.FileUtil; 
-import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState; import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.DatanodeID; -import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB; import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil; -import org.apache.hadoop.hdfs.server.namenode.NameNode; -import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; -import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse; import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol; -import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat; -import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; -import org.apache.hadoop.hdfs.server.protocol.StorageReport; -import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary; -import org.junit.Assert; -import org.mockito.Mockito; -import org.mockito.invocation.InvocationOnMock; -import org.mockito.stubbing.Answer; - -import com.google.common.base.Preconditions; /** * Utility class for accessing package-private DataNode information during tests. - * + * Must not contain usage of classes that are not explicitly listed as + * dependencies to {@link MiniDFSCluster}. */ public class DataNodeTestUtils { private static final String DIR_FAILURE_SUFFIX = ".origin"; @@ -101,41 +78,6 @@ public class DataNodeTestUtils { bpos.triggerBlockReportForTests(); } } - - /** - * Insert a Mockito spy object between the given DataNode and - * the given NameNode. This can be used to delay or wait for - * RPC calls on the datanode->NN path. 
- */ - public static DatanodeProtocolClientSideTranslatorPB spyOnBposToNN( - DataNode dn, NameNode nn) { - String bpid = nn.getNamesystem().getBlockPoolId(); - - BPOfferService bpos = null; - for (BPOfferService thisBpos : dn.getAllBpOs()) { - if (thisBpos.getBlockPoolId().equals(bpid)) { - bpos = thisBpos; - break; - } - } - Preconditions.checkArgument(bpos != null, - "No such bpid: %s", bpid); - - BPServiceActor bpsa = null; - for (BPServiceActor thisBpsa : bpos.getBPServiceActors()) { - if (thisBpsa.getNNSocketAddress().equals(nn.getServiceRpcAddress())) { - bpsa = thisBpsa; - break; - } - } - Preconditions.checkArgument(bpsa != null, - "No service actor to NN at %s", nn.getServiceRpcAddress()); - - DatanodeProtocolClientSideTranslatorPB origNN = bpsa.getNameNodeProxy(); - DatanodeProtocolClientSideTranslatorPB spy = Mockito.spy(origNN); - bpsa.setNameNode(spy); - return spy; - } public static InterDatanodeProtocol createInterDatanodeProtocolProxy( DataNode dn, DatanodeID datanodeid, final Configuration conf, @@ -237,61 +179,4 @@ public class DataNodeTestUtils { dn.getDirectoryScanner().reconcile(); } } - - /** - * Starts an instance of DataNode with NN mocked. 
Called should ensure to - * shutdown the DN - * - * @throws IOException - */ - public static DataNode startDNWithMockNN(Configuration conf, - final InetSocketAddress nnSocketAddr, final String dnDataDir) - throws IOException { - - FileSystem.setDefaultUri(conf, "hdfs://" + nnSocketAddr.getHostName() + ":" - + nnSocketAddr.getPort()); - ArrayList locations = new ArrayList(); - File dataDir = new File(dnDataDir); - FileUtil.fullyDelete(dataDir); - dataDir.mkdirs(); - StorageLocation location = StorageLocation.parse(dataDir.getPath()); - locations.add(location); - - final DatanodeProtocolClientSideTranslatorPB namenode = - mock(DatanodeProtocolClientSideTranslatorPB.class); - - Mockito.doAnswer(new Answer() { - @Override - public DatanodeRegistration answer(InvocationOnMock invocation) - throws Throwable { - return (DatanodeRegistration) invocation.getArguments()[0]; - } - }).when(namenode).registerDatanode(Mockito.any(DatanodeRegistration.class)); - - when(namenode.versionRequest()).thenReturn( - new NamespaceInfo(1, TEST_CLUSTER_ID, TEST_POOL_ID, 1L)); - - when( - namenode.sendHeartbeat(Mockito.any(DatanodeRegistration.class), - Mockito.any(StorageReport[].class), Mockito.anyLong(), - Mockito.anyLong(), Mockito.anyInt(), Mockito.anyInt(), - Mockito.anyInt(), Mockito.any(VolumeFailureSummary.class), - Mockito.anyBoolean())).thenReturn( - new HeartbeatResponse(new DatanodeCommand[0], new NNHAStatusHeartbeat( - HAServiceState.ACTIVE, 1), null, ThreadLocalRandom.current() - .nextLong() | 1L)); - - DataNode dn = new DataNode(conf, locations, null) { - @Override - DatanodeProtocolClientSideTranslatorPB connectToNN( - InetSocketAddress nnAddr) throws IOException { - Assert.assertEquals(nnSocketAddr, nnAddr); - return namenode; - } - }; - // Trigger a heartbeat so that it acknowledges the NN as active. 
- dn.getAllBpOs().get(0).triggerHeartbeatForTests(); - - return dn; - } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/InternalDataNodeTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/InternalDataNodeTestUtils.java new file mode 100644 index 00000000000..2f400e2198b --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/InternalDataNodeTestUtils.java @@ -0,0 +1,157 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hdfs.server.datanode; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import java.io.File; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.concurrent.ThreadLocalRandom; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState; +import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB; +import org.apache.hadoop.hdfs.server.namenode.NameNode; +import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; +import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; +import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse; +import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat; +import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; +import org.apache.hadoop.hdfs.server.protocol.StorageReport; +import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary; +import org.junit.Assert; +import org.mockito.Mockito; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +import com.google.common.base.Preconditions; + +/** + * An internal-facing only collection of test utilities for the DataNode. This + * is to ensure that test-scope dependencies aren't inadvertently leaked + * to clients, e.g. Mockito. + */ +public class InternalDataNodeTestUtils { + + public final static String TEST_CLUSTER_ID = "testClusterID"; + public final static String TEST_POOL_ID = "BP-TEST"; + + public static DatanodeRegistration + getDNRegistrationForBP(DataNode dn, String bpid) throws IOException { + return dn.getDNRegistrationForBP(bpid); + } + + /** + * Insert a Mockito spy object between the given DataNode and + * the given NameNode. This can be used to delay or wait for + * RPC calls on the datanode->NN path. 
+ */ + public static DatanodeProtocolClientSideTranslatorPB spyOnBposToNN( + DataNode dn, NameNode nn) { + String bpid = nn.getNamesystem().getBlockPoolId(); + + BPOfferService bpos = null; + for (BPOfferService thisBpos : dn.getAllBpOs()) { + if (thisBpos.getBlockPoolId().equals(bpid)) { + bpos = thisBpos; + break; + } + } + Preconditions.checkArgument(bpos != null, + "No such bpid: %s", bpid); + + BPServiceActor bpsa = null; + for (BPServiceActor thisBpsa : bpos.getBPServiceActors()) { + if (thisBpsa.getNNSocketAddress().equals(nn.getServiceRpcAddress())) { + bpsa = thisBpsa; + break; + } + } + Preconditions.checkArgument(bpsa != null, + "No service actor to NN at %s", nn.getServiceRpcAddress()); + + DatanodeProtocolClientSideTranslatorPB origNN = bpsa.getNameNodeProxy(); + DatanodeProtocolClientSideTranslatorPB spy = Mockito.spy(origNN); + bpsa.setNameNode(spy); + return spy; + } + + /** + * Starts an instance of DataNode with NN mocked. Caller should ensure to + * shutdown the DN + * + * @throws IOException + */ + public static DataNode startDNWithMockNN(Configuration conf, + final InetSocketAddress nnSocketAddr, final String dnDataDir) + throws IOException { + + FileSystem.setDefaultUri(conf, "hdfs://" + nnSocketAddr.getHostName() + ":" + + nnSocketAddr.getPort()); + ArrayList locations = new ArrayList(); + File dataDir = new File(dnDataDir); + FileUtil.fullyDelete(dataDir); + dataDir.mkdirs(); + StorageLocation location = StorageLocation.parse(dataDir.getPath()); + locations.add(location); + + final DatanodeProtocolClientSideTranslatorPB namenode = + mock(DatanodeProtocolClientSideTranslatorPB.class); + + Mockito.doAnswer(new Answer() { + @Override + public DatanodeRegistration answer(InvocationOnMock invocation) + throws Throwable { + return (DatanodeRegistration) invocation.getArguments()[0]; + } + }).when(namenode).registerDatanode(Mockito.any(DatanodeRegistration.class)); + + when(namenode.versionRequest()).thenReturn( + new NamespaceInfo(1, 
TEST_CLUSTER_ID, TEST_POOL_ID, + 1L)); + + when( + namenode.sendHeartbeat(Mockito.any(DatanodeRegistration.class), + Mockito.any(StorageReport[].class), Mockito.anyLong(), + Mockito.anyLong(), Mockito.anyInt(), Mockito.anyInt(), + Mockito.anyInt(), Mockito.any(VolumeFailureSummary.class), + Mockito.anyBoolean())).thenReturn( + new HeartbeatResponse(new DatanodeCommand[0], new NNHAStatusHeartbeat( + HAServiceState.ACTIVE, 1), null, ThreadLocalRandom.current() + .nextLong() | 1L)); + + DataNode dn = new DataNode(conf, locations, null) { + @Override + DatanodeProtocolClientSideTranslatorPB connectToNN( + InetSocketAddress nnAddr) throws IOException { + Assert.assertEquals(nnSocketAddr, nnAddr); + return namenode; + } + }; + // Trigger a heartbeat so that it acknowledges the NN as active. + dn.getAllBpOs().get(0).triggerHeartbeatForTests(); + + return dn; + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java index 725bc6ba878..0328e109acf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java @@ -821,7 +821,7 @@ public class TestDataNodeHotSwapVolumes { final DataNode dn = cluster.getDataNodes().get(0); DatanodeProtocolClientSideTranslatorPB spy = - DataNodeTestUtils.spyOnBposToNN(dn, cluster.getNameNode()); + InternalDataNodeTestUtils.spyOnBposToNN(dn, cluster.getNameNode()); // Remove a data dir from datanode File dataDirToKeep = new File(cluster.getDataDirectory(), "data1"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java index 1177a457fae..32fda370161 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java @@ -89,7 +89,7 @@ public class TestDataNodeMetricsLogger { conf.setInt(DFS_DATANODE_METRICS_LOGGER_PERIOD_SECONDS_KEY, enableMetricsLogging ? 1 : 0); // If enabled, log early and log often - dn = DataNodeTestUtils.startDNWithMockNN(conf, NN_ADDR, DATA_DIR); + dn = InternalDataNodeTestUtils.startDNWithMockNN(conf, NN_ADDR, DATA_DIR); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java index 249b5c69cbb..1dfd3c31db6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java @@ -102,7 +102,7 @@ public class TestDataNodeReconfiguration { DataNode[] result = new DataNode[numDateNode]; for (int i = 0; i < numDateNode; i++) { - result[i] = DataNodeTestUtils.startDNWithMockNN(conf, NN_ADDR, DATA_DIR); + result[i] = InternalDataNodeTestUtils.startDNWithMockNN(conf, NN_ADDR, DATA_DIR); } return result; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDnRespectsBlockReportSplitThreshold.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDnRespectsBlockReportSplitThreshold.java index aadd9b2020e..ad5862a353c 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDnRespectsBlockReportSplitThreshold.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDnRespectsBlockReportSplitThreshold.java @@ -122,7 +122,7 @@ public class TestDnRespectsBlockReportSplitThreshold { // Insert a spy object for the NN RPC. DatanodeProtocolClientSideTranslatorPB nnSpy = - DataNodeTestUtils.spyOnBposToNN(dn, nn); + InternalDataNodeTestUtils.spyOnBposToNN(dn, nn); // Trigger a block report so there is an interaction with the spy // object. @@ -154,7 +154,7 @@ public class TestDnRespectsBlockReportSplitThreshold { // Insert a spy object for the NN RPC. DatanodeProtocolClientSideTranslatorPB nnSpy = - DataNodeTestUtils.spyOnBposToNN(dn, nn); + InternalDataNodeTestUtils.spyOnBposToNN(dn, nn); // Trigger a block report so there is an interaction with the spy // object. @@ -186,7 +186,7 @@ public class TestDnRespectsBlockReportSplitThreshold { // Insert a spy object for the NN RPC. DatanodeProtocolClientSideTranslatorPB nnSpy = - DataNodeTestUtils.spyOnBposToNN(dn, nn); + InternalDataNodeTestUtils.spyOnBposToNN(dn, nn); // Trigger a block report so there is an interaction with the spy // object. 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java index d82e383d8c8..e5acb1cc3ae 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java @@ -141,7 +141,7 @@ public class TestFsDatasetCache { dn = cluster.getDataNodes().get(0); fsd = dn.getFSDataset(); - spyNN = DataNodeTestUtils.spyOnBposToNN(dn, nn); + spyNN = InternalDataNodeTestUtils.spyOnBposToNN(dn, nn); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestIncrementalBlockReports.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestIncrementalBlockReports.java index 228897418e9..03553fed4f3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestIncrementalBlockReports.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestIncrementalBlockReports.java @@ -106,7 +106,7 @@ public class TestIncrementalBlockReports { * @return spy object that can be used for Mockito verification. 
*/ DatanodeProtocolClientSideTranslatorPB spyOnDnCallsToNn() { - return DataNodeTestUtils.spyOnBposToNN(singletonDn, singletonNn); + return InternalDataNodeTestUtils.spyOnBposToNN(singletonDn, singletonNn); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStorageReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStorageReport.java index a6032c1ce2a..d8418d46b6a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStorageReport.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStorageReport.java @@ -92,7 +92,7 @@ public class TestStorageReport { // Insert a spy object for the NN RPC. DatanodeProtocolClientSideTranslatorPB nnSpy = - DataNodeTestUtils.spyOnBposToNN(dn, nn); + InternalDataNodeTestUtils.spyOnBposToNN(dn, nn); // Trigger a heartbeat so there is an interaction with the spy // object. 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestTriggerBlockReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestTriggerBlockReport.java index f04a196a9bf..defe7c74432 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestTriggerBlockReport.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestTriggerBlockReport.java @@ -62,7 +62,7 @@ public final class TestTriggerBlockReport { cluster.waitActive(); FileSystem fs = cluster.getFileSystem(); DatanodeProtocolClientSideTranslatorPB spy = - DataNodeTestUtils.spyOnBposToNN( + InternalDataNodeTestUtils.spyOnBposToNN( cluster.getDataNodes().get(0), cluster.getNameNode()); DFSTestUtil.createFile(fs, new Path("/abc"), 16, (short) 1, 1L); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java index 1455dae8596..f1245f58f4d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java @@ -37,7 +37,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo; import org.apache.hadoop.hdfs.server.datanode.DataNode; -import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; +import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils; import org.apache.hadoop.hdfs.server.protocol.BlockReportContext; import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; @@ 
-86,9 +86,9 @@ public class TestDeadDatanode { String poolId = cluster.getNamesystem().getBlockPoolId(); // wait for datanode to be marked live DataNode dn = cluster.getDataNodes().get(0); - DatanodeRegistration reg = - DataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(0), poolId); - + DatanodeRegistration reg = InternalDataNodeTestUtils. + getDNRegistrationForBP(cluster.getDataNodes().get(0), poolId); + DFSTestUtil.waitForDatanodeState(cluster, reg.getDatanodeUuid(), true, 20000); // Shutdown and wait for datanode to be marked dead @@ -147,8 +147,8 @@ public class TestDeadDatanode { String poolId = cluster.getNamesystem().getBlockPoolId(); // wait for datanode to be marked live DataNode dn = cluster.getDataNodes().get(0); - DatanodeRegistration reg = DataNodeTestUtils.getDNRegistrationForBP(cluster - .getDataNodes().get(0), poolId); + DatanodeRegistration reg = InternalDataNodeTestUtils. + getDNRegistrationForBP(cluster.getDataNodes().get(0), poolId); // Get the updated datanode descriptor BlockManager bm = cluster.getNamesystem().getBlockManager(); DatanodeManager dm = bm.getDatanodeManager(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java index 15f697a9a06..133a18e72d5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java @@ -48,7 +48,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo; import org.apache.hadoop.hdfs.server.datanode.DataNode; -import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; +import 
org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils; import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.net.Node; @@ -299,7 +299,7 @@ public class TestDeleteRace { DataNode primaryDN = cluster.getDataNode(expectedPrimary.getIpcPort()); DatanodeProtocolClientSideTranslatorPB nnSpy = dnMap.get(primaryDN); if (nnSpy == null) { - nnSpy = DataNodeTestUtils.spyOnBposToNN(primaryDN, nn); + nnSpy = InternalDataNodeTestUtils.spyOnBposToNN(primaryDN, nn); dnMap.put(primaryDN, nnSpy); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java index 96822d625b1..72afea9d7f3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java @@ -50,6 +50,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; +import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; import org.apache.hadoop.hdfs.server.protocol.BlockReportContext; @@ -539,8 +540,8 @@ public class TestDNFencing { DataNode dn = cluster.getDataNodes().get(0); DatanodeProtocolClientSideTranslatorPB spy = - DataNodeTestUtils.spyOnBposToNN(dn, nn2); - + InternalDataNodeTestUtils.spyOnBposToNN(dn, nn2); + Mockito.doAnswer(delayer) .when(spy).blockReport( Mockito.anyObject(), diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java index 429b2bf5c4c..b93004c9b99 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java @@ -45,7 +45,7 @@ import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; import org.apache.hadoop.hdfs.server.datanode.DataNode; -import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; +import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.io.IOUtils; @@ -361,8 +361,8 @@ public class TestPipelinesFailover { // active. DataNode primaryDN = cluster.getDataNode(expectedPrimary.getIpcPort()); DatanodeProtocolClientSideTranslatorPB nnSpy = - DataNodeTestUtils.spyOnBposToNN(primaryDN, nn0); - + InternalDataNodeTestUtils.spyOnBposToNN(primaryDN, nn0); + // Delay the commitBlockSynchronization call DelayAnswer delayer = new DelayAnswer(LOG); Mockito.doAnswer(delayer).when(nnSpy).commitBlockSynchronization(