diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
index e0ee3e70303..aeff16da016 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
@@ -59,7 +59,7 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
 import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
-import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.util.DataChecksum;
@@ -209,7 +209,7 @@ public void testOpWrite() throws IOException {
     try {
       cluster.waitActive();
       String poolId = cluster.getNamesystem().getBlockPoolId();
-      datanode = DataNodeTestUtils.getDNRegistrationForBP(
+      datanode = InternalDataNodeTestUtils.getDNRegistrationForBP(
           cluster.getDataNodes().get(0), poolId);
       dnAddr = NetUtils.createSocketAddr(datanode.getXferAddr());
       FileSystem fileSys = cluster.getFileSystem();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
index 011baa1ea3a..2437e38acc9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
@@ -46,7 +46,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
@@ -154,8 +154,8 @@ public void testArrayOutOfBoundsException() throws Exception {
       DataNode dataNode = datanodes.get(2);
       // report corrupted block by the third datanode
-      DatanodeRegistration dnR =
-          DataNodeTestUtils.getDNRegistrationForBP(dataNode, blk.getBlockPoolId());
+      DatanodeRegistration dnR = InternalDataNodeTestUtils.
+          getDNRegistrationForBP(dataNode, blk.getBlockPoolId());
       FSNamesystem ns = cluster.getNamesystem();
       ns.writeLock();
       try {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
index e0e04569cbb..3e23db3fb82 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
@@ -54,6 +54,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.FsDatasetTestUtils.MaterializedReplica;
+import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
@@ -571,7 +572,7 @@ public void testNoExtraReplicationWhenBlockReceivedIsLate()
     NameNode nn = cluster.getNameNode();
     DataNode dn = cluster.getDataNodes().get(0);
     DatanodeProtocolClientSideTranslatorPB spy =
-        DataNodeTestUtils.spyOnBposToNN(dn, nn);
+        InternalDataNodeTestUtils.spyOnBposToNN(dn, nn);
     DelayAnswer delayer = new GenericTestUtils.DelayAnswer(LOG);
     Mockito.doAnswer(delayer).when(spy).blockReceivedAndDeleted(
         Mockito.anyObject(),
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index 3ee08102137..394fae96558 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -74,7 +74,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
-import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
@@ -950,7 +950,7 @@ public void testStorageWithRemainingCapacity() throws Exception {
     final FSNamesystem namesystem = cluster.getNamesystem();
     final String poolId = namesystem.getBlockPoolId();
     final DatanodeRegistration nodeReg =
-      DataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().
+      InternalDataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().
          get(0), poolId);
     final DatanodeDescriptor dd = NameNodeAdapter.getDatanode(namesystem,
         nodeReg);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java
index b77c413f068..ab607eaa8d4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java
@@ -32,7 +32,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
-import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
@@ -66,7 +66,8 @@ public void testHeartbeat() throws Exception {
       ).getDatanodeManager().getHeartbeatManager();
       final String poolId = namesystem.getBlockPoolId();
       final DatanodeRegistration nodeReg =
-        DataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(0), poolId);
+        InternalDataNodeTestUtils.
+            getDNRegistrationForBP(cluster.getDataNodes().get(0), poolId);
       final DatanodeDescriptor dd = NameNodeAdapter.getDatanode(namesystem, nodeReg);
       final String storageID = DatanodeStorage.generateUuid();
       dd.updateStorage(new DatanodeStorage(storageID));
@@ -149,15 +150,18 @@ public void testHeartbeatBlockRecovery() throws Exception {
       ).getDatanodeManager().getHeartbeatManager();
       final String poolId = namesystem.getBlockPoolId();
       final DatanodeRegistration nodeReg1 =
-        DataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(0), poolId);
+        InternalDataNodeTestUtils.
+            getDNRegistrationForBP(cluster.getDataNodes().get(0), poolId);
       final DatanodeDescriptor dd1 = NameNodeAdapter.getDatanode(namesystem, nodeReg1);
       dd1.updateStorage(new DatanodeStorage(DatanodeStorage.generateUuid()));
       final DatanodeRegistration nodeReg2 =
-        DataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(1), poolId);
+        InternalDataNodeTestUtils.
+            getDNRegistrationForBP(cluster.getDataNodes().get(1), poolId);
       final DatanodeDescriptor dd2 = NameNodeAdapter.getDatanode(namesystem, nodeReg2);
       dd2.updateStorage(new DatanodeStorage(DatanodeStorage.generateUuid()));
       final DatanodeRegistration nodeReg3 =
-        DataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(2), poolId);
+        InternalDataNodeTestUtils.
+            getDNRegistrationForBP(cluster.getDataNodes().get(2), poolId);
       final DatanodeDescriptor dd3 = NameNodeAdapter.getDatanode(namesystem, nodeReg3);
       dd3.updateStorage(new DatanodeStorage(DatanodeStorage.generateUuid()));
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java
index e33c7a3a15c..d5f2fb99b78 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java
@@ -36,7 +36,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
@@ -88,7 +88,7 @@ public void testProcesOverReplicateBlock() throws Exception {
       String blockPoolId = cluster.getNamesystem().getBlockPoolId();
       final DatanodeID corruptDataNode =
-          DataNodeTestUtils.getDNRegistrationForBP(
+          InternalDataNodeTestUtils.getDNRegistrationForBP(
               cluster.getDataNodes().get(2), blockPoolId);
 
       final FSNamesystem namesystem = cluster.getNamesystem();
@@ -157,8 +157,8 @@ public void testChooseReplicaToDelete() throws Exception {
       conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 300);
       cluster.startDataNodes(conf, 1, true, null, null, null);
       DataNode lastDN = cluster.getDataNodes().get(3);
-      DatanodeRegistration dnReg = DataNodeTestUtils.getDNRegistrationForBP(
-          lastDN, namesystem.getBlockPoolId());
+      DatanodeRegistration dnReg = InternalDataNodeTestUtils.
+          getDNRegistrationForBP(lastDN, namesystem.getBlockPoolId());
       String lastDNid = dnReg.getDatanodeUuid();
 
       final Path fileName = new Path("/foo2");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java
index 4279f96a001..53b92636941 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java
@@ -614,7 +614,7 @@ protected Object passThrough(InvocationOnMock invocation)
     // from this node.
     DataNode dn = cluster.getDataNodes().get(0);
     DatanodeProtocolClientSideTranslatorPB spy =
-        DataNodeTestUtils.spyOnBposToNN(dn, nn);
+        InternalDataNodeTestUtils.spyOnBposToNN(dn, nn);
 
     Mockito.doAnswer(delayer)
         .when(spy).blockReport(
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
index 1d4719279fe..e2755f9f033 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
@@ -19,44 +19,21 @@
 package org.apache.hadoop.hdfs.server.datanode;
 
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
-
 import java.io.File;
 import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.util.ArrayList;
-import java.util.concurrent.ThreadLocalRandom;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
-import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
-import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
 import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
-import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat;
-import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
-import org.apache.hadoop.hdfs.server.protocol.StorageReport;
-import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;
-import org.junit.Assert;
-import org.mockito.Mockito;
-import org.mockito.invocation.InvocationOnMock;
-import org.mockito.stubbing.Answer;
-
-import com.google.common.base.Preconditions;
 
 /**
  * Utility class for accessing package-private DataNode information during tests.
- *
+ * Must not contain usage of classes that are not explicitly listed as
+ * dependencies to {@link MiniDFSCluster}.
  */
 public class DataNodeTestUtils {
   private static final String DIR_FAILURE_SUFFIX = ".origin";
@@ -101,41 +78,6 @@ public static void triggerBlockReport(DataNode dn) throws IOException {
       bpos.triggerBlockReportForTests();
     }
   }
-
-  /**
-   * Insert a Mockito spy object between the given DataNode and
-   * the given NameNode. This can be used to delay or wait for
-   * RPC calls on the datanode->NN path.
-   */
-  public static DatanodeProtocolClientSideTranslatorPB spyOnBposToNN(
-      DataNode dn, NameNode nn) {
-    String bpid = nn.getNamesystem().getBlockPoolId();
-
-    BPOfferService bpos = null;
-    for (BPOfferService thisBpos : dn.getAllBpOs()) {
-      if (thisBpos.getBlockPoolId().equals(bpid)) {
-        bpos = thisBpos;
-        break;
-      }
-    }
-    Preconditions.checkArgument(bpos != null,
-        "No such bpid: %s", bpid);
-
-    BPServiceActor bpsa = null;
-    for (BPServiceActor thisBpsa : bpos.getBPServiceActors()) {
-      if (thisBpsa.getNNSocketAddress().equals(nn.getServiceRpcAddress())) {
-        bpsa = thisBpsa;
-        break;
-      }
-    }
-    Preconditions.checkArgument(bpsa != null,
-        "No service actor to NN at %s", nn.getServiceRpcAddress());
-
-    DatanodeProtocolClientSideTranslatorPB origNN = bpsa.getNameNodeProxy();
-    DatanodeProtocolClientSideTranslatorPB spy = Mockito.spy(origNN);
-    bpsa.setNameNode(spy);
-    return spy;
-  }
 
   public static InterDatanodeProtocol createInterDatanodeProtocolProxy(
       DataNode dn, DatanodeID datanodeid, final Configuration conf,
@@ -233,61 +175,4 @@ public static void runDirectoryScanner(DataNode dn) throws IOException {
       dn.getDirectoryScanner().reconcile();
     }
   }
-
-  /**
-   * Starts an instance of DataNode with NN mocked. Called should ensure to
-   * shutdown the DN
-   *
-   * @throws IOException
-   */
-  public static DataNode startDNWithMockNN(Configuration conf,
-      final InetSocketAddress nnSocketAddr, final String dnDataDir)
-      throws IOException {
-
-    FileSystem.setDefaultUri(conf, "hdfs://" + nnSocketAddr.getHostName() + ":"
-        + nnSocketAddr.getPort());
-    ArrayList<StorageLocation> locations = new ArrayList<StorageLocation>();
-    File dataDir = new File(dnDataDir);
-    FileUtil.fullyDelete(dataDir);
-    dataDir.mkdirs();
-    StorageLocation location = StorageLocation.parse(dataDir.getPath());
-    locations.add(location);
-
-    final DatanodeProtocolClientSideTranslatorPB namenode =
-        mock(DatanodeProtocolClientSideTranslatorPB.class);
-
-    Mockito.doAnswer(new Answer<DatanodeRegistration>() {
-      @Override
-      public DatanodeRegistration answer(InvocationOnMock invocation)
-          throws Throwable {
-        return (DatanodeRegistration) invocation.getArguments()[0];
-      }
-    }).when(namenode).registerDatanode(Mockito.any(DatanodeRegistration.class));
-
-    when(namenode.versionRequest()).thenReturn(
-        new NamespaceInfo(1, TEST_CLUSTER_ID, TEST_POOL_ID, 1L));
-
-    when(
-        namenode.sendHeartbeat(Mockito.any(DatanodeRegistration.class),
-            Mockito.any(StorageReport[].class), Mockito.anyLong(),
-            Mockito.anyLong(), Mockito.anyInt(), Mockito.anyInt(),
-            Mockito.anyInt(), Mockito.any(VolumeFailureSummary.class),
-            Mockito.anyBoolean())).thenReturn(
-        new HeartbeatResponse(new DatanodeCommand[0], new NNHAStatusHeartbeat(
-            HAServiceState.ACTIVE, 1), null, ThreadLocalRandom.current()
-            .nextLong() | 1L));
-
-    DataNode dn = new DataNode(conf, locations, null) {
-      @Override
-      DatanodeProtocolClientSideTranslatorPB connectToNN(
-          InetSocketAddress nnAddr) throws IOException {
-        Assert.assertEquals(nnSocketAddr, nnAddr);
-        return namenode;
-      }
-    };
-    // Trigger a heartbeat so that it acknowledges the NN as active.
-    dn.getAllBpOs().get(0).triggerHeartbeatForTests();
-
-    return dn;
-  }
 }
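
With the Mockito-backed helpers gone, DataNodeTestUtils keeps only methods that stay within MiniDFSCluster's declared dependency set, so a downstream consumer of MiniDFSCluster no longer needs Mockito on its classpath just to touch this class. A minimal sketch of what remains usable without any mocking library (the test class and wiring here are hypothetical, not part of this patch):

import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;

public class MyMockFreeTest {
  void forceFullBlockReports(MiniDFSCluster cluster) throws Exception {
    for (DataNode dn : cluster.getDataNodes()) {
      // triggerBlockReport() survives the split: a plain, mock-free helper.
      DataNodeTestUtils.triggerBlockReport(dn);
    }
  }
}
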
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/InternalDataNodeTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/InternalDataNodeTestUtils.java
new file mode 100644
index 00000000000..2f400e2198b
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/InternalDataNodeTestUtils.java
@@ -0,0 +1,157 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.datanode;
+
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.io.File;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.concurrent.ThreadLocalRandom;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
+import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
+import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat;
+import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
+import org.apache.hadoop.hdfs.server.protocol.StorageReport;
+import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;
+import org.junit.Assert;
+import org.mockito.Mockito;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * An internal-facing only collection of test utilities for the DataNode. This
+ * is to ensure that test-scope dependencies aren't inadvertently leaked
+ * to clients, e.g. Mockito.
+ */
+public class InternalDataNodeTestUtils {
+
+  public final static String TEST_CLUSTER_ID = "testClusterID";
+  public final static String TEST_POOL_ID = "BP-TEST";
+
+  public static DatanodeRegistration
+  getDNRegistrationForBP(DataNode dn, String bpid) throws IOException {
+    return dn.getDNRegistrationForBP(bpid);
+  }
+
+  /**
+   * Insert a Mockito spy object between the given DataNode and
+   * the given NameNode. This can be used to delay or wait for
+   * RPC calls on the datanode->NN path.
+   */
+  public static DatanodeProtocolClientSideTranslatorPB spyOnBposToNN(
+      DataNode dn, NameNode nn) {
+    String bpid = nn.getNamesystem().getBlockPoolId();
+
+    BPOfferService bpos = null;
+    for (BPOfferService thisBpos : dn.getAllBpOs()) {
+      if (thisBpos.getBlockPoolId().equals(bpid)) {
+        bpos = thisBpos;
+        break;
+      }
+    }
+    Preconditions.checkArgument(bpos != null,
+        "No such bpid: %s", bpid);
+
+    BPServiceActor bpsa = null;
+    for (BPServiceActor thisBpsa : bpos.getBPServiceActors()) {
+      if (thisBpsa.getNNSocketAddress().equals(nn.getServiceRpcAddress())) {
+        bpsa = thisBpsa;
+        break;
+      }
+    }
+    Preconditions.checkArgument(bpsa != null,
+        "No service actor to NN at %s", nn.getServiceRpcAddress());
+
+    DatanodeProtocolClientSideTranslatorPB origNN = bpsa.getNameNodeProxy();
+    DatanodeProtocolClientSideTranslatorPB spy = Mockito.spy(origNN);
+    bpsa.setNameNode(spy);
+    return spy;
+  }
+
+  /**
+   * Starts an instance of DataNode with NN mocked. Caller should ensure to
+   * shutdown the DN.
+   *
+   * @throws IOException
+   */
+  public static DataNode startDNWithMockNN(Configuration conf,
+      final InetSocketAddress nnSocketAddr, final String dnDataDir)
+      throws IOException {
+
+    FileSystem.setDefaultUri(conf, "hdfs://" + nnSocketAddr.getHostName() + ":"
+        + nnSocketAddr.getPort());
+    ArrayList<StorageLocation> locations = new ArrayList<StorageLocation>();
+    File dataDir = new File(dnDataDir);
+    FileUtil.fullyDelete(dataDir);
+    dataDir.mkdirs();
+    StorageLocation location = StorageLocation.parse(dataDir.getPath());
+    locations.add(location);
+
+    final DatanodeProtocolClientSideTranslatorPB namenode =
+        mock(DatanodeProtocolClientSideTranslatorPB.class);
+
+    Mockito.doAnswer(new Answer<DatanodeRegistration>() {
+      @Override
+      public DatanodeRegistration answer(InvocationOnMock invocation)
+          throws Throwable {
+        return (DatanodeRegistration) invocation.getArguments()[0];
+      }
+    }).when(namenode).registerDatanode(Mockito.any(DatanodeRegistration.class));
+
+    when(namenode.versionRequest()).thenReturn(
+        new NamespaceInfo(1, TEST_CLUSTER_ID, TEST_POOL_ID,
+            1L));
+
+    when(
+        namenode.sendHeartbeat(Mockito.any(DatanodeRegistration.class),
+            Mockito.any(StorageReport[].class), Mockito.anyLong(),
+            Mockito.anyLong(), Mockito.anyInt(), Mockito.anyInt(),
+            Mockito.anyInt(), Mockito.any(VolumeFailureSummary.class),
+            Mockito.anyBoolean())).thenReturn(
+        new HeartbeatResponse(new DatanodeCommand[0], new NNHAStatusHeartbeat(
+            HAServiceState.ACTIVE, 1), null, ThreadLocalRandom.current()
+            .nextLong() | 1L));
+
+    DataNode dn = new DataNode(conf, locations, null) {
+      @Override
+      DatanodeProtocolClientSideTranslatorPB connectToNN(
+          InetSocketAddress nnAddr) throws IOException {
+        Assert.assertEquals(nnSocketAddr, nnAddr);
+        return namenode;
+      }
+    };
+    // Trigger a heartbeat so that it acknowledges the NN as active.
+    dn.getAllBpOs().get(0).triggerHeartbeatForTests();
+
+    return dn;
+  }
+}
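
The relocated startDNWithMockNN() keeps its contract: it boots a real DataNode against a mocked NameNode and leaves shutdown to the caller. A hedged usage sketch, mirroring the TestDataNodeMetricsLogger and TestDataNodeReconfiguration call sites patched below (the class name, address, and data directory here are illustrative, not from this patch):

import java.io.File;
import java.io.IOException;
import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;

public class MyMockNNTest {
  void startAndStopDN() throws IOException {
    Configuration conf = new HdfsConfiguration();
    InetSocketAddress nnAddr = new InetSocketAddress("localhost", 8020);
    String dataDir = new File("target/test/data", "dfs").getPath();

    // The helper wipes dataDir, mocks the NN RPC proxy, and starts the DN.
    DataNode dn =
        InternalDataNodeTestUtils.startDNWithMockNN(conf, nnAddr, dataDir);
    try {
      // Exercise the DataNode against the mocked NameNode here.
    } finally {
      dn.shutdown(); // the helper does not shut the DN down for you
    }
  }
}
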
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
index 83153929b9f..659806beb7d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
@@ -822,7 +822,7 @@ public void testFullBlockReportAfterRemovingVolumes()
     final DataNode dn = cluster.getDataNodes().get(0);
     DatanodeProtocolClientSideTranslatorPB spy =
-        DataNodeTestUtils.spyOnBposToNN(dn, cluster.getNameNode());
+        InternalDataNodeTestUtils.spyOnBposToNN(dn, cluster.getNameNode());
 
     // Remove a data dir from datanode
     File dataDirToKeep = new File(cluster.getDataDirectory(), "data1");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java
index 1177a457fae..32fda370161 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java
@@ -89,7 +89,7 @@ public void startDNForTest(boolean enableMetricsLogging) throws IOException {
     conf.setInt(DFS_DATANODE_METRICS_LOGGER_PERIOD_SECONDS_KEY,
         enableMetricsLogging ? 1 : 0); // If enabled, log early and log often
 
-    dn = DataNodeTestUtils.startDNWithMockNN(conf, NN_ADDR, DATA_DIR);
+    dn = InternalDataNodeTestUtils.startDNWithMockNN(conf, NN_ADDR, DATA_DIR);
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java
index 249b5c69cbb..1dfd3c31db6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java
@@ -102,7 +102,7 @@ public DataNode[] createDNsForTest(int numDateNode) throws IOException {
     DataNode[] result = new DataNode[numDateNode];
 
     for (int i = 0; i < numDateNode; i++) {
-      result[i] = DataNodeTestUtils.startDNWithMockNN(conf, NN_ADDR, DATA_DIR);
+      result[i] = InternalDataNodeTestUtils.startDNWithMockNN(conf, NN_ADDR, DATA_DIR);
     }
     return result;
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDnRespectsBlockReportSplitThreshold.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDnRespectsBlockReportSplitThreshold.java
index badd59353c5..bf0e3c11bdd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDnRespectsBlockReportSplitThreshold.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDnRespectsBlockReportSplitThreshold.java
@@ -122,7 +122,7 @@ public void testAlwaysSplit() throws IOException, InterruptedException {
 
     // Insert a spy object for the NN RPC.
     DatanodeProtocolClientSideTranslatorPB nnSpy =
-        DataNodeTestUtils.spyOnBposToNN(dn, nn);
+        InternalDataNodeTestUtils.spyOnBposToNN(dn, nn);
 
     // Trigger a block report so there is an interaction with the spy
     // object.
@@ -154,7 +154,7 @@ public void testCornerCaseUnderThreshold() throws IOException, InterruptedExcept
 
     // Insert a spy object for the NN RPC.
     DatanodeProtocolClientSideTranslatorPB nnSpy =
-        DataNodeTestUtils.spyOnBposToNN(dn, nn);
+        InternalDataNodeTestUtils.spyOnBposToNN(dn, nn);
 
     // Trigger a block report so there is an interaction with the spy
     // object.
@@ -186,7 +186,7 @@ public void testCornerCaseAtThreshold() throws IOException, InterruptedException
 
     // Insert a spy object for the NN RPC.
     DatanodeProtocolClientSideTranslatorPB nnSpy =
-        DataNodeTestUtils.spyOnBposToNN(dn, nn);
+        InternalDataNodeTestUtils.spyOnBposToNN(dn, nn);
 
     // Trigger a block report so there is an interaction with the spy
     // object.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java
index 10d974c0c9a..6557055f783 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java
@@ -140,7 +140,7 @@ public void setUp() throws Exception {
     dn = cluster.getDataNodes().get(0);
     fsd = dn.getFSDataset();
 
-    spyNN = DataNodeTestUtils.spyOnBposToNN(dn, nn);
+    spyNN = InternalDataNodeTestUtils.spyOnBposToNN(dn, nn);
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestIncrementalBlockReports.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestIncrementalBlockReports.java
index 228897418e9..03553fed4f3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestIncrementalBlockReports.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestIncrementalBlockReports.java
@@ -106,7 +106,7 @@ private void injectBlockDeleted() {
    * @return spy object that can be used for Mockito verification.
    */
   DatanodeProtocolClientSideTranslatorPB spyOnDnCallsToNn() {
-    return DataNodeTestUtils.spyOnBposToNN(singletonDn, singletonNn);
+    return InternalDataNodeTestUtils.spyOnBposToNN(singletonDn, singletonNn);
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStorageReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStorageReport.java
index a6032c1ce2a..d8418d46b6a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStorageReport.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStorageReport.java
@@ -92,7 +92,7 @@ public void testStorageReportHasStorageTypeAndState() throws IOException {
 
     // Insert a spy object for the NN RPC.
     DatanodeProtocolClientSideTranslatorPB nnSpy =
-        DataNodeTestUtils.spyOnBposToNN(dn, nn);
+        InternalDataNodeTestUtils.spyOnBposToNN(dn, nn);
 
     // Trigger a heartbeat so there is an interaction with the spy
     // object.
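
The call sites above all follow the same pattern: plant the spy, then stall or observe a specific DN-to-NN RPC. A minimal sketch of that pattern, modeled on the TestReplication hunk earlier in this patch (the test class and LOG field are hypothetical; the matcher shape assumes the three-argument DatanodeProtocol signature of blockReceivedAndDeleted):

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.test.GenericTestUtils;
import org.mockito.Mockito;

public class MySpyTest {
  private static final Log LOG = LogFactory.getLog(MySpyTest.class);

  void delayIncrementalReports(MiniDFSCluster cluster) throws Exception {
    NameNode nn = cluster.getNameNode();
    DataNode dn = cluster.getDataNodes().get(0);
    // Swap a Mockito spy in front of the DN's NameNode RPC proxy.
    DatanodeProtocolClientSideTranslatorPB spy =
        InternalDataNodeTestUtils.spyOnBposToNN(dn, nn);
    // Hold back blockReceivedAndDeleted() until the test releases it.
    GenericTestUtils.DelayAnswer delayer =
        new GenericTestUtils.DelayAnswer(LOG);
    Mockito.doAnswer(delayer).when(spy).blockReceivedAndDeleted(
        Mockito.anyObject(), Mockito.anyString(), Mockito.anyObject());
    // ...make the DN produce the RPC, then delayer.waitForCall(),
    // do the racy work under test, and finally delayer.proceed().
  }
}
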
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestTriggerBlockReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestTriggerBlockReport.java
index 83ebeebb650..f7c9911218a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestTriggerBlockReport.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestTriggerBlockReport.java
@@ -63,7 +63,7 @@ private void testTriggerBlockReport(boolean incremental) throws Exception {
     cluster.waitActive();
     FileSystem fs = cluster.getFileSystem();
     DatanodeProtocolClientSideTranslatorPB spy =
-        DataNodeTestUtils.spyOnBposToNN(
+        InternalDataNodeTestUtils.spyOnBposToNN(
             cluster.getDataNodes().get(0), cluster.getNameNode());
     DFSTestUtil.createFile(fs, new Path("/abc"), 16, (short) 1, 1L);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
index 04f0b91a2af..341a14978bb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
@@ -37,7 +37,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
@@ -86,9 +86,9 @@ public void testDeadDatanode() throws Exception {
     String poolId = cluster.getNamesystem().getBlockPoolId();
     // wait for datanode to be marked live
     DataNode dn = cluster.getDataNodes().get(0);
-    DatanodeRegistration reg =
-        DataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(0), poolId);
-
+    DatanodeRegistration reg = InternalDataNodeTestUtils.
+        getDNRegistrationForBP(cluster.getDataNodes().get(0), poolId);
+
     DFSTestUtil.waitForDatanodeState(cluster, reg.getDatanodeUuid(), true,
         20000);
 
     // Shutdown and wait for datanode to be marked dead
@@ -147,8 +147,8 @@ public void testDeadNodeAsBlockTarget() throws Exception {
     String poolId = cluster.getNamesystem().getBlockPoolId();
     // wait for datanode to be marked live
     DataNode dn = cluster.getDataNodes().get(0);
-    DatanodeRegistration reg = DataNodeTestUtils.getDNRegistrationForBP(cluster
-        .getDataNodes().get(0), poolId);
+    DatanodeRegistration reg = InternalDataNodeTestUtils.
+        getDNRegistrationForBP(cluster.getDataNodes().get(0), poolId);
     // Get the updated datanode descriptor
     BlockManager bm = cluster.getNamesystem().getBlockManager();
     DatanodeManager dm = bm.getDatanodeManager();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java
index 15f697a9a06..133a18e72d5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java
@@ -48,7 +48,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.Node;
@@ -299,7 +299,7 @@ private void testDeleteAndCommitBlockSynchronizationRace(boolean hasSnapshot)
         DataNode primaryDN = cluster.getDataNode(expectedPrimary.getIpcPort());
         DatanodeProtocolClientSideTranslatorPB nnSpy = dnMap.get(primaryDN);
         if (nnSpy == null) {
-          nnSpy = DataNodeTestUtils.spyOnBposToNN(primaryDN, nn);
+          nnSpy = InternalDataNodeTestUtils.spyOnBposToNN(primaryDN, nn);
           dnMap.put(primaryDN, nnSpy);
         }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java
index 3b3c35fb5fe..15357b05c6d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java
@@ -50,6 +50,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
@@ -539,8 +540,8 @@ protected Object passThrough(InvocationOnMock invocation)
       DataNode dn = cluster.getDataNodes().get(0);
       DatanodeProtocolClientSideTranslatorPB spy =
-          DataNodeTestUtils.spyOnBposToNN(dn, nn2);
-
+          InternalDataNodeTestUtils.spyOnBposToNN(dn, nn2);
+
       Mockito.doAnswer(delayer)
           .when(spy).blockReport(
               Mockito.anyObject(),
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java
index 9ece12173f8..4fc5029146d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java
@@ -46,7 +46,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.io.IOUtils;
@@ -358,8 +358,8 @@ public void testFailoverRightBeforeCommitSynchronization() throws Exception {
     // active.
     DataNode primaryDN = cluster.getDataNode(expectedPrimary.getIpcPort());
     DatanodeProtocolClientSideTranslatorPB nnSpy =
-        DataNodeTestUtils.spyOnBposToNN(primaryDN, nn0);
-
+        InternalDataNodeTestUtils.spyOnBposToNN(primaryDN, nn0);
+
     // Delay the commitBlockSynchronization call
     DelayAnswer delayer = new DelayAnswer(LOG);
     Mockito.doAnswer(delayer).when(nnSpy).commitBlockSynchronization(
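
Finally, the relocated getDNRegistrationForBP() is a one-line accessor for the DataNode's per-block-pool registration. The typical wait-for-live pattern from the TestDeadDatanode hunks above looks like this (the class name is hypothetical; the 20-second timeout is copied from that test):

import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;

public class MyRegistrationTest {
  void waitForLiveDN(MiniDFSCluster cluster) throws Exception {
    String poolId = cluster.getNamesystem().getBlockPoolId();
    DataNode dn = cluster.getDataNodes().get(0);
    // Resolve this DN's registration for the cluster's block pool...
    DatanodeRegistration reg =
        InternalDataNodeTestUtils.getDNRegistrationForBP(dn, poolId);
    // ...then block until the NameNode marks the DN live.
    DFSTestUtil.waitForDatanodeState(cluster, reg.getDatanodeUuid(), true,
        20000);
  }
}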