HDFS-12599. Remove Mockito dependency from DataNodeTestUtils. Contributed by Ted Yu.

Arpit Agarwal 2017-10-09 10:29:01 -07:00
parent 46644319e1
commit 09ad848b64
4 changed files with 36 additions and 31 deletions
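In short, the commit relocates the Mockito-backed mockDatanodeBlkPinning helper from DataNodeTestUtils to InternalDataNodeTestUtils and updates its two test call sites, so DataNodeTestUtils no longer pulls Mockito onto the classpath of downstream projects that reuse it. A minimal usage sketch of the relocated helper follows; the BlockPinningTestHelper class name is hypothetical, and a MiniDFSCluster-based test with the hadoop-hdfs test classes on the classpath is assumed, mirroring the call sites in the diffs below.

import java.io.IOException;

import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;

/** Hypothetical sketch: mark every block on every DataNode in a test cluster as pinned. */
final class BlockPinningTestHelper {
  private BlockPinningTestHelper() {
  }

  static void pinAllBlocks(MiniDFSCluster cluster) throws IOException {
    for (DataNode dn : cluster.getDataNodes()) {
      // The helper now lives in InternalDataNodeTestUtils, keeping
      // DataNodeTestUtils free of Mockito imports.
      InternalDataNodeTestUtils.mockDatanodeBlkPinning(dn, true);
    }
  }
}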

DataNodeTestUtils.java

@@ -29,7 +29,6 @@ import org.apache.hadoop.conf.ReconfigurationException;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
-import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil;
@@ -37,16 +36,16 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.mockito.Mockito;
-import org.mockito.invocation.InvocationOnMock;
-import org.mockito.stubbing.Answer;
 import com.google.common.base.Supplier;
 import static org.hamcrest.core.Is.is;
 import static org.junit.Assert.assertThat;
-import static org.mockito.Matchers.any;
-import static org.mockito.Mockito.doAnswer;
+/**
+ * DO NOT ADD MOCKITO IMPORTS HERE Or Downstream projects may not
+ * be able to start mini dfs cluster. THANKS.
+ */
 /**
  * Utility class for accessing package-private DataNode information during tests.
@@ -196,27 +195,6 @@ public class DataNodeTestUtils {
     }
   }
-  /**
-   * This method is used to mock the data node block pinning API.
-   *
-   * @param dn datanode
-   * @param pinned true if the block is pinned, false otherwise
-   * @throws IOException
-   */
-  public static void mockDatanodeBlkPinning(final DataNode dn,
-      final boolean pinned) throws IOException {
-    final FsDatasetSpi<? extends FsVolumeSpi> data = dn.data;
-    dn.data = Mockito.spy(data);
-    doAnswer(new Answer<Object>() {
-      public Object answer(InvocationOnMock invocation) throws IOException {
-        // Bypass the argument to FsDatasetImpl#getPinning to show that
-        // the block is pinned.
-        return pinned;
-      }
-    }).when(dn.data).getPinning(any(ExtendedBlock.class));
-  }
   /**
    * Reconfigure a DataNode by setting a new list of volumes.
    *

InternalDataNodeTestUtils.java

@@ -32,7 +32,10 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
@@ -49,6 +52,9 @@ import org.mockito.stubbing.Answer;
 import com.google.common.base.Preconditions;
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.doAnswer;
 /**
  * An internal-facing only collection of test utilities for the DataNode. This
  * is to ensure that test-scope dependencies aren't inadvertently leaked
@@ -64,6 +70,27 @@ public class InternalDataNodeTestUtils {
     return dn.getDNRegistrationForBP(bpid);
   }
+  /**
+   * This method is used to mock the data node block pinning API.
+   *
+   * @param dn datanode
+   * @param pinned true if the block is pinned, false otherwise
+   * @throws IOException
+   */
+  public static void mockDatanodeBlkPinning(final DataNode dn,
+      final boolean pinned) throws IOException {
+    final FsDatasetSpi<? extends FsVolumeSpi> data = dn.data;
+    dn.data = Mockito.spy(data);
+    doAnswer(new Answer<Object>() {
+      public Object answer(InvocationOnMock invocation) throws IOException {
+        // Bypass the argument to FsDatasetImpl#getPinning to show that
+        // the block is pinned.
+        return pinned;
+      }
+    }).when(dn.data).getPinning(any(ExtendedBlock.class));
+  }
   /**
    * Insert a Mockito spy object between the given DataNode and
    * the given NameNode. This can be used to delay or wait for
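As the added method above shows, the relocated helper replaces dn.data with a Mockito spy whose getPinning answer ignores the block argument and returns the requested pinned value. The sketch below illustrates that observable effect; PinnedDatasetCheck is a hypothetical name, and reading the value back through DataNode#getFSDataset and FsDatasetSpi#getPinning is an assumption made for illustration rather than something this commit does.

import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;

/** Hypothetical sketch: observe the pinning state a DataNode reports once the spy is installed. */
final class PinnedDatasetCheck {
  private PinnedDatasetCheck() {
  }

  static boolean reportsPinned(DataNode dn, ExtendedBlock block) throws IOException {
    // Wraps dn.data in a spy whose getPinning(...) always answers true.
    InternalDataNodeTestUtils.mockDatanodeBlkPinning(dn, true);
    // The stub ignores the block argument, so any block now reports as pinned.
    return dn.getFSDataset().getPinning(block);
  }
}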

TestBlockReplacement.java

@@ -252,7 +252,7 @@ public class TestBlockReplacement {
     for (int i = 0; i < cluster.getDataNodes().size(); i++) {
       DataNode dn = cluster.getDataNodes().get(i);
       LOG.info("Simulate block pinning in datanode " + dn);
-      DataNodeTestUtils.mockDatanodeBlkPinning(dn, true);
+      InternalDataNodeTestUtils.mockDatanodeBlkPinning(dn, true);
     }
     // Block movement to a different datanode should fail as the block is

TestMover.java

@@ -76,7 +76,7 @@ import org.apache.hadoop.hdfs.server.balancer.ExitStatus;
 import org.apache.hadoop.hdfs.server.balancer.NameNodeConnector;
 import org.apache.hadoop.hdfs.server.balancer.TestBalancer;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.mover.Mover.MLocation;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.http.HttpConfig;
@@ -758,7 +758,7 @@ public class TestMover {
     for (int i = 0; i < cluster.getDataNodes().size(); i++) {
       DataNode dn = cluster.getDataNodes().get(i);
       LOG.info("Simulate block pinning in datanode {}", dn);
-      DataNodeTestUtils.mockDatanodeBlkPinning(dn, true);
+      InternalDataNodeTestUtils.mockDatanodeBlkPinning(dn, true);
     }
     // move file blocks to ONE_SSD policy
@@ -896,7 +896,7 @@ public class TestMover {
       if (dn.getDatanodeId().getDatanodeUuid()
           .equals(datanodeInfo.getDatanodeUuid())) {
         LOG.info("Simulate block pinning in datanode {}", datanodeInfo);
-        DataNodeTestUtils.mockDatanodeBlkPinning(dn, true);
+        InternalDataNodeTestUtils.mockDatanodeBlkPinning(dn, true);
         break;
       }
     }