HDFS-9363. Add fetchReplica to FsDatasetTestUtils to return FsDataset-agnostic replica. (Tony Wu via lei)

This commit is contained in:
Lei Xu 2015-11-04 10:46:19 -08:00
parent 0fb1867fd6
commit 5667129276
5 changed files with 20 additions and 6 deletions

View File

@@ -1623,6 +1623,9 @@ Release 2.8.0 - UNRELEASED
HDFS-9331. Modify TestNameNodeMXBean#testNameNodeMXBeanInfo() to account for
filesystem entirely allocated for DFS use. (Tony Wu via lei)
HDFS-9363. Add fetchReplica() to FsDatasetTestUtils to return FsDataset-agnostic
replica. (Tony Wu via lei)
OPTIMIZATIONS
HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

View File

@@ -33,7 +33,6 @@
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.datanode.Replica;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
@@ -102,10 +101,9 @@ public void pipeline_01() throws IOException {
List<LocatedBlock> lb = cluster.getNameNodeRpc().getBlockLocations(
filePath.toString(), FILE_SIZE - 1, FILE_SIZE).getLocatedBlocks();
String bpid = cluster.getNamesystem().getBlockPoolId();
for (DataNode dn : cluster.getDataNodes()) {
Replica r = DataNodeTestUtils.fetchReplicaInfo(dn, bpid, lb.get(0)
.getBlock().getBlockId());
Replica r =
cluster.getFsDatasetTestUtils(dn).fetchReplica(lb.get(0).getBlock());
assertTrue("Replica on DN " + dn + " shouldn't be null", r != null);
assertEquals("Should be RBW replica on " + dn

View File

@@ -206,4 +206,11 @@ Replica createReplicaUnderRecovery(ExtendedBlock block, long recoveryId)
* @throws IOException on I/O error.
*/
void injectCorruptReplica(ExtendedBlock block) throws IOException;
/**
 * Get the replica of a block, without exposing the underlying
 * {@code FsDatasetSpi} implementation. Returns null if it does not exist.
 * @param block the block whose replica will be returned.
 * @return the {@code Replica} for the block, or {@code null} if this
 *         DataNode holds no replica of the block.
 */
Replica fetchReplica(ExtendedBlock block);
}

View File

@@ -317,4 +317,9 @@ public void injectCorruptReplica(ExtendedBlock block) throws IOException {
}
}
}
/**
 * {@inheritDoc}
 *
 * Delegates to the dataset's {@code fetchReplicaInfo}, looking the replica
 * up by the block's pool ID and block ID; the result is {@code null} when
 * the dataset has no replica for the block.
 */
@Override
public Replica fetchReplica(ExtendedBlock block) {
return dataset.fetchReplicaInfo(block.getBlockPoolId(), block.getBlockId());
}
}

View File

@@ -46,6 +46,7 @@
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
import org.apache.hadoop.hdfs.server.datanode.Replica;
import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
import org.apache.hadoop.hdfs.server.datanode.ReplicaUnderRecovery;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
@@ -354,8 +355,8 @@ public void testUpdateReplicaUnderRecovery() throws IOException {
new RecoveringBlock(b, null, recoveryid));
//check replica
final ReplicaInfo replica = FsDatasetTestUtil.fetchReplicaInfo(
fsdataset, bpid, b.getBlockId());
final Replica replica =
cluster.getFsDatasetTestUtils(datanode).fetchReplica(b);
Assert.assertEquals(ReplicaState.RUR, replica.getState());
//check meta data before update