HDFS-9363. Add fetchReplica() to FsDatasetTestUtils to return FsDataset-agnostic replica. (Tony Wu via lei)

Lei Xu 2015-11-04 10:46:19 -08:00
parent 0fb1867fd6
commit 5667129276
5 changed files with 20 additions and 6 deletions


@@ -1623,6 +1623,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-9331. Modify TestNameNodeMXBean#testNameNodeMXBeanInfo() to account for
     filesystem entirely allocated for DFS use. (Tony Wu via lei)
 
+    HDFS-9363. Add fetchReplica() to FsDatasetTestUtils to return FsDataset-agnostic
+    replica. (Tony Wu via lei)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than


@@ -33,7 +33,6 @@ import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.Replica;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
@@ -102,10 +101,9 @@ public class TestPipelines {
     List<LocatedBlock> lb = cluster.getNameNodeRpc().getBlockLocations(
       filePath.toString(), FILE_SIZE - 1, FILE_SIZE).getLocatedBlocks();
 
-    String bpid = cluster.getNamesystem().getBlockPoolId();
     for (DataNode dn : cluster.getDataNodes()) {
-      Replica r = DataNodeTestUtils.fetchReplicaInfo(dn, bpid, lb.get(0)
-          .getBlock().getBlockId());
+      Replica r =
+          cluster.getFsDatasetTestUtils(dn).fetchReplica(lb.get(0).getBlock());
 
       assertTrue("Replica on DN " + dn + " shouldn't be null", r != null);
       assertEquals("Should be RBW replica on " + dn


@@ -206,4 +206,11 @@ public interface FsDatasetTestUtils {
    * @throws IOException on I/O error.
    */
   void injectCorruptReplica(ExtendedBlock block) throws IOException;
+
+  /**
+   * Get the replica of a block. Returns null if it does not exist.
+   * @param block the block whose replica will be returned.
+   * @return Replica for the block.
+   */
+  Replica fetchReplica(ExtendedBlock block);
 }
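For context, a minimal sketch of how a test can exercise the new FsDataset-agnostic method, mirroring the TestPipelines change above. The wrapper class and helper method names here are illustrative only, not part of the patch; the calls to getDataNodes(), getFsDatasetTestUtils() and fetchReplica() are the ones the patch itself uses.

    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
    import org.apache.hadoop.hdfs.server.datanode.DataNode;
    import org.apache.hadoop.hdfs.server.datanode.Replica;
    import static org.junit.Assert.assertNotNull;

    // Illustrative sketch, not part of the patch.
    public class FetchReplicaSketch {
      /**
       * Assert that every DataNode in the cluster holds a replica of the
       * given block, without reaching into FsDatasetImpl internals.
       * fetchReplica() returns null when the DataNode has no such replica.
       */
      static void assertAllDataNodesHaveReplica(MiniDFSCluster cluster,
          ExtendedBlock block) {
        for (DataNode dn : cluster.getDataNodes()) {
          Replica r = cluster.getFsDatasetTestUtils(dn).fetchReplica(block);
          assertNotNull("Replica on DN " + dn + " shouldn't be null", r);
        }
      }
    }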


@@ -317,4 +317,9 @@ public class FsDatasetImplTestUtils implements FsDatasetTestUtils {
       }
     }
   }
+
+  @Override
+  public Replica fetchReplica(ExtendedBlock block) {
+    return dataset.fetchReplicaInfo(block.getBlockPoolId(), block.getBlockId());
+  }
 }
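Design note: the override simply delegates to FsDatasetImpl#fetchReplicaInfo, whose ReplicaInfo return type is usable here because ReplicaInfo implements the Replica interface. Tests that go through fetchReplica() therefore see only the dataset-agnostic Replica type, so an alternative FsDatasetSpi implementation can back fetchReplica() with its own bookkeeping without exposing FsDatasetImpl-specific classes.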


@@ -46,6 +46,7 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
+import org.apache.hadoop.hdfs.server.datanode.Replica;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaUnderRecovery;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
@@ -354,8 +355,8 @@ public class TestInterDatanodeProtocol {
         new RecoveringBlock(b, null, recoveryid));
 
     //check replica
-    final ReplicaInfo replica = FsDatasetTestUtil.fetchReplicaInfo(
-        fsdataset, bpid, b.getBlockId());
+    final Replica replica =
+        cluster.getFsDatasetTestUtils(datanode).fetchReplica(b);
     Assert.assertEquals(ReplicaState.RUR, replica.getState());
 
     //check meta data before update