HDFS-9363. Add fetchReplica to FsDatasetTestUtils to return FsDataset-agnostic replica. (Tony Wu via lei)
(cherry picked from commit 4976a02841592b61f2d2b40d2aadd5146571322c)
parent bb3b2977cd
commit 7a2a17cb5f
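For orientation before the hunks below: the call pattern this change introduces looks roughly like the following sketch. It assumes a running MiniDFSCluster and imports mirroring those in TestPipelines; the test name, file path, and sizes are illustrative and not part of the patch.

    @Test
    public void fetchReplicaIsDatasetAgnostic() throws Exception {
      MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration())
          .numDataNodes(1).build();
      try {
        cluster.waitActive();
        Path file = new Path("/fetchReplicaSketch.dat");
        DFSTestUtil.createFile(cluster.getFileSystem(), file, 1024L, (short) 1, 0L);
        ExtendedBlock block = DFSTestUtil.getFirstBlock(cluster.getFileSystem(), file);
        for (DataNode dn : cluster.getDataNodes()) {
          // Replica lookup goes through FsDatasetTestUtils instead of the
          // FsDatasetImpl-specific DataNodeTestUtils.fetchReplicaInfo() helper.
          Replica r = cluster.getFsDatasetTestUtils(dn).fetchReplica(block);
          Assert.assertNotNull("replica should exist on " + dn, r);
        }
      } finally {
        cluster.shutdown();
      }
    }

Because fetchReplica() takes an ExtendedBlock and lives on the test-utils interface, the same test body works against any FsDatasetSpi implementation, not just FsDatasetImpl.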
@@ -776,6 +776,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-9331. Modify TestNameNodeMXBean#testNameNodeMXBeanInfo() to account for
     filesystem entirely allocated for DFS use. (Tony Wu via lei)
 
+    HDFS-9363. Add fetchReplica() to FsDatasetTestUtils to return FsDataset-agnostic
+    replica. (Tony Wu via lei)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
@@ -33,7 +33,6 @@ import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.Replica;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
@@ -102,10 +101,9 @@ public class TestPipelines {
     List<LocatedBlock> lb = cluster.getNameNodeRpc().getBlockLocations(
       filePath.toString(), FILE_SIZE - 1, FILE_SIZE).getLocatedBlocks();
 
-    String bpid = cluster.getNamesystem().getBlockPoolId();
     for (DataNode dn : cluster.getDataNodes()) {
-      Replica r = DataNodeTestUtils.fetchReplicaInfo(dn, bpid, lb.get(0)
-          .getBlock().getBlockId());
+      Replica r =
+          cluster.getFsDatasetTestUtils(dn).fetchReplica(lb.get(0).getBlock());
 
       assertTrue("Replica on DN " + dn + " shouldn't be null", r != null);
       assertEquals("Should be RBW replica on " + dn
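A side effect visible in this hunk: the explicit block pool id lookup disappears because ExtendedBlock already carries it. A small illustrative fragment (variable names are mine, not from the patch):

    ExtendedBlock b = lb.get(0).getBlock();
    // Equivalent to the removed cluster.getNamesystem().getBlockPoolId() call
    // for blocks belonging to that namesystem's block pool.
    String bpid = b.getBlockPoolId();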
@@ -206,4 +206,11 @@ public interface FsDatasetTestUtils {
    * @throws IOException on I/O error.
    */
   void injectCorruptReplica(ExtendedBlock block) throws IOException;
+
+  /**
+   * Get the replica of a block. Returns null if it does not exist.
+   * @param block the block whose replica will be returned.
+   * @return Replica for the block.
+   */
+  Replica fetchReplica(ExtendedBlock block);
 }
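Per the Javadoc above, fetchReplica() returns null rather than throwing when the DataNode stores no replica of the block, so absence checks stay simple. A minimal sketch, assuming cluster, dn, and a missingBlock that is not stored on dn:

    // A deleted or never-written block simply yields null, no exception.
    Replica gone = cluster.getFsDatasetTestUtils(dn).fetchReplica(missingBlock);
    Assert.assertNull("no replica expected for " + missingBlock, gone);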
@@ -317,4 +317,9 @@ public class FsDatasetImplTestUtils implements FsDatasetTestUtils {
       }
     }
   }
+
+  @Override
+  public Replica fetchReplica(ExtendedBlock block) {
+    return dataset.fetchReplicaInfo(block.getBlockPoolId(), block.getBlockId());
+  }
 }
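For the default dataset the new call ends up in the same place as the old helpers, as the delegation above shows; a side-by-side for orientation (variable names are illustrative):

    // Before: tied to FsDatasetImpl via DataNodeTestUtils / FsDatasetTestUtil.
    Replica viaOldHelper = DataNodeTestUtils.fetchReplicaInfo(dn, bpid, block.getBlockId());
    // After: goes through the FsDataset-agnostic test-utils interface.
    Replica viaTestUtils = cluster.getFsDatasetTestUtils(dn).fetchReplica(block);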
@@ -46,6 +46,7 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
+import org.apache.hadoop.hdfs.server.datanode.Replica;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaUnderRecovery;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
@@ -354,8 +355,8 @@ public class TestInterDatanodeProtocol {
         new RecoveringBlock(b, null, recoveryid));
 
     //check replica
-    final ReplicaInfo replica = FsDatasetTestUtil.fetchReplicaInfo(
-        fsdataset, bpid, b.getBlockId());
+    final Replica replica =
+        cluster.getFsDatasetTestUtils(datanode).fetchReplica(b);
     Assert.assertEquals(ReplicaState.RUR, replica.getState());
 
     //check meta data before update