HDFS-10976. Report erasure coding policy of EC files in Fsck. Contributed by Wei-Chiu Chuang.

Author: Wei-Chiu Chuang
Date: 2016-10-20 13:02:16 -07:00
parent 3fbf4cd5da
commit 5e83a21cb6
2 changed files with 25 additions and 3 deletions

NamenodeFsck.java

@@ -55,6 +55,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfoWithStorage;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -540,11 +541,20 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
     res.totalFiles++;
     res.totalSize += fileLen;
     res.totalBlocks += blocks.locatedBlockCount();
+    String redundancyPolicy;
+    ErasureCodingPolicy ecPolicy = file.getErasureCodingPolicy();
+    if (ecPolicy == null) { // a replicated file
+      redundancyPolicy = "replicated: replication=" +
+          file.getReplication() + ",";
+    } else {
+      redundancyPolicy = "erasure-coded: policy=" + ecPolicy.getName() + ",";
+    }
+
     if (showOpenFiles && isOpen) {
-      out.print(path + " " + fileLen + " bytes, " +
+      out.print(path + " " + fileLen + " bytes, " + redundancyPolicy + " " +
           blocks.locatedBlockCount() + " block(s), OPENFORWRITE: ");
     } else if (showFiles) {
-      out.print(path + " " + fileLen + " bytes, " +
+      out.print(path + " " + fileLen + " bytes, " + redundancyPolicy + " " +
           blocks.locatedBlockCount() + " block(s): ");
     } else if (showprogress) {
       out.print('.');

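With this change, the per-file lines that fsck prints when -files is given carry the new redundancy information built above. A rough sketch of what the two branches produce, using made-up paths, sizes, block counts, and policy name:

    /ec/largeFile 230400 bytes, erasure-coded: policy=RS-6-3-64k, 1 block(s):
    /repl/smallFile 1024 bytes, replicated: replication=3, 1 block(s):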
TestFsck.java

@@ -1700,9 +1700,21 @@ public class TestFsck {
     // restart the cluster; bring up namenode but not the data nodes
     cluster = new MiniDFSCluster.Builder(conf)
         .numDataNodes(0).format(false).build();
-    outStr = runFsck(conf, 1, true, "/");
+    outStr = runFsck(conf, 1, true, "/", "-files", "-blocks");
     // expect the result is corrupt
     assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
+    String[] outLines = outStr.split("\\r?\\n");
+    for (String line: outLines) {
+      if (line.contains(largeFilePath.toString())) {
+        final HdfsFileStatus file = cluster.getNameNode().getRpcServer().
+            getFileInfo(largeFilePath.toString());
+        assertTrue(line.contains("policy=" +
+            file.getErasureCodingPolicy().getName()));
+      } else if (line.contains(replFilePath.toString())) {
+        assertTrue(line.contains("replication=" + cluster.getFileSystem().
+            getFileStatus(replFilePath).getReplication()));
+      }
+    }
     System.out.println(outStr);
   }
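The test drives fsck through the runFsck helper with the -files and -blocks options; against a running cluster, the roughly equivalent invocation (target path is an assumption) would be:

    hdfs fsck / -files -blocks

and the policy= / replication= fields asserted above appear in the per-file lines of that output.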