HDFS-14266. EC : Fsck -blockId shows null for EC Blocks if One Block Is Not Available. Contributed by Ayush Saxena.

Vinayakumar B 2019-02-12 21:57:57 +05:30
parent f3c1e456b0
commit 1f3e737407
2 changed files with 80 additions and 21 deletions
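Editor's note on the bug being fixed: for a striped (EC) block with an unavailable internal block, the per-replica loop in fsck could hit an empty storage slot; the resulting NullPointerException was caught and its null message printed, which is presumably how the literal text "null" surfaced in the -blockId report. A minimal sketch (not part of the patch) of driving fsck programmatically and capturing its report, using the same APIs as the test below; the path and block id are placeholders, and a real run needs a reachable NameNode configured in conf:

// Sketch only: programmatic fsck -blockId invocation with captured output.
import java.io.ByteArrayOutputStream;
import java.io.PrintStream;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.tools.DFSck;
import org.apache.hadoop.util.ToolRunner;

public class FsckBlockIdExample {
  public static void main(String[] args) throws Exception {
    // Collect fsck's report in memory instead of writing it to stdout.
    ByteArrayOutputStream bStream = new ByteArrayOutputStream();
    PrintStream out = new PrintStream(bStream, true);

    Configuration conf = new Configuration();
    int ret = ToolRunner.run(new DFSck(conf, out),
        new String[] {"/ecDir/ecFile", "-blockId", "blk_-9223372036854775792"});

    // Before this fix, a striped block with a missing internal block could
    // surface as the literal string "null" in this report.
    System.out.println("fsck exit code: " + ret);
    System.out.println(bStream.toString());
  }
}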

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java

@@ -302,29 +302,22 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
       if (blockManager.getCorruptReplicas(block) != null) {
         corruptionRecord = blockManager.getCorruptReplicas(block);
       }
-      //report block replicas status on datanodes
-      for(int idx = (blockInfo.numNodes()-1); idx >= 0; idx--) {
-        DatanodeDescriptor dn = blockInfo.getDatanode(idx);
-        out.print("Block replica on datanode/rack: " + dn.getHostName() +
-            dn.getNetworkLocation() + " ");
-        if (corruptionRecord != null && corruptionRecord.contains(dn)) {
-          out.print(CORRUPT_STATUS + "\t ReasonCode: " +
-              blockManager.getCorruptReason(block, dn));
-        } else if (dn.isDecommissioned() ){
-          out.print(DECOMMISSIONED_STATUS);
-        } else if (dn.isDecommissionInProgress()) {
-          out.print(DECOMMISSIONING_STATUS);
-        } else if (this.showMaintenanceState && dn.isEnteringMaintenance()) {
-          out.print(ENTERING_MAINTENANCE_STATUS);
-        } else if (this.showMaintenanceState && dn.isInMaintenance()) {
-          out.print(IN_MAINTENANCE_STATUS);
-        } else {
-          out.print(HEALTHY_STATUS);
+      // report block replicas status on datanodes
+      if (blockInfo.isStriped()) {
+        for (int idx = (blockInfo.getCapacity() - 1); idx >= 0; idx--) {
+          DatanodeDescriptor dn = blockInfo.getDatanode(idx);
+          if (dn == null) {
+            continue;
+          }
+          printDatanodeReplicaStatus(block, corruptionRecord, dn);
+        }
+      } else {
+        for (int idx = (blockInfo.numNodes() - 1); idx >= 0; idx--) {
+          DatanodeDescriptor dn = blockInfo.getDatanode(idx);
+          printDatanodeReplicaStatus(block, corruptionRecord, dn);
         }
-        out.print("\n");
       }
-    } catch (Exception e){
+    } catch (Exception e) {
       String errMsg = "Fsck on blockId '" + blockId;
       LOG.warn(errMsg, e);
       out.println(e.getMessage());
@@ -335,6 +328,27 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
     }
   }
 
+  private void printDatanodeReplicaStatus(Block block,
+      Collection<DatanodeDescriptor> corruptionRecord, DatanodeDescriptor dn) {
+    out.print("Block replica on datanode/rack: " + dn.getHostName() +
+        dn.getNetworkLocation() + " ");
+    if (corruptionRecord != null && corruptionRecord.contains(dn)) {
+      out.print(CORRUPT_STATUS + "\t ReasonCode: " +
+          blockManager.getCorruptReason(block, dn));
+    } else if (dn.isDecommissioned()) {
+      out.print(DECOMMISSIONED_STATUS);
+    } else if (dn.isDecommissionInProgress()) {
+      out.print(DECOMMISSIONING_STATUS);
+    } else if (this.showMaintenanceState && dn.isEnteringMaintenance()) {
+      out.print(ENTERING_MAINTENANCE_STATUS);
+    } else if (this.showMaintenanceState && dn.isInMaintenance()) {
+      out.print(IN_MAINTENANCE_STATUS);
+    } else {
+      out.print(HEALTHY_STATUS);
+    }
+    out.print("\n");
+  }
+
   /**
    * Check files on DFS, starting from the indicated path.
    */
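Why the patched branch iterates getCapacity() and skips null entries: a striped block keeps one storage slot per data/parity unit, so an unavailable internal block leaves a null hole at its slot index, whereas a contiguous block's replicas are packed and numNodes() suffices. A minimal, self-contained sketch of that slot model (plain arrays standing in for storage slots, not HDFS types):

// Illustrative sketch only: models striped-block storage slots.
public class StripedSlotsDemo {
  public static void main(String[] args) {
    // RS(3,2): 3 data + 2 parity units -> 5 slots; internal block 2 is missing.
    String[] slots = {"dn-0", "dn-1", null, "dn-3", "dn-4"};

    // Iterating every slot without a null check is what went wrong before the
    // fix; like the patched loop, we skip empty slots instead.
    for (int idx = slots.length - 1; idx >= 0; idx--) {
      if (slots[idx] == null) {
        continue; // internal block not available; nothing to report
      }
      System.out.println("internal block " + idx + " on " + slots[idx]);
    }
  }
}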

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoStriped.java

@@ -17,10 +17,18 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.StripedFileTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.tools.DFSck;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.ToolRunner;
 import org.junit.Assert;
 import org.junit.Rule;
 import org.junit.Test;
@@ -31,12 +39,15 @@ import org.mockito.internal.util.reflection.Whitebox;
 import java.io.DataOutput;
 import java.io.DataOutputStream;
 import java.io.File;
+import java.io.PrintStream;
+import java.io.ByteArrayOutputStream;
 import java.nio.ByteBuffer;
 import java.util.Collection;
 
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.fail;
 
 /**
@@ -212,6 +223,40 @@ public class TestBlockInfoStriped {
     }
   }
 
+  @Test
+  public void testGetBlockInfo() throws IllegalArgumentException, Exception {
+    int dataBlocks = testECPolicy.getNumDataUnits();
+    int parityBlocks = testECPolicy.getNumParityUnits();
+    int totalSize = dataBlocks + parityBlocks;
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    Configuration conf = new Configuration();
+    try (MiniDFSCluster cluster =
+        new MiniDFSCluster.Builder(conf, builderBaseDir).numDataNodes(totalSize)
+            .build()) {
+      DistributedFileSystem fs = cluster.getFileSystem();
+      fs.enableErasureCodingPolicy(
+          StripedFileTestUtil.getDefaultECPolicy().getName());
+      fs.enableErasureCodingPolicy(testECPolicy.getName());
+      fs.mkdirs(new Path("/ecDir"));
+      fs.setErasureCodingPolicy(new Path("/ecDir"), testECPolicy.getName());
+      DFSTestUtil.createFile(fs, new Path("/ecDir/ecFile"),
+          fs.getDefaultBlockSize() * dataBlocks, (short) 1, 1024);
+      ExtendedBlock blk = DFSTestUtil
+          .getAllBlocks(fs, new Path("/ecDir/ecFile")).get(0).getBlock();
+      String id = "blk_" + Long.toString(blk.getBlockId());
+      BlockInfo bInfo = cluster.getNameNode().getNamesystem().getBlockManager()
+          .getStoredBlock(blk.getLocalBlock());
+      DatanodeStorageInfo[] dnStorageInfo = cluster.getNameNode()
+          .getNamesystem().getBlockManager().getStorages(bInfo);
+      bInfo.removeStorage(dnStorageInfo[1]);
+      ByteArrayOutputStream bStream = new ByteArrayOutputStream();
+      PrintStream out = new PrintStream(bStream, true);
+      assertEquals(0, ToolRunner.run(new DFSck(conf, out), new String[] {
+          new Path("/ecDir/ecFile").toString(), "-blockId", id }));
+      assertFalse(bStream.toString().contains("null"));
+    }
+  }
+
   @Test
   public void testWrite() {
     long blkID = 1;
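A side note on the assertion in the test above: the fsck report must be read back through the ByteArrayOutputStream, not the PrintStream wrapped around it, because PrintStream inherits Object.toString() and never exposes the written text. A tiny standalone sketch of the capture idiom:

// Sketch of capturing PrintStream output in memory and inspecting it.
import java.io.ByteArrayOutputStream;
import java.io.PrintStream;

public class CaptureDemo {
  public static void main(String[] args) {
    ByteArrayOutputStream bStream = new ByteArrayOutputStream();
    PrintStream out = new PrintStream(bStream, true);
    out.println("Block replica on datanode/rack: dn-0/rack0 is HEALTHY");

    // out.toString() yields something like "java.io.PrintStream@1a2b3c",
    // never the report text; the captured output lives in the buffer:
    System.out.println(bStream.toString().contains("null")); // prints: false
  }
}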