HDFS-14266. EC : Fsck -blockId shows null for EC Blocks if One Block Is Not Available. Contributed by Ayush Saxena.
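
Summary: fsck's -blockId lookup printed the literal string "null" for an erasure-coded block group whenever one of its internal blocks was unavailable. A striped BlockInfo keeps one storage slot per position in the block group, so a missing internal block leaves a null slot, while numNodes() counts only the non-null entries; indexing getDatanode(idx) by that count could therefore land on a vacant slot. The change below iterates striped blocks over getCapacity() and skips null slots, extracts the per-replica reporting into a printDatanodeReplicaStatus helper, and adds a regression test.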
commit 1f3e737407 (parent f3c1e456b0)

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java:

@@ -302,16 +302,40 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
       if (blockManager.getCorruptReplicas(block) != null) {
         corruptionRecord = blockManager.getCorruptReplicas(block);
       }
-
-      //report block replicas status on datanodes
-      for(int idx = (blockInfo.numNodes()-1); idx >= 0; idx--) {
-        DatanodeDescriptor dn = blockInfo.getDatanode(idx);
+      // report block replicas status on datanodes
+      if (blockInfo.isStriped()) {
+        for (int idx = (blockInfo.getCapacity() - 1); idx >= 0; idx--) {
+          DatanodeDescriptor dn = blockInfo.getDatanode(idx);
+          if (dn == null) {
+            continue;
+          }
+          printDatanodeReplicaStatus(block, corruptionRecord, dn);
+        }
+      } else {
+        for (int idx = (blockInfo.numNodes() - 1); idx >= 0; idx--) {
+          DatanodeDescriptor dn = blockInfo.getDatanode(idx);
+          printDatanodeReplicaStatus(block, corruptionRecord, dn);
+        }
+      }
+    } catch (Exception e) {
+      String errMsg = "Fsck on blockId '" + blockId;
+      LOG.warn(errMsg, e);
+      out.println(e.getMessage());
+      out.print("\n\n" + errMsg);
+      LOG.warn("Error in looking up block", e);
+    } finally {
+      namenode.getNamesystem().readUnlock("fsck");
+    }
+  }
+
+  private void printDatanodeReplicaStatus(Block block,
+      Collection<DatanodeDescriptor> corruptionRecord, DatanodeDescriptor dn) {
     out.print("Block replica on datanode/rack: " + dn.getHostName() +
         dn.getNetworkLocation() + " ");
     if (corruptionRecord != null && corruptionRecord.contains(dn)) {
       out.print(CORRUPT_STATUS + "\t ReasonCode: " +
           blockManager.getCorruptReason(block, dn));
-    } else if (dn.isDecommissioned() ){
+    } else if (dn.isDecommissioned()){
       out.print(DECOMMISSIONED_STATUS);
     } else if (dn.isDecommissionInProgress()) {
       out.print(DECOMMISSIONING_STATUS);
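(Note: the catch/finally block added above is not new logic; it is the same block removed in the next hunk, relocated because everything from the out.print onward now lives in the extracted printDatanodeReplicaStatus method.)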
@@ -324,16 +348,6 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
     }
     out.print("\n");
   }
-    } catch (Exception e){
-      String errMsg = "Fsck on blockId '" + blockId;
-      LOG.warn(errMsg, e);
-      out.println(e.getMessage());
-      out.print("\n\n" + errMsg);
-      LOG.warn("Error in looking up block", e);
-    } finally {
-      namenode.getNamesystem().readUnlock("fsck");
-    }
-  }
 
   /**
    * Check files on DFS, starting from the indicated path.
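The command exercised by this change is the standard fsck block lookup. An illustrative invocation, with a placeholder block ID (the form matches the arguments the new test passes to DFSck below):

  hdfs fsck /ecDir/ecFile -blockId blk_<id>

Before the fix, each unavailable internal block in the group showed up in this output as "null"; afterwards, vacant slots are simply skipped.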
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoStriped.java:

@@ -17,10 +17,18 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.StripedFileTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.tools.DFSck;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.ToolRunner;
 import org.junit.Assert;
 import org.junit.Rule;
 import org.junit.Test;
@@ -31,12 +39,15 @@ import org.mockito.internal.util.reflection.Whitebox;
 
 import java.io.DataOutput;
 import java.io.DataOutputStream;
+import java.io.File;
+import java.io.PrintStream;
 import java.io.ByteArrayOutputStream;
 import java.nio.ByteBuffer;
 import java.util.Collection;
 
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.fail;
 
 /**
@@ -212,6 +223,40 @@ public class TestBlockInfoStriped {
     }
   }
 
+  @Test
+  public void testGetBlockInfo() throws IllegalArgumentException, Exception {
+    int dataBlocks = testECPolicy.getNumDataUnits();
+    int parityBlocks = testECPolicy.getNumParityUnits();
+    int totalSize = dataBlocks + parityBlocks;
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    Configuration conf = new Configuration();
+    try (MiniDFSCluster cluster =
+        new MiniDFSCluster.Builder(conf, builderBaseDir).numDataNodes(totalSize)
+            .build()) {
+      DistributedFileSystem fs = cluster.getFileSystem();
+      fs.enableErasureCodingPolicy(
+          StripedFileTestUtil.getDefaultECPolicy().getName());
+      fs.enableErasureCodingPolicy(testECPolicy.getName());
+      fs.mkdirs(new Path("/ecDir"));
+      fs.setErasureCodingPolicy(new Path("/ecDir"), testECPolicy.getName());
+      DFSTestUtil.createFile(fs, new Path("/ecDir/ecFile"),
+          fs.getDefaultBlockSize() * dataBlocks, (short) 1, 1024);
+      ExtendedBlock blk = DFSTestUtil
+          .getAllBlocks(fs, new Path("/ecDir/ecFile")).get(0).getBlock();
+      String id = "blk_" + Long.toString(blk.getBlockId());
+      BlockInfo bInfo = cluster.getNameNode().getNamesystem().getBlockManager()
+          .getStoredBlock(blk.getLocalBlock());
+      DatanodeStorageInfo[] dnStorageInfo = cluster.getNameNode()
+          .getNamesystem().getBlockManager().getStorages(bInfo);
+      bInfo.removeStorage(dnStorageInfo[1]);
+      ByteArrayOutputStream bStream = new ByteArrayOutputStream();
+      PrintStream out = new PrintStream(bStream, true);
+      assertEquals(0, ToolRunner.run(new DFSck(conf, out), new String[] {
+          new Path("/ecDir/ecFile").toString(), "-blockId", id }));
+      assertFalse(out.toString().contains("null"));
+    }
+  }
+
   @Test
   public void testWrite() {
     long blkID = 1;
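The new testGetBlockInfo case drives the fix end to end: it starts a MiniDFSCluster with dataBlocks + parityBlocks datanodes, creates a striped file, removes one DatanodeStorageInfo from the block's BlockInfo to simulate an unavailable internal block, runs DFSck against the file with -blockId, and asserts both a zero exit code and that the captured output contains no "null".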