HDFS-8585. Erasure Coding: Remove dataBlockNum and parityBlockNum from StripedBlockProto. Contributed by Yi Liu.

commit 683332b36d
parent 98d340745b
Author: Jing Zhao
Date:   2015-06-12 14:48:53 -07:00

7 changed files with 23 additions and 65 deletions
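In short: after this change the fsimage stores a striped file's blocks as ordinary BlockProto entries in the INodeFile blocks list, StripedBlocksFeature becomes an empty marker, and the data/parity block counts come from the erasure coding schema at load time (currently the system default schema, per the TODO in FSImageFormatPBINode) instead of being persisted per block. A minimal loader-side sketch, using only names that appear in this diff (illustrative, not the exact committed code):

    // Sketch: rebuild striped block metadata from the schema rather than
    // from the removed dataBlockNum/parityBlockNum proto fields.
    ECSchema schema = ErasureCodingSchemaManager.getSystemDefaultSchema();
    for (BlockProto b : f.getBlocksList()) {
      // BlockInfoStriped is constructed with the schema, so getDataBlockNum()
      // and getParityBlockNum() are presumably answered from it.
      stripeFeature.addBlock(new BlockInfoStriped(PBHelper.convert(b), schema));
    }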


@@ -296,3 +296,6 @@
     HDFS-8450. Erasure Coding: Consolidate erasure coding zone related
     implementation into a single class (Rakesh R via vinayakumarb)
+    HDFS-8585. Erasure Coding: Remove dataBlockNum and parityBlockNum from
+    StripedBlockProto. (Yi Liu via jing9)


@@ -183,7 +183,6 @@ import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StripedBlockProto;
 import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.ListXAttrsResponseProto;
@@ -195,7 +194,6 @@ import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
@@ -444,20 +442,6 @@ public class PBHelper {
     return new Block(b.getBlockId(), b.getNumBytes(), b.getGenStamp());
   }
 
-  public static BlockInfoStriped convert(StripedBlockProto p, ECSchema schema) {
-    return new BlockInfoStriped(convert(p.getBlock()), schema);
-  }
-
-  public static StripedBlockProto convert(BlockInfoStriped blk) {
-    BlockProto bp = BlockProto.newBuilder().setBlockId(blk.getBlockId())
-        .setGenStamp(blk.getGenerationStamp()).setNumBytes(blk.getNumBytes())
-        .build();
-    return StripedBlockProto.newBuilder()
-        .setDataBlockNum(blk.getDataBlockNum())
-        .setParityBlockNum(blk.getParityBlockNum())
-        .setBlock(bp).build();
-  }
-
   public static BlockWithLocationsProto convert(BlockWithLocations blk) {
     BlockWithLocationsProto.Builder builder = BlockWithLocationsProto
         .newBuilder().setBlock(convert(blk.getBlock()))


@@ -25,7 +25,6 @@ import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Iterator;
 import java.util.List;
-import java.util.Map;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -42,7 +41,6 @@ import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StripedBlockProto;
 import org.apache.hadoop.hdfs.protocolPB.PBHelper;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
@@ -330,10 +328,14 @@ public final class FSImageFormatPBINode {
       short replication = (short) f.getReplication();
       LoaderContext state = parent.getLoaderContext();
-      BlockInfoContiguous[] blocks = new BlockInfoContiguous[bp.size()];
-      for (int i = 0, e = bp.size(); i < e; ++i) {
-        blocks[i] = new BlockInfoContiguous(PBHelper.convert(bp.get(i)), replication);
+      BlockInfoContiguous[] blocks = null;
+      if (!f.hasStripedBlocks()) {
+        blocks = new BlockInfoContiguous[bp.size()];
+        for (int i = 0, e = bp.size(); i < e; ++i) {
+          blocks[i] = new BlockInfoContiguous(PBHelper.convert(bp.get(i)), replication);
+        }
       }
       final PermissionStatus permissions = loadPermission(f.getPermission(),
           parent.getLoaderContext().getStringTable());
@@ -357,10 +359,9 @@
       if (f.hasStripedBlocks()) {
         // TODO: HDFS-7859
         ECSchema schema = ErasureCodingSchemaManager.getSystemDefaultSchema();
-        StripedBlocksFeature sb = f.getStripedBlocks();
         stripeFeature = file.addStripedBlocksFeature();
-        for (StripedBlockProto sp : sb.getBlocksList()) {
-          stripeFeature.addBlock(PBHelper.convert(sp, schema));
+        for (BlockProto b : bp) {
+          stripeFeature.addBlock(new BlockInfoStriped(PBHelper.convert(b), schema));
         }
       }
@@ -658,14 +659,14 @@
       FileWithStripedBlocksFeature sb = n.getStripedBlocksFeature();
       if (sb != null) {
-        StripedBlocksFeature.Builder builder =
-            StripedBlocksFeature.newBuilder();
         BlockInfoStriped[] sblocks = sb.getBlocks();
         if (sblocks != null) {
           for (BlockInfoStriped sblk : sblocks) {
-            builder.addBlocks(PBHelper.convert(sblk));
+            b.addBlocks(PBHelper.convert(sblk));
           }
         }
+        StripedBlocksFeature.Builder builder =
+            StripedBlocksFeature.newBuilder();
         b.setStripedBlocks(builder.build());
       }


@@ -41,15 +41,12 @@ import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StripedBlockProto;
 import org.apache.hadoop.hdfs.server.namenode.FSImageFormatPBINode;
 import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf;
 import org.apache.hadoop.hdfs.server.namenode.FSImageUtil;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto;
 import org.apache.hadoop.hdfs.server.namenode.INodeId;
-import org.apache.hadoop.hdfs.util.StripedBlockUtil;
 import org.apache.hadoop.hdfs.web.JsonUtil;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.LimitInputStream;
@@ -485,21 +482,8 @@ class FSImageLoader {
   static long getFileSize(FsImageProto.INodeSection.INodeFile f) {
     long size = 0;
-    if (f.hasStripedBlocks()) {
-      List<StripedBlockProto> blocksList = f.getStripedBlocks().getBlocksList();
-      // Get total of actual data block size
-      for (StripedBlockProto p : blocksList) {
-        // Total usage by this striped blocks should be the total of data
-        // blocks and parity blocks
-        size += StripedBlockUtil.spaceConsumedByStripedBlock(p.getBlock()
-            .getNumBytes(), p.getDataBlockNum(), p.getParityBlockNum(),
-            HdfsConstants.BLOCK_STRIPED_CELL_SIZE);
-      }
-    } else {
-      for (HdfsProtos.BlockProto p : f.getBlocksList()) {
-        size += p.getNumBytes();
-      }
+    for (HdfsProtos.BlockProto p : f.getBlocksList()) {
+      size += p.getNumBytes();
     }
     return size;
   }


@@ -93,7 +93,7 @@ message INodeSection {
   }
 
   message StripedBlocksFeature {
-    repeated StripedBlockProto blocks = 1;
+    // store striped blocks related information
   }
 
   message AclFeatureProto {


@@ -514,16 +514,6 @@ message BlockProto {
   optional uint64 numBytes = 3 [default = 0];
 }
 
-/**
- * Striped block information. Besides the basic information for a block,
- * it also contains the number of data/parity blocks.
- */
-message StripedBlockProto {
-  required BlockProto block = 1;
-  optional uint32 dataBlockNum = 2;
-  optional uint32 parityBlockNum = 3;
-}
-
 /**
  * Block and datanodes where is it located
 */


@@ -39,7 +39,6 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
-import org.apache.hadoop.hdfs.util.StripedBlockUtil;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -136,28 +135,25 @@ public class TestOfflineImageViewerWithStripedBlocks {
     }
     FSImageLoader loader = FSImageLoader.load(orgFsimage.getAbsolutePath());
     String fileStatus = loader.getFileStatus("/eczone/striped");
-    long expectedSpaceConsumed = StripedBlockUtil.spaceConsumedByStripedBlock(
-        bytes.length, HdfsConstants.NUM_DATA_BLOCKS,
-        HdfsConstants.NUM_PARITY_BLOCKS, HdfsConstants.BLOCK_STRIPED_CELL_SIZE);
+    long expectedFileSize = bytes.length;
 
     // Verify space consumed present in BlockInfoStriped
     FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
     INodeFile fileNode = fsdir.getINode4Write(file.toString()).asFile();
     assertTrue("Invalid block size", fileNode.getBlocks().length > 0);
-    long actualSpaceConsumed = 0;
+    long actualFileSize = 0;
     for (BlockInfo blockInfo : fileNode.getBlocks()) {
       assertTrue("Didn't find block striped information",
           blockInfo instanceof BlockInfoStriped);
-      BlockInfoStriped b = (BlockInfoStriped) blockInfo;
-      actualSpaceConsumed += b.spaceConsumed();
+      actualFileSize += blockInfo.getNumBytes();
     }
 
     assertEquals("Wrongly computed file size contains striped blocks",
-        expectedSpaceConsumed, actualSpaceConsumed);
+        expectedFileSize, actualFileSize);
 
     // Verify space consumed present in filestatus
     String EXPECTED_FILE_SIZE = "\"length\":"
-        + String.valueOf(expectedSpaceConsumed);
+        + String.valueOf(expectedFileSize);
     assertTrue(
         "Wrongly computed file size contains striped blocks, file status:"
             + fileStatus + ". Expected file size is : " + EXPECTED_FILE_SIZE,