diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java index 84c3be60e8f..cef83185b16 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hdfs.server.blockmanagement; import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState; /** @@ -34,6 +35,7 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState; * array to record the block index for each triplet. */ public class BlockInfoStriped extends BlockInfo { + private final int chunkSize = HdfsConstants.BLOCK_STRIPED_CHUNK_SIZE; private final short dataBlockNum; private final short parityBlockNum; /** @@ -56,7 +58,7 @@ public class BlockInfoStriped extends BlockInfo { this.setBlockCollection(b.getBlockCollection()); } - short getTotalBlockNum() { + public short getTotalBlockNum() { return (short) (dataBlockNum + parityBlockNum); } @@ -178,6 +180,14 @@ public class BlockInfoStriped extends BlockInfo { } } + public long spaceConsumed() { + // For striped blocks, the total space consumed should include + // both the data blocks and the parity blocks, because + // `getNumBytes` only counts the actual data block size.
+ return ((getNumBytes() - 1) / (dataBlockNum * chunkSize) + 1) * chunkSize * parityBlockNum + getNumBytes(); + } + @Override public final boolean isStriped() { return true; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java index 31562f28f02..b2c4ae8ef43 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java @@ -42,6 +42,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStripedUnderConstruction; import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo; import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiff; @@ -676,6 +677,11 @@ public class INodeFile extends INodeWithAdditionalFields final long ssDeltaNoReplication; short replication; + + if (isStriped()) { + return computeQuotaUsageWithStriped(bsps, counts); + } + if (last < lastSnapshotId) { ssDeltaNoReplication = computeFileSize(true, false); replication = getFileReplication(); @@ -698,6 +704,23 @@ public class INodeFile extends INodeWithAdditionalFields return counts; } + /** + * Compute the quota usage of a striped file. + * The namespace delta is 1 and the storage space delta is the full + * space consumed by its block groups, including parity blocks. + * @param bsps the block storage policy suite + * @param counts the quota counts to be updated + * @return the updated quota counts + */ + public final QuotaCounts computeQuotaUsageWithStriped( + BlockStoragePolicySuite bsps, QuotaCounts counts) { + long nsDelta = 1; + final long ssDelta =
storagespaceConsumed(); + counts.addNameSpace(nsDelta); + counts.addStorageSpace(ssDelta); + return counts; + } + @Override public final ContentSummaryComputationContext computeContentSummary( final ContentSummaryComputationContext summary) { @@ -776,23 +799,37 @@ * @return file size */ public final long computeFileSize(boolean includesLastUcBlock, - boolean usePreferredBlockSize4LastUcBlock) { - if (blocks == null || blocks.length == 0) { + boolean usePreferredBlockSize4LastUcBlock) { + BlockInfo[] blockInfos = getBlocks(); + // In case of contiguous blocks + if (blockInfos == null || blockInfos.length == 0) { return 0; } - final int last = blocks.length - 1; + final int last = blockInfos.length - 1; //check if the last block is BlockInfoUnderConstruction - long size = blocks[last].getNumBytes(); - if (blocks[last] instanceof BlockInfoContiguousUnderConstruction) { - if (!includesLastUcBlock) { - size = 0; - } else if (usePreferredBlockSize4LastUcBlock) { - size = getPreferredBlockSize(); - } + long size = blockInfos[last].getNumBytes(); + if (blockInfos[last] instanceof BlockInfoContiguousUnderConstruction) { + if (!includesLastUcBlock) { + size = 0; + } else if (usePreferredBlockSize4LastUcBlock) { + size = getPreferredBlockSize(); + } + } else if (blockInfos[last] instanceof BlockInfoStripedUnderConstruction) { + if (!includesLastUcBlock) { + size = 0; + } else if (usePreferredBlockSize4LastUcBlock) { + // A striped block group spans (data block num + parity + // block num) blocks. To count the actual space used by a + // BlockInfoStripedUC, the preferred block size must be + // multiplied by the total number of blocks in the group.
+ BlockInfoStripedUnderConstruction blockInfoStripedUC + = (BlockInfoStripedUnderConstruction) blockInfos[last]; + size = getPreferredBlockSize() * blockInfoStripedUC.getTotalBlockNum(); + } } //sum other blocks - for(int i = 0; i < last; i++) { - size += blocks[i].getNumBytes(); + for (int i = 0; i < last; i++) { + size += blockInfos[i].getNumBytes(); } return size; }