HDFS-7826. Erasure Coding: Update INodeFile quota computation for striped blocks. Contributed by Kai Sasaki.

Jing Zhao 2015-03-16 16:37:08 -07:00 committed by Zhe Zhang
parent 11585883a9
commit 26773d9d6c
2 changed files with 60 additions and 13 deletions

org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java

@@ -18,6 +18,7 @@
package org.apache.hadoop.hdfs.server.blockmanagement;
import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
/**
@@ -34,6 +35,7 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 * array to record the block index for each triplet.
 */
public class BlockInfoStriped extends BlockInfo {
+  private final int chunkSize = HdfsConstants.BLOCK_STRIPED_CHUNK_SIZE;
  private final short dataBlockNum;
  private final short parityBlockNum;
  /**
@@ -56,7 +58,7 @@ public class BlockInfoStriped extends BlockInfo {
    this.setBlockCollection(b.getBlockCollection());
  }
-  short getTotalBlockNum() {
+  public short getTotalBlockNum() {
    return (short) (dataBlockNum + parityBlockNum);
  }
@@ -178,6 +180,14 @@ public class BlockInfoStriped extends BlockInfo {
    }
  }
+  public long spaceConsumed() {
+    // For striped blocks, the total usage of the block group is the sum of
+    // the data blocks and the parity blocks, because getNumBytes() only
+    // returns the size of the actual data.
+    return ((getNumBytes() - 1) / (dataBlockNum * chunkSize) + 1)
+        * chunkSize * parityBlockNum + getNumBytes();
+  }
  @Override
  public final boolean isStriped() {
    return true;
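To make the rounding in the new spaceConsumed() concrete, here is a small self-contained sketch of the same arithmetic. The 6 data / 3 parity schema and the 64 KB cell size are illustrative assumptions only; the real values come from HdfsConstants on the HDFS-7285 branch.

// Minimal sketch of the spaceConsumed() arithmetic, with assumed constants.
public class StripedSpaceExample {
  public static void main(String[] args) {
    final int dataBlockNum = 6;       // assumed data blocks per group
    final int parityBlockNum = 3;     // assumed parity blocks per group
    final int chunkSize = 64 * 1024;  // assumed cell (chunk) size in bytes
    final long numBytes = 1_000_000L; // actual user data in the block group

    // Number of stripes needed, rounded up, exactly as in the patch.
    long stripes = (numBytes - 1) / (dataBlockNum * chunkSize) + 1;
    // Each stripe stores one cell per parity block.
    long parityBytes = stripes * chunkSize * parityBlockNum;
    long spaceConsumed = parityBytes + numBytes;

    // Prints: stripes=3 parityBytes=589824 spaceConsumed=1589824
    System.out.println("stripes=" + stripes + " parityBytes=" + parityBytes
        + " spaceConsumed=" + spaceConsumed);
  }
}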

org/apache/hadoop/hdfs/server/namenode/INodeFile.java

@@ -42,6 +42,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStripedUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiff;
@@ -676,6 +677,11 @@ public class INodeFile extends INodeWithAdditionalFields
    final long ssDeltaNoReplication;
    short replication;
+    if (isStriped()) {
+      return computeQuotaUsageWithStriped(bsps, counts);
+    }
    if (last < lastSnapshotId) {
      ssDeltaNoReplication = computeFileSize(true, false);
      replication = getFileReplication();
@@ -698,6 +704,23 @@ public class INodeFile extends INodeWithAdditionalFields
    return counts;
  }
+  /**
+   * Compute the quota usage of a striped file.
+   * @param bsps the block storage policy suite
+   * @param counts the quota counts to be updated
+   * @return the updated quota counts
+   */
+  public final QuotaCounts computeQuotaUsageWithStriped(
+      BlockStoragePolicySuite bsps, QuotaCounts counts) {
+    long nsDelta = 1;
+    final long ssDelta = storagespaceConsumed();
+    counts.addNameSpace(nsDelta);
+    counts.addStorageSpace(ssDelta);
+    return counts;
+  }
  @Override
  public final ContentSummaryComputationContext computeContentSummary(
      final ContentSummaryComputationContext summary) {
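The new branch above routes striped files away from the replication-based accounting. The sketch below contrasts the two paths with hypothetical numbers; the striped figure reuses the 1,589,824-byte result from the spaceConsumed() example earlier, which assumed a 6+3 schema and 64 KB cells. These are not HDFS API calls.

// Illustrative comparison of the two quota accounting paths (assumed values).
public class QuotaPathExample {
  public static void main(String[] args) {
    long fileBytes = 1_000_000L;

    // Contiguous path: logical file size multiplied by the replication factor.
    long replicatedStorageSpace = fileBytes * 3;   // 3,000,000 bytes

    // Striped path: raw data plus parity bytes, as storagespaceConsumed()
    // would report for a striped file (value taken from the earlier
    // spaceConsumed() example, under the same assumptions).
    long stripedStorageSpace = 1_589_824L;

    System.out.println("replicated=" + replicatedStorageSpace
        + " striped=" + stripedStorageSpace);
  }
}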
@@ -776,23 +799,37 @@ public class INodeFile extends INodeWithAdditionalFields
   * @return file size
   */
  public final long computeFileSize(boolean includesLastUcBlock,
      boolean usePreferredBlockSize4LastUcBlock) {
-    if (blocks == null || blocks.length == 0) {
+    BlockInfo[] blockInfos = getBlocks();
+    // In case of contiguous blocks
+    if (blockInfos == null || blockInfos.length == 0) {
      return 0;
    }
-    final int last = blocks.length - 1;
+    final int last = blockInfos.length - 1;
    //check if the last block is BlockInfoUnderConstruction
-    long size = blocks[last].getNumBytes();
-    if (blocks[last] instanceof BlockInfoContiguousUnderConstruction) {
+    long size = blockInfos[last].getNumBytes();
+    if (blockInfos[last] instanceof BlockInfoContiguousUnderConstruction) {
      if (!includesLastUcBlock) {
        size = 0;
      } else if (usePreferredBlockSize4LastUcBlock) {
        size = getPreferredBlockSize();
      }
+    } else if (blockInfos[last] instanceof BlockInfoStripedUnderConstruction) {
+      if (!includesLastUcBlock) {
+        size = 0;
+      } else if (usePreferredBlockSize4LastUcBlock) {
+        // A striped block group spans (data blocks num + parity blocks num)
+        // internal blocks, so the space used by an under-construction
+        // striped block group must be multiplied by the total block number.
+        BlockInfoStripedUnderConstruction blockInfoStripedUC
+            = (BlockInfoStripedUnderConstruction) blockInfos[last];
+        size = getPreferredBlockSize() * blockInfoStripedUC.getTotalBlockNum();
+      }
    }
    //sum other blocks
-    for(int i = 0; i < last; i++) {
-      size += blocks[i].getNumBytes();
+    for (int i = 0; i < last; i++) {
+      size += blockInfos[i].getNumBytes();
    }
    return size;
  }
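To illustrate the new under-construction branch, the sketch below works out the size charged for the last block group when usePreferredBlockSize4LastUcBlock is true. The 128 MB preferred block size and the 6+3 schema are assumptions for the example, not values taken from the patch.

// Worked example of the striped last-block accounting (assumed constants).
public class LastBlockGroupExample {
  public static void main(String[] args) {
    long preferredBlockSize = 128L * 1024 * 1024; // assumed: 128 MB
    int totalBlockNum = 6 + 3;                    // assumed 6 data + 3 parity

    // Contiguous file: the last under-construction block reserves one
    // preferred block. Striped file: the block group reserves one preferred
    // block per internal block, data and parity alike.
    long contiguousLastBlock = preferredBlockSize;                    // 134,217,728
    long stripedLastBlockGroup = preferredBlockSize * totalBlockNum;  // 1,207,959,552

    System.out.println(contiguousLastBlock + " vs " + stripedLastBlockGroup);
  }
}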