diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 0acf746194f..f026a5cde76 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -198,3 +198,6 @@
 
     HDFS-7678. Erasure coding: DFSInputStream with decode functionality (pread).
     (Zhe Zhang)
+
+    HDFS-8372. Erasure coding: compute storage type quotas for striped files,
+    to be consistent with HDFS-8327. (Zhe Zhang via jing9)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileWithStripedBlocksFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileWithStripedBlocksFeature.java
index 47445be5b6b..94ab527e40c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileWithStripedBlocksFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileWithStripedBlocksFeature.java
@@ -21,6 +21,7 @@ import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStripedUnderConstruction;
 
 /**
  * Feature for file with striped blocks
@@ -78,20 +79,23 @@ class FileWithStripedBlocksFeature implements INode.Feature {
     }
   }
 
-  boolean removeLastBlock(Block oldblock) {
+  BlockInfoStripedUnderConstruction removeLastBlock(
+      Block oldblock) {
     if (blocks == null || blocks.length == 0) {
-      return false;
+      return null;
     }
     int newSize = blocks.length - 1;
     if (!blocks[newSize].equals(oldblock)) {
-      return false;
+      return null;
     }
 
+    BlockInfoStripedUnderConstruction uc =
+        (BlockInfoStripedUnderConstruction) blocks[newSize];
     //copy to a new list
     BlockInfoStriped[] newlist = new BlockInfoStriped[newSize];
     System.arraycopy(blocks, 0, newlist, 0, newSize);
     setBlocks(newlist);
-    return true;
+    return uc;
   }
 
   void truncateStripedBlocks(int n) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index cc187700e4b..154198c5206 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStripedUnderConstruction;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiff;
@@ -295,7 +296,7 @@ public class INodeFile extends INodeWithAdditionalFields
    * Remove a block from the block list. This block should be
    * the last one on the list.
    */
-  BlockInfoContiguousUnderConstruction removeLastBlock(Block oldblock) {
+  BlockInfoUnderConstruction removeLastBlock(Block oldblock) {
     Preconditions.checkState(isUnderConstruction(),
         "file is no longer under construction");
     FileWithStripedBlocksFeature sb = getStripedBlocksFeature();
@@ -317,7 +318,7 @@ public class INodeFile extends INodeWithAdditionalFields
       return uc;
     } else {
       assert hasNoContiguousBlock();
-      return null;
+      return sb.removeLastBlock(oldblock);
     }
   }
 
@@ -676,7 +677,7 @@ public class INodeFile extends INodeWithAdditionalFields
     final long ssDeltaNoReplication;
     short replication;
     if (isStriped()) {
-      return computeQuotaUsageWithStriped(bsps, counts);
+      return computeQuotaUsageWithStriped(bsp, counts);
     }
 
     if (last < lastSnapshotId) {
@@ -702,11 +703,15 @@ public class INodeFile extends INodeWithAdditionalFields
   }
 
   /**
-   * Compute quota of striped file
+   * Compute quota of striped file. Note that currently EC files do not support
+   * append/hflush/hsync, thus the file length recorded in snapshots should be
+   * the same with the current file length.
    */
   public final QuotaCounts computeQuotaUsageWithStriped(
-      BlockStoragePolicySuite bsps, QuotaCounts counts) {
-    return null;
+      BlockStoragePolicy bsp, QuotaCounts counts) {
+    counts.addNameSpace(1);
+    counts.add(storagespaceConsumed(bsp));
+    return counts;
   }
 
   @Override
@@ -828,21 +833,44 @@ public class INodeFile extends INodeWithAdditionalFields
    * Use preferred block size for the last block if it is under construction.
    */
   public final QuotaCounts storagespaceConsumed(BlockStoragePolicy bsp) {
-    QuotaCounts counts = new QuotaCounts.Builder().build();
     if (isStriped()) {
-      return storagespaceConsumedWithStriped(bsp);
+      return storagespaceConsumedWithStriped();
     } else {
       return storagespaceConsumedWithReplication(bsp);
     }
   }
 
-  public final QuotaCounts storagespaceConsumedWithStriped(
-      BlockStoragePolicy bsp) {
-    return null;
+  // TODO: support EC with heterogeneous storage
+  public final QuotaCounts storagespaceConsumedWithStriped() {
+    QuotaCounts counts = new QuotaCounts.Builder().build();
+    BlockInfo[] blockInfos = getBlocks();
+    if (blockInfos == null || blockInfos.length == 0) {
+      return counts;
+    }
+
+    long size;
+    final int last = blockInfos.length - 1;
+    if (blockInfos[last] instanceof BlockInfoStripedUnderConstruction) {
+      BlockInfoStripedUnderConstruction blockInfoStripedUC
+          =(BlockInfoStripedUnderConstruction)blockInfos[last];
+      size = getPreferredBlockSize() * blockInfoStripedUC.getTotalBlockNum();
+    } else {
+      // In case of last block is complete
+      BlockInfoStriped blockInfoStriped = (BlockInfoStriped)blockInfos[last];
+      size = blockInfoStriped.spaceConsumed();
+    }
+    for (int i = 0; i < last; i++) {
+      BlockInfoStriped blockInfoStriped = (BlockInfoStriped)blockInfos[i];
+      size += blockInfoStriped.spaceConsumed();
+    }
+
+    counts.addStorageSpace(size);
+    return counts;
   }
 
   public final QuotaCounts storagespaceConsumedWithReplication(
-      BlockStoragePolicy bsp) { QuotaCounts counts = new QuotaCounts.Builder().build();
+      BlockStoragePolicy bsp) {
+    QuotaCounts counts = new QuotaCounts.Builder().build();
     final Iterable blocks;
     FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
     if (sf == null) {
@@ -965,6 +993,7 @@ public class INodeFile extends INodeWithAdditionalFields
   /**
    * compute the quota usage change for a truncate op
    * @param newLength the length for truncation
+   * TODO: properly handle striped blocks (HDFS-7622)
    **/
   void computeQuotaDeltaForTruncate(
       long newLength, BlockStoragePolicy bsps,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java
index a725e6ba3da..e24bc54a6c4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java
@@ -27,6 +27,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStripedUnderConstruction;
@@ -45,6 +46,11 @@ public class TestStripedINodeFile {
   private static final PermissionStatus perm = new PermissionStatus(
       "userName", null, FsPermission.getDefault());
 
+  private final BlockStoragePolicySuite defaultSuite =
+      BlockStoragePolicySuite.createDefaultSuite();
+  private final BlockStoragePolicy defaultPolicy =
+      defaultSuite.getDefaultPolicy();
+
   private static INodeFile createStripedINodeFile() {
     return new INodeFile(HdfsConstants.GRANDFATHER_INODE_ID, null, perm, 0L,
         0L, null, (short)0, 1024L, HdfsServerConstants.COLD_STORAGE_POLICY_ID);
@@ -109,8 +115,8 @@ public class TestStripedINodeFile {
     // a. * ( - 1) * = 0
     // b. % = 1
     // c. * = 1 * 3
-    assertEquals(4, inf.storagespaceConsumedWithStriped(null));
-    assertEquals(4, inf.storagespaceConsumed(null));
+    assertEquals(4, inf.storagespaceConsumedWithStriped().getStorageSpace());
+    assertEquals(4, inf.storagespaceConsumed(defaultPolicy).getStorageSpace());
   }
 
   @Test
@@ -134,8 +140,8 @@ public class TestStripedINodeFile {
     inf.addBlock(blockInfoStriped1);
     inf.addBlock(blockInfoStriped2);
     // This is the double size of one block in above case.
-    assertEquals(4 * 2, inf.storagespaceConsumedWithStriped(null));
-    assertEquals(4 * 2, inf.storagespaceConsumed(null));
+    assertEquals(4 * 2, inf.storagespaceConsumedWithStriped().getStorageSpace());
+    assertEquals(4 * 2, inf.storagespaceConsumed(defaultPolicy).getStorageSpace());
   }
 
   @Test
@@ -188,10 +194,8 @@ public class TestStripedINodeFile {
     blockInfoStriped.setNumBytes(100);
     inf.addBlock(blockInfoStriped);
 
-    BlockStoragePolicySuite suite =
-        BlockStoragePolicySuite.createDefaultSuite();
     QuotaCounts counts =
-        inf.computeQuotaUsageWithStriped(suite,
+        inf.computeQuotaUsageWithStriped(defaultPolicy,
             new QuotaCounts.Builder().build());
     assertEquals(1, counts.getNameSpace());
     // The total consumed space is the sum of
@@ -215,10 +219,8 @@ public class TestStripedINodeFile {
     bInfoStripedUC.setNumBytes(100);
     inf.addBlock(bInfoStripedUC);
 
-    BlockStoragePolicySuite suite
-        = BlockStoragePolicySuite.createDefaultSuite();
     QuotaCounts counts
-        = inf.computeQuotaUsageWithStriped(suite,
+        = inf.computeQuotaUsageWithStriped(defaultPolicy,
             new QuotaCounts.Builder().build());
     assertEquals(1024, inf.getPreferredBlockSize());
     assertEquals(1, counts.getNameSpace());