HDFS-8195. Erasure coding: Fix file quota change when we complete/commit the striped blocks. Contributed by Takuya Fukudome.

Authored by Zhe Zhang on 2015-05-12 23:10:25 -07:00; committed by Zhe Zhang
parent 8da9e18dad
commit 9798065cbb
4 changed files with 151 additions and 4 deletions

View File: CHANGES-HDFS-EC-7285.txt

@@ -206,3 +206,6 @@
     handled properly (Rakesh R via zhz)
     HDFS-8363. Erasure Coding: DFSStripedInputStream#seekToNewSource. (yliu)
+    HDFS-8195. Erasure coding: Fix file quota change when we complete/commit
+    the striped blocks. (Takuya Fukudome via zhz)

View File: FSDirectory.java

@@ -520,7 +520,7 @@ void updateCount(INodesInPath iip, long nsDelta, long ssDelta, short replication
     final INodeFile fileINode = iip.getLastINode().asFile();
     EnumCounters<StorageType> typeSpaceDeltas =
         getStorageTypeDeltas(fileINode.getStoragePolicyID(), ssDelta,
-            replication, replication);;
+            replication, replication);
     updateCount(iip, iip.length() - 1,
         new QuotaCounts.Builder().nameSpace(nsDelta).storageSpace(ssDelta * replication).
             typeSpaces(typeSpaceDeltas).build(),
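
For context on this hunk: updateCount charges the aggregate storagespace quota as ssDelta * replication, and getStorageTypeDeltas splits that same delta across storage types according to the file's storage policy. Below is a minimal sketch of such a split, assuming a HOT policy where every replica lands on DISK; the class and method names are illustrative, not Hadoop APIs.

import org.apache.hadoop.fs.StorageType;

import java.util.EnumMap;
import java.util.Map;

/** Illustrative sketch of per-storage-type quota deltas; not part of the commit. */
public class TypeSpaceDeltaSketch {
  /**
   * Assumes the HOT storage policy, under which every replica is stored on
   * DISK, so the whole replicated delta is charged to the DISK type quota.
   */
  static Map<StorageType, Long> storageTypeDeltas(long ssDelta, short replication) {
    final Map<StorageType, Long> deltas = new EnumMap<>(StorageType.class);
    deltas.put(StorageType.DISK, ssDelta * replication);
    return deltas;
  }

  public static void main(String[] args) {
    // A 1 KB logical delta on a 3x-replicated file charges 3 KB against DISK.
    System.out.println(storageTypeDeltas(1024, (short) 3)); // {DISK=3072}
  }
}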

View File: FSNamesystem.java

@@ -3656,11 +3656,30 @@ void commitOrCompleteLastBlock(
     }
     // Adjust disk space consumption if required
-    // TODO: support EC files
-    final long diff = fileINode.getPreferredBlockSize() - commitBlock.getNumBytes();
+    final long diff;
+    final short replicationFactor;
+    if (fileINode.isStriped()) {
+      final ECSchema ecSchema = dir.getECSchema(iip);
+      final short numDataUnits = (short) ecSchema.getNumDataUnits();
+      final short numParityUnits = (short) ecSchema.getNumParityUnits();
+      final long numBlocks = numDataUnits + numParityUnits;
+      final long fullBlockGroupSize =
+          fileINode.getPreferredBlockSize() * numBlocks;
+      final BlockInfoStriped striped = new BlockInfoStriped(commitBlock,
+          numDataUnits, numParityUnits);
+      final long actualBlockGroupSize = striped.spaceConsumed();
+      diff = fullBlockGroupSize - actualBlockGroupSize;
+      replicationFactor = (short) 1;
+    } else {
+      diff = fileINode.getPreferredBlockSize() - commitBlock.getNumBytes();
+      replicationFactor = fileINode.getFileReplication();
+    }
     if (diff > 0) {
       try {
-        dir.updateSpaceConsumed(iip, 0, -diff, fileINode.getFileReplication());
+        dir.updateSpaceConsumed(iip, 0, -diff, replicationFactor);
       } catch (IOException e) {
         LOG.warn("Unexpected exception while updating disk space.", e);
       }
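
The striped branch above is the substance of the fix. When a striped block group is allocated, the quota is charged for a full group, preferredBlockSize * (numDataUnits + numParityUnits); on commit/complete, the unused remainder is released. The replication factor passed to updateSpaceConsumed becomes 1 because the striped block's spaceConsumed() already counts every internal block, parity included. A rough, self-contained model of that arithmetic follows, assuming the RS(6,3) system default schema, a 1 MB internal block, and one 64 KB cell written per internal block (all assumptions for illustration; the names below are not Hadoop APIs).

/** Rough model of the striped quota adjustment above; not part of the commit. */
public class StripedQuotaModel {
  static final int DATA_UNITS = 6;            // assumed RS(6,3) default schema
  static final int PARITY_UNITS = 3;
  static final long BLOCK_SIZE = 1024 * 1024; // preferred size of one internal block

  /** Space the group actually consumes when each internal block holds this many bytes. */
  static long actualBlockGroupSize(long bytesPerInternalBlock) {
    return bytesPerInternalBlock * (DATA_UNITS + PARITY_UNITS);
  }

  public static void main(String[] args) {
    final long fullBlockGroupSize = BLOCK_SIZE * (DATA_UNITS + PARITY_UNITS);
    // One 64 KB cell per internal block, as in the test below.
    final long actual = actualBlockGroupSize(64 * 1024);
    final long diff = fullBlockGroupSize - actual; // released from the quota
    System.out.println(diff); // 9 MB reserved - 576 KB used = 8847360 bytes
  }
}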

View File: TestQuotaWithStripedBlocks.java

@@ -0,0 +1,125 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.erasurecode.ECSchema;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;

import java.io.IOException;

/**
 * Make sure we correctly update the quota usage with the striped blocks.
 */
public class TestQuotaWithStripedBlocks {
  private static final int BLOCK_SIZE = 1024 * 1024;
  private static final long DISK_QUOTA = BLOCK_SIZE * 10;
  private static final ECSchema ecSchema =
      ErasureCodingSchemaManager.getSystemDefaultSchema();
  private static final int NUM_DATA_BLOCKS = ecSchema.getNumDataUnits();
  private static final int NUM_PARITY_BLOCKS = ecSchema.getNumParityUnits();
  private static final int GROUP_SIZE = NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS;
  private static final Path ecDir = new Path("/ec");

  private MiniDFSCluster cluster;
  private FSDirectory dir;
  private DistributedFileSystem dfs;

  @Before
  public void setUp() throws IOException {
    final Configuration conf = new Configuration();
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(GROUP_SIZE).build();
    cluster.waitActive();

    dir = cluster.getNamesystem().getFSDirectory();
    dfs = cluster.getFileSystem();

    dfs.mkdirs(ecDir);
    dfs.getClient().createErasureCodingZone(ecDir.toString(), ecSchema);
    dfs.setQuota(ecDir, Long.MAX_VALUE - 1, DISK_QUOTA);
    dfs.setQuotaByStorageType(ecDir, StorageType.DISK, DISK_QUOTA);
    dfs.setStoragePolicy(ecDir, HdfsServerConstants.HOT_STORAGE_POLICY_NAME);
  }

  @After
  public void tearDown() {
    if (cluster != null) {
      cluster.shutdown();
    }
  }

  @Test
  public void testUpdatingQuotaCount() throws Exception {
    final Path file = new Path(ecDir, "file");
    FSDataOutputStream out = null;

    try {
      out = dfs.create(file, (short) 1);

      INodeFile fileNode = dir.getINode4Write(file.toString()).asFile();
      ExtendedBlock previous = null;
      // Create striped blocks, which have a cell in each block.
      Block newBlock = DFSTestUtil.addStripedBlockToFile(cluster.getDataNodes(),
          dfs, cluster.getNamesystem(), file.toString(), fileNode,
          dfs.getClient().getClientName(), previous, 1);
      previous = new ExtendedBlock(cluster.getNamesystem().getBlockPoolId(),
          newBlock);

      final INodeDirectory dirNode = dir.getINode4Write(ecDir.toString())
          .asDirectory();
      final long spaceUsed = dirNode.getDirectoryWithQuotaFeature()
          .getSpaceConsumed().getStorageSpace();
      final long diskUsed = dirNode.getDirectoryWithQuotaFeature()
          .getSpaceConsumed().getTypeSpaces().get(StorageType.DISK);
      // When we add a new block we update the quota using the full block size.
      Assert.assertEquals(BLOCK_SIZE * GROUP_SIZE, spaceUsed);
      Assert.assertEquals(BLOCK_SIZE * GROUP_SIZE, diskUsed);

      dfs.getClient().getNamenode().complete(file.toString(),
          dfs.getClient().getClientName(), previous, fileNode.getId());

      final long actualSpaceUsed = dirNode.getDirectoryWithQuotaFeature()
          .getSpaceConsumed().getStorageSpace();
      final long actualDiskUsed = dirNode.getDirectoryWithQuotaFeature()
          .getSpaceConsumed().getTypeSpaces().get(StorageType.DISK);
      // In this case the file's real size is cell size * block group size.
      Assert.assertEquals(HdfsConstants.BLOCK_STRIPED_CELL_SIZE * GROUP_SIZE,
          actualSpaceUsed);
      Assert.assertEquals(HdfsConstants.BLOCK_STRIPED_CELL_SIZE * GROUP_SIZE,
          actualDiskUsed);
    } finally {
      IOUtils.cleanup(null, out);
    }
  }
}
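
To make the assertions concrete: assuming the system default schema is RS(6,3), so GROUP_SIZE is 9, and assuming HdfsConstants.BLOCK_STRIPED_CELL_SIZE is 64 KB (an assumption; the test reads whatever the branch defines), the expected quota values work out as below.

/** The values the assertions above expect, under assumed defaults; not part of the commit. */
public class ExpectedQuotaNumbers {
  public static void main(String[] args) {
    final long blockSize = 1024 * 1024; // BLOCK_SIZE in the test
    final int groupSize = 6 + 3;        // NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS, assuming RS(6,3)
    final long cellSize = 64 * 1024;    // assumed BLOCK_STRIPED_CELL_SIZE
    // addStripedBlockToFile charges a full block group up front...
    System.out.println(blockSize * groupSize); // 9437184 (9 MB)
    // ...and complete() shrinks the charge to the bytes actually written:
    // one cell per internal block.
    System.out.println(cellSize * groupSize);  // 589824 (576 KB)
  }
}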