HDFS-6535. Merge r1604226 from trunk.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1604228 13f79535-47bb-0310-9956-ffa450edef68
parent be0fae3996
commit eb7ea60a9a
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -427,6 +427,9 @@ Release 2.5.0 - UNRELEASED
     HDFS-6549. Add support for accessing the NFS gateway from the AIX NFS
     client. (atm)
 
+    HDFS-6535. HDFS quota update is wrong when file is appended. (George Wong
+    via jing9)
+
   BREAKDOWN OF HDFS-2006 SUBTASKS AND RELATED JIRAS
 
     HDFS-6299. Protobuf for XAttr and client-side implementation. (Yi Liu via umamahesh)
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -2505,7 +2505,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     if (ret != null) {
       // update the quota: use the preferred block size for UC block
       final long diff = file.getPreferredBlockSize() - ret.getBlockSize();
-      dir.updateSpaceConsumed(src, 0, diff);
+      dir.updateSpaceConsumed(src, 0, diff * file.getBlockReplication());
     }
 
     if (writeToEditLog) {
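The one-line change above is the whole fix: when a file is reopened for append, the namenode charges the under-construction block at its preferred (full) block size, and the difference between that and the current last-block size must be charged once per replica, not once per file. Below is a standalone sketch of that arithmetic, assuming the same numbers the test uses (BLOCKSIZE = 1024, REPLICATION = 4); the class and variable names are illustrative, not HDFS code.

// Sketch only (not HDFS code): the diskspace charge added when an append
// reopens a file whose last block is half full.
public class AppendQuotaChargeSketch {
  public static void main(String[] args) {
    long preferredBlockSize = 1024; // BLOCKSIZE in the test below
    long lastBlockSize = 512;       // last block is half full when append starts
    short replication = 4;          // REPLICATION in the test below

    // Extra space the under-construction block may still grow into, per replica.
    long diff = preferredBlockSize - lastBlockSize;

    long chargedBefore = diff;               // old code: charged once
    long chargedAfter = diff * replication;  // patched code: charged per replica

    System.out.println(chargedBefore); // 512
    System.out.println(chargedAfter);  // 2048
  }
}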
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java
@@ -18,10 +18,12 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 
 import java.util.EnumSet;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -36,7 +38,9 @@ import org.junit.Test;
 
 public class TestDiskspaceQuotaUpdate {
   private static final int BLOCKSIZE = 1024;
-  private static final short REPLICATION = 1;
+  private static final short REPLICATION = 4;
+  static final long seed = 0L;
+  private static final Path dir = new Path("/TestQuotaUpdate");
 
   private Configuration conf;
   private MiniDFSCluster cluster;
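Raising REPLICATION from 1 to 4 is what makes the regression test meaningful: with a replication factor of 1, a charge that forgets the factor is numerically identical to the correct one, so the old assertions could never fail. A minimal illustration (plain Java, names made up for this note):

// With replication 1 the buggy and the fixed charge coincide; with 4 they differ.
public class WhyReplicationFourSketch {
  public static void main(String[] args) {
    long diff = 512; // preferred block size minus current last-block size
    for (short replication : new short[] {1, 4}) {
      long buggy = diff;                // missing "* replication"
      long fixed = diff * replication;  // per-replica charge
      System.out.println(replication + ": buggy=" + buggy + " fixed=" + fixed);
    }
  }
}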
@@ -62,42 +66,84 @@ public class TestDiskspaceQuotaUpdate {
     }
   }
 
+  /**
+   * Test if the quota can be correctly updated for create file
+   */
+  @Test (timeout=60000)
+  public void testQuotaUpdateWithFileCreate() throws Exception {
+    final Path foo = new Path(dir, "foo");
+    Path createdFile = new Path(foo, "created_file.data");
+    dfs.mkdirs(foo);
+    dfs.setQuota(foo, Long.MAX_VALUE-1, Long.MAX_VALUE-1);
+    long fileLen = BLOCKSIZE * 2 + BLOCKSIZE / 2;
+    DFSTestUtil.createFile(dfs, createdFile, BLOCKSIZE / 16,
+        fileLen, BLOCKSIZE, REPLICATION, seed);
+    INode fnode = fsdir.getINode4Write(foo.toString());
+    assertTrue(fnode.isDirectory());
+    assertTrue(fnode.isQuotaSet());
+    Quota.Counts cnt = fnode.asDirectory().getDirectoryWithQuotaFeature()
+        .getSpaceConsumed();
+    assertEquals(2, cnt.get(Quota.NAMESPACE));
+    assertEquals(fileLen * REPLICATION, cnt.get(Quota.DISKSPACE));
+  }
+
   /**
    * Test if the quota can be correctly updated for append
    */
-  @Test
+  @Test (timeout=60000)
   public void testUpdateQuotaForAppend() throws Exception {
-    final Path foo = new Path("/foo");
+    final Path foo = new Path(dir ,"foo");
     final Path bar = new Path(foo, "bar");
-    DFSTestUtil.createFile(dfs, bar, BLOCKSIZE, REPLICATION, 0L);
+    long currentFileLen = BLOCKSIZE;
+    DFSTestUtil.createFile(dfs, bar, currentFileLen, REPLICATION, seed);
     dfs.setQuota(foo, Long.MAX_VALUE-1, Long.MAX_VALUE-1);
 
-    // append half of the block data
+    // append half of the block data, the previous file length is at block
+    // boundary
     DFSTestUtil.appendFile(dfs, bar, BLOCKSIZE / 2);
+    currentFileLen += (BLOCKSIZE / 2);
 
     INodeDirectory fooNode = fsdir.getINode4Write(foo.toString()).asDirectory();
+    assertTrue(fooNode.isQuotaSet());
     Quota.Counts quota = fooNode.getDirectoryWithQuotaFeature()
         .getSpaceConsumed();
     long ns = quota.get(Quota.NAMESPACE);
     long ds = quota.get(Quota.DISKSPACE);
     assertEquals(2, ns); // foo and bar
-    assertEquals((BLOCKSIZE + BLOCKSIZE / 2) * REPLICATION, ds);
+    assertEquals(currentFileLen * REPLICATION, ds);
+    ContentSummary c = dfs.getContentSummary(foo);
+    assertEquals(c.getSpaceConsumed(), ds);
 
-    // append another block
+    // append another block, the previous file length is not at block boundary
     DFSTestUtil.appendFile(dfs, bar, BLOCKSIZE);
+    currentFileLen += BLOCKSIZE;
 
     quota = fooNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
     ns = quota.get(Quota.NAMESPACE);
     ds = quota.get(Quota.DISKSPACE);
     assertEquals(2, ns); // foo and bar
-    assertEquals((BLOCKSIZE * 2 + BLOCKSIZE / 2) * REPLICATION, ds);
+    assertEquals(currentFileLen * REPLICATION, ds);
+    c = dfs.getContentSummary(foo);
+    assertEquals(c.getSpaceConsumed(), ds);
+
+    // append several blocks
+    DFSTestUtil.appendFile(dfs, bar, BLOCKSIZE * 3 + BLOCKSIZE / 8);
+    currentFileLen += (BLOCKSIZE * 3 + BLOCKSIZE / 8);
+
+    quota = fooNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
+    ns = quota.get(Quota.NAMESPACE);
+    ds = quota.get(Quota.DISKSPACE);
+    assertEquals(2, ns); // foo and bar
+    assertEquals(currentFileLen * REPLICATION, ds);
+    c = dfs.getContentSummary(foo);
+    assertEquals(c.getSpaceConsumed(), ds);
   }
 
   /**
   * Test if the quota can be correctly updated when file length is updated
   * through fsync
   */
-  @Test
+  @Test (timeout=60000)
   public void testUpdateQuotaForFSync() throws Exception {
     final Path foo = new Path("/foo");
     final Path bar = new Path(foo, "bar");
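For quick reference, the diskspace values the updated append test asserts at each step can be checked standalone (a plain arithmetic sketch, not part of the patch; BLOCKSIZE = 1024 and REPLICATION = 4 as in the constants above, class name made up):

// Walks the file length through the same three appends as the test above and
// prints the diskspace the quota should show after each one.
public class AppendQuotaExpectedValues {
  public static void main(String[] args) {
    final long BLOCKSIZE = 1024;
    final short REPLICATION = 4;

    long len = BLOCKSIZE;                  // initial one-block file

    len += BLOCKSIZE / 2;                  // append from a block boundary
    System.out.println(len * REPLICATION); // 6144

    len += BLOCKSIZE;                      // append starting mid-block
    System.out.println(len * REPLICATION); // 10240

    len += BLOCKSIZE * 3 + BLOCKSIZE / 8;  // append several blocks
    System.out.println(len * REPLICATION); // 23040
  }
}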