HDFS-4216. Do not ignore QuotaExceededException when adding symlinks.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1412237 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
parent
d6af507199
commit
0f1899ee19
|
@ -624,6 +624,9 @@ Release 2.0.3-alpha - Unreleased
|
|||
|
||||
HDFS-4179. BackupNode: allow reads, fix checkpointing, safeMode. (shv)
|
||||
|
||||
HDFS-4216. Do not ignore QuotaExceededException when adding symlinks.
|
||||
(szetszwo)
|
||||
|
||||
Release 2.0.2-alpha - 2012-09-07
|
||||
|
||||
INCOMPATIBLE CHANGES
|
||||
|
|
|
@ -2041,7 +2041,7 @@ public class FSDirectory implements Closeable {
|
|||
INodeSymlink addSymlink(String path, String target,
|
||||
PermissionStatus dirPerms, boolean createParent)
|
||||
throws UnresolvedLinkException, FileAlreadyExistsException,
|
||||
QuotaExceededException, IOException {
|
||||
QuotaExceededException {
|
||||
waitForReady();
|
||||
|
||||
final long modTime = now();
|
||||
|
@ -2055,7 +2055,7 @@ public class FSDirectory implements Closeable {
|
|||
INodeSymlink newNode = null;
|
||||
writeLock();
|
||||
try {
|
||||
newNode = unprotectedSymlink(path, target, modTime, modTime,
|
||||
newNode = unprotectedAddSymlink(path, target, modTime, modTime,
|
||||
new PermissionStatus(userName, null, FsPermission.getDefault()));
|
||||
} finally {
|
||||
writeUnlock();
|
||||
|
@ -2075,23 +2075,12 @@ public class FSDirectory implements Closeable {
|
|||
/**
|
||||
* Add the specified path into the namespace. Invoked from edit log processing.
|
||||
*/
|
||||
INodeSymlink unprotectedSymlink(String path, String target, long modTime,
|
||||
INodeSymlink unprotectedAddSymlink(String path, String target, long mtime,
|
||||
long atime, PermissionStatus perm)
|
||||
throws UnresolvedLinkException {
|
||||
throws UnresolvedLinkException, QuotaExceededException {
|
||||
assert hasWriteLock();
|
||||
INodeSymlink newNode = new INodeSymlink(target, modTime, atime, perm);
|
||||
try {
|
||||
newNode = addNode(path, newNode, UNKNOWN_DISK_SPACE);
|
||||
} catch (UnresolvedLinkException e) {
|
||||
/* All UnresolvedLinkExceptions should have been resolved by now, but we
|
||||
* should re-throw them in case that changes so they are not swallowed
|
||||
* by catching IOException below.
|
||||
*/
|
||||
throw e;
|
||||
} catch (IOException e) {
|
||||
return null;
|
||||
}
|
||||
return newNode;
|
||||
final INodeSymlink symlink = new INodeSymlink(target, mtime, atime, perm);
|
||||
return addNode(path, symlink, UNKNOWN_DISK_SPACE);
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -426,7 +426,7 @@ public class FSEditLogLoader {
|
|||
}
|
||||
case OP_SYMLINK: {
|
||||
SymlinkOp symlinkOp = (SymlinkOp)op;
|
||||
fsDir.unprotectedSymlink(symlinkOp.path, symlinkOp.value,
|
||||
fsDir.unprotectedAddSymlink(symlinkOp.path, symlinkOp.value,
|
||||
symlinkOp.mtime, symlinkOp.atime,
|
||||
symlinkOp.permissionStatus);
|
||||
break;
|
||||
|
|
|
@ -28,9 +28,11 @@ import org.apache.commons.logging.impl.Log4JLogger;
|
|||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.permission.FsPermission;
|
||||
import org.apache.hadoop.hdfs.DFSConfigKeys;
|
||||
import org.apache.hadoop.hdfs.DistributedFileSystem;
|
||||
import org.apache.hadoop.hdfs.HdfsConfiguration;
|
||||
import org.apache.hadoop.hdfs.MiniDFSCluster;
|
||||
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
|
||||
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
|
||||
import org.apache.hadoop.hdfs.server.namenode.NameNode;
|
||||
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
|
||||
import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
|
||||
|
@ -51,6 +53,7 @@ public class TestFcHdfsSymlink extends FileContextSymlinkBaseTest {
|
|||
|
||||
private static MiniDFSCluster cluster;
|
||||
private static WebHdfsFileSystem webhdfs;
|
||||
private static DistributedFileSystem dfs;
|
||||
|
||||
|
||||
@Override
|
||||
|
@ -89,6 +92,7 @@ public class TestFcHdfsSymlink extends FileContextSymlinkBaseTest {
|
|||
cluster = new MiniDFSCluster.Builder(conf).build();
|
||||
fc = FileContext.getFileContext(cluster.getURI(0));
|
||||
webhdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf);
|
||||
dfs = cluster.getFileSystem();
|
||||
}
|
||||
|
||||
@AfterClass
|
||||
|
@ -317,4 +321,27 @@ public class TestFcHdfsSymlink extends FileContextSymlinkBaseTest {
|
|||
assertEquals(2, fc.getFileStatus(link).getReplication());
|
||||
assertEquals(2, fc.getFileStatus(file).getReplication());
|
||||
}
|
||||
|
||||
@Test
|
||||
/** Test createSymlink(..) with quota. */
|
||||
public void testQuota() throws IOException {
|
||||
final Path dir = new Path(testBaseDir1());
|
||||
dfs.setQuota(dir, 3, HdfsConstants.QUOTA_DONT_SET);
|
||||
|
||||
final Path file = new Path(dir, "file");
|
||||
createAndWriteFile(file);
|
||||
|
||||
//creating the first link should succeed
|
||||
final Path link1 = new Path(dir, "link1");
|
||||
fc.createSymlink(file, link1, false);
|
||||
|
||||
try {
|
||||
//creating the second link should fail with QuotaExceededException.
|
||||
final Path link2 = new Path(dir, "link2");
|
||||
fc.createSymlink(file, link2, false);
|
||||
fail("Created symlink despite quota violation");
|
||||
} catch(QuotaExceededException qee) {
|
||||
//expected
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
Loading…
Reference in New Issue