svn merge -c 1412237 from trunk for HDFS-4216. Do not ignore QuotaExceededException when adding symlinks.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1412244 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Tsz-wo Sze 2012-11-21 19:33:46 +00:00
parent 7a8bc99858
commit c14334d536
4 changed files with 36 additions and 18 deletions

View File

@ -260,6 +260,8 @@ Release 2.0.3-alpha - Unreleased
HDFS-4179. BackupNode: allow reads, fix checkpointing, safeMode. (shv) HDFS-4179. BackupNode: allow reads, fix checkpointing, safeMode. (shv)
HDFS-4216. Do not ignore QuotaExceededException when adding symlinks.
(szetszwo)
Release 2.0.2-alpha - 2012-09-07 Release 2.0.2-alpha - 2012-09-07

View File

@ -2110,7 +2110,7 @@ public class FSDirectory implements Closeable {
INodeSymlink addSymlink(String path, String target, INodeSymlink addSymlink(String path, String target,
PermissionStatus dirPerms, boolean createParent) PermissionStatus dirPerms, boolean createParent)
throws UnresolvedLinkException, FileAlreadyExistsException, throws UnresolvedLinkException, FileAlreadyExistsException,
QuotaExceededException, IOException { QuotaExceededException {
waitForReady(); waitForReady();
final long modTime = now(); final long modTime = now();
@ -2124,7 +2124,7 @@ public class FSDirectory implements Closeable {
INodeSymlink newNode = null; INodeSymlink newNode = null;
writeLock(); writeLock();
try { try {
newNode = unprotectedSymlink(path, target, modTime, modTime, newNode = unprotectedAddSymlink(path, target, modTime, modTime,
new PermissionStatus(userName, null, FsPermission.getDefault())); new PermissionStatus(userName, null, FsPermission.getDefault()));
} finally { } finally {
writeUnlock(); writeUnlock();
@ -2144,23 +2144,12 @@ public class FSDirectory implements Closeable {
/** /**
* Add the specified path into the namespace. Invoked from edit log processing. * Add the specified path into the namespace. Invoked from edit log processing.
*/ */
INodeSymlink unprotectedSymlink(String path, String target, long modTime, INodeSymlink unprotectedAddSymlink(String path, String target, long mtime,
long atime, PermissionStatus perm) long atime, PermissionStatus perm)
throws UnresolvedLinkException { throws UnresolvedLinkException, QuotaExceededException {
assert hasWriteLock(); assert hasWriteLock();
INodeSymlink newNode = new INodeSymlink(target, modTime, atime, perm); final INodeSymlink symlink = new INodeSymlink(target, mtime, atime, perm);
try { return addNode(path, symlink, UNKNOWN_DISK_SPACE);
newNode = addNode(path, newNode, UNKNOWN_DISK_SPACE);
} catch (UnresolvedLinkException e) {
/* All UnresolvedLinkExceptions should have been resolved by now, but we
* should re-throw them in case that changes so they are not swallowed
* by catching IOException below.
*/
throw e;
} catch (IOException e) {
return null;
}
return newNode;
} }
/** /**

View File

@ -428,7 +428,7 @@ public class FSEditLogLoader {
} }
case OP_SYMLINK: { case OP_SYMLINK: {
SymlinkOp symlinkOp = (SymlinkOp)op; SymlinkOp symlinkOp = (SymlinkOp)op;
fsDir.unprotectedSymlink(symlinkOp.path, symlinkOp.value, fsDir.unprotectedAddSymlink(symlinkOp.path, symlinkOp.value,
symlinkOp.mtime, symlinkOp.atime, symlinkOp.mtime, symlinkOp.atime,
symlinkOp.permissionStatus); symlinkOp.permissionStatus);
break; break;

View File

@ -28,9 +28,11 @@ import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem; import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
import org.apache.hadoop.hdfs.web.WebHdfsTestUtil; import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
@ -51,6 +53,7 @@ public class TestFcHdfsSymlink extends FileContextSymlinkBaseTest {
private static MiniDFSCluster cluster; private static MiniDFSCluster cluster;
private static WebHdfsFileSystem webhdfs; private static WebHdfsFileSystem webhdfs;
private static DistributedFileSystem dfs;
@Override @Override
@ -89,6 +92,7 @@ public class TestFcHdfsSymlink extends FileContextSymlinkBaseTest {
cluster = new MiniDFSCluster.Builder(conf).build(); cluster = new MiniDFSCluster.Builder(conf).build();
fc = FileContext.getFileContext(cluster.getURI(0)); fc = FileContext.getFileContext(cluster.getURI(0));
webhdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf); webhdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf);
dfs = cluster.getFileSystem();
} }
@AfterClass @AfterClass
@ -317,4 +321,27 @@ public class TestFcHdfsSymlink extends FileContextSymlinkBaseTest {
assertEquals(2, fc.getFileStatus(link).getReplication()); assertEquals(2, fc.getFileStatus(link).getReplication());
assertEquals(2, fc.getFileStatus(file).getReplication()); assertEquals(2, fc.getFileStatus(file).getReplication());
} }
/**
 * Test createSymlink(..) under a namespace quota: creating a symlink that
 * would exceed the directory's namespace quota must fail with
 * QuotaExceededException rather than being silently ignored (HDFS-4216).
 */
@Test
public void testQuota() throws IOException {
final Path dir = new Path(testBaseDir1());
// Namespace quota of 3: the dir itself, one file, and one link fill it.
dfs.setQuota(dir, 3, HdfsConstants.QUOTA_DONT_SET);
final Path file = new Path(dir, "file");
createAndWriteFile(file);
//creating the first link should succeed
final Path link1 = new Path(dir, "link1");
fc.createSymlink(file, link1, false);
try {
//creating the second link should fail with QuotaExceededException.
final Path link2 = new Path(dir, "link2");
fc.createSymlink(file, link2, false);
fail("Created symlink despite quota violation");
} catch(QuotaExceededException qee) {
//expected: quota violation must surface to the caller
}
}
} }