HDFS-5300. Merge change r1529294 from trunk.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1529305 13f79535-47bb-0310-9956-ffa450edef68
parent bab129d135
commit f6a21000d4
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -150,6 +150,9 @@ Release 2.1.2 - UNRELEASED
     HDFS-5289. Race condition in TestRetryCacheWithHA#testCreateSymlink causes
     spurious test failure. (atm)
 
+    HDFS-5300. FSNameSystem#deleteSnapshot() should not check owner in case of
+    permissions disabled. (Vinay via jing9)
+
 Release 2.1.1-beta - 2013-09-23
 
   INCOMPATIBLE CHANGES
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -6779,7 +6779,9 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       throw new SafeModeException(
           "Cannot delete snapshot for " + snapshotRoot, safeMode);
     }
-    checkOwner(pc, snapshotRoot);
+    if (isPermissionEnabled) {
+      checkOwner(pc, snapshotRoot);
+    }
 
     BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
     List<INode> removedINodes = new ChunkedArrayList<INode>();
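The fix gates the owner check behind the same switch the rest of FSNamesystem
consults: dfs.permissions.enabled (DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY).
With permissions disabled, an unconditional checkOwner() would reject
deleteSnapshot() from any user other than the snapshot root's owner, which is
the bug HDFS-5300 fixes. A minimal stand-alone sketch of the pattern follows;
OwnerCheckSketch and its String-based arguments are illustrative stand-ins,
not the FSNamesystem API, though the configuration key and default are real:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class OwnerCheckSketch {
  private final boolean isPermissionEnabled;

  public OwnerCheckSketch(Configuration conf) {
    // Same flag FSNamesystem reads; permission checking defaults to on.
    this.isPermissionEnabled = conf.getBoolean(
        DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY,
        DFSConfigKeys.DFS_PERMISSIONS_ENABLED_DEFAULT);
  }

  /** Rejects the caller only when permission checking is enabled. */
  void checkOwner(String caller, String owner, String snapshotRoot) {
    if (isPermissionEnabled && !caller.equals(owner)) {
      throw new SecurityException(
          caller + " is not the owner of " + snapshotRoot);
    }
  }
}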
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java

@@ -25,10 +25,12 @@ import static org.junit.Assert.fail;
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.security.PrivilegedAction;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
@@ -45,7 +47,9 @@ import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.Quota;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.DirectoryDiffList;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.Before;
@@ -778,6 +782,39 @@ public class TestSnapshotDeletion {
     assertEquals("group1", statusOfS1.getGroup());
   }
 
+  @Test
+  public void testDeleteSnapshotWithPermissionsDisabled() throws Exception {
+    cluster.shutdown();
+    Configuration newConf = new Configuration(conf);
+    newConf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);
+    cluster = new MiniDFSCluster.Builder(newConf).numDataNodes(0).build();
+    cluster.waitActive();
+    hdfs = cluster.getFileSystem();
+
+    final Path path = new Path("/dir");
+    hdfs.mkdirs(path);
+    hdfs.allowSnapshot(path);
+    hdfs.mkdirs(new Path(path, "/test"));
+    hdfs.createSnapshot(path, "s1");
+    UserGroupInformation anotherUser = UserGroupInformation
+        .createRemoteUser("anotheruser");
+    anotherUser.doAs(new PrivilegedAction<Object>() {
+      @Override
+      public Object run() {
+        DistributedFileSystem anotherUserFS = null;
+        try {
+          anotherUserFS = cluster.getFileSystem();
+          anotherUserFS.deleteSnapshot(path, "s1");
+        } catch (IOException e) {
+          fail("Failed to delete snapshot : " + e.getLocalizedMessage());
+        } finally {
+          IOUtils.closeStream(anotherUserFS);
+        }
+        return null;
+      }
+    });
+  }
+
   /**
    * A test covering the case where the snapshot diff to be deleted is renamed
    * to its previous snapshot.
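The new test can be run in isolation against a local build, assuming the
standard Maven surefire setup in the hadoop-hdfs module (the -Dtest filter is
ordinary surefire usage, nothing this patch adds):

cd hadoop-hdfs-project/hadoop-hdfs
mvn test -Dtest=TestSnapshotDeletion#testDeleteSnapshotWithPermissionsDisabled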