HDFS-11432. Federation : Support fully qualified path for Quota/Snapshot/cacheadmin/cryptoadmin commands. Contributed by Brahma Reddy Battula.

Brahma Reddy Battula 2017-03-01 10:45:56 +05:30
parent 989bd56b9f
commit dcd03df9f9
7 changed files with 136 additions and 40 deletions
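Before this change, the quota, snapshot, cacheadmin, and cryptoadmin tools always bound to fs.defaultFS, so in a federated cluster they could only act on the default nameservice. The patch resolves the target filesystem from the path argument itself. A minimal sketch of what this enables, assuming a hypothetical federated setup with nameservices ns1 and ns2 (ToolRunner and DFSAdmin are the real entry points; the class name and URIs are illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.apache.hadoop.util.ToolRunner;

public class FederatedQuotaExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // fs.defaultFS points at ns1, but the quota target names ns2.
    conf.set("fs.defaultFS", "hdfs://ns1");
    // With this patch the fully qualified path routes the command to ns2;
    // previously the command would have been applied against ns1.
    int rc = ToolRunner.run(new DFSAdmin(conf),
        new String[] {"-setQuota", "1000", "hdfs://ns2/user/data"});
    System.exit(rc);
  }
}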

File: org/apache/hadoop/fs/shell/Command.java

@@ -101,7 +101,17 @@ abstract public class Command extends Configured {
    * @throws IOException if any error occurs
    */
   abstract protected void run(Path path) throws IOException;
 
+  /**
+   * Execute the command on the input path data. Commands can override to make
+   * use of the resolved filesystem.
+   * @param pathData The input path with resolved filesystem
+   * @throws IOException
+   */
+  protected void run(PathData pathData) throws IOException {
+    run(pathData.path);
+  }
+
   /**
    * For each source path, execute the command
    *
@@ -113,7 +123,7 @@ abstract public class Command extends Configured {
     try {
       PathData[] srcs = PathData.expandAsGlob(src, getConf());
       for (PathData s : srcs) {
-        run(s.path);
+        run(s);
       }
     } catch (IOException e) {
       exitCode = -1;

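The new run(PathData) hook lets a command operate on the filesystem the path actually resolved to, instead of re-resolving the bare Path against fs.defaultFS. A minimal sketch of a subclass using it (the command class is hypothetical, for illustration only; PathData.fs and PathData.path are the real public fields):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.shell.Command;
import org.apache.hadoop.fs.shell.PathData;

// Hypothetical command, not part of this patch.
class PrintFsCommand extends Command {
  PrintFsCommand(Configuration conf) {
    super(conf);
  }

  @Override
  protected void run(PathData pathData) throws IOException {
    // pathData.fs is the filesystem the (possibly fully qualified)
    // path resolved to; it may differ from fs.defaultFS.
    System.out.println(pathData.path + " -> " + pathData.fs.getUri());
  }

  @Override
  protected void run(Path path) throws IOException {
    // Not reached: all work happens in run(PathData).
  }
}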
File: org/apache/hadoop/hdfs/tools/CacheAdmin.java

@@ -189,8 +189,9 @@ public class CacheAdmin extends Configured implements Tool {
         System.err.println("Can't understand argument: " + args.get(0));
         return 1;
       }
-      DistributedFileSystem dfs = AdminHelper.getDFS(conf);
+      DistributedFileSystem dfs =
+          AdminHelper.getDFS(new Path(path).toUri(), conf);
       CacheDirectiveInfo directive = builder.build();
       EnumSet<CacheFlag> flags = EnumSet.noneOf(CacheFlag.class);
       if (force) {
@@ -409,7 +410,8 @@ public class CacheAdmin extends Configured implements Tool {
       }
       int exitCode = 0;
       try {
-        DistributedFileSystem dfs = AdminHelper.getDFS(conf);
+        DistributedFileSystem dfs =
+            AdminHelper.getDFS(new Path(path).toUri(), conf);
         RemoteIterator<CacheDirectiveEntry> iter =
             dfs.listCacheDirectives(
                 new CacheDirectiveInfo.Builder().

File: org/apache/hadoop/hdfs/tools/CryptoAdmin.java

@@ -139,12 +139,12 @@ public class CryptoAdmin extends Configured implements Tool {
         System.err.println("Can't understand argument: " + args.get(0));
         return 1;
       }
-      HdfsAdmin admin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
+      Path p = new Path(path);
+      HdfsAdmin admin = new HdfsAdmin(p.toUri(), conf);
       EnumSet<CreateEncryptionZoneFlag> flags =
           EnumSet.of(CreateEncryptionZoneFlag.PROVISION_TRASH);
       try {
-        admin.createEncryptionZone(new Path(path), keyName, flags);
+        admin.createEncryptionZone(p, keyName, flags);
         System.out.println("Added encryption zone " + path);
       } catch (IOException e) {
         System.err.println(prettifyException(e));
@@ -226,12 +226,12 @@ public class CryptoAdmin extends Configured implements Tool {
         System.err.println("Can't understand argument: " + args.get(0));
         return 1;
       }
+      Path p = new Path(path);
       final HdfsAdmin admin =
-          new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
+          new HdfsAdmin(p.toUri(), conf);
       try {
         final FileEncryptionInfo fei =
-            admin.getFileEncryptionInfo(new Path(path));
+            admin.getFileEncryptionInfo(p);
         if (fei == null) {
           System.out.println("No FileEncryptionInfo found for path " + path);
           return 2;
@@ -273,10 +273,10 @@ public class CryptoAdmin extends Configured implements Tool {
         System.err.println("Can't understand argument: " + args.get(0));
         return 1;
       }
-      HdfsAdmin admin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
+      Path p = new Path(path);
+      HdfsAdmin admin = new HdfsAdmin(p.toUri(), conf);
       try {
-        admin.provisionEncryptionZoneTrash(new Path(path));
+        admin.provisionEncryptionZoneTrash(p);
         System.out.println("Created a trash directory for " + path);
       } catch (IOException ioe) {
         System.err.println(prettifyException(ioe));

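The same pattern works outside the CLI: HdfsAdmin already accepted an explicit URI, which the tool now derives from the path instead of FileSystem.getDefaultUri(conf). A hedged sketch of the equivalent programmatic call (the class name, nameservice URI, and key name are illustrative; the key is assumed to exist in the configured KMS):

import java.util.EnumSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.CreateEncryptionZoneFlag;
import org.apache.hadoop.hdfs.client.HdfsAdmin;

public class CreateZoneExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // A fully qualified path; its URI picks the target NameNode.
    Path zone = new Path("hdfs://ns2/secure");
    HdfsAdmin admin = new HdfsAdmin(zone.toUri(), conf);
    // "key1" is an assumed KMS key, for illustration.
    admin.createEncryptionZone(zone, "key1",
        EnumSet.of(CreateEncryptionZoneFlag.PROVISION_TRASH));
  }
}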
File: org/apache/hadoop/hdfs/tools/DFSAdmin.java

@@ -52,6 +52,7 @@ import org.apache.hadoop.fs.FsStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.shell.Command;
 import org.apache.hadoop.fs.shell.CommandFormat;
+import org.apache.hadoop.fs.shell.PathData;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.HAUtilClient;
@@ -109,15 +110,21 @@ public class DFSAdmin extends FsShell {
    * An abstract class for the execution of a file system command
    */
   abstract private static class DFSAdminCommand extends Command {
-    final DistributedFileSystem dfs;
+    protected DistributedFileSystem dfs;
 
     /** Constructor */
-    public DFSAdminCommand(FileSystem fs) {
-      super(fs.getConf());
+    public DFSAdminCommand(Configuration conf) {
+      super(conf);
+    }
+
+    @Override
+    public void run(PathData pathData) throws IOException {
+      FileSystem fs = pathData.fs;
       if (!(fs instanceof DistributedFileSystem)) {
-        throw new IllegalArgumentException("FileSystem " + fs.getUri() +
-            " is not an HDFS file system");
+        throw new IllegalArgumentException("FileSystem " + fs.getUri()
+            + " is not an HDFS file system");
       }
-      this.dfs = (DistributedFileSystem)fs;
+      this.dfs = (DistributedFileSystem) fs;
+      run(pathData.path);
     }
   }
@@ -133,8 +140,8 @@ public class DFSAdmin extends FsShell {
         "\t\tIt does not fault if the directory has no quota.";
 
     /** Constructor */
-    ClearQuotaCommand(String[] args, int pos, FileSystem fs) {
-      super(fs);
+    ClearQuotaCommand(String[] args, int pos, Configuration conf) {
+      super(conf);
       CommandFormat c = new CommandFormat(1, Integer.MAX_VALUE);
       List<String> parameters = c.parse(args, pos);
       this.args = parameters.toArray(new String[parameters.size()]);
@@ -179,8 +186,8 @@ public class DFSAdmin extends FsShell {
     private final long quota; // the quota to be set
 
     /** Constructor */
-    SetQuotaCommand(String[] args, int pos, FileSystem fs) {
-      super(fs);
+    SetQuotaCommand(String[] args, int pos, Configuration conf) {
+      super(conf);
       CommandFormat c = new CommandFormat(2, Integer.MAX_VALUE);
       List<String> parameters = c.parse(args, pos);
       this.quota = Long.parseLong(parameters.remove(0));
@@ -230,8 +237,8 @@ public class DFSAdmin extends FsShell {
     private StorageType type;
 
     /** Constructor */
-    ClearSpaceQuotaCommand(String[] args, int pos, FileSystem fs) {
-      super(fs);
+    ClearSpaceQuotaCommand(String[] args, int pos, Configuration conf) {
+      super(conf);
       CommandFormat c = new CommandFormat(1, Integer.MAX_VALUE);
       c.addOptionWithValue("storageType");
       List<String> parameters = c.parse(args, pos);
@@ -294,8 +301,8 @@ public class DFSAdmin extends FsShell {
     private StorageType type;
 
     /** Constructor */
-    SetSpaceQuotaCommand(String[] args, int pos, FileSystem fs) {
-      super(fs);
+    SetSpaceQuotaCommand(String[] args, int pos, Configuration conf) {
+      super(conf);
       CommandFormat c = new CommandFormat(2, Integer.MAX_VALUE);
       List<String> parameters = c.parse(args, pos);
       String str = parameters.remove(0).trim();
@@ -705,10 +712,11 @@ public class DFSAdmin extends FsShell {
    * @param argv List of of command line parameters.
    * @exception IOException
    */
   public void allowSnapshot(String[] argv) throws IOException {
-    DistributedFileSystem dfs = getDFS();
+    Path p = new Path(argv[1]);
+    final DistributedFileSystem dfs = AdminHelper.getDFS(p.toUri(), getConf());
     try {
-      dfs.allowSnapshot(new Path(argv[1]));
+      dfs.allowSnapshot(p);
     } catch (SnapshotException e) {
       throw new RemoteException(e.getClass().getName(), e.getMessage());
     }
@@ -721,10 +729,11 @@ public class DFSAdmin extends FsShell {
    * @param argv List of of command line parameters.
    * @exception IOException
    */
   public void disallowSnapshot(String[] argv) throws IOException {
-    DistributedFileSystem dfs = getDFS();
+    Path p = new Path(argv[1]);
+    final DistributedFileSystem dfs = AdminHelper.getDFS(p.toUri(), getConf());
     try {
-      dfs.disallowSnapshot(new Path(argv[1]));
+      dfs.disallowSnapshot(p);
     } catch (SnapshotException e) {
       throw new RemoteException(e.getClass().getName(), e.getMessage());
     }
@@ -2042,13 +2051,13 @@ public class DFSAdmin extends FsShell {
     } else if ("-metasave".equals(cmd)) {
       exitCode = metaSave(argv, i);
     } else if (ClearQuotaCommand.matches(cmd)) {
-      exitCode = new ClearQuotaCommand(argv, i, getDFS()).runAll();
+      exitCode = new ClearQuotaCommand(argv, i, getConf()).runAll();
     } else if (SetQuotaCommand.matches(cmd)) {
-      exitCode = new SetQuotaCommand(argv, i, getDFS()).runAll();
+      exitCode = new SetQuotaCommand(argv, i, getConf()).runAll();
     } else if (ClearSpaceQuotaCommand.matches(cmd)) {
-      exitCode = new ClearSpaceQuotaCommand(argv, i, getDFS()).runAll();
+      exitCode = new ClearSpaceQuotaCommand(argv, i, getConf()).runAll();
     } else if (SetSpaceQuotaCommand.matches(cmd)) {
-      exitCode = new SetSpaceQuotaCommand(argv, i, getDFS()).runAll();
+      exitCode = new SetSpaceQuotaCommand(argv, i, getConf()).runAll();
     } else if ("-refreshServiceAcl".equals(cmd)) {
       exitCode = refreshServiceAcl();
     } else if ("-refreshUserToGroupsMappings".equals(cmd)) {

File: org/apache/hadoop/hdfs/tools/snapshot/SnapshotDiff.java

@@ -20,10 +20,12 @@ package org.apache.hadoop.hdfs.tools.snapshot;
 import java.io.IOException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.util.Tool;
@@ -41,6 +43,19 @@ import org.apache.hadoop.util.ToolRunner;
  */
 @InterfaceAudience.Private
 public class SnapshotDiff extends Configured implements Tool {
+  /**
+   * Construct a SnapshotDiff object.
+   */
+  public SnapshotDiff() {
+    this(new HdfsConfiguration());
+  }
+
+  /**
+   * Construct a SnapshotDiff object.
+   */
+  public SnapshotDiff(Configuration conf) {
+    super(conf);
+  }
+
   private static String getSnapshotName(String name) {
     if (Path.CUR_DIR.equals(name)) { // current directory
       return "";
@@ -72,8 +87,8 @@ public class SnapshotDiff extends Configured implements Tool {
       System.err.println("Usage: \n" + description);
       return 1;
     }
-    FileSystem fs = FileSystem.get(getConf());
+    FileSystem fs = FileSystem.get(new Path(argv[0]).toUri(), getConf());
     if (! (fs instanceof DistributedFileSystem)) {
       System.err.println(
           "SnapshotDiff can only be used in DistributedFileSystem");

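With the new Configuration constructor, SnapshotDiff can also be driven programmatically, and the snapshot root may now be fully qualified. A minimal sketch, assuming a snapshottable directory on a hypothetical nameservice ns2 with snapshots s1 and s2 (the wrapper class is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.tools.snapshot.SnapshotDiff;
import org.apache.hadoop.util.ToolRunner;

public class SnapshotDiffExample {
  public static void main(String[] args) throws Exception {
    // The fully qualified snapshot root selects the filesystem;
    // fs.defaultFS is no longer consulted for it.
    int rc = ToolRunner.run(new SnapshotDiff(new Configuration()),
        new String[] {"hdfs://ns2/dir", "s1", "s2"});
    System.exit(rc);
  }
}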
File: org/apache/hadoop/hdfs/TestQuota.java

@@ -1246,6 +1246,32 @@ public class TestQuota {
         -1);
   }
 
+  /**
+   * Test to all the commands by passing the fully qualified path.
+   */
+  @Test(timeout = 30000)
+  public void testQuotaCommandsWithURI() throws Exception {
+    DFSAdmin dfsAdmin = new DFSAdmin(conf);
+    final Path dir = new Path("/" + this.getClass().getSimpleName(),
+        GenericTestUtils.getMethodName());
+    assertTrue(dfs.mkdirs(dir));
+
+    /* set space quota */
+    testSetAndClearSpaceQuotaRegularInternal(
+        new String[] {"-setSpaceQuota", "1024",
+            dfs.getUri() + "/" + dir.toString()}, dir, 0, 1024);
+
+    /* clear space quota */
+    testSetAndClearSpaceQuotaRegularInternal(
+        new String[] {"-clrSpaceQuota", dfs.getUri() + "/" + dir.toString()},
+        dir, 0, -1);
+
+    runCommand(dfsAdmin, false, "-setQuota", "1000",
+        dfs.getUri() + "/" + dir.toString());
+
+    runCommand(dfsAdmin, false, "-clrQuota",
+        dfs.getUri() + "/" + dir.toString());
+  }
+
   private void testSetAndClearSpaceQuotaRegularInternal(
       final String[] args,
       final Path dir,

File: org/apache/hadoop/hdfs/TestSnapshotCommands.java

@@ -23,6 +23,8 @@ import java.io.IOException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
+import org.apache.hadoop.hdfs.tools.snapshot.SnapshotDiff;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
@@ -60,6 +62,7 @@ public class TestSnapshotCommands {
   @Before
   public void setUp() throws IOException {
     fs.mkdirs(new Path("/sub1"));
+    fs.mkdirs(new Path("/Fully/QPath"));
     fs.allowSnapshot(new Path("/sub1"));
     fs.mkdirs(new Path("/sub1/sub1sub1"));
     fs.mkdirs(new Path("/sub1/sub1sub2"));
@@ -161,4 +164,35 @@ public class TestSnapshotCommands {
     // now it can be deleted
     DFSTestUtil.FsShellRun("-rmr /sub1", conf);
   }
+
+  @Test (timeout=60000)
+  public void testSnapshotCommandsWithURI()throws Exception {
+    Configuration config = new HdfsConfiguration();
+    //fs.defaultFS should not be used, when path is fully qualified.
+    config.set("fs.defaultFS", "hdfs://127.0.0.1:1024");
+    String path = fs.getUri() + "/Fully/QPath";
+    DFSTestUtil.DFSAdminRun("-allowSnapshot " + path, 0,
+        "Allowing snaphot on " + path + " succeeded", config);
+    DFSTestUtil.FsShellRun("-createSnapshot " + path + " sn1", config);
+    // create file1
+    DFSTestUtil
+        .createFile(fs, new Path(fs.getUri() + "/Fully/QPath/File1"), 1024,
+            (short) 1, 100);
+    // create file2
+    DFSTestUtil
+        .createFile(fs, new Path(fs.getUri() + "/Fully/QPath/File2"), 1024,
+            (short) 1, 100);
+    DFSTestUtil.FsShellRun("-createSnapshot " + path + " sn2", config);
+    // verify the snapshotdiff using api and command line
+    SnapshotDiffReport report =
+        fs.getSnapshotDiffReport(new Path(path), "sn1", "sn2");
+    DFSTestUtil.toolRun(new SnapshotDiff(config), path + " sn1 sn2", 0,
+        report.toString());
+    DFSTestUtil.FsShellRun("-renameSnapshot " + path + " sn2 sn3", config);
+    DFSTestUtil.FsShellRun("-deleteSnapshot " + path + " sn1", config);
+    DFSTestUtil.FsShellRun("-deleteSnapshot " + path + " sn3", config);
+    DFSTestUtil.DFSAdminRun("-disallowSnapshot " + path, 0,
+        "Disallowing snaphot on " + path + " succeeded", config);
+    fs.delete(new Path("/Fully/QPath"), true);
+  }
 }