From 00cf207192ec1ac753405b8dd1ee33f30afe89a8 Mon Sep 17 00:00:00 2001 From: Uma Maheswara Rao G Date: Mon, 19 Jun 2017 17:16:49 -0700 Subject: [PATCH] HDFS-11670: [SPS]: Add CLI command for satisfy storage policy operations. Contributed by Surendra Singh Lilhore. --- .../hadoop/hdfs/tools/StoragePolicyAdmin.java | 93 ++++++++++++++++++- .../src/site/markdown/ArchivalStorage.md | 21 +++++ .../src/site/markdown/HDFSCommands.md | 2 + .../hdfs/tools/TestStoragePolicyCommands.java | 43 ++++++++- 4 files changed, 157 insertions(+), 2 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java index aeb10d91ab9..662957cbd56 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java @@ -23,6 +23,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.BlockStoragePolicySpi; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; @@ -32,6 +33,8 @@ import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; import java.io.FileNotFoundException; +import com.google.common.base.Joiner; + import java.io.IOException; import java.util.Arrays; import java.util.Collection; @@ -245,6 +248,92 @@ public class StoragePolicyAdmin extends Configured implements Tool { } } + /** Command to schedule blocks to move based on specified policy. 
*/ + private static class SatisfyStoragePolicyCommand implements + AdminHelper.Command { + @Override + public String getName() { + return "-satisfyStoragePolicy"; + } + + @Override + public String getShortUsage() { + return "[" + getName() + " -path ]\n"; + } + + @Override + public String getLongUsage() { + TableListing listing = AdminHelper.getOptionDescriptionListing(); + listing.addRow("", "The path of the file/directory to satisfy" + + " storage policy"); + return getShortUsage() + "\n" + + "Schedule blocks to move based on file/directory policy.\n\n" + + listing.toString(); + } + + @Override + public int run(Configuration conf, List args) throws IOException { + final String path = StringUtils.popOptionWithArgument("-path", args); + if (path == null) { + System.err.println("Please specify the path for setting the storage " + + "policy.\nUsage: " + getLongUsage()); + return 1; + } + + final DistributedFileSystem dfs = AdminHelper.getDFS(conf); + try { + dfs.satisfyStoragePolicy(new Path(path)); + System.out.println("Scheduled blocks to move based on the current" + + " storage policy on " + path); + } catch (Exception e) { + System.err.println(AdminHelper.prettifyException(e)); + return 2; + } + return 0; + } + } + + /** Command to check storage policy satisfier status. 
*/ + private static class IsSPSRunningCommand implements AdminHelper.Command { + @Override + public String getName() { + return "-isSPSRunning"; + } + + @Override + public String getShortUsage() { + return "[" + getName() + "]\n"; + } + + @Override + public String getLongUsage() { + return getShortUsage() + "\n" + + "Check the status of Storage Policy Satisfier.\n\n"; + } + + @Override + public int run(Configuration conf, List args) throws IOException { + if (!args.isEmpty()) { + System.err.print("Can't understand arguments: " + + Joiner.on(" ").join(args) + "\n"); + System.err.println("Usage is " + getLongUsage()); + return 1; + } + final DistributedFileSystem dfs = AdminHelper.getDFS(conf); + try { + if(dfs.getClient().isStoragePolicySatisfierRunning()){ + System.out.println("yes"); + }else{ + System.out.println("no"); + } + } catch (Exception e) { + System.err.println(AdminHelper.prettifyException(e)); + return 2; + } + return 0; + } + } + /* Command to unset the storage policy set for a file/directory */ private static class UnsetStoragePolicyCommand implements AdminHelper.Command { @@ -295,6 +384,8 @@ public class StoragePolicyAdmin extends Configured implements Tool { new ListStoragePoliciesCommand(), new SetStoragePolicyCommand(), new GetStoragePolicyCommand(), - new UnsetStoragePolicyCommand() + new UnsetStoragePolicyCommand(), + new SatisfyStoragePolicyCommand(), + new IsSPSRunningCommand() }; } \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md index 3c49cb1666c..a56cf8ba384 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md +++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md @@ -171,5 +171,26 @@ Get the storage policy of a file or a directory. |:---- |:---- | | `-path ` | The path referring to either a directory or a file.
| +### Satisfy Storage Policy + +Schedule blocks to move based on file/directory policy. This command is applicable only to the given path and its immediate children. Sub-directories won't be considered for satisfying the policy. + +* Command: + + hdfs storagepolicies -satisfyStoragePolicy -path + +* Arguments: + +| | | +|:---- |:---- | +| `-path ` | The path referring to either a directory or a file. | + +### SPS Running Status + +Check the running status of Storage Policy Satisfier in namenode. If it is running, return 'yes'. Otherwise return 'no'. + +* Command: + + hdfs storagepolicies -isSPSRunning diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md index 391b71b2082..8234930a2ef 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md +++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md @@ -615,6 +615,8 @@ Usage: [-setStoragePolicy -path -policy ] [-getStoragePolicy -path ] [-unsetStoragePolicy -path ] + [-satisfyStoragePolicy -path ] + [-isSPSRunning] [-help ] Lists out all/Gets/sets/unsets storage policies. See the [HDFS Storage Policy Documentation](./ArchivalStorage.html) for more information.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicyCommands.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicyCommands.java index f31c7399503..59f9083f2ad 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicyCommands.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicyCommands.java @@ -23,6 +23,8 @@ import java.net.URISyntaxException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.StorageType; +import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; @@ -46,7 +48,10 @@ public class TestStoragePolicyCommands { @Before public void clusterSetUp() throws IOException, URISyntaxException { conf = new HdfsConfiguration(); - cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPL).build(); + StorageType[][] newtypes = new StorageType[][] { + {StorageType.ARCHIVE, StorageType.DISK}}; + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPL) + .storageTypes(newtypes).build(); cluster.waitActive(); fs = cluster.getFileSystem(); } @@ -158,4 +163,40 @@ public class TestStoragePolicyCommands { DFSTestUtil.toolRun(admin, "-getStoragePolicy -path /fooz", 2, "File/Directory does not exist: /fooz"); } + + @Test + public void testStoragePolicySatisfierCommand() throws Exception { + final String file = "/testStoragePolicySatisfierCommand"; + DFSTestUtil.createFile(fs, new Path(file), SIZE, REPL, 0); + + final StoragePolicyAdmin admin = new StoragePolicyAdmin(conf); + DFSTestUtil.toolRun(admin, "-getStoragePolicy -path " + file, 0, + "The storage policy of " + file + " is unspecified"); + + DFSTestUtil.toolRun(admin, + "-setStoragePolicy -path " + 
file + " -policy COLD", 0, + "Set storage policy COLD on " + file.toString()); + + DFSTestUtil.toolRun(admin, "-satisfyStoragePolicy -path " + file, 0, + "Scheduled blocks to move based on the current storage policy on " + + file.toString()); + + DFSTestUtil.waitExpectedStorageType(file, StorageType.ARCHIVE, 1, 30000, + fs); + } + + @Test + public void testIsSPSRunningCommand() throws Exception { + final String file = "/testIsSPSRunningCommand"; + DFSTestUtil.createFile(fs, new Path(file), SIZE, REPL, 0); + final StoragePolicyAdmin admin = new StoragePolicyAdmin(conf); + DFSTestUtil.toolRun(admin, "-isSPSRunning", 0, "yes"); + cluster.getNameNode().reconfigureProperty( + DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, "false"); + cluster.waitActive(); + DFSTestUtil.toolRun(admin, "-isSPSRunning", 0, "no"); + // Test with unnecessary args + DFSTestUtil.toolRun(admin, "-isSPSRunning status", 1, + "Can't understand arguments: "); + } }