HDFS-11670: [SPS]: Add CLI command for satisfy storage policy operations. Contributed by Surendra Singh Lilhore.

This commit is contained in:
Uma Maheswara Rao G 2017-06-19 17:16:49 -07:00 committed by Uma Maheswara Rao Gangumalla
parent 46f7523eb8
commit 00cf207192
4 changed files with 157 additions and 2 deletions

View File

@ -23,6 +23,7 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.BlockStoragePolicySpi;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@ -32,6 +33,8 @@ import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import java.io.FileNotFoundException;
import com.google.common.base.Joiner;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
@ -245,6 +248,92 @@ public class StoragePolicyAdmin extends Configured implements Tool {
}
}
/**
 * Command to schedule blocks to move so that they satisfy the storage
 * policy currently set on the given file/directory.
 */
private static class SatisfyStoragePolicyCommand implements
    AdminHelper.Command {
  @Override
  public String getName() {
    return "-satisfyStoragePolicy";
  }

  @Override
  public String getShortUsage() {
    return "[" + getName() + " -path <path>]\n";
  }

  @Override
  public String getLongUsage() {
    TableListing listing = AdminHelper.getOptionDescriptionListing();
    listing.addRow("<path>", "The path of the file/directory to satisfy"
        + " storage policy");
    return getShortUsage() + "\n" +
        "Schedule blocks to move based on file/directory policy.\n\n" +
        listing.toString();
  }

  /**
   * Parses the mandatory -path argument and asks the NameNode to schedule
   * block movement for that path.
   *
   * @param conf configuration used to locate the DistributedFileSystem
   * @param args remaining command-line arguments
   * @return 0 on success, 1 on bad arguments, 2 on RPC/filesystem error
   * @throws IOException if the filesystem cannot be obtained
   */
  @Override
  public int run(Configuration conf, List<String> args) throws IOException {
    final String path = StringUtils.popOptionWithArgument("-path", args);
    if (path == null) {
      // Fixed message: this command satisfies a policy; the old text
      // ("setting the storage policy") was copy-pasted from -setStoragePolicy.
      System.err.println("Please specify the path for satisfying the storage "
          + "policy.\nUsage: " + getLongUsage());
      return 1;
    }

    final DistributedFileSystem dfs = AdminHelper.getDFS(conf);
    try {
      dfs.satisfyStoragePolicy(new Path(path));
      System.out.println("Scheduled blocks to move based on the current"
          + " storage policy on " + path);
    } catch (Exception e) {
      System.err.println(AdminHelper.prettifyException(e));
      return 2;
    }
    return 0;
  }
}
/** Command to check whether the storage policy satisfier is running. */
private static class IsSPSRunningCommand implements AdminHelper.Command {
  @Override
  public String getName() {
    return "-isSPSRunning";
  }

  @Override
  public String getShortUsage() {
    return "[" + getName() + "]\n";
  }

  @Override
  public String getLongUsage() {
    // Fixed user-facing typo: "Statisfier" -> "Satisfier".
    return getShortUsage() + "\n" +
        "Check the status of Storage Policy Satisfier.\n\n";
  }

  /**
   * Queries the NameNode and prints "yes" if the storage policy satisfier
   * is running, "no" otherwise.
   *
   * @param conf configuration used to locate the DistributedFileSystem
   * @param args must be empty; this command takes no arguments
   * @return 0 on success, 1 on unexpected arguments, 2 on RPC error
   * @throws IOException if the filesystem cannot be obtained
   */
  @Override
  public int run(Configuration conf, List<String> args) throws IOException {
    if (!args.isEmpty()) {
      System.err.print("Can't understand arguments: "
          + Joiner.on(" ").join(args) + "\n");
      System.err.println("Usage is " + getLongUsage());
      return 1;
    }

    final DistributedFileSystem dfs = AdminHelper.getDFS(conf);
    try {
      if (dfs.getClient().isStoragePolicySatisfierRunning()) {
        System.out.println("yes");
      } else {
        System.out.println("no");
      }
    } catch (Exception e) {
      System.err.println(AdminHelper.prettifyException(e));
      return 2;
    }
    return 0;
  }
}
/* Command to unset the storage policy set for a file/directory */
private static class UnsetStoragePolicyCommand
implements AdminHelper.Command {
@ -295,6 +384,8 @@ public class StoragePolicyAdmin extends Configured implements Tool {
new ListStoragePoliciesCommand(),
new SetStoragePolicyCommand(),
new GetStoragePolicyCommand(),
new UnsetStoragePolicyCommand()
new UnsetStoragePolicyCommand(),
new SatisfyStoragePolicyCommand(),
new IsSPSRunningCommand()
};
}

View File

@ -171,5 +171,26 @@ Get the storage policy of a file or a directory.
|:---- |:---- |
| `-path <path>` | The path referring to either a directory or a file. |
### Satisfy Storage Policy
Schedule blocks to move based on file/directory policy. This command is applicable only to the given path and its immediate children. Sub-directories won't be considered for satisfying the policy.
* Command:
hdfs storagepolicies -satisfyStoragePolicy -path <path>
* Arguments:
| | |
|:---- |:---- |
| `-path <path>` | The path referring to either a directory or a file. |
### SPS Running Status
Check the running status of the Storage Policy Satisfier in the namenode. If it is running, this command returns 'yes'; otherwise it returns 'no'.
* Command:
hdfs storagepolicies -isSPSRunning

View File

@ -615,6 +615,8 @@ Usage:
[-setStoragePolicy -path <path> -policy <policy>]
[-getStoragePolicy -path <path>]
[-unsetStoragePolicy -path <path>]
[-satisfyStoragePolicy -path <path>]
[-isSPSRunning]
[-help <command-name>]
Lists out all/Gets/sets/unsets storage policies. See the [HDFS Storage Policy Documentation](./ArchivalStorage.html) for more information.

View File

@ -23,6 +23,8 @@ import java.net.URISyntaxException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
@ -46,7 +48,10 @@ public class TestStoragePolicyCommands {
@Before
public void clusterSetUp() throws IOException, URISyntaxException {
  conf = new HdfsConfiguration();
  // Give each DataNode both an ARCHIVE and a DISK volume so that
  // storage-policy-driven block moves have a target storage type.
  final StorageType[][] storagesPerDatanode = new StorageType[][]{
      {StorageType.ARCHIVE, StorageType.DISK}};
  cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(REPL)
      .storageTypes(storagesPerDatanode)
      .build();
  cluster.waitActive();
  fs = cluster.getFileSystem();
}
@ -158,4 +163,40 @@ public class TestStoragePolicyCommands {
DFSTestUtil.toolRun(admin, "-getStoragePolicy -path /fooz", 2,
"File/Directory does not exist: /fooz");
}
@Test
public void testStoragePolicySatisfierCommand() throws Exception {
  final String file = "/testStoragePolicySatisfierCommand";
  DFSTestUtil.createFile(fs, new Path(file), SIZE, REPL, 0);

  final StoragePolicyAdmin admin = new StoragePolicyAdmin(conf);
  // Freshly created file starts without an explicit policy.
  DFSTestUtil.toolRun(admin, "-getStoragePolicy -path " + file, 0,
      "The storage policy of " + file + " is unspecified");

  // Set COLD, then ask the satisfier to move blocks accordingly.
  // (Dropped redundant file.toString() calls: 'file' is already a String.)
  DFSTestUtil.toolRun(admin,
      "-setStoragePolicy -path " + file + " -policy COLD", 0,
      "Set storage policy COLD on " + file);

  DFSTestUtil.toolRun(admin, "-satisfyStoragePolicy -path " + file, 0,
      "Scheduled blocks to move based on the current storage policy on "
          + file);

  // Wait until at least one replica lands on ARCHIVE storage.
  DFSTestUtil.waitExpectedStorageType(file, StorageType.ARCHIVE, 1, 30000,
      fs);
}
// Verifies -isSPSRunning reports "yes" while the satisfier is active,
// "no" after it is deactivated via NameNode reconfiguration, and fails
// (exit code 1) when given unexpected arguments.
@Test
public void testIsSPSRunningCommand() throws Exception {
final String file = "/testIsSPSRunningCommand";
DFSTestUtil.createFile(fs, new Path(file), SIZE, REPL, 0);
final StoragePolicyAdmin admin = new StoragePolicyAdmin(conf);
// Satisfier is active by default in this cluster setup.
DFSTestUtil.toolRun(admin, "-isSPSRunning", 0, "yes");
// Deactivate the satisfier at runtime, then confirm the command sees it.
cluster.getNameNode().reconfigureProperty(
DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, "false");
cluster.waitActive();
DFSTestUtil.toolRun(admin, "-isSPSRunning", 0, "no");
// Test with unnecessary args
DFSTestUtil.toolRun(admin, "-isSPSRunning status", 1,
"Can't understand arguments: ");
}
}