Merge branch 'trunk' into HDFS-6581

commit 9a53c3699b
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -512,6 +512,10 @@ Release 2.6.0 - UNRELEASED
     HDFS-6956. Allow dynamically changing the tracing level in Hadoop servers
     (cmccabe)
 
+    HDFS-7156. Update fsck documentation. (Masahiro Yamaguchi via shv)
+
+    HDFS-7093. Add config key to restrict setStoragePolicy. (Arpit Agarwal)
+
   OPTIMIZATIONS
 
     HDFS-6690. Deduplicate xattr names in memory. (wang)
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java

@@ -575,6 +575,9 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String DFS_DOMAIN_SOCKET_PATH_KEY = "dfs.domain.socket.path";
   public static final String DFS_DOMAIN_SOCKET_PATH_DEFAULT = "";
 
+  public static final String DFS_STORAGE_POLICY_ENABLED_KEY = "dfs.storage.policy.enabled";
+  public static final boolean DFS_STORAGE_POLICY_ENABLED_DEFAULT = true;
+
   // HA related configuration
   public static final String DFS_HA_NAMENODES_KEY_PREFIX = "dfs.ha.namenodes";
   public static final String DFS_HA_NAMENODE_ID_KEY = "dfs.ha.namenode.id";
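The key/default pair follows the usual DFSConfigKeys convention. As a minimal sketch (hypothetical standalone class, assuming hadoop-common and hadoop-hdfs on the classpath), this is how the key resolves against the compiled-in default; the FSNamesystem constructor change below does the same thing:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class StoragePolicyFlagProbe {
      public static void main(String[] args) {
        // HdfsConfiguration pulls in hdfs-default.xml / hdfs-site.xml.
        Configuration conf = new HdfsConfiguration();
        // true unless a site file or code overrides the key.
        boolean enabled = conf.getBoolean(
            DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY,
            DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_DEFAULT);
        System.out.println("dfs.storage.policy.enabled = " + enabled);
      }
    }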
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -86,6 +86,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_DEFAULT;
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.SECURITY_XATTR_UNREADABLE_BY_SUPERUSER;
 import static org.apache.hadoop.util.Time.now;
 
@@ -423,6 +425,9 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
   private final CacheManager cacheManager;
   private final DatanodeStatistics datanodeStatistics;
 
+  // whether setStoragePolicy is allowed.
+  private final boolean isStoragePolicyEnabled;
+
   private String nameserviceId;
 
   private RollingUpgradeInfo rollingUpgradeInfo = null;
@@ -794,6 +799,10 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       this.datanodeStatistics = blockManager.getDatanodeManager().getDatanodeStatistics();
       this.blockIdGenerator = new SequentialBlockIdGenerator(this.blockManager);
 
+      this.isStoragePolicyEnabled =
+          conf.getBoolean(DFS_STORAGE_POLICY_ENABLED_KEY,
+                          DFS_STORAGE_POLICY_ENABLED_DEFAULT);
+
       this.fsOwner = UserGroupInformation.getCurrentUser();
       this.fsOwnerShortUserName = fsOwner.getShortUserName();
       this.supergroup = conf.get(DFS_PERMISSIONS_SUPERUSERGROUP_KEY,
@@ -2305,8 +2314,17 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
   }
 
   private void setStoragePolicyInt(String src, final String policyName)
-      throws IOException {
-    checkSuperuserPrivilege();
+      throws IOException, UnresolvedLinkException, AccessControlException {
+
+    if (!isStoragePolicyEnabled) {
+      throw new IOException("Failed to set storage policy since "
+          + DFS_STORAGE_POLICY_ENABLED_KEY + " is set to false.");
+    }
+    FSPermissionChecker pc = null;
+    if (isPermissionEnabled) {
+      pc = getPermissionChecker();
+    }
+
     checkOperation(OperationCategory.WRITE);
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     waitForLoadingFSImage();
@@ -2315,6 +2333,12 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot set storage policy for " + src);
+
+      if (pc != null) {
+        checkPermission(pc, src, false, null, null, FsAction.WRITE, null,
+            false, true);
+      }
+
       src = FSDirectory.resolvePath(src, pathComponents, dir);
 
       // get the corresponding policy and make sure the policy name is valid
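Read together, the hunks above replace the old superuser-only check with a simple fail-fast gate: FSNamesystem reads dfs.storage.policy.enabled once in its constructor, so flipping the key takes effect only after a NameNode restart, and a disabled gate rejects the request before any namespace state is touched. A minimal self-contained sketch of the pattern (hypothetical class; FSNamesystem's locking, safe-mode checks, and the actual policy assignment are elided):

    import java.io.IOException;

    // Hypothetical distillation of the gate added to setStoragePolicyInt.
    class StoragePolicyGate {
      static final String DFS_STORAGE_POLICY_ENABLED_KEY =
          "dfs.storage.policy.enabled";

      // Mirrors the FSNamesystem field: fixed at construction time.
      private final boolean isStoragePolicyEnabled;

      StoragePolicyGate(boolean enabled) {
        this.isStoragePolicyEnabled = enabled;
      }

      void setStoragePolicy(String src, String policyName) throws IOException {
        // Fail fast before taking locks or resolving the path.
        if (!isStoragePolicyEnabled) {
          throw new IOException("Failed to set storage policy since "
              + DFS_STORAGE_POLICY_ENABLED_KEY + " is set to false.");
        }
        // In FSNamesystem, the WRITE permission check on src and the
        // policy assignment follow here (see the second hunk above).
      }
    }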
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java

@@ -77,7 +77,8 @@ public class DFSck extends Configured implements Tool {
   private static final String USAGE = "Usage: DFSck <path> "
       + "[-list-corruptfileblocks | "
       + "[-move | -delete | -openforwrite] "
-      + "[-files [-blocks [-locations | -racks]]]] [-showprogress]\n"
+      + "[-files [-blocks [-locations | -racks]]]] "
+      + "[-includeSnapshots] [-showprogress]\n"
       + "\t<path>\tstart checking from this path\n"
       + "\t-move\tmove corrupted files to /lost+found\n"
       + "\t-delete\tdelete corrupted files\n"
hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml

@@ -2142,4 +2142,12 @@
 </description>
 </property>
 
+<property>
+  <name>dfs.storage.policy.enabled</name>
+  <value>true</value>
+  <description>
+    Allow users to change the storage policy on files and directories.
+  </description>
+</property>
+
 </configuration>
hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSCommands.apt.vm

@@ -82,32 +82,40 @@ HDFS Commands Guide
 See {{{./HdfsUserGuide.html#fsck}fsck}} for more info.
 
   Usage: <<<hdfs fsck [GENERIC_OPTIONS] <path>
+              [-list-corruptfileblocks |
               [-move | -delete | -openforwrite]
               [-files [-blocks [-locations | -racks]]]
-              [-showprogress]>>>
+              [-includeSnapshots] [-showprogress]>>>
 
-*------------------+---------------------------------------------+
+*------------------------+---------------------------------------------+
 || COMMAND_OPTION || Description
-*------------------+---------------------------------------------+
+*------------------------+---------------------------------------------+
 | <path> | Start checking from this path.
-*------------------+---------------------------------------------+
-| -move | Move corrupted files to /lost+found
-*------------------+---------------------------------------------+
+*------------------------+---------------------------------------------+
+| -move | Move corrupted files to /lost+found.
+*------------------------+---------------------------------------------+
 | -delete | Delete corrupted files.
-*------------------+---------------------------------------------+
-| -openforwrite | Print out files opened for write.
-*------------------+---------------------------------------------+
-| -files | Print out files being checked.
-*------------------+---------------------------------------------+
-| -blocks | Print out block report.
-*------------------+---------------------------------------------+
-| -locations | Print out locations for every block.
-*------------------+---------------------------------------------+
-| -racks | Print out network topology for data-node locations.
-*------------------+---------------------------------------------+
-| -showprogress | Print out dots for progress in output. Default is OFF
-| | (no progress).
-*------------------+---------------------------------------------+
+*------------------------+---------------------------------------------+
+| -files | Print out files being checked.
+*------------------------+---------------------------------------------+
+| -openforwrite | Print out files opened for write.
+*------------------------+---------------------------------------------+
+| | Include snapshot data if the given path
+| -includeSnapshots | indicates a snapshottable directory or
+| | there are snapshottable directories under it.
+*------------------------+---------------------------------------------+
+| -list-corruptfileblocks| Print out list of missing blocks and
+| | files they belong to.
+*------------------------+---------------------------------------------+
+| -blocks | Print out block report.
+*------------------------+---------------------------------------------+
+| -locations | Print out locations for every block.
+*------------------------+---------------------------------------------+
+| -racks | Print out network topology for data-node locations.
+*------------------------+---------------------------------------------+
+| -showprogress | Print out dots for progress in output. Default is OFF
+| | (no progress).
+*------------------------+---------------------------------------------+
 
 * Administration Commands
 
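By way of example (hypothetical path; flags as documented in the table above), a snapshot-aware check that also lists the files being checked:

  hdfs fsck /user/alice -includeSnapshots -files -showprogress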
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java

@@ -21,6 +21,7 @@ import static org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySu
 
 import java.io.File;
 import java.io.FileNotFoundException;
+import java.io.IOException;
 import java.util.*;
 
 import com.google.common.collect.Lists;
@@ -69,6 +70,40 @@ public class TestBlockStoragePolicy {
   static final byte WARM = (byte) 8;
   static final byte HOT = (byte) 12;
 
+
+  @Test (timeout=300000)
+  public void testConfigKeyEnabled() throws IOException {
+    Configuration conf = new HdfsConfiguration();
+    conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY, true);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(1).build();
+    try {
+      cluster.waitActive();
+      cluster.getFileSystem().setStoragePolicy(new Path("/"), "COLD");
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
+  /**
+   * Ensure that setStoragePolicy throws IOException when
+   * dfs.storage.policy.enabled is set to false.
+   * @throws IOException
+   */
+  @Test (timeout=300000, expected=IOException.class)
+  public void testConfigKeyDisabled() throws IOException {
+    Configuration conf = new HdfsConfiguration();
+    conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY, false);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(1).build();
+    try {
+      cluster.waitActive();
+      cluster.getFileSystem().setStoragePolicy(new Path("/"), "COLD");
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
   @Test
   public void testDefaultPolicies() {
     final Map<Byte, String> expectedPolicyStrings = new HashMap<Byte, String>();
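For completeness, a hedged sketch of the client-side call these tests exercise (hypothetical standalone program; assumes fs.defaultFS points at a running HDFS with the feature enabled and that "COLD" is a defined policy, as in the tests above):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class SetColdPolicy {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        // Fails with IOException if the NameNode was started with
        // dfs.storage.policy.enabled=false (see testConfigKeyDisabled).
        DistributedFileSystem dfs =
            (DistributedFileSystem) FileSystem.get(conf);
        dfs.setStoragePolicy(new Path("/"), "COLD");
      }
    }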