HDFS-3172. dfs.upgrade.permission is dead code. Contributed by Eli Collins

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1307888 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Eli Collins 2012-03-31 19:56:23 +00:00
parent ff897e75c9
commit eeb687daa7
6 changed files with 2 additions and 19 deletions

View File

@@ -86,8 +86,6 @@ Deprecated Properties
*---+---+
|dfs.socket.timeout | dfs.client.socket-timeout
*---+---+
|dfs.upgrade.permission | dfs.namenode.upgrade.permission
*---+---+
|dfs.write.packet.size | dfs.client-write-packet-size
*---+---+
|fs.checkpoint.dir | dfs.namenode.checkpoint.dir

View File

@@ -286,6 +286,8 @@ Release 2.0.0 - UNRELEASED
HDFS-3158. LiveNodes member of NameNodeMXBean should list non-DFS used
space and capacity per DN. (atm)
HDFS-3172. dfs.upgrade.permission is dead code. (eli)
OPTIMIZATIONS
HDFS-3024. Improve performance of stringification in addStoredBlock (todd)

View File

@@ -239,11 +239,6 @@ to the web server.</p>
<br />The name of the group of super-users.
</li>
<li><code>dfs.namenode.upgrade.permission = 0777</code>
<br />The choice of initial mode during upgrade. The <em>x</em> permission is <em>never</em> set for files.
For configuration files, the decimal value <em>511<sub>10</sub></em> may be used.
</li>
<li><code>fs.permissions.umask-mode = 022</code>
<br />The <code>umask</code> used when creating files and directories. For configuration files, the decimal
value <em>18<sub>10</sub></em> may be used.

View File

@@ -107,8 +107,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final long DFS_NAMENODE_CHECKPOINT_PERIOD_DEFAULT = 3600;
public static final String DFS_NAMENODE_CHECKPOINT_TXNS_KEY = "dfs.namenode.checkpoint.txns";
public static final long DFS_NAMENODE_CHECKPOINT_TXNS_DEFAULT = 40000;
public static final String DFS_NAMENODE_UPGRADE_PERMISSION_KEY = "dfs.namenode.upgrade.permission";
public static final int DFS_NAMENODE_UPGRADE_PERMISSION_DEFAULT = 00777;
public static final String DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY = "dfs.namenode.heartbeat.recheck-interval";
public static final int DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT = 5*60*1000;
public static final String DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY = "dfs.client.https.keystore.resource";

View File

@@ -86,7 +86,6 @@ public class HdfsConfiguration extends Configuration {
deprecate("fs.checkpoint.dir", DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY);
deprecate("fs.checkpoint.edits.dir", DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY);
deprecate("fs.checkpoint.period", DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY);
deprecate("dfs.upgrade.permission", DFSConfigKeys.DFS_NAMENODE_UPGRADE_PERMISSION_KEY);
deprecate("heartbeat.recheck.interval", DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY);
deprecate("dfs.https.client.keystore.resource", DFSConfigKeys.DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY);
deprecate("dfs.https.need.client.auth", DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY);

View File

@@ -52,8 +52,6 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHO
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_STANDBY_CHECKPOINTS_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_STANDBY_CHECKPOINTS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_UPGRADE_PERMISSION_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_UPGRADE_PERMISSION_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_ENABLED_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERSIST_BLOCKS_KEY;
@@ -118,7 +116,6 @@ import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.ha.ServiceFailedException;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -267,7 +264,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
private boolean persistBlocks;
private UserGroupInformation fsOwner;
private String supergroup;
private PermissionStatus defaultPermission;
private boolean standbyShouldCheckpoint;
// Scan interval is not configurable.
@@ -846,11 +842,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
"must not be specified if HA is not enabled.");
}
short filePermission = (short)conf.getInt(DFS_NAMENODE_UPGRADE_PERMISSION_KEY,
DFS_NAMENODE_UPGRADE_PERMISSION_DEFAULT);
this.defaultPermission = PermissionStatus.createImmutable(
fsOwner.getShortUserName(), supergroup, new FsPermission(filePermission));
this.serverDefaults = new FsServerDefaults(
conf.getLongBytes(DFS_BLOCK_SIZE_KEY, DFS_BLOCK_SIZE_DEFAULT),
conf.getInt(DFS_BYTES_PER_CHECKSUM_KEY, DFS_BYTES_PER_CHECKSUM_DEFAULT),