, RetryPolicy>();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index ef5ad425c90..68f86169418 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -49,9 +49,9 @@ import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
-import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
-import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -108,7 +108,7 @@ public class DistributedFileSystem extends FileSystem {
InetSocketAddress namenode = NameNode.getAddress(uri.getAuthority());
this.dfs = new DFSClient(namenode, conf, statistics);
- this.uri = URI.create(FSConstants.HDFS_URI_SCHEME + "://" + uri.getAuthority());
+ this.uri = URI.create(HdfsConstants.HDFS_URI_SCHEME + "://" + uri.getAuthority());
this.workingDir = getHomeDirectory();
}
@@ -642,9 +642,9 @@ public class DistributedFileSystem extends FileSystem {
* Enter, leave or get safe mode.
*
* @see org.apache.hadoop.hdfs.protocol.ClientProtocol#setSafeMode(
- * FSConstants.SafeModeAction)
+ * HdfsConstants.SafeModeAction)
*/
- public boolean setSafeMode(FSConstants.SafeModeAction action)
+ public boolean setSafeMode(HdfsConstants.SafeModeAction action)
throws IOException {
return dfs.setSafeMode(action);
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/LeaseRenewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/LeaseRenewer.java
index ba26ad2c249..35d45bac322 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/LeaseRenewer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/LeaseRenewer.java
@@ -30,7 +30,7 @@ import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Daemon;
import org.apache.hadoop.util.StringUtils;
@@ -162,7 +162,7 @@ class LeaseRenewer {
/** The time in milliseconds that the map became empty. */
private long emptyTime = Long.MAX_VALUE;
/** A fixed lease renewal time period in milliseconds */
- private long renewal = FSConstants.LEASE_SOFTLIMIT_PERIOD/2;
+ private long renewal = HdfsConstants.LEASE_SOFTLIMIT_PERIOD/2;
/** A daemon for renewing lease */
private Daemon daemon = null;
@@ -352,7 +352,7 @@ class LeaseRenewer {
//update renewal time
if (renewal == dfsc.getHdfsTimeout()/2) {
- long min = FSConstants.LEASE_SOFTLIMIT_PERIOD;
+ long min = HdfsConstants.LEASE_SOFTLIMIT_PERIOD;
for(DFSClient c : dfsclients) {
if (c.getHdfsTimeout() > 0) {
final long timeout = c.getHdfsTimeout();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java
index 51311f52161..0be0bb9fb9f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java
@@ -40,7 +40,7 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatus
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.token.Token;
@@ -394,7 +394,7 @@ public class RemoteBlockReader extends FSInputChecker implements BlockReader {
throws IOException {
// in and out will be closed when sock is closed (by the caller)
final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
- NetUtils.getOutputStream(sock, HdfsConstants.WRITE_TIMEOUT)));
+ NetUtils.getOutputStream(sock, HdfsServerConstants.WRITE_TIMEOUT)));
new Sender(out).readBlock(block, blockToken, clientName, startOffset, len);
//
@@ -486,7 +486,7 @@ public class RemoteBlockReader extends FSInputChecker implements BlockReader {
void sendReadResult(Socket sock, Status statusCode) {
assert !sentStatusCode : "already sent status code to " + sock;
try {
- OutputStream out = NetUtils.getOutputStream(sock, HdfsConstants.WRITE_TIMEOUT);
+ OutputStream out = NetUtils.getOutputStream(sock, HdfsServerConstants.WRITE_TIMEOUT);
ClientReadStatusProto.newBuilder()
.setStatus(statusCode)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
index 165096f24bb..e1006a65d43 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
@@ -22,7 +22,7 @@ import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
/**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index 4153110ca9f..e2ecbaa46dc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -35,7 +35,7 @@ import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
@@ -578,7 +578,7 @@ public interface ClientProtocol extends VersionedProtocol {
* Return live datanodes if type is LIVE; dead datanodes if type is DEAD;
* otherwise all datanodes if type is ALL.
*/
- public DatanodeInfo[] getDatanodeReport(FSConstants.DatanodeReportType type)
+ public DatanodeInfo[] getDatanodeReport(HdfsConstants.DatanodeReportType type)
throws IOException;
/**
@@ -601,7 +601,7 @@ public interface ClientProtocol extends VersionedProtocol {
*
* Safe mode is entered automatically at name node startup.
* Safe mode can also be entered manually using
- * {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_GET)}.
+ * {@link #setSafeMode(HdfsConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_GET)}.
*
* At startup the name node accepts data node reports collecting
* information about block locations.
@@ -617,11 +617,11 @@ public interface ClientProtocol extends VersionedProtocol {
* Then the name node leaves safe mode.
*
* If safe mode is turned on manually using
- * {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_ENTER)}
+ * {@link #setSafeMode(HdfsConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_ENTER)}
* then the name node stays in safe mode until it is manually turned off
- * using {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_LEAVE)}.
+ * using {@link #setSafeMode(HdfsConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_LEAVE)}.
* Current state of the name node can be verified using
- * {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_GET)}
+ * {@link #setSafeMode(HdfsConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_GET)}
* Configuration parameters:
* dfs.safemode.threshold.pct is the threshold parameter.
* dfs.safemode.extension is the safe mode extension parameter.
@@ -644,7 +644,7 @@ public interface ClientProtocol extends VersionedProtocol {
*
* @throws IOException
*/
- public boolean setSafeMode(FSConstants.SafeModeAction action)
+ public boolean setSafeMode(HdfsConstants.SafeModeAction action)
throws IOException;
/**
@@ -685,7 +685,7 @@ public interface ClientProtocol extends VersionedProtocol {
/**
* Report distributed upgrade progress or force current upgrade to proceed.
*
- * @param action {@link FSConstants.UpgradeAction} to perform
+ * @param action {@link HdfsConstants.UpgradeAction} to perform
* @return upgrade status information or null if no upgrades are in progress
* @throws IOException
*/
@@ -777,8 +777,8 @@ public interface ClientProtocol extends VersionedProtocol {
*
*
* The quota can have three types of values : (1) 0 or more will set
- * the quota to that value, (2) {@link FSConstants#QUOTA_DONT_SET} implies
- * the quota will not be changed, and (3) {@link FSConstants#QUOTA_RESET}
+ * the quota to that value, (2) {@link HdfsConstants#QUOTA_DONT_SET} implies
+ * the quota will not be changed, and (3) {@link HdfsConstants#QUOTA_RESET}
* implies the quota will be reset. Any other value is a runtime error.
*
* @throws AccessControlException permission denied
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
index 260cd7600b8..2f224409f94 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
@@ -55,15 +55,15 @@ import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
-import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.Util;
import org.apache.hadoop.hdfs.server.namenode.UnsupportedActionException;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
@@ -306,13 +306,13 @@ public class Balancer {
DataInputStream in = null;
try {
sock.connect(NetUtils.createSocketAddr(
- target.datanode.getName()), HdfsConstants.READ_TIMEOUT);
+ target.datanode.getName()), HdfsServerConstants.READ_TIMEOUT);
sock.setKeepAlive(true);
out = new DataOutputStream( new BufferedOutputStream(
- sock.getOutputStream(), FSConstants.IO_FILE_BUFFER_SIZE));
+ sock.getOutputStream(), HdfsConstants.IO_FILE_BUFFER_SIZE));
sendRequest(out);
in = new DataInputStream( new BufferedInputStream(
- sock.getInputStream(), FSConstants.IO_FILE_BUFFER_SIZE));
+ sock.getInputStream(), HdfsConstants.IO_FILE_BUFFER_SIZE));
receiveResponse(in);
bytesMoved.inc(block.getNumBytes());
LOG.info( "Moving block " + block.getBlock().getBlockId() +
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
index e7c160af251..293d5c59699 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
@@ -18,7 +18,7 @@
package org.apache.hadoop.hdfs.server.blockmanagement;
import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.util.LightWeightGSet;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
index c3f676e3d2d..29565ace47d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
@@ -22,8 +22,8 @@ import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
/**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 4e45449b1fc..682d272922b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -50,8 +50,8 @@ import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode;
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.common.Util;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.INode;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index 1b483a75373..6455b579a6a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -30,7 +30,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.namenode.FSClusterStats;
import org.apache.hadoop.hdfs.server.namenode.FSInodeInfo;
@@ -439,7 +439,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
long remaining = node.getRemaining() -
(node.getBlocksScheduled() * blockSize);
// check the remaining capacity of the target machine
- if (blockSize* FSConstants.MIN_BLOCKS_FOR_WRITE>remaining) {
+ if (blockSize* HdfsConstants.MIN_BLOCKS_FOR_WRITE>remaining) {
if(LOG.isDebugEnabled()) {
threadLocalBuilder.get().append(node.toString()).append(": ")
.append("Node ").append(NodeBase.getPath(node))
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index a069d761f3a..e0c2de955a7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -45,7 +45,7 @@ import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/IncorrectVersionException.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/IncorrectVersionException.java
index 990b089fd7f..5f0b2604b0f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/IncorrectVersionException.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/IncorrectVersionException.java
@@ -21,7 +21,7 @@ import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
/**
* The exception is thrown when external version does not match
@@ -34,7 +34,7 @@ public class IncorrectVersionException extends IOException {
private static final long serialVersionUID = 1L;
public IncorrectVersionException(int versionReported, String ofWhat) {
- this(versionReported, ofWhat, FSConstants.LAYOUT_VERSION);
+ this(versionReported, ofWhat, HdfsConstants.LAYOUT_VERSION);
}
public IncorrectVersionException(int versionReported,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
index 81f182d6cab..6e220d6bd2b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
@@ -166,8 +166,8 @@ public class JspHelper {
try {
s = new Socket();
- s.connect(targetAddr, HdfsConstants.READ_TIMEOUT);
- s.setSoTimeout(HdfsConstants.READ_TIMEOUT);
+ s.connect(targetAddr, HdfsServerConstants.READ_TIMEOUT);
+ s.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
} catch (IOException e) {
deadNodes.add(chosenNode);
s.close();
@@ -188,8 +188,8 @@ public class JspHelper {
JspWriter out, Configuration conf) throws IOException {
if (chunkSizeToView == 0) return;
Socket s = new Socket();
- s.connect(addr, HdfsConstants.READ_TIMEOUT);
- s.setSoTimeout(HdfsConstants.READ_TIMEOUT);
+ s.connect(addr, HdfsServerConstants.READ_TIMEOUT);
+ s.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
long amtToRead = Math.min(chunkSizeToView, blockSize - offsetIntoBlock);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
index 19ad35bb9a7..4c11973d4ae 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
@@ -32,11 +32,11 @@ import java.util.Properties;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.util.VersionInfo;
@@ -434,10 +434,10 @@ public abstract class Storage extends StorageInfo {
this.lock(); // lock storage if it exists
- if (startOpt == HdfsConstants.StartupOption.FORMAT)
+ if (startOpt == HdfsServerConstants.StartupOption.FORMAT)
return StorageState.NOT_FORMATTED;
- if (startOpt != HdfsConstants.StartupOption.IMPORT) {
+ if (startOpt != HdfsServerConstants.StartupOption.IMPORT) {
storage.checkOldLayoutStorage(this);
}
@@ -866,7 +866,7 @@ public abstract class Storage extends StorageInfo {
* @throws IOException
*/
public void writeAll() throws IOException {
- this.layoutVersion = FSConstants.LAYOUT_VERSION;
+ this.layoutVersion = HdfsConstants.LAYOUT_VERSION;
for (Iterator<StorageDirectory> it = storageDirs.iterator(); it.hasNext();) {
writeProperties(it.next());
}
@@ -938,7 +938,7 @@ public abstract class Storage extends StorageInfo {
protected void setLayoutVersion(Properties props, StorageDirectory sd)
throws IncorrectVersionException, InconsistentFSStateException {
int lv = Integer.parseInt(getProperty(props, sd, "layoutVersion"));
- if (lv < FSConstants.LAYOUT_VERSION) { // future version
+ if (lv < HdfsConstants.LAYOUT_VERSION) { // future version
throw new IncorrectVersionException(lv, "storage directory "
+ sd.root.getAbsolutePath());
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/UpgradeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/UpgradeManager.java
index 911dd407d4a..405006bfb18 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/UpgradeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/UpgradeManager.java
@@ -21,7 +21,7 @@ import java.io.IOException;
import java.util.SortedSet;
import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
/**
@@ -69,7 +69,7 @@ public abstract class UpgradeManager {
currentUpgrades = getDistributedUpgrades();
if(currentUpgrades == null) {
// set new upgrade state
- setUpgradeState(false, FSConstants.LAYOUT_VERSION);
+ setUpgradeState(false, HdfsConstants.LAYOUT_VERSION);
return false;
}
Upgradeable curUO = currentUpgrades.first();
@@ -85,7 +85,7 @@ public abstract class UpgradeManager {
return false;
}
- public abstract HdfsConstants.NodeType getType();
+ public abstract HdfsServerConstants.NodeType getType();
public abstract boolean startUpgrade() throws IOException;
public abstract void completeUpgrade() throws IOException;
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/UpgradeObjectCollection.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/UpgradeObjectCollection.java
index c2558bed32c..b59ef965d72 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/UpgradeObjectCollection.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/UpgradeObjectCollection.java
@@ -22,7 +22,7 @@ import java.util.SortedSet;
import java.util.TreeSet;
import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.util.StringUtils;
/**
@@ -40,7 +40,7 @@ public class UpgradeObjectCollection {
static class UOSignature implements Comparable<UOSignature> {
int version;
- HdfsConstants.NodeType type;
+ HdfsServerConstants.NodeType type;
String className;
UOSignature(Upgradeable uo) {
@@ -53,7 +53,7 @@ public class UpgradeObjectCollection {
return version;
}
- HdfsConstants.NodeType getType() {
+ HdfsServerConstants.NodeType getType() {
return type;
}
@@ -111,13 +111,13 @@ public class UpgradeObjectCollection {
}
public static SortedSet<Upgradeable> getDistributedUpgrades(int versionFrom,
- HdfsConstants.NodeType type
+ HdfsServerConstants.NodeType type
) throws IOException {
- assert FSConstants.LAYOUT_VERSION <= versionFrom : "Incorrect version "
- + versionFrom + ". Expected to be <= " + FSConstants.LAYOUT_VERSION;
+ assert HdfsConstants.LAYOUT_VERSION <= versionFrom : "Incorrect version "
+ + versionFrom + ". Expected to be <= " + HdfsConstants.LAYOUT_VERSION;
SortedSet<Upgradeable> upgradeObjects = new TreeSet<Upgradeable>();
for(UOSignature sig : upgradeTable) {
- if(sig.getVersion() < FSConstants.LAYOUT_VERSION)
+ if(sig.getVersion() < HdfsConstants.LAYOUT_VERSION)
continue;
if(sig.getVersion() > versionFrom)
break;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Upgradeable.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Upgradeable.java
index 6081c4cfc6e..016fd948e84 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Upgradeable.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Upgradeable.java
@@ -42,7 +42,7 @@ public interface Upgradeable extends Comparable<Upgradeable> {
* Get the type of the software component, which this object is upgrading.
* @return type
*/
- HdfsConstants.NodeType getType();
+ HdfsServerConstants.NodeType getType();
/**
* Description of the upgrade object for displaying.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
index b547701b857..668b45bff87 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
@@ -30,14 +30,14 @@ import java.util.regex.Pattern;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.HardLink;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.util.Daemon;
@@ -89,7 +89,7 @@ public class BlockPoolSliceStorage extends Storage {
*/
void recoverTransitionRead(DataNode datanode, NamespaceInfo nsInfo,
Collection<File> dataDirs, StartupOption startOpt) throws IOException {
- assert FSConstants.LAYOUT_VERSION == nsInfo.getLayoutVersion()
+ assert HdfsConstants.LAYOUT_VERSION == nsInfo.getLayoutVersion()
: "Block-pool and name-node layout versions must be the same.";
// 1. For each BP data directory analyze the state and
@@ -171,7 +171,7 @@ public class BlockPoolSliceStorage extends Storage {
LOG.info("Formatting block pool " + blockpoolID + " directory "
+ bpSdir.getCurrentDir());
bpSdir.clearDirectory(); // create directory
- this.layoutVersion = FSConstants.LAYOUT_VERSION;
+ this.layoutVersion = HdfsConstants.LAYOUT_VERSION;
this.cTime = nsInfo.getCTime();
this.namespaceID = nsInfo.getNamespaceID();
this.blockpoolID = nsInfo.getBlockPoolID();
@@ -239,7 +239,7 @@ public class BlockPoolSliceStorage extends Storage {
readProperties(sd);
checkVersionUpgradable(this.layoutVersion);
- assert this.layoutVersion >= FSConstants.LAYOUT_VERSION
+ assert this.layoutVersion >= HdfsConstants.LAYOUT_VERSION
: "Future version is not allowed";
if (getNamespaceID() != nsInfo.getNamespaceID()) {
throw new IOException("Incompatible namespaceIDs in "
@@ -253,7 +253,7 @@ public class BlockPoolSliceStorage extends Storage {
+ nsInfo.getBlockPoolID() + "; datanode blockpoolID = "
+ blockpoolID);
}
- if (this.layoutVersion == FSConstants.LAYOUT_VERSION
+ if (this.layoutVersion == HdfsConstants.LAYOUT_VERSION
&& this.cTime == nsInfo.getCTime())
return; // regular startup
@@ -261,7 +261,7 @@ public class BlockPoolSliceStorage extends Storage {
UpgradeManagerDatanode um =
datanode.getUpgradeManagerDatanode(nsInfo.getBlockPoolID());
verifyDistributedUpgradeProgress(um, nsInfo);
- if (this.layoutVersion > FSConstants.LAYOUT_VERSION
+ if (this.layoutVersion > HdfsConstants.LAYOUT_VERSION
|| this.cTime < nsInfo.getCTime()) {
doUpgrade(sd, nsInfo); // upgrade
return;
@@ -327,7 +327,7 @@ public class BlockPoolSliceStorage extends Storage {
// 3. Create new /current with block files hardlinks and VERSION
linkAllBlocks(bpTmpDir, bpCurDir);
- this.layoutVersion = FSConstants.LAYOUT_VERSION;
+ this.layoutVersion = HdfsConstants.LAYOUT_VERSION;
assert this.namespaceID == nsInfo.getNamespaceID()
: "Data-node and name-node layout versions must be the same.";
this.cTime = nsInfo.getCTime();
@@ -389,7 +389,7 @@ public class BlockPoolSliceStorage extends Storage {
// the namespace state or can be further upgraded to it.
// In another word, we can only roll back when ( storedLV >= software LV)
// && ( DN.previousCTime <= NN.ctime)
- if (!(prevInfo.getLayoutVersion() >= FSConstants.LAYOUT_VERSION &&
+ if (!(prevInfo.getLayoutVersion() >= HdfsConstants.LAYOUT_VERSION &&
prevInfo.getCTime() <= nsInfo.getCTime())) { // cannot rollback
throw new InconsistentFSStateException(bpSd.getRoot(),
"Cannot rollback to a newer state.\nDatanode previous state: LV = "
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
index b51241ed3fe..50e118aaa00 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
@@ -36,7 +36,7 @@ import org.apache.commons.logging.Log;
import org.apache.hadoop.fs.FSOutputSummer;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;
@@ -179,7 +179,7 @@ class BlockReceiver implements Closeable {
this.out = streams.dataOut;
this.cout = streams.checksumOut;
this.checksumOut = new DataOutputStream(new BufferedOutputStream(
- streams.checksumOut, FSConstants.SMALL_BUFFER_SIZE));
+ streams.checksumOut, HdfsConstants.SMALL_BUFFER_SIZE));
// write data chunk header if creating a new replica
if (isCreate) {
BlockMetadataHeader.writeHeader(checksumOut, checksum);
@@ -398,7 +398,7 @@ class BlockReceiver implements Closeable {
buf.limit(bufRead);
}
- while (buf.remaining() < FSConstants.BYTES_IN_INTEGER) {
+ while (buf.remaining() < HdfsConstants.BYTES_IN_INTEGER) {
if (buf.position() > 0) {
shiftBufData();
}
@@ -420,7 +420,7 @@ class BlockReceiver implements Closeable {
// Subtract BYTES_IN_INTEGER since that accounts for the payloadLen that
// we read above.
int pktSize = payloadLen + PacketHeader.PKT_HEADER_LEN
- - FSConstants.BYTES_IN_INTEGER;
+ - HdfsConstants.BYTES_IN_INTEGER;
if (buf.remaining() < pktSize) {
//we need to read more data
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
index ac194622a32..b9e3858f3e0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
@@ -32,7 +32,7 @@ import java.util.Arrays;
import org.apache.commons.logging.Log;
import org.apache.hadoop.fs.ChecksumException;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;
import org.apache.hadoop.hdfs.util.DataTransferThrottler;
import org.apache.hadoop.io.IOUtils;
@@ -155,7 +155,7 @@ class BlockSender implements java.io.Closeable {
if ( !corruptChecksumOk || datanode.data.metaFileExists(block) ) {
checksumIn = new DataInputStream(new BufferedInputStream(datanode.data
- .getMetaDataInputStream(block), FSConstants.IO_FILE_BUFFER_SIZE));
+ .getMetaDataInputStream(block), HdfsConstants.IO_FILE_BUFFER_SIZE));
// read and handle the common header here. For now just a version
BlockMetadataHeader header = BlockMetadataHeader.readHeader(checksumIn);
@@ -472,14 +472,14 @@ class BlockSender implements java.io.Closeable {
streamForSendChunks = baseStream;
// assure a mininum buffer size.
- maxChunksPerPacket = (Math.max(FSConstants.IO_FILE_BUFFER_SIZE,
+ maxChunksPerPacket = (Math.max(HdfsConstants.IO_FILE_BUFFER_SIZE,
MIN_BUFFER_WITH_TRANSFERTO)
+ bytesPerChecksum - 1)/bytesPerChecksum;
// allocate smaller buffer while using transferTo().
pktSize += checksumSize * maxChunksPerPacket;
} else {
- maxChunksPerPacket = Math.max(1, (FSConstants.IO_FILE_BUFFER_SIZE
+ maxChunksPerPacket = Math.max(1, (HdfsConstants.IO_FILE_BUFFER_SIZE
+ bytesPerChecksum - 1) / bytesPerChecksum);
pktSize += (bytesPerChecksum + checksumSize) * maxChunksPerPacket;
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index a9c29cc821a..edc57fd797b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -68,7 +68,7 @@ import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsProtoUtil;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
@@ -83,9 +83,9 @@ import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.hdfs.server.common.Storage;
@@ -438,9 +438,9 @@ public class DataNode extends Configured
private void initConfig(Configuration conf) {
this.socketTimeout = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY,
- HdfsConstants.READ_TIMEOUT);
+ HdfsServerConstants.READ_TIMEOUT);
this.socketWriteTimeout = conf.getInt(DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY,
- HdfsConstants.WRITE_TIMEOUT);
+ HdfsServerConstants.WRITE_TIMEOUT);
/* Based on results on different platforms, we might need set the default
* to false on some of them. */
this.transferToAllowed = conf.getBoolean(
@@ -623,7 +623,7 @@ public class DataNode extends Configured
} else {
ss = secureResources.getStreamingSocket();
}
- ss.setReceiveBufferSize(FSConstants.DEFAULT_DATA_SOCKET_SIZE);
+ ss.setReceiveBufferSize(HdfsConstants.DEFAULT_DATA_SOCKET_SIZE);
// adjust machine name with the actual port
int tmpPort = ss.getLocalPort();
selfAddr = new InetSocketAddress(ss.getInetAddress().getHostAddress(),
@@ -768,9 +768,9 @@ public class DataNode extends Configured
} catch (InterruptedException ie) {}
}
- assert FSConstants.LAYOUT_VERSION == nsInfo.getLayoutVersion() :
+ assert HdfsConstants.LAYOUT_VERSION == nsInfo.getLayoutVersion() :
"Data-node and name-node layout versions must be the same."
- + "Expected: "+ FSConstants.LAYOUT_VERSION
+ + "Expected: "+ HdfsConstants.LAYOUT_VERSION
+ " actual "+ nsInfo.getLayoutVersion();
return nsInfo;
}
@@ -814,7 +814,7 @@ public class DataNode extends Configured
if (simulatedFSDataset) {
initFsDataSet(conf, dataDirs);
bpRegistration.setStorageID(getStorageId()); //same as DN
- bpRegistration.storageInfo.layoutVersion = FSConstants.LAYOUT_VERSION;
+ bpRegistration.storageInfo.layoutVersion = HdfsConstants.LAYOUT_VERSION;
bpRegistration.storageInfo.namespaceID = bpNSInfo.namespaceID;
bpRegistration.storageInfo.clusterID = bpNSInfo.clusterID;
} else {
@@ -1162,9 +1162,9 @@ public class DataNode extends Configured
throw new IncorrectVersionException(nsBuildVer, "namenode", stBuildVer);
}
- if (FSConstants.LAYOUT_VERSION != bpNSInfo.getLayoutVersion()) {
+ if (HdfsConstants.LAYOUT_VERSION != bpNSInfo.getLayoutVersion()) {
LOG.warn("Data-node and name-node layout versions must be " +
- "the same. Expected: "+ FSConstants.LAYOUT_VERSION +
+ "the same. Expected: "+ HdfsConstants.LAYOUT_VERSION +
" actual "+ bpNSInfo.getLayoutVersion());
throw new IncorrectVersionException
(bpNSInfo.getLayoutVersion(), "namenode");
@@ -1995,10 +1995,10 @@ public class DataNode extends Configured
sock.setSoTimeout(targets.length * socketTimeout);
long writeTimeout = socketWriteTimeout +
- HdfsConstants.WRITE_TIMEOUT_EXTENSION * (targets.length-1);
+ HdfsServerConstants.WRITE_TIMEOUT_EXTENSION * (targets.length-1);
OutputStream baseStream = NetUtils.getOutputStream(sock, writeTimeout);
out = new DataOutputStream(new BufferedOutputStream(baseStream,
- FSConstants.SMALL_BUFFER_SIZE));
+ HdfsConstants.SMALL_BUFFER_SIZE));
blockSender = new BlockSender(b, 0, b.getNumBytes(),
false, false, false, DataNode.this);
DatanodeInfo srcNode = new DatanodeInfo(bpReg);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
index 784ab949ec5..488c0188c3b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
@@ -43,15 +43,15 @@ import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
import org.apache.hadoop.hdfs.server.common.GenerationStamp;
import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.Daemon;
@@ -137,8 +137,8 @@ public class DataStorage extends Storage {
// DN storage has been initialized, no need to do anything
return;
}
- assert FSConstants.LAYOUT_VERSION == nsInfo.getLayoutVersion() :
- "Data-node version " + FSConstants.LAYOUT_VERSION +
+ assert HdfsConstants.LAYOUT_VERSION == nsInfo.getLayoutVersion() :
+ "Data-node version " + HdfsConstants.LAYOUT_VERSION +
" and name-node layout version " + nsInfo.getLayoutVersion() +
" must be the same.";
@@ -268,7 +268,7 @@ public class DataStorage extends Storage {
void format(StorageDirectory sd, NamespaceInfo nsInfo) throws IOException {
sd.clearDirectory(); // create directory
- this.layoutVersion = FSConstants.LAYOUT_VERSION;
+ this.layoutVersion = HdfsConstants.LAYOUT_VERSION;
this.clusterID = nsInfo.getClusterID();
this.namespaceID = nsInfo.getNamespaceID();
this.cTime = 0;
@@ -374,7 +374,7 @@ public class DataStorage extends Storage {
}
readProperties(sd);
checkVersionUpgradable(this.layoutVersion);
- assert this.layoutVersion >= FSConstants.LAYOUT_VERSION :
+ assert this.layoutVersion >= HdfsConstants.LAYOUT_VERSION :
"Future version is not allowed";
boolean federationSupported =
@@ -397,7 +397,7 @@ public class DataStorage extends Storage {
}
// regular start up
- if (this.layoutVersion == FSConstants.LAYOUT_VERSION
+ if (this.layoutVersion == HdfsConstants.LAYOUT_VERSION
&& this.cTime == nsInfo.getCTime())
return; // regular startup
// verify necessity of a distributed upgrade
@@ -406,7 +406,7 @@ public class DataStorage extends Storage {
verifyDistributedUpgradeProgress(um, nsInfo);
// do upgrade
- if (this.layoutVersion > FSConstants.LAYOUT_VERSION
+ if (this.layoutVersion > HdfsConstants.LAYOUT_VERSION
|| this.cTime < nsInfo.getCTime()) {
doUpgrade(sd, nsInfo); // upgrade
return;
@@ -482,7 +482,7 @@ public class DataStorage extends Storage {
linkAllBlocks(tmpDir, new File(curBpDir, STORAGE_DIR_CURRENT));
// 4. Write version file under /current
- layoutVersion = FSConstants.LAYOUT_VERSION;
+ layoutVersion = HdfsConstants.LAYOUT_VERSION;
clusterID = nsInfo.getClusterID();
writeProperties(sd);
@@ -542,7 +542,7 @@ public class DataStorage extends Storage {
// We allow rollback to a state, which is either consistent with
// the namespace state or can be further upgraded to it.
- if (!(prevInfo.getLayoutVersion() >= FSConstants.LAYOUT_VERSION
+ if (!(prevInfo.getLayoutVersion() >= HdfsConstants.LAYOUT_VERSION
&& prevInfo.getCTime() <= nsInfo.getCTime())) // cannot rollback
throw new InconsistentFSStateException(sd.getRoot(),
"Cannot rollback to a newer state.\nDatanode previous state: LV = "
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
index 374d3095044..8d7d95f8aa3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
@@ -41,7 +41,7 @@ import org.apache.commons.logging.Log;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsProtoUtil;
import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
import org.apache.hadoop.hdfs.protocol.datatransfer.Op;
@@ -53,7 +53,7 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumR
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.MetaDataInputStream;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.io.IOUtils;
@@ -86,7 +86,7 @@ class DataXceiver extends Receiver implements Runnable {
public DataXceiver(Socket s, DataNode datanode,
DataXceiverServer dataXceiverServer) throws IOException {
super(new DataInputStream(new BufferedInputStream(
- NetUtils.getInputStream(s), FSConstants.SMALL_BUFFER_SIZE)));
+ NetUtils.getInputStream(s), HdfsConstants.SMALL_BUFFER_SIZE)));
this.s = s;
this.isLocal = s.getInetAddress().equals(s.getLocalAddress());
@@ -203,7 +203,7 @@ class DataXceiver extends Receiver implements Runnable {
OutputStream baseStream = NetUtils.getOutputStream(s,
datanode.socketWriteTimeout);
DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
- baseStream, FSConstants.SMALL_BUFFER_SIZE));
+ baseStream, HdfsConstants.SMALL_BUFFER_SIZE));
checkAccess(out, true, block, blockToken,
Op.READ_BLOCK, BlockTokenSecretManager.AccessMode.READ);
@@ -329,7 +329,7 @@ class DataXceiver extends Receiver implements Runnable {
final DataOutputStream replyOut = new DataOutputStream(
new BufferedOutputStream(
NetUtils.getOutputStream(s, datanode.socketWriteTimeout),
- FSConstants.SMALL_BUFFER_SIZE));
+ HdfsConstants.SMALL_BUFFER_SIZE));
checkAccess(replyOut, isClient, block, blockToken,
Op.WRITE_BLOCK, BlockTokenSecretManager.AccessMode.WRITE);
@@ -364,16 +364,16 @@ class DataXceiver extends Receiver implements Runnable {
mirrorSock = datanode.newSocket();
try {
int timeoutValue = datanode.socketTimeout
- + (HdfsConstants.READ_TIMEOUT_EXTENSION * targets.length);
+ + (HdfsServerConstants.READ_TIMEOUT_EXTENSION * targets.length);
int writeTimeout = datanode.socketWriteTimeout +
- (HdfsConstants.WRITE_TIMEOUT_EXTENSION * targets.length);
+ (HdfsServerConstants.WRITE_TIMEOUT_EXTENSION * targets.length);
NetUtils.connect(mirrorSock, mirrorTarget, timeoutValue);
mirrorSock.setSoTimeout(timeoutValue);
- mirrorSock.setSendBufferSize(FSConstants.DEFAULT_DATA_SOCKET_SIZE);
+ mirrorSock.setSendBufferSize(HdfsConstants.DEFAULT_DATA_SOCKET_SIZE);
mirrorOut = new DataOutputStream(
new BufferedOutputStream(
NetUtils.getOutputStream(mirrorSock, writeTimeout),
- FSConstants.SMALL_BUFFER_SIZE));
+ HdfsConstants.SMALL_BUFFER_SIZE));
mirrorIn = new DataInputStream(NetUtils.getInputStream(mirrorSock));
new Sender(mirrorOut).writeBlock(originalBlock, blockToken,
@@ -524,7 +524,7 @@ class DataXceiver extends Receiver implements Runnable {
final MetaDataInputStream metadataIn =
datanode.data.getMetaDataInputStream(block);
final DataInputStream checksumIn = new DataInputStream(new BufferedInputStream(
- metadataIn, FSConstants.IO_FILE_BUFFER_SIZE));
+ metadataIn, HdfsConstants.IO_FILE_BUFFER_SIZE));
updateCurrentThreadName("Getting checksum for block " + block);
try {
@@ -603,7 +603,7 @@ class DataXceiver extends Receiver implements Runnable {
OutputStream baseStream = NetUtils.getOutputStream(
s, datanode.socketWriteTimeout);
reply = new DataOutputStream(new BufferedOutputStream(
- baseStream, FSConstants.SMALL_BUFFER_SIZE));
+ baseStream, HdfsConstants.SMALL_BUFFER_SIZE));
// send status first
writeResponse(SUCCESS, reply);
@@ -682,14 +682,14 @@ class DataXceiver extends Receiver implements Runnable {
OutputStream baseStream = NetUtils.getOutputStream(proxySock,
datanode.socketWriteTimeout);
proxyOut = new DataOutputStream(new BufferedOutputStream(baseStream,
- FSConstants.SMALL_BUFFER_SIZE));
+ HdfsConstants.SMALL_BUFFER_SIZE));
/* send request to the proxy */
new Sender(proxyOut).copyBlock(block, blockToken);
// receive the response from the proxy
proxyReply = new DataInputStream(new BufferedInputStream(
- NetUtils.getInputStream(proxySock), FSConstants.IO_FILE_BUFFER_SIZE));
+ NetUtils.getInputStream(proxySock), HdfsConstants.IO_FILE_BUFFER_SIZE));
BlockOpResponseProto copyResponse = BlockOpResponseProto.parseFrom(
HdfsProtoUtil.vintPrefixed(proxyReply));
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java
index 89928a29719..f192747db59 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java
@@ -30,7 +30,7 @@ import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.balancer.Balancer;
import org.apache.hadoop.hdfs.util.DataTransferThrottler;
import org.apache.hadoop.io.IOUtils;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
index 8348c8f9c92..5ecdca7b793 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
@@ -53,10 +53,10 @@ import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
import org.apache.hadoop.hdfs.server.common.GenerationStamp;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
@@ -465,7 +465,7 @@ public class FSDataset implements FSDatasetInterface {
}
checksumIn = new DataInputStream(
new BufferedInputStream(new FileInputStream(metaFile),
- FSConstants.IO_FILE_BUFFER_SIZE));
+ HdfsConstants.IO_FILE_BUFFER_SIZE));
// read and handle the common header here. For now just a version
BlockMetadataHeader header = BlockMetadataHeader.readHeader(checksumIn);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FinalizedReplica.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FinalizedReplica.java
index 76b0bba2090..d0fc32c7693 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FinalizedReplica.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FinalizedReplica.java
@@ -20,7 +20,7 @@ package org.apache.hadoop.hdfs.server.datanode;
import java.io.File;
import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
/**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/Replica.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/Replica.java
index d2ab20e914f..bd0485394a3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/Replica.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/Replica.java
@@ -18,7 +18,7 @@
package org.apache.hadoop.hdfs.server.datanode;
import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
/**
* This represents block replicas which are stored in DataNode.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaBeingWritten.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaBeingWritten.java
index 921437df20c..d2a6f46c2ed 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaBeingWritten.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaBeingWritten.java
@@ -20,7 +20,7 @@ package org.apache.hadoop.hdfs.server.datanode;
import java.io.File;
import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
/** This class represents replicas being written.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
index d246f6f8dcd..447b9337ce1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
@@ -23,7 +23,7 @@ import java.io.IOException;
import java.io.RandomAccessFile;
import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockWriteStreams;
import org.apache.hadoop.io.IOUtils;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java
index c2cb5cfc403..972353962c3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java
@@ -19,7 +19,7 @@ package org.apache.hadoop.hdfs.server.datanode;
import java.io.File;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaWaitingToBeRecovered.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaWaitingToBeRecovered.java
index 86bef1ea38a..91045b7ea55 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaWaitingToBeRecovered.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaWaitingToBeRecovered.java
@@ -20,7 +20,7 @@ package org.apache.hadoop.hdfs.server.datanode;
import java.io.File;
import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
/**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java
index d92b5913da5..c6744f9317c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java
@@ -27,7 +27,7 @@ import org.apache.commons.daemon.DaemonContext;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.http.HttpServer;
import org.mortbay.jetty.nio.SelectChannelConnector;
@@ -71,7 +71,7 @@ public class SecureDataNodeStarter implements Daemon {
// Obtain secure port for data streaming to datanode
InetSocketAddress socAddr = DataNode.getStreamingAddr(conf);
int socketWriteTimeout = conf.getInt(DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY,
- HdfsConstants.WRITE_TIMEOUT);
+ HdfsServerConstants.WRITE_TIMEOUT);
ServerSocket ss = (socketWriteTimeout > 0) ?
ServerSocketChannel.open().socket() : new ServerSocket();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/UpgradeManagerDatanode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/UpgradeManagerDatanode.java
index 5fc2f2b5d64..478fb5660d0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/UpgradeManagerDatanode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/UpgradeManagerDatanode.java
@@ -19,8 +19,8 @@ package org.apache.hadoop.hdfs.server.datanode;
import java.io.IOException;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.UpgradeManager;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
@@ -47,8 +47,8 @@ class UpgradeManagerDatanode extends UpgradeManager {
this.bpid = bpid;
}
- public HdfsConstants.NodeType getType() {
- return HdfsConstants.NodeType.DATA_NODE;
+ public HdfsServerConstants.NodeType getType() {
+ return HdfsServerConstants.NodeType.DATA_NODE;
}
synchronized void initializeUpgrade(NamespaceInfo nsInfo) throws IOException {
@@ -57,7 +57,7 @@ class UpgradeManagerDatanode extends UpgradeManager {
DataNode.LOG.info("\n Distributed upgrade for DataNode "
+ dataNode.getMachineName()
+ " version " + getUpgradeVersion() + " to current LV "
- + FSConstants.LAYOUT_VERSION + " is initialized.");
+ + HdfsConstants.LAYOUT_VERSION + " is initialized.");
UpgradeObjectDatanode curUO = (UpgradeObjectDatanode)currentUpgrades.first();
curUO.setDatanode(dataNode, this.bpid);
upgradeState = curUO.preUpgradeAction(nsInfo);
@@ -102,7 +102,7 @@ class UpgradeManagerDatanode extends UpgradeManager {
if(currentUpgrades == null) {
DataNode.LOG.info("\n Distributed upgrade for DataNode version "
+ getUpgradeVersion() + " to current LV "
- + FSConstants.LAYOUT_VERSION + " cannot be started. "
+ + HdfsConstants.LAYOUT_VERSION + " cannot be started. "
+ "The upgrade object is not defined.");
return false;
}
@@ -115,7 +115,7 @@ class UpgradeManagerDatanode extends UpgradeManager {
DataNode.LOG.info("\n Distributed upgrade for DataNode "
+ dataNode.getMachineName()
+ " version " + getUpgradeVersion() + " to current LV "
- + FSConstants.LAYOUT_VERSION + " is started.");
+ + HdfsConstants.LAYOUT_VERSION + " is started.");
return true;
}
@@ -130,7 +130,7 @@ class UpgradeManagerDatanode extends UpgradeManager {
throw new IOException(
"Distributed upgrade for DataNode " + dataNode.getMachineName()
+ " version " + getUpgradeVersion() + " to current LV "
- + FSConstants.LAYOUT_VERSION + " cannot be started. "
+ + HdfsConstants.LAYOUT_VERSION + " cannot be started. "
+ "The upgrade object is not defined.");
}
@@ -145,7 +145,7 @@ class UpgradeManagerDatanode extends UpgradeManager {
DataNode.LOG.info("\n Distributed upgrade for DataNode "
+ dataNode.getMachineName()
+ " version " + getUpgradeVersion() + " to current LV "
- + FSConstants.LAYOUT_VERSION + " is complete.");
+ + HdfsConstants.LAYOUT_VERSION + " is complete.");
}
synchronized void shutdownUpgrade() {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/UpgradeObjectDatanode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/UpgradeObjectDatanode.java
index 9e51f230f9b..ddb1d6029f8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/UpgradeObjectDatanode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/UpgradeObjectDatanode.java
@@ -18,8 +18,8 @@
package org.apache.hadoop.hdfs.server.datanode;
import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.UpgradeObject;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
@@ -36,8 +36,8 @@ public abstract class UpgradeObjectDatanode extends UpgradeObject implements Run
private DataNode dataNode = null;
private String bpid = null;
- public HdfsConstants.NodeType getType() {
- return HdfsConstants.NodeType.DATA_NODE;
+ public HdfsServerConstants.NodeType getType() {
+ return HdfsServerConstants.NodeType.DATA_NODE;
}
protected DataNode getDatanode() {
@@ -118,7 +118,7 @@ public abstract class UpgradeObjectDatanode extends UpgradeObject implements Run
if(getUpgradeStatus() < 100) {
DataNode.LOG.info("\n Distributed upgrade for DataNode version "
+ getVersion() + " to current LV "
- + FSConstants.LAYOUT_VERSION + " cannot be completed.");
+ + HdfsConstants.LAYOUT_VERSION + " cannot be completed.");
}
// Complete the upgrade by calling the manager method
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java
index d72509cee2e..ce2a79b488d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java
@@ -25,7 +25,7 @@ import java.util.zip.Checksum;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.common.Storage.StorageState;
@@ -106,7 +106,7 @@ public class BackupImage extends FSImage {
StorageDirectory sd = it.next();
StorageState curState;
try {
- curState = sd.analyzeStorage(HdfsConstants.StartupOption.REGULAR, storage);
+ curState = sd.analyzeStorage(HdfsServerConstants.StartupOption.REGULAR, storage);
// sd is locked but not opened
switch(curState) {
case NON_EXISTENT:
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
index ad575b9e27a..1efd4fae3f4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
@@ -26,8 +26,8 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.protocol.JournalProtocol;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
@@ -134,7 +134,7 @@ public class BackupNode extends NameNode implements JournalProtocol {
// Backup node should never do lease recovery,
// therefore lease hard limit should never expire.
namesystem.leaseManager.setLeasePeriod(
- FSConstants.LEASE_SOFTLIMIT_PERIOD, Long.MAX_VALUE);
+ HdfsConstants.LEASE_SOFTLIMIT_PERIOD, Long.MAX_VALUE);
clusterId = nsInfo.getClusterID();
blockPoolId = nsInfo.getBlockPoolID();
@@ -356,9 +356,9 @@ public class BackupNode extends NameNode implements JournalProtocol {
LOG.fatal(errorMsg);
throw new IOException(errorMsg);
}
- assert FSConstants.LAYOUT_VERSION == nsInfo.getLayoutVersion() :
+ assert HdfsConstants.LAYOUT_VERSION == nsInfo.getLayoutVersion() :
"Active and backup node layout versions must be the same. Expected: "
- + FSConstants.LAYOUT_VERSION + " actual "+ nsInfo.getLayoutVersion();
+ + HdfsConstants.LAYOUT_VERSION + " actual "+ nsInfo.getLayoutVersion();
return nsInfo;
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java
index f75410031d3..5e73d93ab3c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java
@@ -30,7 +30,7 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.hdfs.server.protocol.CheckpointCommand;
import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java
index 532b2f2dcf4..4d86fd4b306 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java
@@ -24,7 +24,7 @@ import java.io.IOException;
import java.io.BufferedInputStream;
import java.io.EOFException;
import java.io.DataInputStream;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.EditLogValidation;
import org.apache.hadoop.io.IOUtils;
@@ -143,11 +143,11 @@ class EditLogFileInputStream extends EditLogInputStream {
throw new LogHeaderCorruptException(
"Reached EOF when reading log header");
}
- if (logVersion < FSConstants.LAYOUT_VERSION) { // future version
+ if (logVersion < HdfsConstants.LAYOUT_VERSION) { // future version
throw new LogHeaderCorruptException(
"Unexpected version of the file system log file: "
+ logVersion + ". Current version = "
- + FSConstants.LAYOUT_VERSION + ".");
+ + HdfsConstants.LAYOUT_VERSION + ".");
}
assert logVersion <= Storage.LAST_UPGRADABLE_LAYOUT_VERSION :
"Unsupported version " + logVersion;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java
index f79f44266e0..be75f637a9b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java
@@ -27,7 +27,7 @@ import java.nio.channels.FileChannel;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.io.IOUtils;
import com.google.common.annotations.VisibleForTesting;
@@ -109,7 +109,7 @@ class EditLogFileOutputStream extends EditLogOutputStream {
void create() throws IOException {
fc.truncate(0);
fc.position(0);
- doubleBuf.getCurrentBuf().writeInt(FSConstants.LAYOUT_VERSION);
+ doubleBuf.getCurrentBuf().writeInt(HdfsConstants.LAYOUT_VERSION);
setReadyToFlush();
flush();
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditsDoubleBuffer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditsDoubleBuffer.java
index 0dd90588f44..5312b145aea 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditsDoubleBuffer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditsDoubleBuffer.java
@@ -20,7 +20,7 @@ package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException;
import java.io.OutputStream;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.Writer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.IOUtils;
@@ -129,7 +129,7 @@ class EditsDoubleBuffer {
}
public void writeOp(FSEditLogOp op) throws IOException {
- if (firstTxId == FSConstants.INVALID_TXID) {
+ if (firstTxId == HdfsConstants.INVALID_TXID) {
firstTxId = op.txid;
} else {
assert op.txid > firstTxId;
@@ -141,7 +141,7 @@ class EditsDoubleBuffer {
@Override
public DataOutputBuffer reset() {
super.reset();
- firstTxId = FSConstants.INVALID_TXID;
+ firstTxId = HdfsConstants.INVALID_TXID;
numTxns = 0;
return this;
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 4ad7c7e451b..9cd1799ab33 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -43,7 +43,7 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.FSLimitException;
import org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException;
import org.apache.hadoop.hdfs.protocol.FSLimitException.PathComponentTooLongException;
@@ -55,8 +55,8 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.util.ByteArray;
/*************************************************
@@ -1876,10 +1876,10 @@ public class FSDirectory implements Closeable {
UnresolvedLinkException {
assert hasWriteLock();
// sanity check
- if ((nsQuota < 0 && nsQuota != FSConstants.QUOTA_DONT_SET &&
- nsQuota < FSConstants.QUOTA_RESET) ||
- (dsQuota < 0 && dsQuota != FSConstants.QUOTA_DONT_SET &&
- dsQuota < FSConstants.QUOTA_RESET)) {
+ if ((nsQuota < 0 && nsQuota != HdfsConstants.QUOTA_DONT_SET &&
+ nsQuota < HdfsConstants.QUOTA_RESET) ||
+ (dsQuota < 0 && dsQuota != HdfsConstants.QUOTA_DONT_SET &&
+ dsQuota < HdfsConstants.QUOTA_RESET)) {
throw new IllegalArgumentException("Illegal value for nsQuota or " +
"dsQuota : " + nsQuota + " and " +
dsQuota);
@@ -1893,16 +1893,16 @@ public class FSDirectory implements Closeable {
throw new FileNotFoundException("Directory does not exist: " + srcs);
} else if (!targetNode.isDirectory()) {
throw new FileNotFoundException("Cannot set quota on a file: " + srcs);
- } else if (targetNode.isRoot() && nsQuota == FSConstants.QUOTA_RESET) {
+ } else if (targetNode.isRoot() && nsQuota == HdfsConstants.QUOTA_RESET) {
throw new IllegalArgumentException("Cannot clear namespace quota on root.");
} else { // a directory inode
INodeDirectory dirNode = (INodeDirectory)targetNode;
long oldNsQuota = dirNode.getNsQuota();
long oldDsQuota = dirNode.getDsQuota();
- if (nsQuota == FSConstants.QUOTA_DONT_SET) {
+ if (nsQuota == HdfsConstants.QUOTA_DONT_SET) {
nsQuota = oldNsQuota;
}
- if (dsQuota == FSConstants.QUOTA_DONT_SET) {
+ if (dsQuota == HdfsConstants.QUOTA_DONT_SET) {
dsQuota = oldDsQuota;
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
index 495c42e45a8..82ad9d7ec73 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
@@ -29,9 +29,9 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import static org.apache.hadoop.hdfs.server.common.Util.now;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
@@ -91,7 +91,7 @@ public class FSEditLog {
// the first txid of the log that's currently open for writing.
// If this value is N, we are currently writing to edits_inprogress_N
- private long curSegmentTxId = FSConstants.INVALID_TXID;
+ private long curSegmentTxId = HdfsConstants.INVALID_TXID;
// the time of printing the statistics to the log file.
private long lastPrintTime;
@@ -904,7 +904,7 @@ public class FSEditLog {
// synchronized to prevent findbugs warning about inconsistent
// synchronization. This will be JIT-ed out if asserts are
// off.
- assert curSegmentTxId == FSConstants.INVALID_TXID || // on format this is no-op
+ assert curSegmentTxId == HdfsConstants.INVALID_TXID || // on format this is no-op
minTxIdToKeep <= curSegmentTxId :
"cannot purge logs older than txid " + minTxIdToKeep +
" when current segment starts at " + curSegmentTxId;
@@ -1078,7 +1078,7 @@ public class FSEditLog {
static class JournalAndStream {
private final JournalManager manager;
private EditLogOutputStream stream;
- private long segmentStartsAtTxId = FSConstants.INVALID_TXID;
+ private long segmentStartsAtTxId = HdfsConstants.INVALID_TXID;
private JournalAndStream(JournalManager manager) {
this.manager = manager;
@@ -1110,7 +1110,7 @@ public class FSEditLog {
LOG.error("Unable to abort stream " + stream, ioe);
}
stream = null;
- segmentStartsAtTxId = FSConstants.INVALID_TXID;
+ segmentStartsAtTxId = HdfsConstants.INVALID_TXID;
}
private boolean isActive() {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index db985691f63..51865c82de1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -27,7 +27,7 @@ import java.util.Arrays;
import java.util.EnumMap;
import org.apache.hadoop.fs.permission.PermissionStatus;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
@@ -274,14 +274,14 @@ public class FSEditLogLoader {
SetNSQuotaOp setNSQuotaOp = (SetNSQuotaOp)op;
fsDir.unprotectedSetQuota(setNSQuotaOp.src,
setNSQuotaOp.nsQuota,
- FSConstants.QUOTA_DONT_SET);
+ HdfsConstants.QUOTA_DONT_SET);
break;
}
case OP_CLEAR_NS_QUOTA: {
ClearNSQuotaOp clearNSQuotaOp = (ClearNSQuotaOp)op;
fsDir.unprotectedSetQuota(clearNSQuotaOp.src,
- FSConstants.QUOTA_RESET,
- FSConstants.QUOTA_DONT_SET);
+ HdfsConstants.QUOTA_RESET,
+ HdfsConstants.QUOTA_DONT_SET);
break;
}
@@ -435,7 +435,7 @@ public class FSEditLogLoader {
// The editlog must be emptied by restarting the namenode, before proceeding
// with the upgrade.
if (Storage.is203LayoutVersion(logVersion)
- && logVersion != FSConstants.LAYOUT_VERSION) {
+ && logVersion != HdfsConstants.LAYOUT_VERSION) {
String msg = "During upgrade failed to load the editlog version "
+ logVersion + " from release 0.20.203. Please go back to the old "
+ " release and restart the namenode. This empties the editlog "
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
index 6529c876c0b..25f99b4081c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
@@ -30,7 +30,7 @@ import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
index 8b259018f19..0ad96e0868a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
@@ -35,7 +35,7 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
@@ -44,8 +44,8 @@ import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.common.Storage.StorageState;
import org.apache.hadoop.hdfs.server.common.Util;
import static org.apache.hadoop.hdfs.server.common.Util.now;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.LoadPlan;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
@@ -227,11 +227,11 @@ public class FSImage implements Closeable {
}
if (startOpt != StartupOption.UPGRADE
&& layoutVersion < Storage.LAST_PRE_UPGRADE_LAYOUT_VERSION
- && layoutVersion != FSConstants.LAYOUT_VERSION) {
+ && layoutVersion != HdfsConstants.LAYOUT_VERSION) {
throw new IOException(
"\nFile system image contains an old layout version "
+ storage.getLayoutVersion() + ".\nAn upgrade to version "
- + FSConstants.LAYOUT_VERSION + " is required.\n"
+ + HdfsConstants.LAYOUT_VERSION + " is required.\n"
+ "Please restart NameNode with -upgrade option.");
}
@@ -349,7 +349,7 @@ public class FSImage implements Closeable {
long oldCTime = storage.getCTime();
storage.cTime = now(); // generate new cTime for the state
int oldLV = storage.getLayoutVersion();
- storage.layoutVersion = FSConstants.LAYOUT_VERSION;
+ storage.layoutVersion = HdfsConstants.LAYOUT_VERSION;
    List<StorageDirectory> errorSDs =
      Collections.synchronizedList(new ArrayList<StorageDirectory>());
@@ -423,7 +423,7 @@ public class FSImage implements Closeable {
// Directories that don't have previous state do not rollback
boolean canRollback = false;
FSImage prevState = new FSImage(conf, getFSNamesystem());
- prevState.getStorage().layoutVersion = FSConstants.LAYOUT_VERSION;
+ prevState.getStorage().layoutVersion = HdfsConstants.LAYOUT_VERSION;
    for (Iterator<StorageDirectory> it = storage.dirIterator(); it.hasNext();) {
StorageDirectory sd = it.next();
File prevDir = sd.getPreviousDir();
@@ -438,12 +438,12 @@ public class FSImage implements Closeable {
// read and verify consistency of the prev dir
prevState.getStorage().readPreviousVersionProperties(sd);
- if (prevState.getLayoutVersion() != FSConstants.LAYOUT_VERSION) {
+ if (prevState.getLayoutVersion() != HdfsConstants.LAYOUT_VERSION) {
throw new IOException(
"Cannot rollback to storage version " +
prevState.getLayoutVersion() +
" using this version of the NameNode, which uses storage version " +
- FSConstants.LAYOUT_VERSION + ". " +
+ HdfsConstants.LAYOUT_VERSION + ". " +
"Please use the previous version of HDFS to perform the rollback.");
}
canRollback = true;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
index 453985d917a..85797644197 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
@@ -39,7 +39,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
@@ -555,7 +555,7 @@ class FSImageFormat {
DigestOutputStream fos = new DigestOutputStream(fout, digester);
DataOutputStream out = new DataOutputStream(fos);
try {
- out.writeInt(FSConstants.LAYOUT_VERSION);
+ out.writeInt(HdfsConstants.LAYOUT_VERSION);
out.writeInt(sourceNamesystem.getFSImage()
.getStorage().getNamespaceID()); // TODO bad dependency
out.writeLong(fsDir.rootDir.numItemsInTree());
@@ -568,7 +568,7 @@ class FSImageFormat {
" using " + compression);
- byte[] byteStore = new byte[4*FSConstants.MAX_PATH_LENGTH];
+ byte[] byteStore = new byte[4*HdfsConstants.MAX_PATH_LENGTH];
ByteBuffer strbuf = ByteBuffer.wrap(byteStore);
// save the root
FSImageSerialization.saveINode2Image(fsDir.rootDir, out);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
index 277fac0eb9c..3ed85136369 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
@@ -35,7 +35,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageTransactionalStorageInspector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageTransactionalStorageInspector.java
index 0814a140b5e..c3a76947893 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageTransactionalStorageInspector.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageTransactionalStorageInspector.java
@@ -35,7 +35,7 @@ import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
@@ -224,7 +224,7 @@ class FSImageTransactionalStorageInspector extends FSImageStorageInspector {
logGroup.planRecovery();
- if (expectedTxId != FSConstants.INVALID_TXID && logStartTxId != expectedTxId) {
+ if (expectedTxId != HdfsConstants.INVALID_TXID && logStartTxId != expectedTxId) {
throw new IOException("Expected next log group would start at txid " +
expectedTxId + " but starts at txid " + logStartTxId);
}
@@ -239,7 +239,7 @@ class FSImageTransactionalStorageInspector extends FSImageStorageInspector {
} else {
// the log group was in-progress so we don't know what ID
// the next group should start from.
- expectedTxId = FSConstants.INVALID_TXID;
+ expectedTxId = HdfsConstants.INVALID_TXID;
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index ceb557b4e6e..0f67d7cb999 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -78,10 +78,10 @@ import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
-import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
-import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction;
-import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -99,9 +99,9 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStatistics;
import org.apache.hadoop.hdfs.server.common.GenerationStamp;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
import org.apache.hadoop.hdfs.server.common.Util;
@@ -2756,7 +2756,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
* not tracked because the name node is not intended to leave safe mode
* automatically in the case.
*
- * @see ClientProtocol#setSafeMode(FSConstants.SafeModeAction)
+ * @see ClientProtocol#setSafeMode(HdfsConstants.SafeModeAction)
* @see SafeModeMonitor
*/
class SafeModeInfo {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java
index b7587c0dd1c..d8bd5025970 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java
@@ -38,7 +38,7 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DatanodeJspHelper;
@@ -120,7 +120,7 @@ public class FileChecksumServlets {
new HdfsConfiguration(datanode.getConf());
final int socketTimeout = conf.getInt(
DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY,
- HdfsConstants.READ_TIMEOUT);
+ HdfsServerConstants.READ_TIMEOUT);
final SocketFactory socketFactory = NetUtils.getSocketFactory(conf,
ClientProtocol.class);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java
index 3831b4580f6..8476e27cdcd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java
@@ -29,7 +29,7 @@ import javax.servlet.http.HttpServletResponse;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.security.UserGroupInformation;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
index 7663ecff762..2440c4dd122 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
@@ -24,7 +24,7 @@ import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
/**
* I-node for file being written.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
index 257d37e0cb9..44857739b30 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
@@ -32,8 +32,8 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnresolvedLinkException;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import static org.apache.hadoop.hdfs.server.common.Util.now;
@@ -65,8 +65,8 @@ public class LeaseManager {
private final FSNamesystem fsnamesystem;
- private long softLimit = FSConstants.LEASE_SOFTLIMIT_PERIOD;
- private long hardLimit = FSConstants.LEASE_HARDLIMIT_PERIOD;
+ private long softLimit = HdfsConstants.LEASE_SOFTLIMIT_PERIOD;
+ private long hardLimit = HdfsConstants.LEASE_HARDLIMIT_PERIOD;
//
// Used for handling lock-leases
@@ -379,7 +379,7 @@ public class LeaseManager {
try {
- Thread.sleep(HdfsConstants.NAMENODE_LEASE_RECHECK_INTERVAL);
+ Thread.sleep(HdfsServerConstants.NAMENODE_LEASE_RECHECK_INTERVAL);
} catch(InterruptedException ie) {
if (LOG.isDebugEnabled()) {
LOG.debug(name + " is interrupted", ie);
@@ -409,7 +409,7 @@ public class LeaseManager {
oldest.getPaths().toArray(leasePaths);
for(String p : leasePaths) {
try {
- if(fsnamesystem.internalReleaseLease(oldest, p, HdfsConstants.NAMENODE_LEASE_HOLDER)) {
+ if(fsnamesystem.internalReleaseLease(oldest, p, HdfsServerConstants.NAMENODE_LEASE_HOLDER)) {
LOG.info("Lease recovery for file " + p +
" is complete. File closed.");
removing.add(p);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
index 00461e2fb3f..869922abb2d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
@@ -42,11 +42,11 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.UpgradeManager;
@@ -126,7 +126,7 @@ public class NNStorage extends Storage implements Closeable {
* recent fsimage file. This does not include any transactions
* that have since been written to the edit log.
*/
- protected long mostRecentCheckpointTxId = FSConstants.INVALID_TXID;
+ protected long mostRecentCheckpointTxId = HdfsConstants.INVALID_TXID;
/**
* list of failed (and thus removed) storages
@@ -501,7 +501,7 @@ public class NNStorage extends Storage implements Closeable {
* Format all available storage directories.
*/
public void format(String clusterId) throws IOException {
- this.layoutVersion = FSConstants.LAYOUT_VERSION;
+ this.layoutVersion = HdfsConstants.LAYOUT_VERSION;
this.namespaceID = newNamespaceID();
this.clusterID = clusterId;
this.blockpoolID = newBlockPoolID();
@@ -574,7 +574,7 @@ public class NNStorage extends Storage implements Closeable {
* This should only be used during upgrades.
*/
String getDeprecatedProperty(String prop) {
- assert getLayoutVersion() > FSConstants.LAYOUT_VERSION :
+ assert getLayoutVersion() > HdfsConstants.LAYOUT_VERSION :
"getDeprecatedProperty should only be done when loading " +
"storage from past versions during upgrade.";
return deprecatedProperties.get(prop);
@@ -764,7 +764,7 @@ public class NNStorage extends Storage implements Closeable {
if(upgradeManager.getDistributedUpgrades() != null)
throw new IOException("\n Distributed upgrade for NameNode version "
+ upgradeManager.getUpgradeVersion()
- + " to current LV " + FSConstants.LAYOUT_VERSION
+ + " to current LV " + HdfsConstants.LAYOUT_VERSION
+ " is required.\n Please restart NameNode"
+ " with -upgrade option.");
}
@@ -780,7 +780,7 @@ public class NNStorage extends Storage implements Closeable {
writeAll();
LOG.info("\n Distributed upgrade for NameNode version "
+ upgradeManager.getUpgradeVersion() + " to current LV "
- + FSConstants.LAYOUT_VERSION + " is initialized.");
+ + HdfsConstants.LAYOUT_VERSION + " is initialized.");
}
/**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index 52f576484a7..5c481f76528 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -55,12 +55,12 @@ import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
-import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
-import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction;
-import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction;
-import static org.apache.hadoop.hdfs.protocol.FSConstants.MAX_PATH_LENGTH;
-import static org.apache.hadoop.hdfs.protocol.FSConstants.MAX_PATH_DEPTH;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
+import static org.apache.hadoop.hdfs.protocol.HdfsConstants.MAX_PATH_LENGTH;
+import static org.apache.hadoop.hdfs.protocol.HdfsConstants.MAX_PATH_DEPTH;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -68,8 +68,8 @@ import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
@@ -308,12 +308,12 @@ public class NameNode implements NamenodeProtocols {
"Invalid URI for NameNode address (check %s): %s has no authority.",
FileSystem.FS_DEFAULT_NAME_KEY, filesystemURI.toString()));
}
- if (!FSConstants.HDFS_URI_SCHEME.equalsIgnoreCase(
+ if (!HdfsConstants.HDFS_URI_SCHEME.equalsIgnoreCase(
filesystemURI.getScheme())) {
throw new IllegalArgumentException(String.format(
"Invalid URI for NameNode address (check %s): %s is not of scheme '%s'.",
FileSystem.FS_DEFAULT_NAME_KEY, filesystemURI.toString(),
- FSConstants.HDFS_URI_SCHEME));
+ HdfsConstants.HDFS_URI_SCHEME));
}
return getAddress(authority);
}
@@ -321,7 +321,7 @@ public class NameNode implements NamenodeProtocols {
public static URI getUri(InetSocketAddress namenode) {
int port = namenode.getPort();
String portString = port == DEFAULT_PORT ? "" : (":"+port);
- return URI.create(FSConstants.HDFS_URI_SCHEME + "://"
+ return URI.create(HdfsConstants.HDFS_URI_SCHEME + "://"
+ namenode.getHostName()+portString);
}
@@ -1279,7 +1279,7 @@ public class NameNode implements NamenodeProtocols {
* @throws IOException
*/
void verifyVersion(int version) throws IOException {
- if (version != FSConstants.LAYOUT_VERSION)
+ if (version != HdfsConstants.LAYOUT_VERSION)
throw new IncorrectVersionException(version, "data node");
}
@@ -1573,7 +1573,7 @@ public class NameNode implements NamenodeProtocols {
DFSUtil.setGenericConf(conf, nameserviceId, NAMESERVICE_SPECIFIC_KEYS);
if (conf.get(DFS_NAMENODE_RPC_ADDRESS_KEY) != null) {
- URI defaultUri = URI.create(FSConstants.HDFS_URI_SCHEME + "://"
+ URI defaultUri = URI.create(HdfsConstants.HDFS_URI_SCHEME + "://"
+ conf.get(DFS_NAMENODE_RPC_ADDRESS_KEY));
conf.set(FS_DEFAULT_NAME_KEY, defaultUri.toString());
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index 26376d476f4..e74859af181 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -48,7 +48,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.NodeBase;
@@ -502,8 +502,8 @@ public class NamenodeFsck {
}
try {
s = new Socket();
- s.connect(targetAddr, HdfsConstants.READ_TIMEOUT);
- s.setSoTimeout(HdfsConstants.READ_TIMEOUT);
+ s.connect(targetAddr, HdfsServerConstants.READ_TIMEOUT);
+ s.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
String file = BlockReaderFactory.getFileName(targetAddr, block.getBlockPoolId(),
block.getBlockId());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
index f50e1f8b9f3..de94cbeba04 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
@@ -40,7 +40,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
-import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
index f126f17eeb8..f8339eb69dd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
@@ -45,8 +45,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DFSUtil.ErrorSimulator;
import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
@@ -456,7 +456,7 @@ public class SecondaryNameNode implements Runnable {
*/
private String getInfoServer() throws IOException {
URI fsName = FileSystem.getDefaultUri(conf);
- if (!FSConstants.HDFS_URI_SCHEME.equalsIgnoreCase(fsName.getScheme())) {
+ if (!HdfsConstants.HDFS_URI_SCHEME.equalsIgnoreCase(fsName.getScheme())) {
throw new IOException("This is not a DFS");
}
@@ -793,7 +793,7 @@ public class SecondaryNameNode implements Runnable {
StorageState curState;
try {
- curState = sd.analyzeStorage(HdfsConstants.StartupOption.REGULAR, storage);
+ curState = sd.analyzeStorage(HdfsServerConstants.StartupOption.REGULAR, storage);
// sd is locked but not opened
switch(curState) {
case NON_EXISTENT:
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
index 944e998ecfd..cc8dccaf1ae 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
@@ -27,7 +27,7 @@ import java.lang.Math;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.security.SecurityUtil;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
import org.apache.hadoop.hdfs.util.DataTransferThrottler;
@@ -124,7 +124,7 @@ class TransferFsImage {
static void getFileServer(OutputStream outstream, File localfile,
DataTransferThrottler throttler)
throws IOException {
- byte buf[] = new byte[FSConstants.IO_FILE_BUFFER_SIZE];
+ byte buf[] = new byte[HdfsConstants.IO_FILE_BUFFER_SIZE];
FileInputStream infile = null;
try {
infile = new FileInputStream(localfile);
@@ -139,7 +139,7 @@ class TransferFsImage {
&& localfile.getAbsolutePath().contains("fsimage")) {
// Test sending image shorter than localfile
long len = localfile.length();
- buf = new byte[(int)Math.min(len/2, FSConstants.IO_FILE_BUFFER_SIZE)];
+ buf = new byte[(int)Math.min(len/2, HdfsConstants.IO_FILE_BUFFER_SIZE)];
// This will read at most half of the image
// and the rest of the image will be sent over the wire
infile.read(buf);
@@ -179,7 +179,7 @@ class TransferFsImage {
static MD5Hash getFileClient(String nnHostPort,
      String queryString, List<File> localPaths,
NNStorage dstStorage, boolean getChecksum) throws IOException {
- byte[] buf = new byte[FSConstants.IO_FILE_BUFFER_SIZE];
+ byte[] buf = new byte[HdfsConstants.IO_FILE_BUFFER_SIZE];
String proto = UserGroupInformation.isSecurityEnabled() ? "https://" : "http://";
StringBuilder str = new StringBuilder(proto+nnHostPort+"/getimage?");
str.append(queryString);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/UpgradeManagerNamenode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/UpgradeManagerNamenode.java
index b4e89e3fa15..a46efae8a49 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/UpgradeManagerNamenode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/UpgradeManagerNamenode.java
@@ -19,9 +19,9 @@ package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
-import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
import org.apache.hadoop.hdfs.server.common.UpgradeManager;
import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
@@ -38,8 +38,8 @@ import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
* and updates its status.
*/
class UpgradeManagerNamenode extends UpgradeManager {
- public HdfsConstants.NodeType getType() {
- return HdfsConstants.NodeType.NAME_NODE;
+ public HdfsServerConstants.NodeType getType() {
+ return HdfsServerConstants.NodeType.NAME_NODE;
}
private final FSNamesystem namesystem;
@@ -66,7 +66,7 @@ class UpgradeManagerNamenode extends UpgradeManager {
this.broadcastCommand = currentUpgrades.first().startUpgrade();
NameNode.LOG.info("\n Distributed upgrade for NameNode version "
+ getUpgradeVersion() + " to current LV "
- + FSConstants.LAYOUT_VERSION + " is started.");
+ + HdfsConstants.LAYOUT_VERSION + " is started.");
return true;
}
@@ -75,7 +75,7 @@ class UpgradeManagerNamenode extends UpgradeManager {
if(NameNode.LOG.isDebugEnabled()) {
NameNode.LOG.debug("\n Distributed upgrade for NameNode version "
+ getUpgradeVersion() + " to current LV "
- + FSConstants.LAYOUT_VERSION + " is processing upgrade command: "
+ + HdfsConstants.LAYOUT_VERSION + " is processing upgrade command: "
+ command.getAction() + " status = " + getUpgradeStatus() + "%");
}
if(currentUpgrades == null) {
@@ -96,7 +96,7 @@ class UpgradeManagerNamenode extends UpgradeManager {
curUO.completeUpgrade();
NameNode.LOG.info("\n Distributed upgrade for NameNode version "
+ curUO.getVersion() + " to current LV "
- + FSConstants.LAYOUT_VERSION + " is complete.");
+ + HdfsConstants.LAYOUT_VERSION + " is complete.");
// proceed with the next one
currentUpgrades.remove(curUO);
if(currentUpgrades.isEmpty()) { // all upgrades are done
@@ -110,7 +110,7 @@ class UpgradeManagerNamenode extends UpgradeManager {
public synchronized void completeUpgrade() throws IOException {
// set and write new upgrade state into disk
- setUpgradeState(false, FSConstants.LAYOUT_VERSION);
+ setUpgradeState(false, HdfsConstants.LAYOUT_VERSION);
namesystem.getFSImage().getStorage().writeAll();
currentUpgrades = null;
broadcastCommand = null;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/UpgradeObjectNamenode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/UpgradeObjectNamenode.java
index 0872eb22c01..5a755545447 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/UpgradeObjectNamenode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/UpgradeObjectNamenode.java
@@ -20,7 +20,7 @@ package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.UpgradeObject;
import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
@@ -44,8 +44,8 @@ public abstract class UpgradeObjectNamenode extends UpgradeObject {
public abstract UpgradeCommand processUpgradeCommand(UpgradeCommand command
) throws IOException;
- public HdfsConstants.NodeType getType() {
- return HdfsConstants.NodeType.NAME_NODE;
+ public HdfsServerConstants.NodeType getType() {
+ return HdfsServerConstants.NodeType.NAME_NODE;
}
/**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java
index 2ee18666171..a75701ef86b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java
@@ -18,7 +18,7 @@
package org.apache.hadoop.hdfs.server.namenode.metrics;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.annotation.Metric;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeRegistration.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeRegistration.java
index 80426605a05..aa98ab19b60 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeRegistration.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeRegistration.java
@@ -30,7 +30,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
/**
* Information sent by a subordinate name-node to the active name-node
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java
index e847cfc371a..cc33a04d1e9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java
@@ -24,7 +24,7 @@ import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.namenode.NNStorage;
@@ -53,7 +53,7 @@ public class NamespaceInfo extends StorageInfo {
public NamespaceInfo(int nsID, String clusterID, String bpID,
long cT, int duVersion) {
- super(FSConstants.LAYOUT_VERSION, nsID, clusterID, cT);
+ super(HdfsConstants.LAYOUT_VERSION, nsID, clusterID, cT);
blockPoolID = bpID;
buildVersion = Storage.getBuildVersion();
this.distributedUpgradeVersion = duVersion;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RemoteEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RemoteEditLog.java
index 5b8ac59f37e..c82494d5ba4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RemoteEditLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RemoteEditLog.java
@@ -22,15 +22,15 @@ import java.io.DataOutput;
import java.io.IOException;
import java.util.Comparator;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.io.Writable;
import com.google.common.base.Function;
import com.google.common.collect.ComparisonChain;
public class RemoteEditLog implements Writable, Comparable<RemoteEditLog> {
- private long startTxId = FSConstants.INVALID_TXID;
- private long endTxId = FSConstants.INVALID_TXID;
+ private long startTxId = HdfsConstants.INVALID_TXID;
+ private long endTxId = HdfsConstants.INVALID_TXID;
public RemoteEditLog() {
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ReplicaRecoveryInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ReplicaRecoveryInfo.java
index cdf1d791d2a..bf9b68b1b76 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ReplicaRecoveryInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ReplicaRecoveryInfo.java
@@ -25,7 +25,7 @@ import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableFactories;
import org.apache.hadoop.io.WritableFactory;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index 91cf9eec58a..b4f4e7c4d1f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -40,9 +40,9 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
-import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
-import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.ipc.RPC;
@@ -115,7 +115,7 @@ public class DFSAdmin extends FsShell {
@Override
public void run(Path path) throws IOException {
- dfs.setQuota(path, FSConstants.QUOTA_RESET, FSConstants.QUOTA_DONT_SET);
+ dfs.setQuota(path, HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_DONT_SET);
}
}
@@ -161,7 +161,7 @@ public class DFSAdmin extends FsShell {
@Override
public void run(Path path) throws IOException {
- dfs.setQuota(path, quota, FSConstants.QUOTA_DONT_SET);
+ dfs.setQuota(path, quota, HdfsConstants.QUOTA_DONT_SET);
}
}
@@ -200,7 +200,7 @@ public class DFSAdmin extends FsShell {
@Override
public void run(Path path) throws IOException {
- dfs.setQuota(path, FSConstants.QUOTA_DONT_SET, FSConstants.QUOTA_RESET);
+ dfs.setQuota(path, HdfsConstants.QUOTA_DONT_SET, HdfsConstants.QUOTA_RESET);
}
}
@@ -250,7 +250,7 @@ public class DFSAdmin extends FsShell {
@Override
public void run(Path path) throws IOException {
- dfs.setQuota(path, FSConstants.QUOTA_DONT_SET, quota);
+ dfs.setQuota(path, HdfsConstants.QUOTA_DONT_SET, quota);
}
}
@@ -288,7 +288,7 @@ public class DFSAdmin extends FsShell {
long used = ds.getUsed();
long remaining = ds.getRemaining();
long presentCapacity = used + remaining;
- boolean mode = dfs.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_GET);
+ boolean mode = dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_GET);
UpgradeStatusReport status =
dfs.distributedUpgradeProgress(UpgradeAction.GET_STATUS);
@@ -361,17 +361,17 @@ public class DFSAdmin extends FsShell {
printUsage("-safemode");
return;
}
- FSConstants.SafeModeAction action;
+ HdfsConstants.SafeModeAction action;
Boolean waitExitSafe = false;
if ("leave".equalsIgnoreCase(argv[idx])) {
- action = FSConstants.SafeModeAction.SAFEMODE_LEAVE;
+ action = HdfsConstants.SafeModeAction.SAFEMODE_LEAVE;
} else if ("enter".equalsIgnoreCase(argv[idx])) {
- action = FSConstants.SafeModeAction.SAFEMODE_ENTER;
+ action = HdfsConstants.SafeModeAction.SAFEMODE_ENTER;
} else if ("get".equalsIgnoreCase(argv[idx])) {
- action = FSConstants.SafeModeAction.SAFEMODE_GET;
+ action = HdfsConstants.SafeModeAction.SAFEMODE_GET;
} else if ("wait".equalsIgnoreCase(argv[idx])) {
- action = FSConstants.SafeModeAction.SAFEMODE_GET;
+ action = HdfsConstants.SafeModeAction.SAFEMODE_GET;
waitExitSafe = true;
} else {
printUsage("-safemode");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSymlink.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSymlink.java
index c68cef6a1dd..617b90026c2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSymlink.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSymlink.java
@@ -28,7 +28,7 @@ import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import static org.apache.hadoop.fs.FileContextTestHelper.*;
import org.apache.hadoop.ipc.RemoteException;
import static org.junit.Assert.*;
@@ -212,7 +212,7 @@ public class TestFcHdfsSymlink extends FileContextSymlinkBaseTest {
public void testCreateLinkMaxPathLink() throws IOException {
Path dir = new Path(testBaseDir1());
Path file = new Path(testBaseDir1(), "file");
- final int maxPathLen = FSConstants.MAX_PATH_LENGTH;
+ final int maxPathLen = HdfsConstants.MAX_PATH_LENGTH;
final int dirLen = dir.toString().length() + 1;
int len = maxPathLen - dirLen;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java
index a932f881a25..a437fffadd4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java
@@ -28,7 +28,7 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.security.UserGroupInformation;
import org.junit.After;
import org.junit.AfterClass;
@@ -108,11 +108,11 @@ public class TestHDFSFileContextMainOperations extends
Path dst2 = getTestRootPath(fc, "test/testOldRenameWithQuota/dstdir/dst2");
createFile(src1);
createFile(src2);
- fs.setQuota(src1.getParent(), FSConstants.QUOTA_DONT_SET,
- FSConstants.QUOTA_DONT_SET);
+ fs.setQuota(src1.getParent(), HdfsConstants.QUOTA_DONT_SET,
+ HdfsConstants.QUOTA_DONT_SET);
fc.mkdir(dst1.getParent(), FileContext.DEFAULT_PERM, true);
- fs.setQuota(dst1.getParent(), 2, FSConstants.QUOTA_DONT_SET);
+ fs.setQuota(dst1.getParent(), 2, HdfsConstants.QUOTA_DONT_SET);
/*
* Test1: src does not exceed quota and dst has no quota check and hence
* accommodates rename
@@ -130,7 +130,7 @@ public class TestHDFSFileContextMainOperations extends
* Test3: src exceeds quota and dst has *no* quota to accommodate rename
*/
// src1 has no quota to accommodate new rename node
- fs.setQuota(src1.getParent(), 1, FSConstants.QUOTA_DONT_SET);
+ fs.setQuota(src1.getParent(), 1, HdfsConstants.QUOTA_DONT_SET);
oldRename(dst1, src1, false, true);
}
@@ -143,11 +143,11 @@ public class TestHDFSFileContextMainOperations extends
Path dst2 = getTestRootPath(fc, "test/testRenameWithQuota/dstdir/dst2");
createFile(src1);
createFile(src2);
- fs.setQuota(src1.getParent(), FSConstants.QUOTA_DONT_SET,
- FSConstants.QUOTA_DONT_SET);
+ fs.setQuota(src1.getParent(), HdfsConstants.QUOTA_DONT_SET,
+ HdfsConstants.QUOTA_DONT_SET);
fc.mkdir(dst1.getParent(), FileContext.DEFAULT_PERM, true);
- fs.setQuota(dst1.getParent(), 2, FSConstants.QUOTA_DONT_SET);
+ fs.setQuota(dst1.getParent(), 2, HdfsConstants.QUOTA_DONT_SET);
/*
* Test1: src does not exceed quota and dst has no quota check and hence
* accommodates rename
@@ -170,7 +170,7 @@ public class TestHDFSFileContextMainOperations extends
* rename to a destination that does not exist
*/
// src1 has no quota to accommodate new rename node
- fs.setQuota(src1.getParent(), 1, FSConstants.QUOTA_DONT_SET);
+ fs.setQuota(src1.getParent(), 1, HdfsConstants.QUOTA_DONT_SET);
rename(dst1, src1, false, false, true, Rename.NONE);
/*
@@ -179,9 +179,9 @@ public class TestHDFSFileContextMainOperations extends
* is same as quota needed by src.
*/
// src1 has no quota to accommodate new rename node
- fs.setQuota(src1.getParent(), 100, FSConstants.QUOTA_DONT_SET);
+ fs.setQuota(src1.getParent(), 100, HdfsConstants.QUOTA_DONT_SET);
createFile(src1);
- fs.setQuota(src1.getParent(), 1, FSConstants.QUOTA_DONT_SET);
+ fs.setQuota(src1.getParent(), 1, HdfsConstants.QUOTA_DONT_SET);
rename(dst1, src1, true, true, false, Rename.OVERWRITE);
}
@@ -208,7 +208,7 @@ public class TestHDFSFileContextMainOperations extends
createFile(dst1);
// Set quota so that dst1 parent cannot allow under it new files/directories
- fs.setQuota(dst1.getParent(), 2, FSConstants.QUOTA_DONT_SET);
+ fs.setQuota(dst1.getParent(), 2, HdfsConstants.QUOTA_DONT_SET);
// Free up quota for a subsequent rename
fs.delete(dst1, true);
oldRename(src1, dst1, true, false);
@@ -237,7 +237,7 @@ public class TestHDFSFileContextMainOperations extends
createFile(dst1);
// Set quota so that dst1 parent cannot allow under it new files/directories
- fs.setQuota(dst1.getParent(), 2, FSConstants.QUOTA_DONT_SET);
+ fs.setQuota(dst1.getParent(), 2, HdfsConstants.QUOTA_DONT_SET);
// Free up quota for a subsequent rename
fs.delete(dst1, true);
rename(src1, dst1, true, true, false, Rename.OVERWRITE);
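
A minimal sketch, not part of the patch, of the quota calls these tests exercise after the rename. The directory path and wrapper class are made up; the constants and the setQuota signature come from the hunks above.

import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;

class QuotaSketch {
  static void limitThenClear(DistributedFileSystem dfs) throws IOException {
    Path dir = new Path("/example");  // placeholder path
    // cap the namespace quota at 2 entries, leave the space quota untouched
    dfs.setQuota(dir, 2, HdfsConstants.QUOTA_DONT_SET);
    // later, clear the namespace quota again without touching the space quota
    dfs.setQuota(dir, HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_DONT_SET);
  }
}
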
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java
index 25585cecbbb..c61e65b6c04 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java
@@ -28,7 +28,7 @@ import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FileSystem;
@@ -140,8 +140,8 @@ public class BlockReaderTestUtil {
DatanodeInfo[] nodes = testBlock.getLocations();
targetAddr = NetUtils.createSocketAddr(nodes[0].getName());
sock = new Socket();
- sock.connect(targetAddr, HdfsConstants.READ_TIMEOUT);
- sock.setSoTimeout(HdfsConstants.READ_TIMEOUT);
+ sock.connect(targetAddr, HdfsServerConstants.READ_TIMEOUT);
+ sock.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
return BlockReaderFactory.newBlockReader(
sock, targetAddr.toString()+ ":" + block.getBlockId(), block,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index 3d8b6f29f5a..c7566d2c627 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -54,7 +54,7 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
@@ -63,7 +63,7 @@ import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.TestTransferRbw;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
@@ -670,7 +670,7 @@ public class DFSTestUtil {
final long writeTimeout = dfsClient.getDatanodeWriteTimeout(datanodes.length);
final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
NetUtils.getOutputStream(s, writeTimeout),
- FSConstants.SMALL_BUFFER_SIZE));
+ HdfsConstants.SMALL_BUFFER_SIZE));
final DataInputStream in = new DataInputStream(NetUtils.getInputStream(s));
// send the request
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DataNodeCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DataNodeCluster.java
index 8e19f456414..f82986f331b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DataNodeCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DataNodeCluster.java
@@ -27,7 +27,7 @@ import java.util.Random;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
import org.apache.hadoop.hdfs.server.namenode.CreateEditsLog;
import org.apache.hadoop.net.DNS;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index 89627b71b94..16d2bc21128 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -47,8 +47,8 @@ import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java
index d06d9766c3d..1613e82ca2d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java
@@ -23,7 +23,7 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
@@ -85,7 +85,7 @@ public class TestAbandonBlock {
public void testQuotaUpdatedWhenBlockAbandoned() throws IOException {
DistributedFileSystem dfs = (DistributedFileSystem)fs;
// Setting diskspace quota to 3MB
- dfs.setQuota(new Path("/"), FSConstants.QUOTA_DONT_SET, 3 * 1024 * 1024);
+ dfs.setQuota(new Path("/"), HdfsConstants.QUOTA_DONT_SET, 3 * 1024 * 1024);
// Start writing a file with 2 replicas to ensure each datanode has one.
// Block Size is 1MB.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java
index 1d43ea7e6bc..1407fd46a0c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java
@@ -32,7 +32,7 @@ import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java
index 5a778049d55..7a93226e295 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java
@@ -25,8 +25,8 @@ import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
-import static org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType.DATA_NODE;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
import com.google.common.collect.Lists;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java
index 511e9c1b920..cdf3665af18 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java
@@ -17,8 +17,8 @@
*/
package org.apache.hadoop.hdfs;
-import static org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType.DATA_NODE;
-import static org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType.NAME_NODE;
+import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE;
+import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.NAME_NODE;
import java.io.File;
import java.io.IOException;
@@ -32,8 +32,8 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
import org.apache.hadoop.util.StringUtils;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStartupVersions.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStartupVersions.java
index 0455366e80b..f0c20a1ca95 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStartupVersions.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStartupVersions.java
@@ -17,8 +17,8 @@
*/
package org.apache.hadoop.hdfs;
-import static org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType.DATA_NODE;
-import static org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType.NAME_NODE;
+import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE;
+import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.NAME_NODE;
import java.io.File;
@@ -27,11 +27,11 @@ import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
/**
* This test ensures the appropriate response (successful or failure) from
@@ -198,7 +198,7 @@ public class TestDFSStartupVersions extends TestCase {
return false;
}
// check #3
- int softwareLV = FSConstants.LAYOUT_VERSION; // will also be Namenode's LV
+ int softwareLV = HdfsConstants.LAYOUT_VERSION; // will also be Namenode's LV
int storedLV = datanodeVer.getLayoutVersion();
if (softwareLV == storedLV &&
datanodeVer.getCTime() == namenodeVer.getCTime())
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java
index 020bdcfd0d0..ebfe785fa08 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java
@@ -24,11 +24,11 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.common.Storage;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
-import static org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType.NAME_NODE;
-import static org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType.DATA_NODE;
+import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.NAME_NODE;
+import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE;
/**
* This test ensures the appropriate response (successful or failure) from
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java
index 58d3f3386fe..251f23dee70 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java
@@ -17,8 +17,8 @@
*/
package org.apache.hadoop.hdfs;
-import static org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType.DATA_NODE;
-import static org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType.NAME_NODE;
+import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE;
+import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.NAME_NODE;
import java.io.File;
import java.io.IOException;
@@ -29,7 +29,7 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.namenode.TestParallelImageWrite;
import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getInProgressEditsFileName;
import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getImageFileName;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
index ec33f769ce7..6ad08cd2aa2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
@@ -30,8 +30,8 @@ import org.apache.hadoop.fs.FSInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.StringUtils;
@@ -290,7 +290,7 @@ public class TestDFSUpgradeFromImage extends TestCase {
DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
DFSClient dfsClient = dfs.dfs;
//Safemode will be off only after upgrade is complete. Wait for it.
- while ( dfsClient.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_GET) ) {
+ while ( dfsClient.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_GET) ) {
LOG.info("Waiting for SafeMode to be OFF.");
try {
Thread.sleep(1000);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
index 24c3bc48b19..72faa319b7a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
@@ -41,7 +41,7 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
import org.apache.hadoop.hdfs.protocol.datatransfer.Op;
@@ -52,7 +52,7 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseP
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto.Builder;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.NetUtils;
@@ -97,8 +97,8 @@ public class TestDataTransferProtocol extends TestCase {
StringUtils.byteToHexString(sendBuf.toByteArray()));
sock = new Socket();
- sock.connect(dnAddr, HdfsConstants.READ_TIMEOUT);
- sock.setSoTimeout(HdfsConstants.READ_TIMEOUT);
+ sock.connect(dnAddr, HdfsServerConstants.READ_TIMEOUT);
+ sock.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
OutputStream out = sock.getOutputStream();
// Should we excuse
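
A minimal sketch, not part of the patch: the read-timeout constant the test uses now lives in HdfsServerConstants. The wrapper class below is hypothetical; the connect/setSoTimeout pattern mirrors the hunk above.

import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.Socket;

import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;

class DataNodeSocketSketch {
  static Socket open(InetSocketAddress dnAddr) throws IOException {
    Socket sock = new Socket();
    sock.connect(dnAddr, HdfsServerConstants.READ_TIMEOUT); // was HdfsConstants.READ_TIMEOUT
    sock.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
    return sock;
  }
}
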
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java
index a0da70c4ef5..b6ecb0e4d19 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java
@@ -32,7 +32,7 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java
index 58bd57a68a6..62565170bb0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java
@@ -28,7 +28,7 @@ import java.net.URISyntaxException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.junit.AfterClass;
import org.junit.BeforeClass;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java
index 5a542a263e9..2cde7ed4760 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java
@@ -19,7 +19,7 @@ package org.apache.hadoop.hdfs;
import java.net.InetSocketAddress;
-import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.DFSClient;
import junit.framework.TestCase;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java
index 61d1adc6f1f..06194b8e43f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java
@@ -24,7 +24,7 @@ import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import static org.apache.hadoop.test.MetricsAsserts.*;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
index 2a8383d31b9..15d7378b9f2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
@@ -35,7 +35,7 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
-import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java
index 7a013d485dc..4425fcfbf04 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java
@@ -38,7 +38,7 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
@@ -111,7 +111,7 @@ public class TestFileAppend4 {
// set the soft limit to be 1 second so that the
// namenode triggers lease recovery upon append request
- cluster.setLeasePeriod(1000, FSConstants.LEASE_HARDLIMIT_PERIOD);
+ cluster.setLeasePeriod(1000, HdfsConstants.LEASE_HARDLIMIT_PERIOD);
// Trying recovery
int tries = 60;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
index d2dfd7fc65e..1d7ff4e6e39 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
@@ -40,7 +40,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
@@ -301,7 +301,7 @@ public class TestFileCreation extends junit.framework.TestCase {
// wait for the datanode to be declared dead
while (true) {
DatanodeInfo[] info = client.datanodeReport(
- FSConstants.DatanodeReportType.LIVE);
+ HdfsConstants.DatanodeReportType.LIVE);
if (info.length == 0) {
break;
}
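
A minimal sketch, not part of the patch, of the datanode-report call the loop above polls; only the enum's owning class changes with the rename. The helper class and method names are hypothetical.

import java.io.IOException;

import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;

class LiveNodesSketch {
  // Count datanodes the NameNode currently reports as live.
  static int countLive(DFSClient client) throws IOException {
    DatanodeInfo[] live =
        client.datanodeReport(HdfsConstants.DatanodeReportType.LIVE);
    return live.length;
  }
}
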
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java
index d4d66b4c7f9..56cb4506c81 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java
@@ -36,7 +36,7 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
@@ -91,7 +91,7 @@ public class TestFileStatus {
int fileSize, int blockSize) throws IOException {
// Create and write a file that contains three blocks of data
FSDataOutputStream stm = fileSys.create(name, true,
- FSConstants.IO_FILE_BUFFER_SIZE, (short)repl, (long)blockSize);
+ HdfsConstants.IO_FILE_BUFFER_SIZE, (short)repl, (long)blockSize);
byte[] buffer = new byte[fileSize];
Random rand = new Random(seed);
rand.nextBytes(buffer);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
index 995006279c8..736b765933c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
@@ -29,7 +29,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.BackupNode;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
index 3e084e15476..7bcc7d796eb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
@@ -24,7 +24,7 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.TestInterDatanodeProtocol;
@@ -129,14 +129,14 @@ public class TestLeaseRecovery extends junit.framework.TestCase {
filestr = "/foo.safemode";
filepath = new Path(filestr);
dfs.create(filepath, (short)1);
- cluster.getNameNode().setSafeMode(FSConstants.SafeModeAction.SAFEMODE_ENTER);
+ cluster.getNameNode().setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
assertTrue(dfs.dfs.exists(filestr));
DFSTestUtil.waitReplication(dfs, filepath, (short)1);
waitLeaseRecovery(cluster);
// verify that we still cannot recover the lease
LeaseManager lm = NameNodeAdapter.getLeaseManager(cluster.getNamesystem());
assertTrue("Found " + lm.countLease() + " lease, expected 1", lm.countLease() == 1);
- cluster.getNameNode().setSafeMode(FSConstants.SafeModeAction.SAFEMODE_LEAVE);
+ cluster.getNameNode().setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
}
finally {
if (cluster != null) {cluster.shutdown();}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
index 8e720fd1723..21d7f2dd816 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
@@ -36,9 +36,9 @@ import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeAdapter;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
@@ -318,8 +318,8 @@ public class TestLeaseRecovery2 {
DFSTestUtil.updateConfWithFakeGroupMapping(conf, u2g_map);
// Reset default lease periods
- cluster.setLeasePeriod(FSConstants.LEASE_SOFTLIMIT_PERIOD,
- FSConstants.LEASE_HARDLIMIT_PERIOD);
+ cluster.setLeasePeriod(HdfsConstants.LEASE_SOFTLIMIT_PERIOD,
+ HdfsConstants.LEASE_HARDLIMIT_PERIOD);
//create a file
// create a random file name
String filestr = "/foo" + AppendTestUtil.nextInt();
@@ -432,7 +432,7 @@ public class TestLeaseRecovery2 {
cluster.getNameNode(), fileStr);
assertFalse("original lease holder should not be the NN",
- originalLeaseHolder.equals(HdfsConstants.NAMENODE_LEASE_HOLDER));
+ originalLeaseHolder.equals(HdfsServerConstants.NAMENODE_LEASE_HOLDER));
// hflush file
AppendTestUtil.LOG.info("hflush");
@@ -459,15 +459,15 @@ public class TestLeaseRecovery2 {
cluster.setLeasePeriod(LONG_LEASE_PERIOD, SHORT_LEASE_PERIOD);
// Make sure lease recovery begins.
- Thread.sleep(HdfsConstants.NAMENODE_LEASE_RECHECK_INTERVAL * 2);
+ Thread.sleep(HdfsServerConstants.NAMENODE_LEASE_RECHECK_INTERVAL * 2);
- assertEquals("lease holder should now be the NN", HdfsConstants.NAMENODE_LEASE_HOLDER,
+ assertEquals("lease holder should now be the NN", HdfsServerConstants.NAMENODE_LEASE_HOLDER,
NameNodeAdapter.getLeaseHolderForPath(cluster.getNameNode(), fileStr));
cluster.restartNameNode(false);
assertEquals("lease holder should still be the NN after restart",
- HdfsConstants.NAMENODE_LEASE_HOLDER,
+ HdfsServerConstants.NAMENODE_LEASE_HOLDER,
NameNodeAdapter.getLeaseHolderForPath(cluster.getNameNode(), fileStr));
// Let the DNs send heartbeats again.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestModTime.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestModTime.java
index 9f915b4506b..abe9036bb1e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestModTime.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestModTime.java
@@ -23,7 +23,7 @@ import java.util.Random;
import java.net.*;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPipelines.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPipelines.java
index 3a3dde83047..11b7f49d9d7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPipelines.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPipelines.java
@@ -24,7 +24,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeAdapter;
import org.apache.hadoop.hdfs.server.datanode.Replica;
@@ -111,7 +111,7 @@ public class TestPipelines {
assertTrue("Replica on DN " + dn + " shouldn't be null", r != null);
assertEquals("Should be RBW replica on " + dn
+ " after sequence of calls append()/write()/hflush()",
- HdfsConstants.ReplicaState.RBW, r.getState());
+ HdfsServerConstants.ReplicaState.RBW, r.getState());
}
ofs.close();
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
index fbc84f98647..a0727a6c90b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
@@ -25,7 +25,7 @@ import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.apache.hadoop.io.IOUtils;
@@ -370,14 +370,14 @@ public class TestQuota {
// 2: set the quota of /nqdir0/qdir1 to be 6
final Path quotaDir1 = new Path("/nqdir0/qdir1");
- dfs.setQuota(quotaDir1, 6, FSConstants.QUOTA_DONT_SET);
+ dfs.setQuota(quotaDir1, 6, HdfsConstants.QUOTA_DONT_SET);
ContentSummary c = dfs.getContentSummary(quotaDir1);
assertEquals(c.getDirectoryCount(), 3);
assertEquals(c.getQuota(), 6);
// 3: set the quota of /nqdir0/qdir1/qdir20 to be 7
final Path quotaDir2 = new Path("/nqdir0/qdir1/qdir20");
- dfs.setQuota(quotaDir2, 7, FSConstants.QUOTA_DONT_SET);
+ dfs.setQuota(quotaDir2, 7, HdfsConstants.QUOTA_DONT_SET);
c = dfs.getContentSummary(quotaDir2);
assertEquals(c.getDirectoryCount(), 2);
assertEquals(c.getQuota(), 7);
@@ -385,7 +385,7 @@ public class TestQuota {
// 4: Create directory /nqdir0/qdir1/qdir21 and set its quota to 2
final Path quotaDir3 = new Path("/nqdir0/qdir1/qdir21");
assertTrue(dfs.mkdirs(quotaDir3));
- dfs.setQuota(quotaDir3, 2, FSConstants.QUOTA_DONT_SET);
+ dfs.setQuota(quotaDir3, 2, HdfsConstants.QUOTA_DONT_SET);
c = dfs.getContentSummary(quotaDir3);
assertEquals(c.getDirectoryCount(), 1);
assertEquals(c.getQuota(), 2);
@@ -547,13 +547,13 @@ public class TestQuota {
// set the quota of /nqdir0/qdir1 to 4 * fileSpace
final Path quotaDir1 = new Path("/nqdir0/qdir1");
- dfs.setQuota(quotaDir1, FSConstants.QUOTA_DONT_SET, 4 * fileSpace);
+ dfs.setQuota(quotaDir1, HdfsConstants.QUOTA_DONT_SET, 4 * fileSpace);
ContentSummary c = dfs.getContentSummary(quotaDir1);
assertEquals(c.getSpaceQuota(), 4 * fileSpace);
// set the quota of /nqdir0/qdir1/qdir20 to 6 * fileSpace
final Path quotaDir20 = new Path("/nqdir0/qdir1/qdir20");
- dfs.setQuota(quotaDir20, FSConstants.QUOTA_DONT_SET, 6 * fileSpace);
+ dfs.setQuota(quotaDir20, HdfsConstants.QUOTA_DONT_SET, 6 * fileSpace);
c = dfs.getContentSummary(quotaDir20);
assertEquals(c.getSpaceQuota(), 6 * fileSpace);
@@ -561,7 +561,7 @@ public class TestQuota {
// Create /nqdir0/qdir1/qdir21 and set its space quota to 2 * fileSpace
final Path quotaDir21 = new Path("/nqdir0/qdir1/qdir21");
assertTrue(dfs.mkdirs(quotaDir21));
- dfs.setQuota(quotaDir21, FSConstants.QUOTA_DONT_SET, 2 * fileSpace);
+ dfs.setQuota(quotaDir21, HdfsConstants.QUOTA_DONT_SET, 2 * fileSpace);
c = dfs.getContentSummary(quotaDir21);
assertEquals(c.getSpaceQuota(), 2 * fileSpace);
@@ -661,7 +661,7 @@ public class TestQuota {
assertEquals(c.getSpaceConsumed(), 4 * fileSpace);
// now increase the quota for quotaDir1
- dfs.setQuota(quotaDir1, FSConstants.QUOTA_DONT_SET, 5 * fileSpace);
+ dfs.setQuota(quotaDir1, HdfsConstants.QUOTA_DONT_SET, 5 * fileSpace);
// Now, appending more than 1 fileLen should result in an error
out = dfs.append(file2);
hasException = false;
@@ -704,8 +704,8 @@ public class TestQuota {
assertEquals(c.getSpaceConsumed(), 5 * fileSpace - file2Len);
// now increase the quota for quotaDir1 and quotaDir20
- dfs.setQuota(quotaDir1, FSConstants.QUOTA_DONT_SET, 10 * fileSpace);
- dfs.setQuota(quotaDir20, FSConstants.QUOTA_DONT_SET, 10 * fileSpace);
+ dfs.setQuota(quotaDir1, HdfsConstants.QUOTA_DONT_SET, 10 * fileSpace);
+ dfs.setQuota(quotaDir20, HdfsConstants.QUOTA_DONT_SET, 10 * fileSpace);
// then increasing replication should be ok.
dfs.setReplication(file2, (short)(replication+1));
@@ -735,7 +735,7 @@ public class TestQuota {
int sizeFactorC = 4;
// Set space quota for subdirectory C
- dfs.setQuota(quotaDir2053_C, FSConstants.QUOTA_DONT_SET,
+ dfs.setQuota(quotaDir2053_C, HdfsConstants.QUOTA_DONT_SET,
(sizeFactorC + 1) * fileSpace);
c = dfs.getContentSummary(quotaDir2053_C);
assertEquals(c.getSpaceQuota(), (sizeFactorC + 1) * fileSpace);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
index af2339dac37..eef83e4174d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
@@ -40,7 +40,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 
/**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
index 8c98a205283..73adf8efcfe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
@@ -24,7 +24,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import static org.junit.Assert.*;
import org.junit.Before;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetTimes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetTimes.java
index e08d9f76ea9..b230391dd02 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetTimes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetTimes.java
@@ -23,7 +23,7 @@ import java.util.Random;
import java.net.*;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java
index 5771b22abf0..2225449f196 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java
@@ -35,15 +35,15 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
-import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 
-import static org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType.NAME_NODE;
-import static org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType.DATA_NODE;
+import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.NAME_NODE;
+import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE;
 
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
@@ -504,7 +504,7 @@ public class UpgradeUtilities {
* of the Namenode, whether it is running or not.
*/
public static int getCurrentLayoutVersion() {
- return FSConstants.LAYOUT_VERSION;
+ return HdfsConstants.LAYOUT_VERSION;
}
 
/**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
index 5b0ac315599..34cd784bd04 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
@@ -41,7 +41,7 @@ import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java
index 9ee296c17cc..8afedc8bb16 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java
@@ -39,7 +39,7 @@ import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index e8193b56d5d..565a765b1f1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -28,7 +28,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
@@ -89,8 +89,8 @@ public class TestBlockManager {
for (DatanodeDescriptor dn : nodesToAdd) {
cluster.add(dn);
dn.updateHeartbeat(
- 2*FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
- 2*FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0, 0);
+ 2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+ 2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0, 0);
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
index 25a486b1665..1eef522f83f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
@@ -49,7 +49,7 @@ import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
import org.apache.hadoop.hdfs.security.token.block.SecurityTestUtil;
import org.apache.hadoop.hdfs.server.balancer.TestBalancer;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.token.Token;
@@ -137,8 +137,8 @@ public class TestBlockTokenWithDFS {
DatanodeInfo[] nodes = lblock.getLocations();
targetAddr = NetUtils.createSocketAddr(nodes[0].getName());
s = new Socket();
- s.connect(targetAddr, HdfsConstants.READ_TIMEOUT);
- s.setSoTimeout(HdfsConstants.READ_TIMEOUT);
+ s.connect(targetAddr, HdfsServerConstants.READ_TIMEOUT);
+ s.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
 
String file = BlockReaderFactory.getFileName(targetAddr,
"test-blockpoolid", block.getBlockId());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
index ee84feca81e..794b23c652e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
@@ -31,7 +31,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.Node;
@@ -76,8 +76,8 @@ public class TestReplicationPolicy extends TestCase {
}
for(int i=0; i