From 0931bd94be50ba5fe266d5c31e8fcfad9897bfec Mon Sep 17 00:00:00 2001
From: Uma Maheswara Rao G
Date: Fri, 25 Apr 2014 05:19:50 +0000
Subject: [PATCH] HDFS-6246. Remove 'dfs.support.append' flag from trunk code.
 Contributed by Uma Maheswara Rao G.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1589927 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt             |  2 ++
 .../java/org/apache/hadoop/hdfs/DFSConfigKeys.java      |  2 --
 .../datanode/fsdataset/impl/BlockPoolSlice.java         |  6 ------
 .../hadoop/hdfs/server/namenode/FSNamesystem.java       | 11 -----------
 .../java/org/apache/hadoop/hdfs/TestFileAppend.java     |  1 -
 .../src/test/java/org/apache/hadoop/fs/TestDFSIO.java   |  2 --
 6 files changed, 2 insertions(+), 22 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 21507f0b404..41e94bb604e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -125,6 +125,8 @@ Trunk (Unreleased)
     HDFS-6228. comments typo fix for FsDatasetImpl.java (zhaoyunjiong via
     umamahesh)
 
+    HDFS-6246. Remove 'dfs.support.append' flag from trunk code. (umamahesh)
+
   OPTIMIZATIONS
 
   BUG FIXES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 58557c97d54..1d5e5bc5533 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -381,8 +381,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final int     DFS_NAMENODE_HANDLER_COUNT_DEFAULT = 10;
   public static final String  DFS_NAMENODE_SERVICE_HANDLER_COUNT_KEY = "dfs.namenode.service.handler.count";
   public static final int     DFS_NAMENODE_SERVICE_HANDLER_COUNT_DEFAULT = 10;
-  public static final String  DFS_SUPPORT_APPEND_KEY = "dfs.support.append";
-  public static final boolean DFS_SUPPORT_APPEND_DEFAULT = true;
   public static final String  DFS_HTTP_POLICY_KEY = "dfs.http.policy";
   public static final String  DFS_HTTP_POLICY_DEFAULT = HttpConfig.Policy.HTTP_ONLY.name();
   public static final String  DFS_DEFAULT_CHUNK_VIEW_SIZE_KEY = "dfs.default.chunk.view.size";
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
index e47a302836d..6093339bdcb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
@@ -95,12 +95,6 @@ class BlockPoolSlice {
       FileUtil.fullyDelete(tmpDir);
     }
     this.rbwDir = new File(currentDir, DataStorage.STORAGE_DIR_RBW);
-    final boolean supportAppends = conf.getBoolean(
-        DFSConfigKeys.DFS_SUPPORT_APPEND_KEY,
-        DFSConfigKeys.DFS_SUPPORT_APPEND_DEFAULT);
-    if (rbwDir.exists() && !supportAppends) {
-      FileUtil.fullyDelete(rbwDir);
-    }
     final int maxBlocksPerDir = conf.getInt(
       DFSConfigKeys.DFS_DATANODE_NUMBLOCKS_KEY,
       DFSConfigKeys.DFS_DATANODE_NUMBLOCKS_DEFAULT);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 508e9556546..16b356468a8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -83,8 +83,6 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROU
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SUPPORT_APPEND_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SUPPORT_APPEND_KEY;
 import static org.apache.hadoop.util.Time.now;
 
 import java.io.BufferedWriter;
@@ -446,7 +444,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
   NameNodeResourceChecker nnResourceChecker;
 
   private final FsServerDefaults serverDefaults;
-  private final boolean supportAppends;
   private final ReplaceDatanodeOnFailure dtpReplaceDatanodeOnFailure;
 
   private volatile SafeModeInfo safeMode;  // safe mode information
@@ -754,8 +751,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
         DFSConfigKeys.DFS_NAMENODE_MAX_BLOCKS_PER_FILE_DEFAULT);
     this.accessTimePrecision = conf.getLong(DFS_NAMENODE_ACCESSTIME_PRECISION_KEY,
         DFS_NAMENODE_ACCESSTIME_PRECISION_DEFAULT);
-    this.supportAppends = conf.getBoolean(DFS_SUPPORT_APPEND_KEY, DFS_SUPPORT_APPEND_DEFAULT);
-    LOG.info("Append Enabled: " + supportAppends);
 
     this.dtpReplaceDatanodeOnFailure = ReplaceDatanodeOnFailure.get(conf);
 
@@ -2597,12 +2592,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
           + ", clientMachine=" + clientMachine);
     }
     boolean skipSync = false;
-    if (!supportAppends) {
-      throw new UnsupportedOperationException(
-          "Append is not enabled on this NameNode. Use the " +
-          DFS_SUPPORT_APPEND_KEY + " configuration option to enable it.");
-    }
-
     LocatedBlock lb = null;
     FSPermissionChecker pc = getPermissionChecker();
     checkOperation(OperationCategory.WRITE);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
index 41feabb2abb..4ea534acdd6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
@@ -345,7 +345,6 @@ public class TestFileAppend{
       throws IOException, InterruptedException {
     Configuration conf = new HdfsConfiguration();
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
-    conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);
     //Set small soft-limit for lease
     final long softLimit = 1L;
     final long hardLimit = 9999999L;
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestDFSIO.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestDFSIO.java
index b354b204b8e..78f1ffa6841 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestDFSIO.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestDFSIO.java
@@ -201,7 +201,6 @@ public class TestDFSIO implements Tool {
   @BeforeClass
   public static void beforeClass() throws Exception {
     bench = new TestDFSIO();
-    bench.getConf().setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);
     cluster = new MiniDFSCluster.Builder(bench.getConf())
                                 .numDataNodes(2)
                                 .format(true)
@@ -733,7 +732,6 @@ public class TestDFSIO implements Tool {
 
     config.setInt("test.io.file.buffer.size", bufferSize);
     config.setLong("test.io.skip.size", skipSize);
-    config.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);
     FileSystem fs = FileSystem.get(config);
 
     if (isSequential) {