From 9df84c35d5a228e3ae42e90487f2d1f1264e5eea Mon Sep 17 00:00:00 2001
From: Jing Zhao
Date: Fri, 6 Dec 2013 01:55:13 +0000
Subject: [PATCH] HDFS-5590. Block ID and generation stamp may be reused when
 persistBlocks is set to false. Contributed by Jing Zhao.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1548368 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt    |  3 +++
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java  |  2 --
 .../hdfs/server/namenode/FSNamesystem.java     | 18 ++++--------------
 .../apache/hadoop/hdfs/TestPersistBlocks.java  |  4 ----
 .../hdfs/server/namenode/TestBackupNode.java   |  1 -
 5 files changed, 7 insertions(+), 21 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index d44af025ffe..e01ead4d4a0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -786,6 +786,9 @@ Release 2.3.0 - UNRELEASED
     HDFS-5587. add debug information when NFS fails to start with duplicate
     user or group names (brandonli)
 
+    HDFS-5590. Block ID and generation stamp may be reused when persistBlocks is
+    set to false. (jing9)
+
 Release 2.2.0 - 2013-10-13
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index ea65cbf0fe7..3dc06ba8a39 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -166,8 +166,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final boolean DFS_WEBHDFS_ENABLED_DEFAULT = true;
   public static final String  DFS_PERMISSIONS_ENABLED_KEY = "dfs.permissions.enabled";
   public static final boolean DFS_PERMISSIONS_ENABLED_DEFAULT = true;
-  public static final String  DFS_PERSIST_BLOCKS_KEY = "dfs.persist.blocks";
-  public static final boolean DFS_PERSIST_BLOCKS_DEFAULT = false;
   public static final String  DFS_PERMISSIONS_SUPERUSERGROUP_KEY = "dfs.permissions.superusergroup";
   public static final String  DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT = "supergroup";
   public static final String  DFS_ADMIN = "dfs.cluster.administrators";
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 6a74b49fa53..a465e8e7a15 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -79,8 +79,6 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERSIST_BLOCKS_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERSIST_BLOCKS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SUPPORT_APPEND_DEFAULT;
@@ -153,6 +151,8 @@
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
+import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
+import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -164,8 +164,6 @@
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
-import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
@@ -365,7 +363,6 @@ private void logAuditEvent(boolean succeeded,
   static final int DEFAULT_MAX_CORRUPT_FILEBLOCKS_RETURNED = 100;
   static int BLOCK_DELETION_INCREMENT = 1000;
   private final boolean isPermissionEnabled;
-  private final boolean persistBlocks;
   private final UserGroupInformation fsOwner;
   private final String fsOwnerShortUserName;
   private final String supergroup;
@@ -670,13 +667,10 @@ public static FSNamesystem loadFromDisk(Configuration conf)
       LOG.info("supergroup          = " + supergroup);
       LOG.info("isPermissionEnabled = " + isPermissionEnabled);
 
-      final boolean persistBlocks = conf.getBoolean(DFS_PERSIST_BLOCKS_KEY,
-          DFS_PERSIST_BLOCKS_DEFAULT);
       // block allocation has to be persisted in HA using a shared edits directory
       // so that the standby has up-to-date namespace information
       String nameserviceId = DFSUtil.getNamenodeNameServiceId(conf);
       this.haEnabled = HAUtil.isHAEnabled(conf, nameserviceId);
-      this.persistBlocks = persistBlocks || (haEnabled && HAUtil.usesSharedEditsDir(conf));
 
       // Sanity check the HA-related config.
       if (nameserviceId != null) {
@@ -2635,9 +2629,7 @@ LocatedBlock getAdditionalBlock(String src, long fileId, String clientName,
     } finally {
       writeUnlock();
     }
-    if (persistBlocks) {
-      getEditLog().logSync();
-    }
+    getEditLog().logSync();
 
     // Return located block
     return makeLocatedBlock(newBlock, targets, offset);
@@ -2828,9 +2820,7 @@ boolean abandonBlock(ExtendedBlock b, String src, String holder)
     } finally {
       writeUnlock();
     }
-    if (persistBlocks) {
-      getEditLog().logSync();
-    }
+    getEditLog().logSync();
 
     return true;
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java
index 4ee6a283819..e12c1cb22c0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java
@@ -97,7 +97,6 @@ void testRestartDfs(boolean useFlush) throws Exception {
     conf.setInt(
         CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
         0);
-    conf.setBoolean(DFSConfigKeys.DFS_PERSIST_BLOCKS_KEY, true);
     MiniDFSCluster cluster = null;
 
     long len = 0;
@@ -157,7 +156,6 @@ public void testRestartDfsWithAbandonedBlock() throws Exception {
     conf.setInt(
         CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
         0);
-    conf.setBoolean(DFSConfigKeys.DFS_PERSIST_BLOCKS_KEY, true);
     MiniDFSCluster cluster = null;
 
     long len = 0;
@@ -219,7 +217,6 @@ public void testRestartWithPartialBlockHflushed() throws IOException {
     conf.setInt(
         CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
         0);
-    conf.setBoolean(DFSConfigKeys.DFS_PERSIST_BLOCKS_KEY, true);
     MiniDFSCluster cluster = null;
 
     FSDataOutputStream stream;
@@ -269,7 +266,6 @@ public void testRestartWithAppend() throws IOException {
     conf.setInt(
         CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
         0);
-    conf.setBoolean(DFSConfigKeys.DFS_PERSIST_BLOCKS_KEY, true);
     MiniDFSCluster cluster = null;
 
     FSDataOutputStream stream;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
index dfc3667e322..fc56eb48c24 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
@@ -434,7 +434,6 @@ void testCheckpoint(StartupOption op) throws Exception {
   public void testCanReadData() throws IOException {
     Path file1 = new Path("/fileToRead.dat");
     Configuration conf = new HdfsConfiguration();
-    conf.setBoolean(DFSConfigKeys.DFS_PERSIST_BLOCKS_KEY, true);
     MiniDFSCluster cluster = null;
     FileSystem fileSys = null;
     BackupNode backup = null;
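
Note on the fix (editor's sketch, not part of the patch): getAdditionalBlock()
and abandonBlock() advance the NameNode's global block ID and generation stamp
state, but those edits only survive a restart once getEditLog().logSync()
flushes them to durable storage. With dfs.persist.blocks=false the sync was
guarded out, so a NameNode that crashed after handing a block to a client could
replay the edit log, forget the allocation, and issue the same block ID and
generation stamp again. The self-contained Java sketch below (hypothetical
names, plain Java rather than HDFS code) reproduces that failure mode in
miniature and shows how the now-unconditional sync closes it:

// IdReuseSketch.java -- minimal model of "allocation logged but never synced".
public class IdReuseSketch {

  /** Stands in for the edit log: an in-memory tail plus a durable prefix. */
  static class FakeEditLog {
    private long durableLastId = 0;  // what survives a crash
    private long loggedLastId = 0;   // known only to the running process
    void logAllocation(long id) { loggedLastId = id; }
    void logSync() { durableLastId = loggedLastId; }
    long recoverLastId() { return durableLastId; } // edit-log replay on restart
  }

  /** Stands in for the sequential block ID / generation stamp counter. */
  static class IdGenerator {
    private long lastId;
    IdGenerator(long lastId) { this.lastId = lastId; }
    long nextId() { return ++lastId; }
  }

  public static void main(String[] args) {
    // Old behavior (persistBlocks == false): allocation logged, never synced.
    FakeEditLog log = new FakeEditLog();
    long a = new IdGenerator(log.recoverLastId()).nextId(); // id 1 to a client
    log.logAllocation(a);
    // ...NameNode crashes here, before any logSync()...
    long b = new IdGenerator(log.recoverLastId()).nextId(); // id 1 handed out again
    System.out.println("without sync: " + a + " reused as " + b);

    // New behavior: logSync() runs unconditionally after each allocation.
    FakeEditLog log2 = new FakeEditLog();
    long c = new IdGenerator(log2.recoverLastId()).nextId();
    log2.logAllocation(c);
    log2.logSync(); // the patch makes this call unconditional
    long d = new IdGenerator(log2.recoverLastId()).nextId(); // fresh id 2
    System.out.println("with sync: " + c + " then " + d);
  }
}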