HDFS-5590. Block ID and generation stamp may be reused when persistBlocks is set to false. Contributed by Jing Zhao.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1548368 13f79535-47bb-0310-9956-ffa450edef68
Jing Zhao 2013-12-06 01:55:13 +00:00
parent eccd7b2093
commit 9df84c35d5
5 changed files with 7 additions and 21 deletions
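For context on the failure mode before reading the diffs: the NameNode hands a newly allocated block (ID plus generation stamp) to the client immediately, but the edit recording that allocation only survives a restart once the edit log has been synced. The sketch below is a toy model of that interaction, with made-up class, field, and method names rather than HDFS code; it shows how skipping the sync lets a restarted allocator reissue an ID a client already holds.

// A minimal sketch (toy code, not HDFS) of the hazard this commit fixes:
// an ID handed out before the edit recording it is durable can be handed
// out again after a crash and restart.
public class BlockIdReuseSketch {

  /** Stands in for the edit log: only synced state survives a restart. */
  static long durableLastId = 0;

  /** In-memory allocator state on the running "NameNode". */
  static long lastId = 0;

  static long allocateBlockId(boolean syncEditLog) {
    long id = ++lastId;
    if (syncEditLog) {
      durableLastId = lastId;   // analogous to getEditLog().logSync()
    }
    return id;
  }

  /** Simulates a crash + restart: replay only the durable edits. */
  static void restart() {
    lastId = durableLastId;
  }

  public static void main(String[] args) {
    long first = allocateBlockId(false);  // old behavior, persistBlocks == false
    restart();
    long second = allocateBlockId(true);
    System.out.println(first == second
        ? "Block ID " + first + " was reused after restart"
        : "Block IDs stayed distinct");
  }
}

Running main prints "Block ID 1 was reused after restart"; the commit closes this window by making the edit-log sync unconditional, as the FSNamesystem hunks below show.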

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -786,6 +786,9 @@ Release 2.3.0 - UNRELEASED
     HDFS-5587. add debug information when NFS fails to start with duplicate user
     or group names (brandonli)
 
+    HDFS-5590. Block ID and generation stamp may be reused when persistBlocks is
+    set to false. (jing9)
+
 Release 2.2.0 - 2013-10-13
 
   INCOMPATIBLE CHANGES

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java

@@ -166,8 +166,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final boolean DFS_WEBHDFS_ENABLED_DEFAULT = true;
   public static final String DFS_PERMISSIONS_ENABLED_KEY = "dfs.permissions.enabled";
   public static final boolean DFS_PERMISSIONS_ENABLED_DEFAULT = true;
-  public static final String DFS_PERSIST_BLOCKS_KEY = "dfs.persist.blocks";
-  public static final boolean DFS_PERSIST_BLOCKS_DEFAULT = false;
   public static final String DFS_PERMISSIONS_SUPERUSERGROUP_KEY = "dfs.permissions.superusergroup";
   public static final String DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT = "supergroup";
   public static final String DFS_ADMIN = "dfs.cluster.administrators";

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -79,8 +79,6 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_ENABLED_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERSIST_BLOCKS_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERSIST_BLOCKS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SUPPORT_APPEND_DEFAULT;
@@ -153,6 +151,8 @@ import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
+import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
+import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -164,8 +164,6 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
-import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
@@ -365,7 +363,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
   static final int DEFAULT_MAX_CORRUPT_FILEBLOCKS_RETURNED = 100;
   static int BLOCK_DELETION_INCREMENT = 1000;
   private final boolean isPermissionEnabled;
-  private final boolean persistBlocks;
   private final UserGroupInformation fsOwner;
   private final String fsOwnerShortUserName;
   private final String supergroup;
@@ -670,13 +667,10 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       LOG.info("supergroup = " + supergroup);
       LOG.info("isPermissionEnabled = " + isPermissionEnabled);
 
-      final boolean persistBlocks = conf.getBoolean(DFS_PERSIST_BLOCKS_KEY,
-          DFS_PERSIST_BLOCKS_DEFAULT);
       // block allocation has to be persisted in HA using a shared edits directory
       // so that the standby has up-to-date namespace information
       String nameserviceId = DFSUtil.getNamenodeNameServiceId(conf);
       this.haEnabled = HAUtil.isHAEnabled(conf, nameserviceId);
-      this.persistBlocks = persistBlocks || (haEnabled && HAUtil.usesSharedEditsDir(conf));
 
       // Sanity check the HA-related config.
       if (nameserviceId != null) {
@@ -2635,9 +2629,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     } finally {
       writeUnlock();
     }
-    if (persistBlocks) {
-      getEditLog().logSync();
-    }
+    getEditLog().logSync();
 
     // Return located block
     return makeLocatedBlock(newBlock, targets, offset);
@@ -2828,9 +2820,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     } finally {
       writeUnlock();
     }
-    if (persistBlocks) {
-      getEditLog().logSync();
-    }
+    getEditLog().logSync();
 
     return true;
   }
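A note on the shape of the FSNamesystem change above: rather than keeping dfs.persist.blocks and defaulting it to true, the option is deleted outright and both block-allocation paths call getEditLog().logSync() unconditionally. The removed constructor logic shows why the old default was fragile: persistence was forced on under HA with shared edits, but a non-HA NameNode with the default of false could lose the allocation edit in a crash and later re-issue the same block ID and generation stamp.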

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java

@@ -97,7 +97,6 @@ public class TestPersistBlocks {
     conf.setInt(
         CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
         0);
-    conf.setBoolean(DFSConfigKeys.DFS_PERSIST_BLOCKS_KEY, true);
     MiniDFSCluster cluster = null;
 
     long len = 0;
@@ -157,7 +156,6 @@ public class TestPersistBlocks {
     conf.setInt(
         CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
         0);
-    conf.setBoolean(DFSConfigKeys.DFS_PERSIST_BLOCKS_KEY, true);
     MiniDFSCluster cluster = null;
 
     long len = 0;
@@ -219,7 +217,6 @@ public class TestPersistBlocks {
     conf.setInt(
         CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
         0);
-    conf.setBoolean(DFSConfigKeys.DFS_PERSIST_BLOCKS_KEY, true);
     MiniDFSCluster cluster = null;
 
     FSDataOutputStream stream;
@@ -269,7 +266,6 @@ public class TestPersistBlocks {
     conf.setInt(
         CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
         0);
-    conf.setBoolean(DFSConfigKeys.DFS_PERSIST_BLOCKS_KEY, true);
     MiniDFSCluster cluster = null;
 
     FSDataOutputStream stream;

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java

@@ -434,7 +434,6 @@ public class TestBackupNode {
   public void testCanReadData() throws IOException {
     Path file1 = new Path("/fileToRead.dat");
     Configuration conf = new HdfsConfiguration();
-    conf.setBoolean(DFSConfigKeys.DFS_PERSIST_BLOCKS_KEY, true);
     MiniDFSCluster cluster = null;
     FileSystem fileSys = null;
     BackupNode backup = null;