HDFS-4377. Some trivial DN comment cleanup. Contributed by Eli Collins

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1431754 13f79535-47bb-0310-9956-ffa450edef68
Eli Collins 2013-01-11 00:11:06 +00:00
parent 4774c6a5b8
commit 71163ca1ed
4 changed files with 32 additions and 33 deletions

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -186,6 +186,8 @@ Release 2.0.3-alpha - Unreleased
     HDFS-4363. Combine PBHelper and HdfsProtoUtil and remove redundant
     methods. (suresh)
 
+    HDFS-4377. Some trivial DN comment cleanup. (eli)
+
   OPTIMIZATIONS
 
   BUG FIXES

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

@@ -171,20 +171,19 @@ public class BlockManager {
    */
   private final Set<Block> postponedMisreplicatedBlocks = Sets.newHashSet();
 
-  //
-  // Keeps a TreeSet for every named node. Each treeset contains
-  // a list of the blocks that are "extra" at that location. We'll
-  // eventually remove these extras.
-  // Mapping: StorageID -> TreeSet<Block>
-  //
+  /**
+   * Maps a StorageID to the set of blocks that are "extra" for this
+   * DataNode. We'll eventually remove these extras.
+   */
   public final Map<String, LightWeightLinkedSet<Block>> excessReplicateMap =
     new TreeMap<String, LightWeightLinkedSet<Block>>();
 
-  //
-  // Store set of Blocks that need to be replicated 1 or more times.
-  // We also store pending replication-orders.
-  //
+  /**
+   * Store set of Blocks that need to be replicated 1 or more times.
+   * We also store pending replication-orders.
+   */
   public final UnderReplicatedBlocks neededReplications = new UnderReplicatedBlocks();
 
   @VisibleForTesting
   final PendingReplicationBlocks pendingReplications;
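
The new Javadoc describes a per-storage set of over-replicated blocks. A minimal standalone sketch of that structure in plain JDK types, with long block IDs and HashSet standing in for HDFS's Block and LightWeightLinkedSet (illustrative only, not BlockManager code):

import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;

// Toy tracker: storage ID -> IDs of blocks that are "extra" on that storage.
class ExcessReplicaTracker {
  private final Map<String, Set<Long>> excessReplicateMap =
      new TreeMap<String, Set<Long>>();

  // Record that a block is over-replicated on the given storage.
  void markExcess(String storageId, long blockId) {
    excessReplicateMap
        .computeIfAbsent(storageId, k -> new HashSet<Long>())
        .add(blockId);
  }

  // Stop tracking once the extra replica has actually been deleted.
  boolean clearExcess(String storageId, long blockId) {
    Set<Long> blocks = excessReplicateMap.get(storageId);
    return blocks != null && blocks.remove(blockId);
  }
}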

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java

@@ -970,29 +970,27 @@ public class DataNode extends Configured
     dnId.setStorageID(createNewStorageId(dnId.getXferPort()));
   }
 
+  /**
+   * @return a unique storage ID of form "DS-randInt-ipaddr-port-timestamp"
+   */
   static String createNewStorageId(int port) {
-    /* Return
-     * "DS-randInt-ipaddr-currentTimeMillis"
-     * It is considered extermely rare for all these numbers to match
-     * on a different machine accidentally for the following
-     * a) SecureRandom(INT_MAX) is pretty much random (1 in 2 billion), and
-     * b) Good chance ip address would be different, and
-     * c) Even on the same machine, Datanode is designed to use different ports.
-     * d) Good chance that these are started at different times.
-     * For a confict to occur all the 4 above have to match!.
-     * The format of this string can be changed anytime in future without
-     * affecting its functionality.
-     */
+    // It is unlikely that we will create a non-unique storage ID
+    // for the following reasons:
+    // a) SecureRandom is a cryptographically strong random number generator
+    // b) IP addresses will likely differ on different hosts
+    // c) DataNode xfer ports will differ on the same host
+    // d) StorageIDs will likely be generated at different times (in ms)
+    // A conflict requires that all four conditions are violated.
+    // NB: The format of this string can be changed in the future without
+    // requiring that old StorageIDs be updated.
     String ip = "unknownIP";
     try {
      ip = DNS.getDefaultIP("default");
     } catch (UnknownHostException ignored) {
-      LOG.warn("Could not find ip address of \"default\" inteface.");
+      LOG.warn("Could not find an IP address for the \"default\" interface.");
     }
     int rand = DFSUtil.getSecureRandom().nextInt(Integer.MAX_VALUE);
-    return "DS-" + rand + "-" + ip + "-" + port + "-"
-        + Time.now();
+    return "DS-" + rand + "-" + ip + "-" + port + "-" + Time.now();
   }
 
   /** Ensure the authentication method is kerberos */
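
The rewritten comment lists four independent reasons a collision is unlikely. A standalone sketch of the same "DS-randInt-ipaddr-port-timestamp" scheme, where InetAddress.getLocalHost() and System.currentTimeMillis() stand in for Hadoop's DNS.getDefaultIP("default") and Time.now() (illustrative, not DataNode code):

import java.net.InetAddress;
import java.net.UnknownHostException;
import java.security.SecureRandom;

class StorageIdSketch {
  static String createStorageId(int port) {
    String ip = "unknownIP";
    try {
      ip = InetAddress.getLocalHost().getHostAddress();
    } catch (UnknownHostException ignored) {
      // Keep the placeholder, mirroring the DataNode's fallback.
    }
    // One draw from a cryptographically strong RNG, as in reason (a).
    int rand = new SecureRandom().nextInt(Integer.MAX_VALUE);
    return "DS-" + rand + "-" + ip + "-" + port + "-" + System.currentTimeMillis();
  }

  public static void main(String[] args) {
    // Prints something like: DS-1286531930-192.168.1.5-50010-1357862400000
    System.out.println(createStorageId(50010));
  }
}

A conflict requires the random int, IP address, port, and millisecond timestamp to all match at once, which is why the comment treats uniqueness as a practical rather than a cryptographic guarantee.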

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java

@@ -62,7 +62,7 @@ import org.apache.hadoop.util.DiskChecker;
  */
 @InterfaceAudience.Private
 public class DataStorage extends Storage {
-  // Constants
+
   public final static String BLOCK_SUBDIR_PREFIX = "subdir";
   final static String BLOCK_FILE_PREFIX = "blk_";
   final static String COPY_FILE_PREFIX = "dncp_";
@@ -71,13 +71,13 @@ public class DataStorage extends Storage {
   public final static String STORAGE_DIR_FINALIZED = "finalized";
   public final static String STORAGE_DIR_TMP = "tmp";
 
-  /** Access to this variable is guarded by "this" */
+  /** Unique storage ID. {@see DataNode#createNewStorageId(int)} for details */
   private String storageID;
 
-  // flag to ensure initialzing storage occurs only once
-  private boolean initilized = false;
+  // Flag to ensure we only initialize storage once
+  private boolean initialized = false;
 
-  // BlockPoolStorage is map of <Block pool Id, BlockPoolStorage>
+  // Maps block pool IDs to block pool storage
   private Map<String, BlockPoolSliceStorage> bpStorageMap
       = Collections.synchronizedMap(new HashMap<String, BlockPoolSliceStorage>());
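
bpStorageMap wraps a HashMap in Collections.synchronizedMap so concurrent block pool registration and lookup are safe without locking all of DataStorage. A minimal sketch of that choice, with a String payload standing in for BlockPoolSliceStorage (hypothetical names):

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

class BlockPoolRegistry {
  // Each individual get/put is atomic; iterating over the map would
  // still require external synchronization on the map object.
  private final Map<String, String> bpStorageMap =
      Collections.synchronizedMap(new HashMap<String, String>());

  void addBlockPool(String bpid, String storage) {
    bpStorageMap.put(bpid, storage);
  }

  String getBlockPoolStorage(String bpid) {
    return bpStorageMap.get(bpid);
  }
}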
@@ -130,7 +130,7 @@ public class DataStorage extends Storage {
   synchronized void recoverTransitionRead(DataNode datanode,
       NamespaceInfo nsInfo, Collection<File> dataDirs, StartupOption startOpt)
       throws IOException {
-    if (initilized) {
+    if (initialized) {
       // DN storage has been initialized, no need to do anything
       return;
     }
@@ -200,7 +200,7 @@ public class DataStorage extends Storage {
     this.writeAll();
 
-    // 4. mark DN storage is initilized
-    this.initilized = true;
+    // 4. mark DN storage is initialized
+    this.initialized = true;
   }
 
   /**
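
The renamed flag is a plain initialize-once guard: recoverTransitionRead() does its directory analysis and transitions on the first call and returns immediately on every call after that. A minimal sketch of the pattern, under hypothetical names:

class OnceInitializedStorage {
  private boolean initialized = false;

  // Safe to call repeatedly; only the first call does the work.
  synchronized void recoverTransitionRead() {
    if (initialized) {
      return; // storage already set up, nothing to do
    }
    // 1-3. analyze directories, perform transitions, write storage files...
    // 4. mark DN storage as initialized
    initialized = true;
  }
}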