HDFS-5472. Fix TestDatanodeManager, TestSafeMode and TestNNThroughputBenchmark. (Contributed by szetszwo)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2832@1539736 13f79535-47bb-0310-9956-ffa450edef68
Arpit Agarwal 2013-11-07 17:57:30 +00:00
parent 75777f1626
commit a604e3b73b
5 changed files with 43 additions and 17 deletions

hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-2832.txt

@@ -80,4 +80,7 @@ IMPROVEMENTS:
     Agarwal)

     HDFS-5470. Add back trunk's reportDiff algorithm to the branch.
-    (szetszwo)
+    (Contributed by szetszwo)
+
+    HDFS-5472. Fix TestDatanodeManager, TestSafeMode and
+    TestNNThroughputBenchmark (Contributed by szetszwo)

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java

@@ -21,8 +21,6 @@ package org.apache.hadoop.hdfs.protocol;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;

-import java.util.UUID;
-
 /**
  * This class represents the primary identifier for a Datanode.
  * Datanodes are identified by how they can be contacted (hostname
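
With this import gone, DatanodeID no longer mints UUIDs itself: generation is centralized in DataNode.generateUuid() (added below), and the identifier stays a passive value object that is handed its UUID. A minimal sketch of that separation; the class and field names here are illustrative, not the actual Hadoop sources:

/** Illustrative value object: the UUID is injected, never generated here. */
public class NodeIdSketch {
  private final String datanodeUuid;

  public NodeIdSketch(String datanodeUuid) {
    // Callers supply the UUID, e.g. from DataNode.generateUuid().
    this.datanodeUuid = datanodeUuid;
  }

  public String getDatanodeUuid() {
    return datanodeUuid;
  }
}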

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java

@@ -215,6 +215,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
    */
   public DatanodeDescriptor(DatanodeID nodeID) {
     super(nodeID);
+    updateHeartbeat(StorageReport.EMPTY_ARRAY, 0L, 0L, 0, 0);
   }

   /**
@@ -225,6 +226,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
   public DatanodeDescriptor(DatanodeID nodeID,
                             String networkLocation) {
     super(nodeID, networkLocation);
+    updateHeartbeat(StorageReport.EMPTY_ARRAY, 0L, 0L, 0, 0);
   }

   /**
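
Both constructors now run a fresh descriptor through the normal heartbeat-update path with an empty storage-report array and zeroed counters, so a descriptor constructed directly (as the fixed tests do) is in a consistent state before any real heartbeat arrives. A self-contained sketch of the pattern; the field names and the simplified signature are illustrative, not the actual Hadoop code:

public class DescriptorSketch {
  /** Stand-in for StorageReport.EMPTY_ARRAY: one shared empty array. */
  static final Object[] EMPTY_REPORTS = {};

  private long capacity;
  private long remaining;
  private int xceiverCount;
  private int failedVolumes;

  public DescriptorSketch() {
    // Mirror the patch: reuse the heartbeat path for initialization so a
    // single method establishes a valid state.
    updateHeartbeat(EMPTY_REPORTS, 0L, 0L, 0, 0);
  }

  void updateHeartbeat(Object[] reports, long capacity, long remaining,
                       int xceiverCount, int failedVolumes) {
    this.capacity = capacity;
    this.remaining = remaining;
    this.xceiverCount = xceiverCount;
    this.failedVolumes = failedVolumes;
    // The real method also folds the per-storage reports into totals.
  }
}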

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java

@@ -693,6 +693,10 @@ public class DataNode extends Configured
     readaheadPool = ReadaheadPool.getInstance();
   }

+  public static String generateUuid() {
+    return UUID.randomUUID().toString();
+  }
+
   /**
    * Verify that the DatanodeUuid has been initialized. If this is a new
    * datanode then we generate a new Datanode Uuid and persist it to disk.
@@ -701,7 +705,7 @@ public class DataNode extends Configured
    */
   private synchronized void checkDatanodeUuid() throws IOException {
     if (storage.getDatanodeUuid() == null) {
-      storage.setDatanodeUuid(UUID.randomUUID().toString());
+      storage.setDatanodeUuid(generateUuid());
       storage.writeAll();
       LOG.info("Generated and persisted new Datanode UUID " +
           storage.getDatanodeUuid());
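
Factoring the one-liner into a public static helper gives tests and benchmarks a single blessed way to mint datanode UUIDs, while checkDatanodeUuid keeps its generate-once-then-persist behavior. A runnable standalone sketch of both pieces; the Storage stand-in is illustrative, not Hadoop's DataStorage API:

import java.util.UUID;

public class UuidSketch {
  /** Mirrors DataNode.generateUuid(): a random (version 4) UUID as text. */
  public static String generateUuid() {
    return UUID.randomUUID().toString();
  }

  /** Illustrative stand-in for the persisted storage record. */
  static class Storage {
    private String datanodeUuid;  // null until first initialization
    String getDatanodeUuid() { return datanodeUuid; }
    void setDatanodeUuid(String uuid) { datanodeUuid = uuid; }
    void writeAll() { /* a real implementation would flush to disk */ }
  }

  /** Same shape as checkDatanodeUuid(): generate once, then persist. */
  static synchronized void checkDatanodeUuid(Storage storage) {
    if (storage.getDatanodeUuid() == null) {
      storage.setDatanodeUuid(generateUuid());
      storage.writeAll();
      System.out.println("Generated and persisted new Datanode UUID "
          + storage.getDatanodeUuid());
    }
  }

  public static void main(String[] args) {
    Storage storage = new Storage();
    checkDatanodeUuid(storage);  // generates and "persists" a UUID
    checkDatanodeUuid(storage);  // no-op: UUID already present
  }
}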

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java

@@ -17,6 +17,14 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;

+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.EnumSet;
+import java.util.List;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
@@ -25,28 +33,40 @@ import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.protocol.*;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataStorage;
-import org.apache.hadoop.hdfs.server.protocol.*;
+import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
+import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
+import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
+import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
+import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
+import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
+import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.net.DNS;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.security.Groups;
-import org.apache.hadoop.util.*;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Time;
+import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
+import org.apache.hadoop.util.VersionInfo;
 import org.apache.log4j.Level;
 import org.apache.log4j.LogManager;
-
-import java.io.File;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.EnumSet;
-import java.util.List;

 /**
  * Main class for a series of name-node benchmarks.
  *
@@ -817,13 +837,12 @@ public class NNThroughputBenchmark implements Tool {
       dnRegistration = new DatanodeRegistration(
           new DatanodeID(DNS.getDefaultIP("default"),
               DNS.getDefaultHost("default", "default"),
-              "", getNodePort(dnIdx),
+              DataNode.generateUuid(), getNodePort(dnIdx),
               DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
               DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
               DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
           new DataStorage(nsInfo),
           new ExportedBlockKeys(), VersionInfo.getVersion());
-      // TODO: Fix NNThroughputBenchmark.
       // register datanode
       dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
       //first block reports
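
Before the patch the benchmark passed an empty string for the identifier field of DatanodeID; calling DataNode.generateUuid() gives every simulated datanode a distinct, well-formed UUID, which is what the branch's registration path now expects. A standalone sketch of the idea; TinyDatanodeID and its fields are illustrative, and the real DatanodeID constructor takes several more port arguments:

import java.util.UUID;

public class RegistrationSketch {
  /** Illustrative stand-in for DatanodeID. */
  static class TinyDatanodeID {
    final String ipAddr, hostName, datanodeUuid;
    final int xferPort;

    TinyDatanodeID(String ipAddr, String hostName,
                   String datanodeUuid, int xferPort) {
      this.ipAddr = ipAddr;
      this.hostName = hostName;
      this.datanodeUuid = datanodeUuid;
      this.xferPort = xferPort;
    }

    @Override
    public String toString() {
      return hostName + ":" + xferPort + " uuid=" + datanodeUuid;
    }
  }

  static TinyDatanodeID makeId(int dnIdx) {
    // An empty UUID field ("") no longer passes for an identifier;
    // a random UUID keeps each simulated datanode distinct.
    return new TinyDatanodeID("127.0.0.1", "localhost",
        UUID.randomUUID().toString(), 50010 + dnIdx);
  }

  public static void main(String[] args) {
    for (int dnIdx = 0; dnIdx < 3; dnIdx++) {
      System.out.println(makeId(dnIdx));
    }
  }
}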