HDFS-3416. Cleanup DatanodeID and DatanodeRegistration constructors used by testing. Contributed by Eli Collins

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1338784 13f79535-47bb-0310-9956-ffa450edef68
Author: Eli Collins
Date:   2012-05-15 16:23:52 +00:00
Parent: e4df14f8f1
Commit: e9a7648f62
8 changed files with 56 additions and 94 deletions


@@ -465,6 +465,9 @@ Release 2.0.0 - UNRELEASED
HDFS-3417. Rename BalancerDatanode#getName to getDisplayName to be
consistent with Datanode. (eli)
HDFS-3416. Cleanup DatanodeID and DatanodeRegistration
constructors used by testing. (eli)
OPTIMIZATIONS
HDFS-3024. Improve performance of stringification in addStoredBlock (todd)
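
For orientation, the heart of the change is visible in the DFSTestUtil hunks further down: the two- and three-argument DatanodeID convenience constructors go away and every caller spells out all six fields. A condensed before/after sketch, drawn from those hunks; the wrapper class and variable names are illustrative, not part of the patch:

import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.DatanodeID;

class ConstructorCleanupSketch {  // illustrative wrapper, not in the patch
  void example() {
    // Before the patch: HTTP and IPC ports were filled in implicitly.
    // (This call no longer compiles once the patch is applied.)
    DatanodeID before = new DatanodeID("127.0.0.1", "localhost",
        DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT);

    // After the patch: all six fields are passed explicitly.
    DatanodeID after = new DatanodeID("127.0.0.1", "localhost", "",
        DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT,
        DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
        DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT);
  }
}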


@@ -20,7 +20,6 @@ package org.apache.hadoop.hdfs.protocol;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.DFSConfigKeys;
/**
* This class represents the primary identifier for a Datanode.
@@ -45,23 +44,6 @@ public class DatanodeID implements Comparable<DatanodeID> {
protected int infoPort; // info server port
protected int ipcPort; // IPC server port
public DatanodeID(String ipAddr, int xferPort) {
this(ipAddr, "", "", xferPort,
DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT);
}
public DatanodeID(String ipAddr, String hostName, int xferPort) {
this(ipAddr, hostName, "", xferPort,
DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT);
}
/**
* DatanodeID copy constructor
*
* @param from
*/
public DatanodeID(DatanodeID from) {
this(from.getIpAddr(),
from.getHostName(),
@@ -72,7 +54,7 @@ public class DatanodeID implements Comparable<DatanodeID> {
}
/**
* Create DatanodeID
* Create a DatanodeID
* @param ipAddr IP
* @param hostName hostname
* @param storageID data storage ID
@@ -94,22 +76,6 @@ public class DatanodeID implements Comparable<DatanodeID> {
this.ipAddr = ipAddr;
}
public void setHostName(String hostName) {
this.hostName = hostName;
}
public void setXferPort(int xferPort) {
this.xferPort = xferPort;
}
public void setInfoPort(int infoPort) {
this.infoPort = infoPort;
}
public void setIpcPort(int ipcPort) {
this.ipcPort = ipcPort;
}
public void setStorageID(String storageID) {
this.storageID = storageID;
}
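
Pieced together from the fields and @param tags shown above, the single surviving constructor has this shape; a reconstruction from the diff, not a verbatim quote of the committed file:

public DatanodeID(String ipAddr, String hostName, String storageID,
    int xferPort, int infoPort, int ipcPort) {
  this.ipAddr = ipAddr;        // IP address
  this.hostName = hostName;    // hostname
  this.storageID = storageID;  // data storage ID
  this.xferPort = xferPort;    // data transfer port
  this.infoPort = infoPort;    // info server port
  this.ipcPort = ipcPort;      // IPC server port
}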


@@ -101,10 +101,6 @@ public class DatanodeManager {
* <li>removed if and only if an existing datanode is restarted to serve a
* different storage id.</li>
* </ul> <br>
* The list of the {@link DatanodeDescriptor}s in the map is checkpointed
* in the namespace image file. Only the {@link DatanodeInfo} part is
* persistent, the list of blocks is restored from the datanode block
* reports.
* <p>
* Mapping: StorageID -> DatanodeDescriptor
*/
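
The mapping the comment describes boils down to a keyed lookup. A minimal sketch follows; the concrete map type and the helper method name are assumptions for illustration, not quotes from DatanodeManager:

import java.util.Map;
import java.util.TreeMap;

// Illustrative only: the StorageID -> DatanodeDescriptor mapping the
// javadoc describes. The actual field declaration may differ.
private final Map<String, DatanodeDescriptor> datanodeMap =
    new TreeMap<String, DatanodeDescriptor>();

// Lookup by storage ID, e.g. while processing a heartbeat:
DatanodeDescriptor getDatanodeByStorageId(String storageID) {
  return datanodeMap.get(storageID);
}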
@@ -832,7 +828,9 @@ public class DatanodeManager {
if (InetAddresses.isInetAddress(hostStr)) {
// The IP:port is sufficient for listing in a report
dnId = new DatanodeID(hostStr, "", port);
dnId = new DatanodeID(hostStr, "", "", port,
DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT);
} else {
String ipAddr = "";
try {
@@ -840,7 +838,9 @@ public class DatanodeManager {
} catch (UnknownHostException e) {
LOG.warn("Invalid hostname " + hostStr + " in hosts file");
}
dnId = new DatanodeID(ipAddr, hostStr, port);
dnId = new DatanodeID(ipAddr, hostStr, "", port,
DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT);
}
return dnId;
}
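
Both branches above now spell out the default web and IPC ports explicitly. The two resulting IDs look like this; the hosts-file entries and values are illustrative:

// Hosts entry "10.0.0.1:50010": the string is already an IP address,
// so it is used directly and the hostname field stays empty.
DatanodeID byIp = new DatanodeID("10.0.0.1", "", "", 50010,
    DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
    DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT);

// Hosts entry "dn1.example.com:50010": the hostname is kept and the IP
// is resolved; on UnknownHostException the IP field is left empty.
DatanodeID byHost = new DatanodeID("10.0.0.2", "dn1.example.com", "", 50010,
    DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
    DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT);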


@@ -667,23 +667,16 @@ public class DataNode extends Configured
* @param nsInfo the namespace info from the first part of the NN handshake
*/
DatanodeRegistration createBPRegistration(NamespaceInfo nsInfo) {
final String xferIp = streamingAddr.getAddress().getHostAddress();
DatanodeRegistration bpRegistration = new DatanodeRegistration(xferIp, getXferPort());
bpRegistration.setInfoPort(getInfoPort());
bpRegistration.setIpcPort(getIpcPort());
bpRegistration.setHostName(hostName);
bpRegistration.setStorageID(getStorageId());
bpRegistration.setSoftwareVersion(VersionInfo.getVersion());
StorageInfo storageInfo = storage.getBPStorage(nsInfo.getBlockPoolID());
if (storageInfo == null) {
// it's null in the case of SimulatedDataSet
bpRegistration.getStorageInfo().layoutVersion = HdfsConstants.LAYOUT_VERSION;
bpRegistration.setStorageInfo(nsInfo);
} else {
bpRegistration.setStorageInfo(storageInfo);
storageInfo = new StorageInfo(nsInfo);
}
return bpRegistration;
DatanodeID dnId = new DatanodeID(
streamingAddr.getAddress().getHostAddress(), hostName,
getStorageId(), getXferPort(), getInfoPort(), getIpcPort());
return new DatanodeRegistration(dnId, storageInfo,
new ExportedBlockKeys(), VersionInfo.getVersion());
}
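
Because old and new lines are interleaved in the hunk above, the post-patch method is easier to read assembled. This reconstruction keeps only the surviving lines:

DatanodeRegistration createBPRegistration(NamespaceInfo nsInfo) {
  StorageInfo storageInfo = storage.getBPStorage(nsInfo.getBlockPoolID());
  if (storageInfo == null) {
    // it's null in the case of SimulatedDataSet
    storageInfo = new StorageInfo(nsInfo);
  }
  DatanodeID dnId = new DatanodeID(
      streamingAddr.getAddress().getHostAddress(), hostName,
      getStorageId(), getXferPort(), getInfoPort(), getIpcPort());
  return new DatanodeRegistration(dnId, storageInfo,
      new ExportedBlockKeys(), VersionInfo.getVersion());
}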
/**


@@ -47,21 +47,6 @@ public class DatanodeRegistration extends DatanodeID
this.softwareVersion = softwareVersion;
}
public DatanodeRegistration(String ipAddr, int xferPort) {
this(ipAddr, xferPort, new StorageInfo(), new ExportedBlockKeys());
}
public DatanodeRegistration(String ipAddr, int xferPort, StorageInfo info,
ExportedBlockKeys keys) {
super(ipAddr, xferPort);
this.storageInfo = info;
this.exportedKeys = keys;
}
public void setStorageInfo(StorageInfo storage) {
this.storageInfo = new StorageInfo(storage);
}
public StorageInfo getStorageInfo() {
return storageInfo;
}
@@ -74,10 +59,6 @@ public class DatanodeRegistration extends DatanodeID
return exportedKeys;
}
public void setSoftwareVersion(String softwareVersion) {
this.softwareVersion = softwareVersion;
}
public String getSoftwareVersion() {
return softwareVersion;
}
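
The context lines at the top of this hunk are the tail of the one constructor that remains. Reconstructed from them and from the call sites elsewhere in the patch, it reads roughly as follows; the body is a sketch, not a verbatim quote:

public DatanodeRegistration(DatanodeID dn, StorageInfo info,
    ExportedBlockKeys keys, String softwareVersion) {
  super(dn);                   // copies the DatanodeID fields
  this.storageInfo = info;
  this.exportedKeys = keys;
  this.softwareVersion = softwareVersion;
}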


@@ -67,19 +67,23 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.TestTransferRbw;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.ShellBasedUnixGroupsMapping;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.VersionInfo;
import com.google.common.base.Joiner;
@@ -708,13 +712,14 @@ public class DFSTestUtil {
}
private static DatanodeID getDatanodeID(String ipAddr) {
return new DatanodeID(ipAddr, "localhost",
DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT);
return new DatanodeID(ipAddr, "localhost", "",
DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT,
DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT);
}
public static DatanodeID getLocalDatanodeID() {
return new DatanodeID("127.0.0.1", "localhost",
DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT);
return getDatanodeID("127.0.0.1");
}
public static DatanodeID getLocalDatanodeID(int port) {
@@ -740,12 +745,14 @@ public class DFSTestUtil {
public static DatanodeInfo getDatanodeInfo(String ipAddr,
String host, int port) {
return new DatanodeInfo(new DatanodeID(ipAddr, host, port));
return new DatanodeInfo(new DatanodeID(ipAddr, host, "",
port, DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT));
}
public static DatanodeInfo getLocalDatanodeInfo(String ipAddr,
String hostname, AdminStates adminState) {
return new DatanodeInfo(ipAddr, hostname, "storage",
return new DatanodeInfo(ipAddr, hostname, "",
DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT,
DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT,
@@ -760,6 +767,14 @@ public class DFSTestUtil {
public static DatanodeDescriptor getDatanodeDescriptor(String ipAddr,
int port, String rackLocation) {
return new DatanodeDescriptor(new DatanodeID(ipAddr, port), rackLocation);
DatanodeID dnId = new DatanodeID(ipAddr, "host", "", port,
DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT);
return new DatanodeDescriptor(dnId, rackLocation);
}
public static DatanodeRegistration getLocalDatanodeRegistration() {
return new DatanodeRegistration(getLocalDatanodeID(),
new StorageInfo(), new ExportedBlockKeys(), VersionInfo.getVersion());
}
}
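
With these helpers a test that needs a plausible local registration no longer touches any constructor directly. A usage sketch; the assertions assume JUnit's assertEquals:

// One call yields a registration with the loopback address, default
// ports, a fresh StorageInfo, and empty block keys.
DatanodeRegistration reg = DFSTestUtil.getLocalDatanodeRegistration();
assertEquals("127.0.0.1", reg.getIpAddr());
assertEquals(DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT, reg.getXferPort());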


@@ -29,6 +29,7 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -115,7 +116,7 @@ public class TestBPOfferService {
0, HdfsConstants.LAYOUT_VERSION))
.when(mock).versionRequest();
Mockito.doReturn(new DatanodeRegistration("1.2.3.4", 100))
Mockito.doReturn(DFSTestUtil.getLocalDatanodeRegistration())
.when(mock).registerDatanode(Mockito.any(DatanodeRegistration.class));
Mockito.doAnswer(new HeartbeatAnswer(nnIdx))


@@ -35,10 +35,12 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataStorage;
@@ -765,6 +767,7 @@ public class NNThroughputBenchmark {
ArrayList<Block> blocks;
int nrBlocks; // actual number of blocks
long[] blockReportList;
int dnIdx;
/**
* Return a 6 digit integer port.
@@ -780,11 +783,7 @@
}
TinyDatanode(int dnIdx, int blockCapacity) throws IOException {
String ipAddr = DNS.getDefaultIP("default");
String hostName = DNS.getDefaultHost("default", "default");
dnRegistration = new DatanodeRegistration(ipAddr, getNodePort(dnIdx));
dnRegistration.setHostName(hostName);
dnRegistration.setSoftwareVersion(VersionInfo.getVersion());
this.dnIdx = dnIdx;
this.blocks = new ArrayList<Block>(blockCapacity);
this.nrBlocks = 0;
}
@@ -800,7 +799,14 @@
void register() throws IOException {
// get versions from the namenode
nsInfo = nameNodeProto.versionRequest();
dnRegistration.setStorageInfo(new DataStorage(nsInfo, ""));
dnRegistration = new DatanodeRegistration(
new DatanodeID(DNS.getDefaultIP("default"),
DNS.getDefaultHost("default", "default"),
"", getNodePort(dnIdx),
DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
new DataStorage(nsInfo, ""),
new ExportedBlockKeys(), VersionInfo.getVersion());
DataNode.setNewStorageID(dnRegistration);
// register datanode
dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
@@ -896,12 +902,9 @@
for(int t = 0; t < blockTargets.length; t++) {
DatanodeInfo dnInfo = blockTargets[t];
DatanodeRegistration receivedDNReg;
receivedDNReg =
new DatanodeRegistration(dnInfo.getIpAddr(), dnInfo.getXferPort());
receivedDNReg.setStorageInfo(
new DataStorage(nsInfo, dnInfo.getStorageID()));
receivedDNReg.setInfoPort(dnInfo.getInfoPort());
receivedDNReg.setIpcPort(dnInfo.getIpcPort());
receivedDNReg = new DatanodeRegistration(dnInfo,
new DataStorage(nsInfo, dnInfo.getStorageID()),
new ExportedBlockKeys(), VersionInfo.getVersion());
ReceivedDeletedBlockInfo[] rdBlocks = {
new ReceivedDeletedBlockInfo(
blocks[i], ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK,