HDFS-3416. svn merge -c 1338784 from trunk
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1338800 13f79535-47bb-0310-9956-ffa450edef68
commit 5ea3d6cf5d
parent b20ef722c5
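For reviewers: the net effect of this cleanup is that callers construct DatanodeID and DatanodeRegistration fully up front, instead of going through the removed partial constructors and setters. A minimal sketch of the new-style construction, pieced together from the hunks below (the literal address and the standalone snippet form are illustrative, not part of the commit):

    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.protocol.DatanodeID;
    import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
    import org.apache.hadoop.hdfs.server.common.StorageInfo;
    import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
    import org.apache.hadoop.util.VersionInfo;

    // All six DatanodeID fields are now supplied explicitly; the removed
    // two- and three-argument constructors used to default the rest.
    DatanodeID dnId = new DatanodeID("127.0.0.1", "localhost",
        "" /* storageID, assigned later */,
        DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT,
        DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
        DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT);

    // DatanodeRegistration is likewise built in one call rather than via
    // the removed setStorageInfo/setSoftwareVersion setters.
    DatanodeRegistration reg = new DatanodeRegistration(dnId,
        new StorageInfo(), new ExportedBlockKeys(), VersionInfo.getVersion());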
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -329,6 +329,9 @@ Release 2.0.0 - UNRELEASED
     HDFS-3417. Rename BalancerDatanode#getName to getDisplayName to be
     consistent with Datanode. (eli)

+    HDFS-3416. Cleanup DatanodeID and DatanodeRegistration
+    constructors used by testing. (eli)
+
   OPTIMIZATIONS

     HDFS-2477. Optimize computing the diff between a block report and the
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hdfs.protocol;

 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hdfs.DFSConfigKeys;

 /**
  * This class represents the primary identifier for a Datanode.
@@ -45,23 +44,6 @@ public class DatanodeID implements Comparable<DatanodeID> {
   protected int infoPort; // info server port
   protected int ipcPort;  // IPC server port

-  public DatanodeID(String ipAddr, int xferPort) {
-    this(ipAddr, "", "", xferPort,
-        DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
-        DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT);
-  }
-
-  public DatanodeID(String ipAddr, String hostName, int xferPort) {
-    this(ipAddr, hostName, "", xferPort,
-        DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
-        DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT);
-  }
-
-  /**
-   * DatanodeID copy constructor
-   *
-   * @param from
-   */
   public DatanodeID(DatanodeID from) {
     this(from.getIpAddr(),
         from.getHostName(),
@@ -72,7 +54,7 @@ public class DatanodeID implements Comparable<DatanodeID> {
   }

   /**
-   * Create DatanodeID
+   * Create a DatanodeID
    * @param ipAddr IP
    * @param hostName hostname
    * @param storageID data storage ID
@@ -94,22 +76,6 @@ public class DatanodeID implements Comparable<DatanodeID> {
     this.ipAddr = ipAddr;
   }

-  public void setHostName(String hostName) {
-    this.hostName = hostName;
-  }
-
-  public void setXferPort(int xferPort) {
-    this.xferPort = xferPort;
-  }
-
-  public void setInfoPort(int infoPort) {
-    this.infoPort = infoPort;
-  }
-
-  public void setIpcPort(int ipcPort) {
-    this.ipcPort = ipcPort;
-  }
-
   public void setStorageID(String storageID) {
     this.storageID = storageID;
   }
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -100,11 +100,7 @@ public class DatanodeManager {
    * with the same storage id; and </li>
    * <li>removed if and only if an existing datanode is restarted to serve a
    * different storage id.</li>
    * </ul> <br>
-   * The list of the {@link DatanodeDescriptor}s in the map is checkpointed
-   * in the namespace image file. Only the {@link DatanodeInfo} part is
-   * persistent, the list of blocks is restored from the datanode block
-   * reports.
    * <p>
    * Mapping: StorageID -> DatanodeDescriptor
    */
@ -832,7 +828,9 @@ public class DatanodeManager {
|
||||||
|
|
||||||
if (InetAddresses.isInetAddress(hostStr)) {
|
if (InetAddresses.isInetAddress(hostStr)) {
|
||||||
// The IP:port is sufficient for listing in a report
|
// The IP:port is sufficient for listing in a report
|
||||||
dnId = new DatanodeID(hostStr, "", port);
|
dnId = new DatanodeID(hostStr, "", "", port,
|
||||||
|
DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
|
||||||
|
DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT);
|
||||||
} else {
|
} else {
|
||||||
String ipAddr = "";
|
String ipAddr = "";
|
||||||
try {
|
try {
|
||||||
|
@@ -840,7 +838,9 @@ public class DatanodeManager {
       } catch (UnknownHostException e) {
         LOG.warn("Invalid hostname " + hostStr + " in hosts file");
       }
-      dnId = new DatanodeID(ipAddr, hostStr, port);
+      dnId = new DatanodeID(ipAddr, hostStr, "", port,
+          DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
+          DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT);
     }
     return dnId;
   }
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -671,23 +671,16 @@ public class DataNode extends Configured
    * @param nsInfo the namespace info from the first part of the NN handshake
    */
   DatanodeRegistration createBPRegistration(NamespaceInfo nsInfo) {
-    final String xferIp = streamingAddr.getAddress().getHostAddress();
-    DatanodeRegistration bpRegistration = new DatanodeRegistration(xferIp, getXferPort());
-    bpRegistration.setInfoPort(getInfoPort());
-    bpRegistration.setIpcPort(getIpcPort());
-    bpRegistration.setHostName(hostName);
-    bpRegistration.setStorageID(getStorageId());
-    bpRegistration.setSoftwareVersion(VersionInfo.getVersion());
-
     StorageInfo storageInfo = storage.getBPStorage(nsInfo.getBlockPoolID());
     if (storageInfo == null) {
       // it's null in the case of SimulatedDataSet
-      bpRegistration.getStorageInfo().layoutVersion = HdfsConstants.LAYOUT_VERSION;
-      bpRegistration.setStorageInfo(nsInfo);
-    } else {
-      bpRegistration.setStorageInfo(storageInfo);
+      storageInfo = new StorageInfo(nsInfo);
     }
-    return bpRegistration;
+    DatanodeID dnId = new DatanodeID(
+        streamingAddr.getAddress().getHostAddress(), hostName,
+        getStorageId(), getXferPort(), getInfoPort(), getIpcPort());
+    return new DatanodeRegistration(dnId, storageInfo,
+        new ExportedBlockKeys(), VersionInfo.getVersion());
   }

   /**
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
@@ -47,21 +47,6 @@ public class DatanodeRegistration extends DatanodeID
     this.softwareVersion = softwareVersion;
   }

-  public DatanodeRegistration(String ipAddr, int xferPort) {
-    this(ipAddr, xferPort, new StorageInfo(), new ExportedBlockKeys());
-  }
-
-  public DatanodeRegistration(String ipAddr, int xferPort, StorageInfo info,
-      ExportedBlockKeys keys) {
-    super(ipAddr, xferPort);
-    this.storageInfo = info;
-    this.exportedKeys = keys;
-  }
-
-  public void setStorageInfo(StorageInfo storage) {
-    this.storageInfo = new StorageInfo(storage);
-  }
-
   public StorageInfo getStorageInfo() {
     return storageInfo;
   }
@ -74,10 +59,6 @@ public class DatanodeRegistration extends DatanodeID
|
||||||
return exportedKeys;
|
return exportedKeys;
|
||||||
}
|
}
|
||||||
|
|
||||||
public void setSoftwareVersion(String softwareVersion) {
|
|
||||||
this.softwareVersion = softwareVersion;
|
|
||||||
}
|
|
||||||
|
|
||||||
public String getSoftwareVersion() {
|
public String getSoftwareVersion() {
|
||||||
return softwareVersion;
|
return softwareVersion;
|
||||||
}
|
}
|
||||||
|
|
|
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -67,19 +67,23 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
+import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.TestTransferRbw;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.ShellBasedUnixGroupsMapping;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.util.VersionInfo;

 import com.google.common.base.Joiner;

@@ -708,13 +712,14 @@ public class DFSTestUtil {
   }

   private static DatanodeID getDatanodeID(String ipAddr) {
-    return new DatanodeID(ipAddr, "localhost",
-        DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT);
+    return new DatanodeID(ipAddr, "localhost", "",
+        DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT,
+        DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
+        DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT);
   }

   public static DatanodeID getLocalDatanodeID() {
-    return new DatanodeID("127.0.0.1", "localhost",
-        DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT);
+    return getDatanodeID("127.0.0.1");
   }

   public static DatanodeID getLocalDatanodeID(int port) {
@@ -740,12 +745,14 @@ public class DFSTestUtil {

   public static DatanodeInfo getDatanodeInfo(String ipAddr,
       String host, int port) {
-    return new DatanodeInfo(new DatanodeID(ipAddr, host, port));
+    return new DatanodeInfo(new DatanodeID(ipAddr, host, "",
+        port, DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
+        DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT));
   }

   public static DatanodeInfo getLocalDatanodeInfo(String ipAddr,
       String hostname, AdminStates adminState) {
-    return new DatanodeInfo(ipAddr, hostname, "storage",
+    return new DatanodeInfo(ipAddr, hostname, "",
         DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT,
         DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
         DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT,
@ -760,6 +767,14 @@ public class DFSTestUtil {
|
||||||
|
|
||||||
public static DatanodeDescriptor getDatanodeDescriptor(String ipAddr,
|
public static DatanodeDescriptor getDatanodeDescriptor(String ipAddr,
|
||||||
int port, String rackLocation) {
|
int port, String rackLocation) {
|
||||||
return new DatanodeDescriptor(new DatanodeID(ipAddr, port), rackLocation);
|
DatanodeID dnId = new DatanodeID(ipAddr, "host", "", port,
|
||||||
|
DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
|
||||||
|
DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT);
|
||||||
|
return new DatanodeDescriptor(dnId, rackLocation);
|
||||||
|
}
|
||||||
|
|
||||||
|
public static DatanodeRegistration getLocalDatanodeRegistration() {
|
||||||
|
return new DatanodeRegistration(getLocalDatanodeID(),
|
||||||
|
new StorageInfo(), new ExportedBlockKeys(), VersionInfo.getVersion());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
@@ -29,6 +29,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -115,7 +116,7 @@ public class TestBPOfferService {
           0, HdfsConstants.LAYOUT_VERSION))
       .when(mock).versionRequest();

-    Mockito.doReturn(new DatanodeRegistration("1.2.3.4", 100))
+    Mockito.doReturn(DFSTestUtil.getLocalDatanodeRegistration())
       .when(mock).registerDatanode(Mockito.any(DatanodeRegistration.class));

     Mockito.doAnswer(new HeartbeatAnswer(nnIdx))
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
@@ -35,10 +35,12 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataStorage;
@ -765,6 +767,7 @@ public class NNThroughputBenchmark {
|
||||||
ArrayList<Block> blocks;
|
ArrayList<Block> blocks;
|
||||||
int nrBlocks; // actual number of blocks
|
int nrBlocks; // actual number of blocks
|
||||||
long[] blockReportList;
|
long[] blockReportList;
|
||||||
|
int dnIdx;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Return a a 6 digit integer port.
|
* Return a a 6 digit integer port.
|
||||||
|
@@ -780,11 +783,7 @@ public class NNThroughputBenchmark {
     }

     TinyDatanode(int dnIdx, int blockCapacity) throws IOException {
-      String ipAddr = DNS.getDefaultIP("default");
-      String hostName = DNS.getDefaultHost("default", "default");
-      dnRegistration = new DatanodeRegistration(ipAddr, getNodePort(dnIdx));
-      dnRegistration.setHostName(hostName);
-      dnRegistration.setSoftwareVersion(VersionInfo.getVersion());
+      this.dnIdx = dnIdx;
       this.blocks = new ArrayList<Block>(blockCapacity);
       this.nrBlocks = 0;
     }
@ -800,7 +799,14 @@ public class NNThroughputBenchmark {
|
||||||
void register() throws IOException {
|
void register() throws IOException {
|
||||||
// get versions from the namenode
|
// get versions from the namenode
|
||||||
nsInfo = nameNodeProto.versionRequest();
|
nsInfo = nameNodeProto.versionRequest();
|
||||||
dnRegistration.setStorageInfo(new DataStorage(nsInfo, ""));
|
dnRegistration = new DatanodeRegistration(
|
||||||
|
new DatanodeID(DNS.getDefaultIP("default"),
|
||||||
|
DNS.getDefaultHost("default", "default"),
|
||||||
|
"", getNodePort(dnIdx),
|
||||||
|
DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
|
||||||
|
DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
|
||||||
|
new DataStorage(nsInfo, ""),
|
||||||
|
new ExportedBlockKeys(), VersionInfo.getVersion());
|
||||||
DataNode.setNewStorageID(dnRegistration);
|
DataNode.setNewStorageID(dnRegistration);
|
||||||
// register datanode
|
// register datanode
|
||||||
dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
|
dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
|
||||||
|
@@ -896,12 +902,9 @@ public class NNThroughputBenchmark {
       for(int t = 0; t < blockTargets.length; t++) {
         DatanodeInfo dnInfo = blockTargets[t];
         DatanodeRegistration receivedDNReg;
-        receivedDNReg =
-          new DatanodeRegistration(dnInfo.getIpAddr(), dnInfo.getXferPort());
-        receivedDNReg.setStorageInfo(
-          new DataStorage(nsInfo, dnInfo.getStorageID()));
-        receivedDNReg.setInfoPort(dnInfo.getInfoPort());
-        receivedDNReg.setIpcPort(dnInfo.getIpcPort());
+        receivedDNReg = new DatanodeRegistration(dnInfo,
+          new DataStorage(nsInfo, dnInfo.getStorageID()),
+          new ExportedBlockKeys(), VersionInfo.getVersion());
         ReceivedDeletedBlockInfo[] rdBlocks = {
           new ReceivedDeletedBlockInfo(
           blocks[i], ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK,