HDFS-9482. Replace DatanodeInfo constructors with a builder pattern. Contributed by Brahma Reddy Battula.

Brahma Reddy Battula 2016-11-09 09:58:36 +05:30
parent f5c9f9f1b2
commit 53ff3c9e03
15 changed files with 271 additions and 112 deletions
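In short, this change makes the existing DatanodeInfo constructors protected/private and adds an inner DatanodeInfoBuilder, so every call site now builds instances through the builder. A minimal before/after sketch of the migration at a typical call site; the helper class and variable names below are illustrative only, not part of the patch:

import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;

// Illustrative helper, not part of this patch.
class DatanodeInfoMigrationSketch {
  // Before this commit, callers invoked the public constructor directly:
  //   DatanodeInfo info = new DatanodeInfo(nodeId);
  // The constructors are now protected/private, so callers use the builder:
  static DatanodeInfo fromId(DatanodeID nodeId) {
    return new DatanodeInfoBuilder()
        .setNodeID(nodeId) // copies ip, hostname, uuid and ports from the ID
        .build();
  }
}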

View File

@@ -86,7 +86,7 @@ public static AdminStates fromValue(final String value) {
protected AdminStates adminState;
private long maintenanceExpireTimeInMS;
- public DatanodeInfo(DatanodeInfo from) {
+ protected DatanodeInfo(DatanodeInfo from) {
super(from);
this.capacity = from.getCapacity();
this.dfsUsed = from.getDfsUsed();
@@ -103,7 +103,7 @@ public DatanodeInfo(DatanodeInfo from) {
this.upgradeDomain = from.getUpgradeDomain();
}
- public DatanodeInfo(DatanodeID nodeID) {
+ protected DatanodeInfo(DatanodeID nodeID) {
super(nodeID);
this.capacity = 0L;
this.dfsUsed = 0L;
@@ -118,57 +118,13 @@ public DatanodeInfo(DatanodeID nodeID) {
this.adminState = null;
}
- public DatanodeInfo(DatanodeID nodeID, String location) {
+ protected DatanodeInfo(DatanodeID nodeID, String location) {
this(nodeID);
this.location = location;
}
public DatanodeInfo(DatanodeID nodeID, String location,
final long capacity, final long dfsUsed, final long remaining,
final long blockPoolUsed, final long cacheCapacity, final long cacheUsed,
final long lastUpdate, final long lastUpdateMonotonic,
final int xceiverCount, final AdminStates adminState,
final String upgradeDomain) {
this(nodeID.getIpAddr(), nodeID.getHostName(), nodeID.getDatanodeUuid(),
nodeID.getXferPort(), nodeID.getInfoPort(), nodeID.getInfoSecurePort(),
nodeID.getIpcPort(), capacity, dfsUsed, remaining, blockPoolUsed,
cacheCapacity, cacheUsed, lastUpdate, lastUpdateMonotonic,
xceiverCount, location, adminState, upgradeDomain);
}
/** Constructor */
public DatanodeInfo(final String ipAddr, final String hostName,
final String datanodeUuid, final int xferPort, final int infoPort,
final int infoSecurePort, final int ipcPort,
final long capacity, final long dfsUsed, final long remaining,
final long blockPoolUsed, final long cacheCapacity, final long cacheUsed,
final long lastUpdate, final long lastUpdateMonotonic,
final int xceiverCount, final String networkLocation,
final AdminStates adminState) {
this(ipAddr, hostName, datanodeUuid, xferPort, infoPort, infoSecurePort,
ipcPort, capacity, dfsUsed, remaining, blockPoolUsed, cacheCapacity,
cacheUsed, lastUpdate, lastUpdateMonotonic, xceiverCount,
networkLocation, adminState, null);
}
/** Constructor */
public DatanodeInfo(final String ipAddr, final String hostName,
final String datanodeUuid, final int xferPort, final int infoPort,
final int infoSecurePort, final int ipcPort,
final long capacity, final long dfsUsed, final long remaining,
final long blockPoolUsed, final long cacheCapacity, final long cacheUsed,
final long lastUpdate, final long lastUpdateMonotonic,
final int xceiverCount, final String networkLocation,
final AdminStates adminState,
final String upgradeDomain) {
this(ipAddr, hostName, datanodeUuid, xferPort, infoPort, infoSecurePort,
ipcPort, capacity, dfsUsed, 0L, remaining, blockPoolUsed,
cacheCapacity, cacheUsed, lastUpdate, lastUpdateMonotonic,
xceiverCount, networkLocation, adminState, upgradeDomain);
}
/** Constructor. */
- public DatanodeInfo(final String ipAddr, final String hostName,
+ private DatanodeInfo(final String ipAddr, final String hostName,
final String datanodeUuid, final int xferPort, final int infoPort,
final int infoSecurePort, final int ipcPort, final long capacity,
final long dfsUsed, final long nonDfsUsed, final long remaining,
@@ -660,4 +616,169 @@ public String getSoftwareVersion() {
public void setSoftwareVersion(String softwareVersion) {
this.softwareVersion = softwareVersion;
}
/**
* Building the DataNodeInfo.
*/
public static class DatanodeInfoBuilder {
private String location = NetworkTopology.DEFAULT_RACK;
private long capacity;
private long dfsUsed;
private long remaining;
private long blockPoolUsed;
private long cacheCapacity;
private long cacheUsed;
private long lastUpdate;
private long lastUpdateMonotonic;
private int xceiverCount;
private DatanodeInfo.AdminStates adminState;
private String upgradeDomain;
private String ipAddr;
private String hostName;
private String datanodeUuid;
private int xferPort;
private int infoPort;
private int infoSecurePort;
private int ipcPort;
private long nonDfsUsed = 0L;
public DatanodeInfoBuilder setFrom(DatanodeInfo from) {
this.capacity = from.getCapacity();
this.dfsUsed = from.getDfsUsed();
this.nonDfsUsed = from.getNonDfsUsed();
this.remaining = from.getRemaining();
this.blockPoolUsed = from.getBlockPoolUsed();
this.cacheCapacity = from.getCacheCapacity();
this.cacheUsed = from.getCacheUsed();
this.lastUpdate = from.getLastUpdate();
this.lastUpdateMonotonic = from.getLastUpdateMonotonic();
this.xceiverCount = from.getXceiverCount();
this.location = from.getNetworkLocation();
this.adminState = from.getAdminState();
this.upgradeDomain = from.getUpgradeDomain();
setNodeID(from);
return this;
}
public DatanodeInfoBuilder setNodeID(DatanodeID nodeID) {
this.ipAddr = nodeID.getIpAddr();
this.hostName = nodeID.getHostName();
this.datanodeUuid = nodeID.getDatanodeUuid();
this.xferPort = nodeID.getXferPort();
this.infoPort = nodeID.getInfoPort();
this.infoSecurePort = nodeID.getInfoSecurePort();
this.ipcPort = nodeID.getIpcPort();
return this;
}
public DatanodeInfoBuilder setCapacity(long capacity) {
this.capacity = capacity;
return this;
}
public DatanodeInfoBuilder setDfsUsed(long dfsUsed) {
this.dfsUsed = dfsUsed;
return this;
}
public DatanodeInfoBuilder setRemaining(long remaining) {
this.remaining = remaining;
return this;
}
public DatanodeInfoBuilder setBlockPoolUsed(long blockPoolUsed) {
this.blockPoolUsed = blockPoolUsed;
return this;
}
public DatanodeInfoBuilder setCacheCapacity(long cacheCapacity) {
this.cacheCapacity = cacheCapacity;
return this;
}
public DatanodeInfoBuilder setCacheUsed(long cacheUsed) {
this.cacheUsed = cacheUsed;
return this;
}
public DatanodeInfoBuilder setLastUpdate(long lastUpdate) {
this.lastUpdate = lastUpdate;
return this;
}
public DatanodeInfoBuilder setLastUpdateMonotonic(
long lastUpdateMonotonic) {
this.lastUpdateMonotonic = lastUpdateMonotonic;
return this;
}
public DatanodeInfoBuilder setXceiverCount(int xceiverCount) {
this.xceiverCount = xceiverCount;
return this;
}
public DatanodeInfoBuilder setAdminState(
DatanodeInfo.AdminStates adminState) {
this.adminState = adminState;
return this;
}
public DatanodeInfoBuilder setUpgradeDomain(String upgradeDomain) {
this.upgradeDomain = upgradeDomain;
return this;
}
public DatanodeInfoBuilder setIpAddr(String ipAddr) {
this.ipAddr = ipAddr;
return this;
}
public DatanodeInfoBuilder setHostName(String hostName) {
this.hostName = hostName;
return this;
}
public DatanodeInfoBuilder setDatanodeUuid(String datanodeUuid) {
this.datanodeUuid = datanodeUuid;
return this;
}
public DatanodeInfoBuilder setXferPort(int xferPort) {
this.xferPort = xferPort;
return this;
}
public DatanodeInfoBuilder setInfoPort(int infoPort) {
this.infoPort = infoPort;
return this;
}
public DatanodeInfoBuilder setInfoSecurePort(int infoSecurePort) {
this.infoSecurePort = infoSecurePort;
return this;
}
public DatanodeInfoBuilder setIpcPort(int ipcPort) {
this.ipcPort = ipcPort;
return this;
}
public DatanodeInfoBuilder setNetworkLocation(String networkLocation) {
this.location = networkLocation;
return this;
}
public DatanodeInfoBuilder setNonDfsUsed(long nonDfsUsed) {
this.nonDfsUsed = nonDfsUsed;
return this;
}
public DatanodeInfo build() {
return new DatanodeInfo(ipAddr, hostName, datanodeUuid, xferPort,
infoPort, infoSecurePort, ipcPort, capacity, dfsUsed, nonDfsUsed,
remaining, blockPoolUsed, cacheCapacity, cacheUsed, lastUpdate,
lastUpdateMonotonic, xceiverCount, location, adminState,
upgradeDomain);
}
}
}
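For orientation, a hedged sketch of how the new builder is typically chained to describe a node. The class name, variable names and numeric values below are illustrative only; the setters and defaults come from the builder above (unset numeric fields stay 0 and the network location defaults to NetworkTopology.DEFAULT_RACK):

import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;

// Illustrative usage sketch, not part of this patch.
class DatanodeInfoBuilderUsage {
  static DatanodeInfo describe(DatanodeID id) {
    return new DatanodeInfoBuilder()
        .setNodeID(id)                     // ip, hostname, uuid and ports
        .setCapacity(200L * 1024 * 1024)   // illustrative byte counts
        .setDfsUsed(50L * 1024 * 1024)
        .setRemaining(150L * 1024 * 1024)
        .setXceiverCount(2)
        .setNetworkLocation("/default-rack")
        .setAdminState(AdminStates.NORMAL)
        .build();
  }
}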

View File

@@ -69,6 +69,7 @@
import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
import org.apache.hadoop.hdfs.protocol.DatanodeLocalInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
@@ -551,12 +552,17 @@ static public DatanodeInfo convert(DatanodeInfoProto di) {
if (di == null) {
return null;
}
- DatanodeInfo dinfo = new DatanodeInfo(convert(di.getId()),
-     di.hasLocation() ? di.getLocation() : null, di.getCapacity(),
-     di.getDfsUsed(), di.getRemaining(), di.getBlockPoolUsed(),
-     di.getCacheCapacity(), di.getCacheUsed(), di.getLastUpdate(),
-     di.getLastUpdateMonotonic(), di.getXceiverCount(),
-     convert(di.getAdminState()),
-     di.hasUpgradeDomain() ? di.getUpgradeDomain() : null);
+ DatanodeInfoBuilder dinfo =
+     new DatanodeInfoBuilder().setNodeID(convert(di.getId()))
+         .setNetworkLocation(di.hasLocation() ? di.getLocation() : null)
+         .setCapacity(di.getCapacity()).setDfsUsed(di.getDfsUsed())
+         .setRemaining(di.getRemaining())
+         .setBlockPoolUsed(di.getBlockPoolUsed())
+         .setCacheCapacity(di.getCacheCapacity())
+         .setCacheUsed(di.getCacheUsed()).setLastUpdate(di.getLastUpdate())
+         .setLastUpdateMonotonic(di.getLastUpdateMonotonic())
+         .setXceiverCount(di.getXceiverCount())
+         .setAdminState(convert(di.getAdminState())).setUpgradeDomain(
+             di.hasUpgradeDomain() ? di.getUpgradeDomain() : null);
if (di.hasNonDfsUsed()) {
dinfo.setNonDfsUsed(di.getNonDfsUsed());
@@ -565,7 +571,7 @@ static public DatanodeInfo convert(DatanodeInfoProto di) {
long nonDFSUsed = di.getCapacity() - di.getDfsUsed() - di.getRemaining();
dinfo.setNonDfsUsed(nonDFSUsed < 0 ? 0 : nonDFSUsed);
}
- return dinfo;
+ return dinfo.build();
}
public static StorageType[] convertStorageTypes(

View File

@@ -34,6 +34,7 @@
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.FsPermissionExtension;
@@ -271,27 +272,26 @@ static DatanodeInfo toDatanodeInfo(final Map<?, ?> m)
}
// TODO: Fix storageID
- return new DatanodeInfo(
-     ipAddr,
-     (String)m.get("hostName"),
-     (String)m.get("storageID"),
-     xferPort,
-     ((Number) m.get("infoPort")).intValue(),
-     getInt(m, "infoSecurePort", 0),
-     ((Number) m.get("ipcPort")).intValue(),
-     getLong(m, "capacity", 0l),
-     getLong(m, "dfsUsed", 0l),
-     getLong(m, "remaining", 0l),
-     getLong(m, "blockPoolUsed", 0l),
-     getLong(m, "cacheCapacity", 0l),
-     getLong(m, "cacheUsed", 0l),
-     getLong(m, "lastUpdate", 0l),
-     getLong(m, "lastUpdateMonotonic", 0l),
-     getInt(m, "xceiverCount", 0),
-     getString(m, "networkLocation", ""),
-     DatanodeInfo.AdminStates.valueOf(getString(m, "adminState", "NORMAL")),
-     getString(m, "upgradeDomain", ""));
+ return new DatanodeInfoBuilder().setIpAddr(ipAddr)
+     .setHostName((String) m.get("hostName"))
+     .setDatanodeUuid((String) m.get("storageID")).setXferPort(xferPort)
+     .setInfoPort(((Number) m.get("infoPort")).intValue())
+     .setInfoSecurePort(getInt(m, "infoSecurePort", 0))
+     .setIpcPort(((Number) m.get("ipcPort")).intValue())
+     .setCapacity(getLong(m, "capacity", 0L))
+     .setDfsUsed(getLong(m, "dfsUsed", 0L))
+     .setRemaining(getLong(m, "remaining", 0L))
+     .setBlockPoolUsed(getLong(m, "blockPoolUsed", 0L))
+     .setCacheCapacity(getLong(m, "cacheCapacity", 0L))
+     .setCacheUsed(getLong(m, "cacheUsed", 0L))
+     .setLastUpdate(getLong(m, "lastUpdate", 0L))
+     .setLastUpdateMonotonic(getLong(m, "lastUpdateMonotonic", 0L))
+     .setXceiverCount(getInt(m, "xceiverCount", 0))
+     .setNetworkLocation(getString(m, "networkLocation", "")).setAdminState(
+         DatanodeInfo.AdminStates
+             .valueOf(getString(m, "adminState", "NORMAL")))
+     .setUpgradeDomain(getString(m, "upgradeDomain", ""))
+     .build();
}
/** Convert an Object[] to a DatanodeInfo[]. */

View File

@@ -20,6 +20,7 @@
import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto;
@@ -78,8 +79,9 @@ public NamenodeProtocolServerSideTranslatorPB(NamenodeProtocol impl) {
@Override
public GetBlocksResponseProto getBlocks(RpcController unused,
GetBlocksRequestProto request) throws ServiceException {
- DatanodeInfo dnInfo = new DatanodeInfo(PBHelperClient.convert(request
-     .getDatanode()));
+ DatanodeInfo dnInfo = new DatanodeInfoBuilder()
+     .setNodeID(PBHelperClient.convert(request.getDatanode()))
+     .build();
BlocksWithLocations blocks;
try {
blocks = impl.getBlocks(dnInfo, request.getSize());

View File

@@ -125,6 +125,7 @@
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
import org.apache.hadoop.hdfs.protocol.DatanodeLocalInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsBlocksMetadata;
@@ -2352,7 +2353,8 @@ public void run() {
in = new DataInputStream(unbufIn);
blockSender = new BlockSender(b, 0, b.getNumBytes(),
false, false, true, DataNode.this, null, cachingStrategy);
- DatanodeInfo srcNode = new DatanodeInfo(bpReg);
+ DatanodeInfo srcNode = new DatanodeInfoBuilder().setNodeID(bpReg)
+     .build();
new Sender(out).writeBlock(b, targetStorageTypes[0], accessToken,
clientname, targets, targetStorageTypes, srcNode,

View File

@@ -22,6 +22,7 @@
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
@@ -52,7 +53,8 @@ public void reportTo(DatanodeProtocolClientSideTranslatorPB bpNamenode,
if (bpRegistration == null) {
return;
}
- DatanodeInfo[] dnArr = { new DatanodeInfo(bpRegistration) };
+ DatanodeInfo[] dnArr = {new DatanodeInfoBuilder()
+     .setNodeID(bpRegistration).build()};
String[] uuids = { storageUuid };
StorageType[] types = { storageType };
LocatedBlock[] locatedBlock = { new LocatedBlock(block,

View File

@@ -186,6 +186,7 @@
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -4019,7 +4020,8 @@ DatanodeInfo[] datanodeReport(final DatanodeReportType type
DatanodeInfo[] arr = new DatanodeInfo[results.size()];
for (int i=0; i<arr.length; i++) {
- arr[i] = new DatanodeInfo(results.get(i));
+ arr[i] = new DatanodeInfoBuilder().setFrom(results.get(i))
+     .build();
}
return arr;
} finally {
@@ -4040,7 +4042,8 @@ DatanodeStorageReport[] getDatanodeStorageReport(final DatanodeReportType type
DatanodeStorageReport[] reports = new DatanodeStorageReport[datanodes.size()];
for (int i = 0; i < reports.length; i++) {
final DatanodeDescriptor d = datanodes.get(i);
- reports[i] = new DatanodeStorageReport(new DatanodeInfo(d),
+ reports[i] = new DatanodeStorageReport(
+     new DatanodeInfoBuilder().setFrom(d).build(),
d.getStorageReports());
}
return reports;

View File

@@ -103,6 +103,7 @@
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
@@ -1056,34 +1057,42 @@ public static DatanodeDescriptor getLocalDatanodeDescriptor() {
}
public static DatanodeInfo getLocalDatanodeInfo() {
- return new DatanodeInfo(getLocalDatanodeID());
+ return new DatanodeInfoBuilder().setNodeID(getLocalDatanodeID())
+     .build();
}
public static DatanodeInfo getDatanodeInfo(String ipAddr) {
- return new DatanodeInfo(getDatanodeID(ipAddr));
+ return new DatanodeInfoBuilder().setNodeID(getDatanodeID(ipAddr))
+     .build();
}
public static DatanodeInfo getLocalDatanodeInfo(int port) {
- return new DatanodeInfo(getLocalDatanodeID(port));
+ return new DatanodeInfoBuilder().setNodeID(getLocalDatanodeID(port))
+     .build();
}
public static DatanodeInfo getDatanodeInfo(String ipAddr,
String host, int port) {
- return new DatanodeInfo(new DatanodeID(ipAddr, host,
-     UUID.randomUUID().toString(), port,
-     DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
-     DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
-     DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT));
+ return new DatanodeInfoBuilder().setNodeID(
+     new DatanodeID(ipAddr, host, UUID.randomUUID().toString(), port,
+         DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
+         DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
+         DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT)).build();
}
public static DatanodeInfo getLocalDatanodeInfo(String ipAddr,
String hostname, AdminStates adminState) {
- return new DatanodeInfo(ipAddr, hostname, "",
-     DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT,
-     DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
-     DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
-     DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT,
-     1l, 2l, 3l, 4l, 0l, 0l, 0l, 5, 6, "local", adminState);
+ return new DatanodeInfoBuilder().setIpAddr(ipAddr).setHostName(hostname)
+     .setDatanodeUuid("")
+     .setXferPort(DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT)
+     .setInfoPort(DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT)
+     .setInfoSecurePort(DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT)
+     .setIpcPort(DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT).setCapacity(1L)
+     .setDfsUsed(2L).setRemaining(3L).setBlockPoolUsed(4L)
+     .setCacheCapacity(0L).setCacheUsed(0L).setLastUpdate(0L)
+     .setLastUpdateMonotonic(5).setXceiverCount(6)
+     .setNetworkLocation("local").setAdminState(adminState)
+     .build();
}
public static DatanodeDescriptor getDatanodeDescriptor(String ipAddr,

View File

@@ -18,7 +18,7 @@
package org.apache.hadoop.hdfs;
import org.apache.hadoop.conf.Configuration;
- import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+ import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
@@ -91,7 +91,9 @@ private int getSendBufferSize(Configuration conf) throws IOException {
cluster.waitActive();
LOG.info("MiniDFSCluster started.");
try (Socket socket = DataStreamer.createSocketForPipeline(
- new DatanodeInfo(cluster.dataNodes.get(0).datanode.getDatanodeId()),
+ new DatanodeInfoBuilder()
+     .setNodeID(cluster.dataNodes.get(0).datanode.getDatanodeId())
+     .build(),
1, cluster.getFileSystem().getClient())) {
return socket.getSendBufferSize();
}

View File

@@ -20,6 +20,7 @@
import com.google.common.base.Supplier;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
@@ -43,7 +44,6 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
@@ -161,8 +161,9 @@ public void testArrayOutOfBoundsException() throws Exception {
FSNamesystem ns = cluster.getNamesystem();
ns.writeLock();
try {
- cluster.getNamesystem().getBlockManager().findAndMarkBlockAsCorrupt(
-     blk, new DatanodeInfo(dnR), "TEST", "STORAGE_ID");
+ cluster.getNamesystem().getBlockManager().findAndMarkBlockAsCorrupt(blk,
+     new DatanodeInfoBuilder().setNodeID(dnR).build(), "TEST",
+     "STORAGE_ID");
} finally {
ns.writeUnlock();
}

View File

@@ -48,6 +48,7 @@
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.shortcircuit.DfsClientShmManager.PerDatanodeVisitorInfo;
import org.apache.hadoop.hdfs.shortcircuit.DfsClientShmManager.Visitor;
@@ -333,8 +334,9 @@ public void testShortCircuitReadFromServerWithoutShm() throws Exception {
Assert.assertTrue(Arrays.equals(contents, expected));
final ShortCircuitCache cache =
fs.getClient().getClientContext().getShortCircuitCache();
- final DatanodeInfo datanode =
-     new DatanodeInfo(cluster.getDataNodes().get(0).getDatanodeId());
+ final DatanodeInfo datanode = new DatanodeInfoBuilder()
+     .setNodeID(cluster.getDataNodes().get(0).getDatanodeId())
+     .build();
cache.getDfsClientShmManager().visit(new Visitor() {
@Override
public void visit(HashMap<DatanodeInfo, PerDatanodeVisitorInfo> info)

View File

@@ -37,6 +37,7 @@
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -658,7 +659,8 @@ private void assertDnInfosEqual(DatanodeInfo[] dnInfos1,
@Test
public void testDataNodeInfoPBHelper() {
DatanodeID id = DFSTestUtil.getLocalDatanodeID();
- DatanodeInfo dnInfos0 = new DatanodeInfo(id);
+ DatanodeInfo dnInfos0 = new DatanodeInfoBuilder().setNodeID(id)
+     .build();
dnInfos0.setCapacity(3500L);
dnInfos0.setDfsUsed(1000L);
dnInfos0.setNonDfsUsed(2000L);

View File

@@ -65,6 +65,7 @@
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
@@ -478,8 +479,9 @@ public void testRWRReplicas() throws IOException {
private Collection<RecoveringBlock> initRecoveringBlocks() throws IOException {
Collection<RecoveringBlock> blocks = new ArrayList<RecoveringBlock>(1);
DatanodeInfo mockOtherDN = DFSTestUtil.getLocalDatanodeInfo();
- DatanodeInfo[] locs = new DatanodeInfo[] {
-     new DatanodeInfo(dn.getDNRegistrationForBP(block.getBlockPoolId())),
+ DatanodeInfo[] locs = new DatanodeInfo[] {new DatanodeInfoBuilder()
+     .setNodeID(dn.getDNRegistrationForBP(
+         block.getBlockPoolId())).build(),
mockOtherDN };
RecoveringBlock rBlock = new RecoveringBlock(block, locs, RECOVERY_ID);
blocks.add(rBlock);

View File

@@ -38,6 +38,7 @@
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -400,7 +401,8 @@ public void testInterDNProtocolTimeout() throws Throwable {
final InetSocketAddress addr = NetUtils.getConnectAddress(server);
DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
- DatanodeInfo dInfo = new DatanodeInfo(fakeDnId);
+ DatanodeInfo dInfo = new DatanodeInfoBuilder().setNodeID(fakeDnId)
+     .build();
InterDatanodeProtocol proxy = null;
try {

View File

@@ -51,6 +51,7 @@
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.net.DomainPeer;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector;
@@ -430,8 +431,9 @@ public void visit(HashMap<DatanodeInfo, PerDatanodeVisitorInfo> info)
DomainPeer peer = getDomainPeerToDn(conf);
MutableBoolean usedPeer = new MutableBoolean(false);
ExtendedBlockId blockId = new ExtendedBlockId(123, "xyz");
- final DatanodeInfo datanode =
-     new DatanodeInfo(cluster.getDataNodes().get(0).getDatanodeId());
+ final DatanodeInfo datanode = new DatanodeInfoBuilder()
+     .setNodeID(cluster.getDataNodes().get(0).getDatanodeId())
+     .build();
// Allocating the first shm slot requires using up a peer.
Slot slot = cache.allocShmSlot(datanode, peer, usedPeer,
blockId, "testAllocShm_client");
@@ -571,8 +573,9 @@ public void visit(HashMap<DatanodeInfo, PerDatanodeVisitorInfo> info)
Assert.assertTrue(Arrays.equals(contents, expected));
// Loading this file brought the ShortCircuitReplica into our local
// replica cache.
- final DatanodeInfo datanode =
-     new DatanodeInfo(cluster.getDataNodes().get(0).getDatanodeId());
+ final DatanodeInfo datanode = new DatanodeInfoBuilder()
+     .setNodeID(cluster.getDataNodes().get(0).getDatanodeId())
+     .build();
cache.getDfsClientShmManager().visit(new Visitor() {
@Override
public void visit(HashMap<DatanodeInfo, PerDatanodeVisitorInfo> info)