HDFS-9482. Replace DatanodeInfo constructors with a builder pattern. Contributed by Brahma Reddy Battula.
parent f5c9f9f1b2
commit 53ff3c9e03
@@ -86,7 +86,7 @@ public class DatanodeInfo extends DatanodeID implements Node {
   protected AdminStates adminState;
   private long maintenanceExpireTimeInMS;
 
-  public DatanodeInfo(DatanodeInfo from) {
+  protected DatanodeInfo(DatanodeInfo from) {
     super(from);
     this.capacity = from.getCapacity();
     this.dfsUsed = from.getDfsUsed();
@@ -103,7 +103,7 @@ public class DatanodeInfo extends DatanodeID implements Node {
     this.upgradeDomain = from.getUpgradeDomain();
   }
 
-  public DatanodeInfo(DatanodeID nodeID) {
+  protected DatanodeInfo(DatanodeID nodeID) {
     super(nodeID);
     this.capacity = 0L;
     this.dfsUsed = 0L;
@@ -118,57 +118,13 @@ public class DatanodeInfo extends DatanodeID implements Node {
     this.adminState = null;
   }
 
-  public DatanodeInfo(DatanodeID nodeID, String location) {
+  protected DatanodeInfo(DatanodeID nodeID, String location) {
     this(nodeID);
     this.location = location;
   }
 
-  public DatanodeInfo(DatanodeID nodeID, String location,
-      final long capacity, final long dfsUsed, final long remaining,
-      final long blockPoolUsed, final long cacheCapacity, final long cacheUsed,
-      final long lastUpdate, final long lastUpdateMonotonic,
-      final int xceiverCount, final AdminStates adminState,
-      final String upgradeDomain) {
-    this(nodeID.getIpAddr(), nodeID.getHostName(), nodeID.getDatanodeUuid(),
-        nodeID.getXferPort(), nodeID.getInfoPort(), nodeID.getInfoSecurePort(),
-        nodeID.getIpcPort(), capacity, dfsUsed, remaining, blockPoolUsed,
-        cacheCapacity, cacheUsed, lastUpdate, lastUpdateMonotonic,
-        xceiverCount, location, adminState, upgradeDomain);
-  }
-
-  /** Constructor */
-  public DatanodeInfo(final String ipAddr, final String hostName,
-      final String datanodeUuid, final int xferPort, final int infoPort,
-      final int infoSecurePort, final int ipcPort,
-      final long capacity, final long dfsUsed, final long remaining,
-      final long blockPoolUsed, final long cacheCapacity, final long cacheUsed,
-      final long lastUpdate, final long lastUpdateMonotonic,
-      final int xceiverCount, final String networkLocation,
-      final AdminStates adminState) {
-    this(ipAddr, hostName, datanodeUuid, xferPort, infoPort, infoSecurePort,
-        ipcPort, capacity, dfsUsed, remaining, blockPoolUsed, cacheCapacity,
-        cacheUsed, lastUpdate, lastUpdateMonotonic, xceiverCount,
-        networkLocation, adminState, null);
-  }
-
-  /** Constructor */
-  public DatanodeInfo(final String ipAddr, final String hostName,
-      final String datanodeUuid, final int xferPort, final int infoPort,
-      final int infoSecurePort, final int ipcPort,
-      final long capacity, final long dfsUsed, final long remaining,
-      final long blockPoolUsed, final long cacheCapacity, final long cacheUsed,
-      final long lastUpdate, final long lastUpdateMonotonic,
-      final int xceiverCount, final String networkLocation,
-      final AdminStates adminState,
-      final String upgradeDomain) {
-    this(ipAddr, hostName, datanodeUuid, xferPort, infoPort, infoSecurePort,
-        ipcPort, capacity, dfsUsed, 0L, remaining, blockPoolUsed,
-        cacheCapacity, cacheUsed, lastUpdate, lastUpdateMonotonic,
-        xceiverCount, networkLocation, adminState, upgradeDomain);
-  }
-
   /** Constructor. */
-  public DatanodeInfo(final String ipAddr, final String hostName,
+  private DatanodeInfo(final String ipAddr, final String hostName,
       final String datanodeUuid, final int xferPort, final int infoPort,
       final int infoSecurePort, final int ipcPort, final long capacity,
       final long dfsUsed, final long nonDfsUsed, final long remaining,
@@ -660,4 +616,169 @@ public class DatanodeInfo extends DatanodeID implements Node {
   public void setSoftwareVersion(String softwareVersion) {
     this.softwareVersion = softwareVersion;
   }
+
+  /**
+   * Building the DataNodeInfo.
+   */
+  public static class DatanodeInfoBuilder {
+    private String location = NetworkTopology.DEFAULT_RACK;
+    private long capacity;
+    private long dfsUsed;
+    private long remaining;
+    private long blockPoolUsed;
+    private long cacheCapacity;
+    private long cacheUsed;
+    private long lastUpdate;
+    private long lastUpdateMonotonic;
+    private int xceiverCount;
+    private DatanodeInfo.AdminStates adminState;
+    private String upgradeDomain;
+    private String ipAddr;
+    private String hostName;
+    private String datanodeUuid;
+    private int xferPort;
+    private int infoPort;
+    private int infoSecurePort;
+    private int ipcPort;
+    private long nonDfsUsed = 0L;
+
+    public DatanodeInfoBuilder setFrom(DatanodeInfo from) {
+      this.capacity = from.getCapacity();
+      this.dfsUsed = from.getDfsUsed();
+      this.nonDfsUsed = from.getNonDfsUsed();
+      this.remaining = from.getRemaining();
+      this.blockPoolUsed = from.getBlockPoolUsed();
+      this.cacheCapacity = from.getCacheCapacity();
+      this.cacheUsed = from.getCacheUsed();
+      this.lastUpdate = from.getLastUpdate();
+      this.lastUpdateMonotonic = from.getLastUpdateMonotonic();
+      this.xceiverCount = from.getXceiverCount();
+      this.location = from.getNetworkLocation();
+      this.adminState = from.getAdminState();
+      this.upgradeDomain = from.getUpgradeDomain();
+      setNodeID(from);
+      return this;
+    }
+
+    public DatanodeInfoBuilder setNodeID(DatanodeID nodeID) {
+      this.ipAddr = nodeID.getIpAddr();
+      this.hostName = nodeID.getHostName();
+      this.datanodeUuid = nodeID.getDatanodeUuid();
+      this.xferPort = nodeID.getXferPort();
+      this.infoPort = nodeID.getInfoPort();
+      this.infoSecurePort = nodeID.getInfoSecurePort();
+      this.ipcPort = nodeID.getIpcPort();
+      return this;
+    }
+
+    public DatanodeInfoBuilder setCapacity(long capacity) {
+      this.capacity = capacity;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setDfsUsed(long dfsUsed) {
+      this.dfsUsed = dfsUsed;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setRemaining(long remaining) {
+      this.remaining = remaining;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setBlockPoolUsed(long blockPoolUsed) {
+      this.blockPoolUsed = blockPoolUsed;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setCacheCapacity(long cacheCapacity) {
+      this.cacheCapacity = cacheCapacity;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setCacheUsed(long cacheUsed) {
+      this.cacheUsed = cacheUsed;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setLastUpdate(long lastUpdate) {
+      this.lastUpdate = lastUpdate;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setLastUpdateMonotonic(
+        long lastUpdateMonotonic) {
+      this.lastUpdateMonotonic = lastUpdateMonotonic;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setXceiverCount(int xceiverCount) {
+      this.xceiverCount = xceiverCount;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setAdminState(
+        DatanodeInfo.AdminStates adminState) {
+      this.adminState = adminState;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setUpgradeDomain(String upgradeDomain) {
+      this.upgradeDomain = upgradeDomain;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setIpAddr(String ipAddr) {
+      this.ipAddr = ipAddr;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setHostName(String hostName) {
+      this.hostName = hostName;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setDatanodeUuid(String datanodeUuid) {
+      this.datanodeUuid = datanodeUuid;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setXferPort(int xferPort) {
+      this.xferPort = xferPort;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setInfoPort(int infoPort) {
+      this.infoPort = infoPort;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setInfoSecurePort(int infoSecurePort) {
+      this.infoSecurePort = infoSecurePort;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setIpcPort(int ipcPort) {
+      this.ipcPort = ipcPort;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setNetworkLocation(String networkLocation) {
+      this.location = networkLocation;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setNonDfsUsed(long nonDfsUsed) {
+      this.nonDfsUsed = nonDfsUsed;
+      return this;
+    }
+
+    public DatanodeInfo build() {
+      return new DatanodeInfo(ipAddr, hostName, datanodeUuid, xferPort,
+          infoPort, infoSecurePort, ipcPort, capacity, dfsUsed, nonDfsUsed,
+          remaining, blockPoolUsed, cacheCapacity, cacheUsed, lastUpdate,
+          lastUpdateMonotonic, xceiverCount, location, adminState,
+          upgradeDomain);
+    }
+  }
 }
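As a quick illustration of the new API, here is a minimal, hypothetical sketch of how a caller could construct a DatanodeInfo once the public constructors are gone. The builder methods come straight from the patch above; the DatanodeID values (address, host name, UUID, ports) and the class name DatanodeInfoBuilderSketch are placeholder assumptions, not part of the change.

import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;

public class DatanodeInfoBuilderSketch {
  public static DatanodeInfo example() {
    // Hypothetical DatanodeID; the seven-argument constructor
    // (ipAddr, hostName, datanodeUuid, xferPort, infoPort, infoSecurePort,
    // ipcPort) mirrors the usage shown in the DFSTestUtil hunk below.
    DatanodeID id = new DatanodeID("127.0.0.1", "localhost",
        "example-uuid", 9866, 9864, 9865, 9867);

    // Callers that previously used the removed public constructors now chain
    // setters and finish with build(); unset fields keep their defaults.
    return new DatanodeInfoBuilder()
        .setNodeID(id)
        .setCapacity(1024L)
        .setDfsUsed(512L)
        .setRemaining(512L)
        .setNetworkLocation("/default-rack")
        .build();
  }
}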
@@ -69,6 +69,7 @@ import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
 import org.apache.hadoop.hdfs.protocol.DatanodeLocalInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
@@ -551,13 +552,18 @@ public class PBHelperClient {
     if (di == null) {
       return null;
     }
-    DatanodeInfo dinfo = new DatanodeInfo(convert(di.getId()),
-        di.hasLocation() ? di.getLocation() : null, di.getCapacity(),
-        di.getDfsUsed(), di.getRemaining(), di.getBlockPoolUsed(),
-        di.getCacheCapacity(), di.getCacheUsed(), di.getLastUpdate(),
-        di.getLastUpdateMonotonic(), di.getXceiverCount(),
-        convert(di.getAdminState()),
-        di.hasUpgradeDomain() ? di.getUpgradeDomain() : null);
+    DatanodeInfoBuilder dinfo =
+        new DatanodeInfoBuilder().setNodeID(convert(di.getId()))
+            .setNetworkLocation(di.hasLocation() ? di.getLocation() : null)
+            .setCapacity(di.getCapacity()).setDfsUsed(di.getDfsUsed())
+            .setRemaining(di.getRemaining())
+            .setBlockPoolUsed(di.getBlockPoolUsed())
+            .setCacheCapacity(di.getCacheCapacity())
+            .setCacheUsed(di.getCacheUsed()).setLastUpdate(di.getLastUpdate())
+            .setLastUpdateMonotonic(di.getLastUpdateMonotonic())
+            .setXceiverCount(di.getXceiverCount())
+            .setAdminState(convert(di.getAdminState())).setUpgradeDomain(
+                di.hasUpgradeDomain() ? di.getUpgradeDomain() : null);
     if (di.hasNonDfsUsed()) {
       dinfo.setNonDfsUsed(di.getNonDfsUsed());
     } else {
@@ -565,7 +571,7 @@ public class PBHelperClient {
       long nonDFSUsed = di.getCapacity() - di.getDfsUsed() - di.getRemaining();
       dinfo.setNonDfsUsed(nonDFSUsed < 0 ? 0 : nonDFSUsed);
     }
-    return dinfo;
+    return dinfo.build();
   }
 
   public static StorageType[] convertStorageTypes(
@@ -34,6 +34,7 @@ import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.FsPermissionExtension;
@@ -271,27 +272,26 @@ class JsonUtilClient {
     }
 
     // TODO: Fix storageID
-    return new DatanodeInfo(
-        ipAddr,
-        (String)m.get("hostName"),
-        (String)m.get("storageID"),
-        xferPort,
-        ((Number) m.get("infoPort")).intValue(),
-        getInt(m, "infoSecurePort", 0),
-        ((Number) m.get("ipcPort")).intValue(),
-
-        getLong(m, "capacity", 0l),
-        getLong(m, "dfsUsed", 0l),
-        getLong(m, "remaining", 0l),
-        getLong(m, "blockPoolUsed", 0l),
-        getLong(m, "cacheCapacity", 0l),
-        getLong(m, "cacheUsed", 0l),
-        getLong(m, "lastUpdate", 0l),
-        getLong(m, "lastUpdateMonotonic", 0l),
-        getInt(m, "xceiverCount", 0),
-        getString(m, "networkLocation", ""),
-        DatanodeInfo.AdminStates.valueOf(getString(m, "adminState", "NORMAL")),
-        getString(m, "upgradeDomain", ""));
+    return new DatanodeInfoBuilder().setIpAddr(ipAddr)
+        .setHostName((String) m.get("hostName"))
+        .setDatanodeUuid((String) m.get("storageID")).setXferPort(xferPort)
+        .setInfoPort(((Number) m.get("infoPort")).intValue())
+        .setInfoSecurePort(getInt(m, "infoSecurePort", 0))
+        .setIpcPort(((Number) m.get("ipcPort")).intValue())
+        .setCapacity(getLong(m, "capacity", 0L))
+        .setDfsUsed(getLong(m, "dfsUsed", 0L))
+        .setRemaining(getLong(m, "remaining", 0L))
+        .setBlockPoolUsed(getLong(m, "blockPoolUsed", 0L))
+        .setCacheCapacity(getLong(m, "cacheCapacity", 0L))
+        .setCacheUsed(getLong(m, "cacheUsed", 0L))
+        .setLastUpdate(getLong(m, "lastUpdate", 0L))
+        .setLastUpdateMonotonic(getLong(m, "lastUpdateMonotonic", 0L))
+        .setXceiverCount(getInt(m, "xceiverCount", 0))
+        .setNetworkLocation(getString(m, "networkLocation", "")).setAdminState(
+            DatanodeInfo.AdminStates
+                .valueOf(getString(m, "adminState", "NORMAL")))
+        .setUpgradeDomain(getString(m, "upgradeDomain", ""))
+        .build();
   }
 
   /** Convert an Object[] to a DatanodeInfo[]. */
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.protocolPB;
 import java.io.IOException;
 
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto;
@@ -78,8 +79,9 @@ public class NamenodeProtocolServerSideTranslatorPB implements
   @Override
   public GetBlocksResponseProto getBlocks(RpcController unused,
       GetBlocksRequestProto request) throws ServiceException {
-    DatanodeInfo dnInfo = new DatanodeInfo(PBHelperClient.convert(request
-        .getDatanode()));
+    DatanodeInfo dnInfo = new DatanodeInfoBuilder()
+        .setNodeID(PBHelperClient.convert(request.getDatanode()))
+        .build();
     BlocksWithLocations blocks;
     try {
       blocks = impl.getBlocks(dnInfo, request.getSize());
@@ -125,6 +125,7 @@ import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import org.apache.hadoop.hdfs.protocol.DatanodeLocalInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsBlocksMetadata;
@@ -2352,7 +2353,8 @@ public class DataNode extends ReconfigurableBase
         in = new DataInputStream(unbufIn);
         blockSender = new BlockSender(b, 0, b.getNumBytes(),
             false, false, true, DataNode.this, null, cachingStrategy);
-        DatanodeInfo srcNode = new DatanodeInfo(bpReg);
+        DatanodeInfo srcNode = new DatanodeInfoBuilder().setNodeID(bpReg)
+            .build();
 
         new Sender(out).writeBlock(b, targetStorageTypes[0], accessToken,
             clientname, targets, targetStorageTypes, srcNode,
@@ -22,6 +22,7 @@ import java.io.IOException;
 
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
@@ -52,7 +53,8 @@ public class ReportBadBlockAction implements BPServiceActorAction {
     if (bpRegistration == null) {
       return;
     }
-    DatanodeInfo[] dnArr = { new DatanodeInfo(bpRegistration) };
+    DatanodeInfo[] dnArr = {new DatanodeInfoBuilder()
+        .setNodeID(bpRegistration).build()};
     String[] uuids = { storageUuid };
     StorageType[] types = { storageType };
     LocatedBlock[] locatedBlock = { new LocatedBlock(block,
@@ -186,6 +186,7 @@ import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -4019,7 +4020,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
 
       DatanodeInfo[] arr = new DatanodeInfo[results.size()];
       for (int i=0; i<arr.length; i++) {
-        arr[i] = new DatanodeInfo(results.get(i));
+        arr[i] = new DatanodeInfoBuilder().setFrom(results.get(i))
+            .build();
       }
       return arr;
     } finally {
@@ -4040,7 +4042,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       DatanodeStorageReport[] reports = new DatanodeStorageReport[datanodes.size()];
       for (int i = 0; i < reports.length; i++) {
         final DatanodeDescriptor d = datanodes.get(i);
-        reports[i] = new DatanodeStorageReport(new DatanodeInfo(d),
+        reports[i] = new DatanodeStorageReport(
+            new DatanodeInfoBuilder().setFrom(d).build(),
             d.getStorageReports());
       }
       return reports;
@@ -103,6 +103,7 @@ import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
@@ -1056,34 +1057,42 @@ public class DFSTestUtil {
   }
 
   public static DatanodeInfo getLocalDatanodeInfo() {
-    return new DatanodeInfo(getLocalDatanodeID());
+    return new DatanodeInfoBuilder().setNodeID(getLocalDatanodeID())
+        .build();
   }
 
   public static DatanodeInfo getDatanodeInfo(String ipAddr) {
-    return new DatanodeInfo(getDatanodeID(ipAddr));
+    return new DatanodeInfoBuilder().setNodeID(getDatanodeID(ipAddr))
+        .build();
   }
 
   public static DatanodeInfo getLocalDatanodeInfo(int port) {
-    return new DatanodeInfo(getLocalDatanodeID(port));
+    return new DatanodeInfoBuilder().setNodeID(getLocalDatanodeID(port))
+        .build();
  }
 
   public static DatanodeInfo getDatanodeInfo(String ipAddr,
       String host, int port) {
-    return new DatanodeInfo(new DatanodeID(ipAddr, host,
-        UUID.randomUUID().toString(), port,
-        DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
-        DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
-        DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT));
+    return new DatanodeInfoBuilder().setNodeID(
+        new DatanodeID(ipAddr, host, UUID.randomUUID().toString(), port,
+            DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
+            DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
+            DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT)).build();
   }
 
   public static DatanodeInfo getLocalDatanodeInfo(String ipAddr,
       String hostname, AdminStates adminState) {
-    return new DatanodeInfo(ipAddr, hostname, "",
-        DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT,
-        DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
-        DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
-        DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT,
-        1l, 2l, 3l, 4l, 0l, 0l, 0l, 5, 6, "local", adminState);
+    return new DatanodeInfoBuilder().setIpAddr(ipAddr).setHostName(hostname)
+        .setDatanodeUuid("")
+        .setXferPort(DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT)
+        .setInfoPort(DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT)
+        .setInfoSecurePort(DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT)
+        .setIpcPort(DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT).setCapacity(1L)
+        .setDfsUsed(2L).setRemaining(3L).setBlockPoolUsed(4L)
+        .setCacheCapacity(0L).setCacheUsed(0L).setLastUpdate(0L)
+        .setLastUpdateMonotonic(5).setXceiverCount(6)
+        .setNetworkLocation("local").setAdminState(adminState)
+        .build();
   }
 
   public static DatanodeDescriptor getDatanodeDescriptor(String ipAddr,
@@ -18,7 +18,7 @@
 package org.apache.hadoop.hdfs;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
 
@@ -91,7 +91,9 @@ public class TestDFSClientSocketSize {
     cluster.waitActive();
     LOG.info("MiniDFSCluster started.");
     try (Socket socket = DataStreamer.createSocketForPipeline(
-        new DatanodeInfo(cluster.dataNodes.get(0).datanode.getDatanodeId()),
+        new DatanodeInfoBuilder()
+            .setNodeID(cluster.dataNodes.get(0).datanode.getDatanodeId())
+            .build(),
         1, cluster.getFileSystem().getClient())) {
       return socket.getSendBufferSize();
     }
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs;
 
 import com.google.common.base.Supplier;
 import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
@@ -43,7 +44,6 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
@@ -161,8 +161,9 @@ public class TestFileCorruption {
       FSNamesystem ns = cluster.getNamesystem();
       ns.writeLock();
       try {
-        cluster.getNamesystem().getBlockManager().findAndMarkBlockAsCorrupt(
-            blk, new DatanodeInfo(dnR), "TEST", "STORAGE_ID");
+        cluster.getNamesystem().getBlockManager().findAndMarkBlockAsCorrupt(blk,
+            new DatanodeInfoBuilder().setNodeID(dnR).build(), "TEST",
+            "STORAGE_ID");
       } finally {
         ns.writeUnlock();
       }
@@ -48,6 +48,7 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.shortcircuit.DfsClientShmManager.PerDatanodeVisitorInfo;
 import org.apache.hadoop.hdfs.shortcircuit.DfsClientShmManager.Visitor;
@@ -333,8 +334,9 @@ public class TestBlockReaderFactory {
     Assert.assertTrue(Arrays.equals(contents, expected));
     final ShortCircuitCache cache =
         fs.getClient().getClientContext().getShortCircuitCache();
-    final DatanodeInfo datanode =
-        new DatanodeInfo(cluster.getDataNodes().get(0).getDatanodeId());
+    final DatanodeInfo datanode = new DatanodeInfoBuilder()
+        .setNodeID(cluster.getDataNodes().get(0).getDatanodeId())
+        .build();
     cache.getDfsClientShmManager().visit(new Visitor() {
       @Override
       public void visit(HashMap<DatanodeInfo, PerDatanodeVisitorInfo> info)
@@ -37,6 +37,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -658,7 +659,8 @@ public class TestPBHelper {
   @Test
   public void testDataNodeInfoPBHelper() {
     DatanodeID id = DFSTestUtil.getLocalDatanodeID();
-    DatanodeInfo dnInfos0 = new DatanodeInfo(id);
+    DatanodeInfo dnInfos0 = new DatanodeInfoBuilder().setNodeID(id)
+        .build();
     dnInfos0.setCapacity(3500L);
     dnInfos0.setDfsUsed(1000L);
     dnInfos0.setNonDfsUsed(2000L);
@@ -65,6 +65,7 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
@@ -478,8 +479,9 @@ public class TestBlockRecovery {
   private Collection<RecoveringBlock> initRecoveringBlocks() throws IOException {
     Collection<RecoveringBlock> blocks = new ArrayList<RecoveringBlock>(1);
     DatanodeInfo mockOtherDN = DFSTestUtil.getLocalDatanodeInfo();
-    DatanodeInfo[] locs = new DatanodeInfo[] {
-        new DatanodeInfo(dn.getDNRegistrationForBP(block.getBlockPoolId())),
+    DatanodeInfo[] locs = new DatanodeInfo[] {new DatanodeInfoBuilder()
+        .setNodeID(dn.getDNRegistrationForBP(
+            block.getBlockPoolId())).build(),
         mockOtherDN };
     RecoveringBlock rBlock = new RecoveringBlock(block, locs, RECOVERY_ID);
     blocks.add(rBlock);
@@ -38,6 +38,7 @@ import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -400,7 +401,8 @@ public class TestInterDatanodeProtocol {
 
     final InetSocketAddress addr = NetUtils.getConnectAddress(server);
     DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
-    DatanodeInfo dInfo = new DatanodeInfo(fakeDnId);
+    DatanodeInfo dInfo = new DatanodeInfoBuilder().setNodeID(fakeDnId)
+        .build();
     InterDatanodeProtocol proxy = null;
 
     try {
@@ -51,6 +51,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.net.DomainPeer;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector;
@@ -430,8 +431,9 @@ public class TestShortCircuitCache {
     DomainPeer peer = getDomainPeerToDn(conf);
     MutableBoolean usedPeer = new MutableBoolean(false);
     ExtendedBlockId blockId = new ExtendedBlockId(123, "xyz");
-    final DatanodeInfo datanode =
-        new DatanodeInfo(cluster.getDataNodes().get(0).getDatanodeId());
+    final DatanodeInfo datanode = new DatanodeInfoBuilder()
+        .setNodeID(cluster.getDataNodes().get(0).getDatanodeId())
+        .build();
     // Allocating the first shm slot requires using up a peer.
     Slot slot = cache.allocShmSlot(datanode, peer, usedPeer,
         blockId, "testAllocShm_client");
@@ -571,8 +573,9 @@ public class TestShortCircuitCache {
     Assert.assertTrue(Arrays.equals(contents, expected));
     // Loading this file brought the ShortCircuitReplica into our local
     // replica cache.
-    final DatanodeInfo datanode =
-        new DatanodeInfo(cluster.getDataNodes().get(0).getDatanodeId());
+    final DatanodeInfo datanode = new DatanodeInfoBuilder()
+        .setNodeID(cluster.getDataNodes().get(0).getDatanodeId())
+        .build();
     cache.getDfsClientShmManager().visit(new Visitor() {
       @Override
       public void visit(HashMap<DatanodeInfo, PerDatanodeVisitorInfo> info)