@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
+import static org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol.DNA_ERASURE_CODING_RECOVERY;
 import static org.apache.hadoop.util.Time.monotonicNow;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -162,7 +163,7 @@ public class DatanodeManager {
    * during rolling upgrades.
    * Software version -> Number of datanodes with this version
    */
-  private HashMap<String, Integer> datanodesSoftwareVersions =
+  private final HashMap<String, Integer> datanodesSoftwareVersions =
     new HashMap<>(4, 0.75f);
 
   /**
@@ -352,15 +353,9 @@ public class DatanodeManager {
   }
 
   private boolean isInactive(DatanodeInfo datanode) {
-    if (datanode.isDecommissioned()) {
-      return true;
-    }
-
-    if (avoidStaleDataNodesForRead) {
-      return datanode.isStale(staleInterval);
-    }
-
-    return false;
+    return datanode.isDecommissioned() ||
+        (avoidStaleDataNodesForRead && datanode.isStale(staleInterval));
   }
 
   /** Sort the located blocks by the distance to the target host. */
@@ -479,9 +474,10 @@ public class DatanodeManager {
     if (datanodeUuid == null) {
       return null;
     }
-
-    return datanodeMap.get(datanodeUuid);
+    synchronized (this) {
+      return datanodeMap.get(datanodeUuid);
+    }
   }
 
   /**
    * Get data node by datanode ID.
@@ -490,8 +486,8 @@ public class DatanodeManager {
    * @return DatanodeDescriptor or null if the node is not found.
    * @throws UnregisteredNodeException
    */
-  public DatanodeDescriptor getDatanode(DatanodeID nodeID
-      ) throws UnregisteredNodeException {
+  public DatanodeDescriptor getDatanode(DatanodeID nodeID)
+      throws UnregisteredNodeException {
     final DatanodeDescriptor node = getDatanode(nodeID.getDatanodeUuid());
     if (node == null)
       return null;
@@ -535,15 +531,15 @@ public class DatanodeManager {
 
   /** Prints information about all datanodes. */
   void datanodeDump(final PrintWriter out) {
-    synchronized (datanodeMap) {
-      Map<String,DatanodeDescriptor> sortedDatanodeMap =
-          new TreeMap<>(datanodeMap);
-      out.println("Metasave: Number of datanodes: " + datanodeMap.size());
-      for (DatanodeDescriptor node : sortedDatanodeMap.values()) {
-        out.println(node.dumpDatanode());
-      }
+    final Map<String,DatanodeDescriptor> sortedDatanodeMap;
+    synchronized (this) {
+      sortedDatanodeMap = new TreeMap<>(datanodeMap);
     }
+    out.println("Metasave: Number of datanodes: " + sortedDatanodeMap.size());
+    for (DatanodeDescriptor node : sortedDatanodeMap.values()) {
+      out.println(node.dumpDatanode());
+    }
   }
 
   /**
    * Remove a datanode descriptor.
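Aside, not part of the patch: the datanodeDump() hunk above shows the locking pattern this change applies throughout the class. The map is copied while holding the DatanodeManager monitor, the slow work (printing, iteration) happens outside the lock, and every other accessor now synchronizes on the same object instead of on datanodeMap. A minimal, self-contained sketch of that idiom, using a hypothetical Registry class rather than the real HDFS types:

    import java.io.PrintWriter;
    import java.util.HashMap;
    import java.util.Map;
    import java.util.TreeMap;

    // Hypothetical stand-in for DatanodeManager: every mutation and read
    // takes the owning object's monitor; long operations work on a copy.
    public class Registry {
      private final Map<String, String> entries = new HashMap<>();

      void put(String id, String value) {
        synchronized (this) {          // same monitor as every other accessor
          entries.put(id, value);
        }
      }

      void dump(PrintWriter out) {
        final Map<String, String> snapshot;
        synchronized (this) {          // copy under the lock...
          snapshot = new TreeMap<>(entries);
        }
        out.println("entries: " + snapshot.size());   // ...print outside it
        for (Map.Entry<String, String> e : snapshot.entrySet()) {
          out.println(e.getKey() + " -> " + e.getValue());
        }
        out.flush();
      }

      public static void main(String[] args) {
        Registry r = new Registry();
        r.put("dn-1", "/rack1");
        r.put("dn-2", "/rack2");
        r.dump(new PrintWriter(System.out));
      }
    }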
@@ -567,8 +563,8 @@ public class DatanodeManager {
    * Remove a datanode
    * @throws UnregisteredNodeException
    */
-  public void removeDatanode(final DatanodeID node
-      ) throws UnregisteredNodeException {
+  public void removeDatanode(final DatanodeID node)
+      throws UnregisteredNodeException {
     namesystem.writeLock();
     try {
       final DatanodeDescriptor descriptor = getDatanode(node);
@@ -585,7 +581,6 @@ public class DatanodeManager {
 
   /** Remove a dead datanode. */
   void removeDeadDatanode(final DatanodeID nodeID) {
-    synchronized(datanodeMap) {
     DatanodeDescriptor d;
     try {
       d = getDatanode(nodeID);
@@ -598,7 +593,6 @@ public class DatanodeManager {
       removeDatanode(d);
     }
   }
-  }
 
   /** Is the datanode dead? */
   boolean isDatanodeDead(DatanodeDescriptor node) {
@@ -611,14 +605,13 @@ public class DatanodeManager {
     // To keep host2DatanodeMap consistent with datanodeMap,
     // remove from host2DatanodeMap the datanodeDescriptor removed
     // from datanodeMap before adding node to host2DatanodeMap.
-    synchronized(datanodeMap) {
+    synchronized(this) {
       host2DatanodeMap.remove(datanodeMap.put(node.getDatanodeUuid(), node));
     }
 
     networktopology.add(node); // may throw InvalidTopologyException
     host2DatanodeMap.add(node);
     checkIfClusterIsNowMultiRack(node);
-    blockManager.getBlockReportLeaseManager().register(node);
 
     if (LOG.isDebugEnabled()) {
       LOG.debug(getClass().getSimpleName() + ".addDatanode: "
@@ -629,11 +622,9 @@ public class DatanodeManager {
   /** Physically remove node from datanodeMap. */
   private void wipeDatanode(final DatanodeID node) {
     final String key = node.getDatanodeUuid();
-    synchronized (datanodeMap) {
+    synchronized (this) {
       host2DatanodeMap.remove(datanodeMap.remove(key));
     }
-    // Also remove all block invalidation tasks under this node
-    blockManager.removeFromInvalidates(new DatanodeInfo(node));
     if (LOG.isDebugEnabled()) {
       LOG.debug(getClass().getSimpleName() + ".wipeDatanode("
           + node + "): storage " + key
@@ -645,7 +636,7 @@ public class DatanodeManager {
     if (version == null) {
       return;
     }
-    synchronized(datanodeMap) {
+    synchronized(this) {
       Integer count = this.datanodesSoftwareVersions.get(version);
       count = count == null ? 1 : count + 1;
       this.datanodesSoftwareVersions.put(version, count);
@@ -656,7 +647,7 @@ public class DatanodeManager {
     if (version == null) {
       return;
     }
-    synchronized(datanodeMap) {
+    synchronized(this) {
       Integer count = this.datanodesSoftwareVersions.get(version);
       if(count != null) {
         if(count > 1) {
@@ -674,24 +665,22 @@ public class DatanodeManager {
   }
 
   private void countSoftwareVersions() {
-    synchronized(datanodeMap) {
-      HashMap<String, Integer> versionCount = new HashMap<>();
+    synchronized(this) {
+      datanodesSoftwareVersions.clear();
       for(DatanodeDescriptor dn: datanodeMap.values()) {
         // Check isAlive too because right after removeDatanode(),
         // isDatanodeDead() is still true
-        if(shouldCountVersion(dn))
-        {
-          Integer num = versionCount.get(dn.getSoftwareVersion());
+        if (shouldCountVersion(dn)) {
+          Integer num = datanodesSoftwareVersions.get(dn.getSoftwareVersion());
           num = num == null ? 1 : num+1;
-          versionCount.put(dn.getSoftwareVersion(), num);
+          datanodesSoftwareVersions.put(dn.getSoftwareVersion(), num);
         }
       }
-      this.datanodesSoftwareVersions = versionCount;
     }
   }
 
   public HashMap<String, Integer> getDatanodesSoftwareVersions() {
-    synchronized(datanodeMap) {
+    synchronized(this) {
       return new HashMap<> (this.datanodesSoftwareVersions);
     }
   }
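The countSoftwareVersions() hunk above is what allows datanodesSoftwareVersions to become final in the earlier hunk: instead of building a local map and swapping the field reference, the new code clears and repopulates the same map while holding the DatanodeManager lock, and getDatanodesSoftwareVersions() copies it under that same lock. A self-contained sketch of that clear-and-repopulate pattern, with made-up names (VersionTracker) rather than the real class:

    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    // Illustrative only: a final map recounted in place under the object's
    // monitor, mirroring the datanodesSoftwareVersions change.
    public class VersionTracker {
      private final Map<String, Integer> versionCounts = new HashMap<>();
      private final List<String> liveVersions;   // stand-in for the datanodeMap scan

      VersionTracker(List<String> liveVersions) {
        this.liveVersions = liveVersions;
      }

      void recount() {
        synchronized (this) {
          versionCounts.clear();                 // reuse the final field
          for (String v : liveVersions) {
            Integer n = versionCounts.get(v);
            versionCounts.put(v, n == null ? 1 : n + 1);
          }
        }
      }

      Map<String, Integer> snapshot() {
        synchronized (this) {                    // same lock as recount()
          return new HashMap<>(versionCounts);
        }
      }

      public static void main(String[] args) {
        VersionTracker t =
            new VersionTracker(Arrays.asList("2.7.1", "2.7.1", "2.8.0"));
        t.recount();
        System.out.println(t.snapshot());        // {2.7.1=2, 2.8.0=1}
      }
    }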
@@ -747,13 +736,11 @@ public class DatanodeManager {
   /**
    * Resolve network locations for specified hosts
    *
-   * @param names
    * @return Network locations if available, Else returns null
    */
   public List<String> resolveNetworkLocation(List<String> names) {
     // resolve its network location
-    List<String> rName = dnsToSwitchMapping.resolve(names);
-    return rName;
+    return dnsToSwitchMapping.resolve(names);
   }
 
   /**
@@ -807,10 +794,9 @@ public class DatanodeManager {
    * This is used to not to display a decommissioned datanode to the operators.
    * @param nodeList , array list of live or dead nodes.
    */
-  private void removeDecomNodeFromList(
+  private static void removeDecomNodeFromList(
       final List<DatanodeDescriptor> nodeList) {
-    Iterator<DatanodeDescriptor> it=null;
-    for (it = nodeList.iterator(); it.hasNext();) {
+    for (Iterator<DatanodeDescriptor> it = nodeList.iterator(); it.hasNext();) {
       DatanodeDescriptor node = it.next();
       if (node.isDecommissioned()) {
         it.remove();
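The removeDecomNodeFromList() hunk above keeps the usual Java idiom for deleting while iterating, just with the Iterator declared in the for header: removal has to go through Iterator.remove(), otherwise the loop would throw ConcurrentModificationException. A small self-contained example of that idiom (the isRetired predicate is invented for illustration):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Iterator;
    import java.util.List;

    public class FilterInPlace {
      // Hypothetical predicate standing in for node.isDecommissioned().
      static boolean isRetired(String name) {
        return name.startsWith("retired-");
      }

      public static void main(String[] args) {
        List<String> nodes =
            new ArrayList<>(Arrays.asList("dn-1", "retired-2", "dn-3"));
        // Declare the iterator in the for header and remove through it,
        // matching the pattern used by removeDecomNodeFromList.
        for (Iterator<String> it = nodes.iterator(); it.hasNext();) {
          if (isRetired(it.next())) {
            it.remove();
          }
        }
        System.out.println(nodes);   // [dn-1, dn-3]
      }
    }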
@@ -968,6 +954,7 @@ public class DatanodeManager {
 
       // register new datanode
       addDatanode(nodeDescr);
+      blockManager.getBlockReportLeaseManager().register(nodeDescr);
       // also treat the registration message as a heartbeat
       // no need to update its timestamp
       // because its is done when the descriptor is created
@@ -1030,7 +1017,11 @@ public class DatanodeManager {
    * 4. Removed from exclude --> stop decommission.
    */
   private void refreshDatanodes() {
-    for(DatanodeDescriptor node : datanodeMap.values()) {
+    final Map<String, DatanodeDescriptor> copy;
+    synchronized (this) {
+      copy = new HashMap<>(datanodeMap);
+    }
+    for (DatanodeDescriptor node : copy.values()) {
       // Check if not include.
       if (!hostFileManager.isIncluded(node)) {
         node.setDisallowed(true); // case 2.
@@ -1047,7 +1038,7 @@ public class DatanodeManager {
   /** @return the number of live datanodes. */
   public int getNumLiveDataNodes() {
     int numLive = 0;
-    synchronized (datanodeMap) {
+    synchronized (this) {
       for(DatanodeDescriptor dn : datanodeMap.values()) {
         if (!isDatanodeDead(dn) ) {
           numLive++;
@@ -1252,7 +1243,7 @@ public class DatanodeManager {
     final HostFileManager.HostSet includedNodes = hostFileManager.getIncludes();
     final HostFileManager.HostSet excludedNodes = hostFileManager.getExcludes();
 
-    synchronized(datanodeMap) {
+    synchronized(this) {
       nodes = new ArrayList<>(datanodeMap.size());
       for (DatanodeDescriptor dn : datanodeMap.values()) {
         final boolean isDead = isDatanodeDead(dn);
@@ -1327,52 +1318,18 @@ public class DatanodeManager {
     node.setLastUpdateMonotonic(0);
   }
 
-  /** Handle heartbeat from datanodes. */
-  public DatanodeCommand[] handleHeartbeat(DatanodeRegistration nodeReg,
-      StorageReport[] reports, final String blockPoolId,
-      long cacheCapacity, long cacheUsed, int xceiverCount,
-      int maxTransfers, int failedVolumes,
-      VolumeFailureSummary volumeFailureSummary) throws IOException {
-    synchronized (heartbeatManager) {
-      synchronized (datanodeMap) {
-        DatanodeDescriptor nodeinfo;
-        try {
-          nodeinfo = getDatanode(nodeReg);
-        } catch(UnregisteredNodeException e) {
-          return new DatanodeCommand[]{RegisterCommand.REGISTER};
-        }
-
-        // Check if this datanode should actually be shutdown instead.
-        if (nodeinfo != null && nodeinfo.isDisallowed()) {
-          setDatanodeDead(nodeinfo);
-          throw new DisallowedDatanodeException(nodeinfo);
-        }
-
-        if (nodeinfo == null || !nodeinfo.isAlive()) {
-          return new DatanodeCommand[]{RegisterCommand.REGISTER};
-        }
-
-        heartbeatManager.updateHeartbeat(nodeinfo, reports,
-            cacheCapacity, cacheUsed,
-            xceiverCount, failedVolumes,
-            volumeFailureSummary);
-
-        // If we are in safemode, do not send back any recovery / replication
-        // requests. Don't even drain the existing queue of work.
-        if(namesystem.isInSafeMode()) {
-          return new DatanodeCommand[0];
-        }
-
-        //check lease recovery
-        BlockInfo[] blocks = nodeinfo.getLeaseRecoveryCommand(Integer.MAX_VALUE);
-        if (blocks != null) {
-          BlockRecoveryCommand brCommand = new BlockRecoveryCommand(
-              blocks.length);
+  private BlockRecoveryCommand getBlockRecoveryCommand(String blockPoolId,
+      DatanodeDescriptor nodeinfo) {
+    BlockInfo[] blocks = nodeinfo.getLeaseRecoveryCommand(Integer.MAX_VALUE);
+    if (blocks == null) {
+      return null;
+    }
+    BlockRecoveryCommand brCommand = new BlockRecoveryCommand(blocks.length);
     for (BlockInfo b : blocks) {
       BlockUnderConstructionFeature uc = b.getUnderConstructionFeature();
       assert uc != null;
       final DatanodeStorageInfo[] storages = uc.getExpectedStorageLocations();
-      // Skip stale nodes during recovery - not heart beated for some time (30s by default).
+      // Skip stale nodes during recovery
       final List<DatanodeStorageInfo> recoveryLocations =
           new ArrayList<>(storages.length);
       for (DatanodeStorageInfo storage : storages) {
@@ -1388,72 +1345,50 @@ public class DatanodeManager {
       ExtendedBlock primaryBlock = (copyOnTruncateRecovery) ?
           new ExtendedBlock(blockPoolId, uc.getTruncateBlock()) :
           new ExtendedBlock(blockPoolId, b);
-      // If we only get 1 replica after eliminating stale nodes, then choose all
+      // If we only get 1 replica after eliminating stale nodes, choose all
       // replicas for recovery and let the primary data node handle failures.
       DatanodeInfo[] recoveryInfos;
       if (recoveryLocations.size() > 1) {
         if (recoveryLocations.size() != storages.length) {
-          LOG.info("Skipped stale nodes for recovery : " +
-              (storages.length - recoveryLocations.size()));
+          LOG.info("Skipped stale nodes for recovery : "
+              + (storages.length - recoveryLocations.size()));
         }
-        recoveryInfos =
-            DatanodeStorageInfo.toDatanodeInfos(recoveryLocations);
+        recoveryInfos = DatanodeStorageInfo.toDatanodeInfos(recoveryLocations);
       } else {
-        // If too many replicas are stale, then choose all replicas to participate
-        // in block recovery.
+        // If too many replicas are stale, then choose all replicas to
+        // participate in block recovery.
         recoveryInfos = DatanodeStorageInfo.toDatanodeInfos(storages);
       }
       RecoveringBlock rBlock;
-      if(truncateRecovery) {
-        Block recoveryBlock = (copyOnTruncateRecovery) ? b :
-            uc.getTruncateBlock();
-        rBlock = new RecoveringBlock(primaryBlock, recoveryInfos,
-            recoveryBlock);
+      if (truncateRecovery) {
+        Block recoveryBlock = (copyOnTruncateRecovery) ? b : uc.getTruncateBlock();
+        rBlock = new RecoveringBlock(primaryBlock, recoveryInfos, recoveryBlock);
       } else {
         rBlock = new RecoveringBlock(primaryBlock, recoveryInfos,
             uc.getBlockRecoveryId());
       }
       brCommand.add(rBlock);
     }
-    return new DatanodeCommand[] { brCommand };
+    return brCommand;
   }
 
-        final List<DatanodeCommand> cmds = new ArrayList<>();
-        //check pending replication
-        List<BlockTargetPair> pendingList = nodeinfo.getReplicationCommand(
-            maxTransfers);
-        if (pendingList != null) {
-          cmds.add(new BlockCommand(DatanodeProtocol.DNA_TRANSFER, blockPoolId,
-              pendingList));
-        }
-        // checking pending erasure coding tasks
-        List<BlockECRecoveryInfo> pendingECList =
-            nodeinfo.getErasureCodeCommand(maxTransfers);
-        if (pendingECList != null) {
-          cmds.add(new BlockECRecoveryCommand(DatanodeProtocol.DNA_ERASURE_CODING_RECOVERY,
-              pendingECList));
-        }
-        //check block invalidation
-        Block[] blks = nodeinfo.getInvalidateBlocks(blockInvalidateLimit);
-        if (blks != null) {
-          cmds.add(new BlockCommand(DatanodeProtocol.DNA_INVALIDATE,
-              blockPoolId, blks));
-        }
+  private void addCacheCommands(String blockPoolId, DatanodeDescriptor nodeinfo,
+      List<DatanodeCommand> cmds) {
     boolean sendingCachingCommands = false;
-    long nowMs = monotonicNow();
+    final long nowMs = monotonicNow();
     if (shouldSendCachingCommands &&
         ((nowMs - nodeinfo.getLastCachingDirectiveSentTimeMs()) >=
             timeBetweenResendingCachingDirectivesMs)) {
-      DatanodeCommand pendingCacheCommand =
-          getCacheCommand(nodeinfo.getPendingCached(), nodeinfo,
-              DatanodeProtocol.DNA_CACHE, blockPoolId);
+      DatanodeCommand pendingCacheCommand = getCacheCommand(
+          nodeinfo.getPendingCached(), DatanodeProtocol.DNA_CACHE,
+          blockPoolId);
       if (pendingCacheCommand != null) {
         cmds.add(pendingCacheCommand);
         sendingCachingCommands = true;
       }
-      DatanodeCommand pendingUncacheCommand =
-          getCacheCommand(nodeinfo.getPendingUncached(), nodeinfo,
-              DatanodeProtocol.DNA_UNCACHE, blockPoolId);
+      DatanodeCommand pendingUncacheCommand = getCacheCommand(
          nodeinfo.getPendingUncached(), DatanodeProtocol.DNA_UNCACHE,
+          blockPoolId);
       if (pendingUncacheCommand != null) {
         cmds.add(pendingUncacheCommand);
         sendingCachingCommands = true;
@@ -1462,7 +1397,70 @@ public class DatanodeManager {
         nodeinfo.setLastCachingDirectiveSentTimeMs(nowMs);
       }
     }
+  }
+
+  /** Handle heartbeat from datanodes. */
+  public DatanodeCommand[] handleHeartbeat(DatanodeRegistration nodeReg,
+      StorageReport[] reports, final String blockPoolId,
+      long cacheCapacity, long cacheUsed, int xceiverCount,
+      int maxTransfers, int failedVolumes,
+      VolumeFailureSummary volumeFailureSummary) throws IOException {
+    final DatanodeDescriptor nodeinfo;
+    try {
+      nodeinfo = getDatanode(nodeReg);
+    } catch (UnregisteredNodeException e) {
+      return new DatanodeCommand[]{RegisterCommand.REGISTER};
+    }
+
+    // Check if this datanode should actually be shutdown instead.
+    if (nodeinfo != null && nodeinfo.isDisallowed()) {
+      setDatanodeDead(nodeinfo);
+      throw new DisallowedDatanodeException(nodeinfo);
+    }
+
+    if (nodeinfo == null || !nodeinfo.isAlive()) {
+      return new DatanodeCommand[]{RegisterCommand.REGISTER};
+    }
+    heartbeatManager.updateHeartbeat(nodeinfo, reports, cacheCapacity,
+        cacheUsed, xceiverCount, failedVolumes, volumeFailureSummary);
+
+    // If we are in safemode, do not send back any recovery / replication
+    // requests. Don't even drain the existing queue of work.
+    if (namesystem.isInSafeMode()) {
+      return new DatanodeCommand[0];
+    }
+
+    // block recovery command
+    final BlockRecoveryCommand brCommand = getBlockRecoveryCommand(blockPoolId,
+        nodeinfo);
+    if (brCommand != null) {
+      return new DatanodeCommand[]{brCommand};
+    }
+
+    final List<DatanodeCommand> cmds = new ArrayList<>();
+    // check pending replication
+    List<BlockTargetPair> pendingList = nodeinfo.getReplicationCommand(
+        maxTransfers);
+    if (pendingList != null) {
+      cmds.add(new BlockCommand(DatanodeProtocol.DNA_TRANSFER, blockPoolId,
+          pendingList));
+    }
+    // check pending erasure coding tasks
+    List<BlockECRecoveryInfo> pendingECList = nodeinfo.getErasureCodeCommand(
+        maxTransfers);
+    if (pendingECList != null) {
+      cmds.add(new BlockECRecoveryCommand(DNA_ERASURE_CODING_RECOVERY,
+          pendingECList));
+    }
+    // check block invalidation
+    Block[] blks = nodeinfo.getInvalidateBlocks(blockInvalidateLimit);
+    if (blks != null) {
+      cmds.add(new BlockCommand(DatanodeProtocol.DNA_INVALIDATE, blockPoolId,
+          blks));
+    }
+    // cache commands
+    addCacheCommands(blockPoolId, nodeinfo, cmds);
 
+    // key update command
     blockManager.addKeyUpdateCommand(cmds, nodeinfo);
 
     // check for balancer bandwidth update
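Taken together with the two hunks before it, the hunk above turns handleHeartbeat() into a short dispatcher: ask the DataNode to re-register if it is unknown or dead, return nothing while in safe mode, return a lone block-recovery command when lease recovery is pending, and otherwise collect replication, erasure-coding, invalidation, caching and key-update commands into one array. A compressed, self-contained sketch of that control flow, using simplified placeholder types rather than the HDFS classes:

    import java.util.ArrayList;
    import java.util.List;

    // Simplified placeholders; only the dispatch shape mirrors handleHeartbeat.
    public class HeartbeatDispatch {
      interface Command {}
      static final Command REGISTER = new Command() {};

      static Command[] handle(Node node, boolean inSafeMode) {
        if (node == null || !node.alive) {
          return new Command[]{REGISTER};        // ask the DN to re-register
        }
        if (inSafeMode) {
          return new Command[0];                 // no work while in safe mode
        }
        Command recovery = node.pendingRecovery; // a recovery command goes alone
        if (recovery != null) {
          return new Command[]{recovery};
        }
        List<Command> cmds = new ArrayList<>();  // otherwise accumulate work
        cmds.addAll(node.pendingTransfers);
        cmds.addAll(node.pendingInvalidates);
        return cmds.toArray(new Command[cmds.size()]);
      }

      static class Node {
        boolean alive = true;
        Command pendingRecovery;
        List<Command> pendingTransfers = new ArrayList<>();
        List<Command> pendingInvalidates = new ArrayList<>();
      }

      public static void main(String[] args) {
        System.out.println(handle(new Node(), false).length);  // 0: nothing queued
      }
    }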
@@ -1475,8 +1473,6 @@ public class DatanodeManager {
     if (!cmds.isEmpty()) {
       return cmds.toArray(new DatanodeCommand[cmds.size()]);
     }
-      }
-    }
 
     return new DatanodeCommand[0];
   }
@@ -1486,14 +1482,13 @@ public class DatanodeManager {
    *
    * @param list       The {@link CachedBlocksList}.  This function
    *                   clears the list.
-   * @param datanode   The datanode.
    * @param action     The action to perform in the command.
    * @param poolId     The block pool id.
    * @return           A DatanodeCommand to be sent back to the DN, or null if
    *                   there is nothing to be done.
    */
-  private DatanodeCommand getCacheCommand(CachedBlocksList list,
-      DatanodeDescriptor datanode, int action, String poolId) {
+  private DatanodeCommand getCacheCommand(CachedBlocksList list, int action,
+      String poolId) {
     int length = list.size();
     if (length == 0) {
       return null;
@@ -1501,9 +1496,7 @@ public class DatanodeManager {
     // Read the existing cache commands.
     long[] blockIds = new long[length];
     int i = 0;
-    for (Iterator<CachedBlock> iter = list.iterator();
-            iter.hasNext(); ) {
-      CachedBlock cachedBlock = iter.next();
+    for (CachedBlock cachedBlock : list) {
       blockIds[i++] = cachedBlock.getBlockId();
     }
     return new BlockIdCommand(action, poolId, blockIds);
@@ -1524,7 +1517,7 @@ public class DatanodeManager {
    * @throws IOException
    */
   public void setBalancerBandwidth(long bandwidth) throws IOException {
-    synchronized(datanodeMap) {
+    synchronized(this) {
       for (DatanodeDescriptor nodeInfo : datanodeMap.values()) {
         nodeInfo.setBalancerBandwidth(bandwidth);
       }
@@ -1533,7 +1526,7 @@ public class DatanodeManager {
 
   public void markAllDatanodesStale() {
     LOG.info("Marking all datandoes as stale");
-    synchronized (datanodeMap) {
+    synchronized (this) {
       for (DatanodeDescriptor dn : datanodeMap.values()) {
         for(DatanodeStorageInfo storage : dn.getStorageInfos()) {
           storage.markStaleAfterFailover();
@@ -1548,7 +1541,7 @@ public class DatanodeManager {
    * recoveries, and replication requests.
    */
   public void clearPendingQueues() {
-    synchronized (datanodeMap) {
+    synchronized (this) {
       for (DatanodeDescriptor dn : datanodeMap.values()) {
         dn.clearBlockQueues();
       }
@@ -1560,7 +1553,7 @@ public class DatanodeManager {
    * know about.
    */
   public void resetLastCachingDirectiveSentTime() {
-    synchronized (datanodeMap) {
+    synchronized (this) {
       for (DatanodeDescriptor dn : datanodeMap.values()) {
         dn.setLastCachingDirectiveSentTimeMs(0L);
       }
@@ -1573,11 +1566,13 @@ public class DatanodeManager {
   }
 
   public void clearPendingCachingCommands() {
+    synchronized (this) {
       for (DatanodeDescriptor dn : datanodeMap.values()) {
         dn.getPendingCached().clear();
         dn.getPendingUncached().clear();
       }
+    }
   }
 
   public void setShouldSendCachingCommands(boolean shouldSendCachingCommands) {
     this.shouldSendCachingCommands = shouldSendCachingCommands;