HDFS-5306. Datanode https port is not available at the namenode. Contributed by Suresh Srinivas.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1529562 13f79535-47bb-0310-9956-ffa450edef68
parent 52b0ce3565
commit 8e08046661
@@ -397,6 +397,9 @@ Release 2.1.2 - UNRELEASED
     HDFS-5300. FSNameSystem#deleteSnapshot() should not check owner in case of
     permissions disabled. (Vinay via jing9)
 
+    HDFS-5306. Datanode https port is not available at the namenode. (Suresh
+    Srinivas via brandonli)
+
 Release 2.1.1-beta - 2013-09-23
 
   INCOMPATIBLE CHANGES
@@ -43,6 +43,7 @@ public class DatanodeID implements Comparable<DatanodeID> {
   private String storageID;     // unique per cluster storageID
   private int xferPort;         // data streaming port
   private int infoPort;         // info server port
+  private int infoSecurePort;   // info server port
   private int ipcPort;          // IPC server port
 
   public DatanodeID(DatanodeID from) {
@@ -51,6 +52,7 @@ public class DatanodeID implements Comparable<DatanodeID> {
         from.getStorageID(),
         from.getXferPort(),
         from.getInfoPort(),
+        from.getInfoSecurePort(),
         from.getIpcPort());
     this.peerHostName = from.getPeerHostName();
   }
@@ -65,12 +67,13 @@ public class DatanodeID implements Comparable<DatanodeID> {
    * @param ipcPort ipc server port
    */
   public DatanodeID(String ipAddr, String hostName, String storageID,
-      int xferPort, int infoPort, int ipcPort) {
+      int xferPort, int infoPort, int infoSecurePort, int ipcPort) {
     this.ipAddr = ipAddr;
     this.hostName = hostName;
     this.storageID = storageID;
     this.xferPort = xferPort;
     this.infoPort = infoPort;
+    this.infoSecurePort = infoSecurePort;
     this.ipcPort = ipcPort;
   }
 
@@ -128,6 +131,13 @@ public class DatanodeID implements Comparable<DatanodeID> {
     return ipAddr + ":" + infoPort;
   }
 
+  /**
+   * @return IP:infoPort string
+   */
+  public String getInfoSecureAddr() {
+    return ipAddr + ":" + infoSecurePort;
+  }
+
   /**
    * @return hostname:xferPort
    */
@@ -179,6 +189,13 @@ public class DatanodeID implements Comparable<DatanodeID> {
     return infoPort;
   }
 
+  /**
+   * @return infoSecurePort (the port at which the HTTPS server bound to)
+   */
+  public int getInfoSecurePort() {
+    return infoSecurePort;
+  }
+
   /**
    * @return ipcPort (the port at which the IPC server bound to)
    */
@@ -218,13 +235,14 @@ public class DatanodeID implements Comparable<DatanodeID> {
     peerHostName = nodeReg.getPeerHostName();
     xferPort = nodeReg.getXferPort();
     infoPort = nodeReg.getInfoPort();
+    infoSecurePort = nodeReg.getInfoSecurePort();
     ipcPort = nodeReg.getIpcPort();
   }
 
   /**
    * Compare based on data transfer address.
    *
-   * @param that
+   * @param that datanode to compare with
    * @return as specified by Comparable
    */
   @Override
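
Note (not part of the patch): a minimal sketch of how the widened DatanodeID constructor and the new accessors added above fit together, assuming the seven-argument signature shown in this diff; the host name and port numbers are illustrative only.

    import org.apache.hadoop.hdfs.protocol.DatanodeID;

    public class DatanodeIDSketch {
      public static void main(String[] args) {
        // ipAddr, hostName, storageID, xferPort, infoPort, infoSecurePort, ipcPort
        DatanodeID id = new DatanodeID("10.0.0.1", "dn1.example.com", "storage-1",
            50010, 50075, 50475, 50020);

        System.out.println(id.getInfoSecureAddr());  // 10.0.0.1:50475 (HTTPS endpoint)
        System.out.println(id.getInfoSecurePort());  // 50475
      }
    }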
@@ -17,10 +17,6 @@
  */
 package org.apache.hadoop.hdfs.protocol;
 
-import static org.apache.hadoop.hdfs.DFSUtil.percent2String;
-
-import java.util.Date;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -32,6 +28,10 @@ import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
 
+import java.util.Date;
+
+import static org.apache.hadoop.hdfs.DFSUtil.percent2String;
+
 /**
  * This class extends the primary identifier of a Datanode with ephemeral
  * state, eg usage information, current administrative state, and the
@@ -108,18 +108,21 @@ public class DatanodeInfo extends DatanodeID implements Node {
       final long capacity, final long dfsUsed, final long remaining,
       final long blockPoolUsed, final long lastUpdate, final int xceiverCount,
       final AdminStates adminState) {
-    this(nodeID.getIpAddr(), nodeID.getHostName(), nodeID.getStorageID(), nodeID.getXferPort(),
-        nodeID.getInfoPort(), nodeID.getIpcPort(), capacity, dfsUsed, remaining,
-        blockPoolUsed, lastUpdate, xceiverCount, location, adminState);
+    this(nodeID.getIpAddr(), nodeID.getHostName(), nodeID.getStorageID(),
+        nodeID.getXferPort(), nodeID.getInfoPort(), nodeID.getInfoSecurePort(),
+        nodeID.getIpcPort(), capacity, dfsUsed, remaining, blockPoolUsed,
+        lastUpdate, xceiverCount, location, adminState);
   }
 
   /** Constructor */
   public DatanodeInfo(final String ipAddr, final String hostName,
-      final String storageID, final int xferPort, final int infoPort, final int ipcPort,
+      final String storageID, final int xferPort, final int infoPort,
+      final int infoSecurePort, final int ipcPort,
       final long capacity, final long dfsUsed, final long remaining,
       final long blockPoolUsed, final long lastUpdate, final int xceiverCount,
       final String networkLocation, final AdminStates adminState) {
-    super(ipAddr, hostName, storageID, xferPort, infoPort, ipcPort);
+    super(ipAddr, hostName, storageID, xferPort, infoPort,
+        infoSecurePort, ipcPort);
     this.capacity = capacity;
     this.dfsUsed = dfsUsed;
     this.remaining = remaining;
@@ -223,7 +223,8 @@ public class PBHelper {
   // DatanodeId
   public static DatanodeID convert(DatanodeIDProto dn) {
     return new DatanodeID(dn.getIpAddr(), dn.getHostName(), dn.getStorageID(),
-        dn.getXferPort(), dn.getInfoPort(), dn.getIpcPort());
+        dn.getXferPort(), dn.getInfoPort(), dn.hasInfoSecurePort() ? dn
+        .getInfoSecurePort() : 0, dn.getIpcPort());
   }
 
   public static DatanodeIDProto convert(DatanodeID dn) {
@@ -233,6 +234,7 @@ public class PBHelper {
         .setStorageID(dn.getStorageID())
         .setXferPort(dn.getXferPort())
         .setInfoPort(dn.getInfoPort())
+        .setInfoSecurePort(dn.getInfoSecurePort())
         .setIpcPort(dn.getIpcPort()).build();
   }
 
@@ -17,21 +17,9 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
-import static org.apache.hadoop.util.Time.now;
-
-import java.io.IOException;
-import java.io.PrintWriter;
-import java.net.InetAddress;
-import java.net.UnknownHostException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Comparator;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.NavigableMap;
-import java.util.TreeMap;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import com.google.common.net.InetAddresses;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
@@ -41,13 +29,8 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.*;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair;
 import org.apache.hadoop.hdfs.server.namenode.HostFileManager;
 import org.apache.hadoop.hdfs.server.namenode.HostFileManager.Entry;
@@ -55,32 +38,23 @@ import org.apache.hadoop.hdfs.server.namenode.HostFileManager.EntrySet;
 import org.apache.hadoop.hdfs.server.namenode.HostFileManager.MutableEntrySet;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
-import org.apache.hadoop.hdfs.server.protocol.BalancerBandwidthCommand;
-import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
-import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand;
+import org.apache.hadoop.hdfs.server.protocol.*;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
-import org.apache.hadoop.hdfs.server.protocol.DisallowedDatanodeException;
-import org.apache.hadoop.hdfs.server.protocol.RegisterCommand;
 import org.apache.hadoop.hdfs.util.CyclicIteration;
 import org.apache.hadoop.ipc.Server;
-import org.apache.hadoop.net.CachedDNSToSwitchMapping;
-import org.apache.hadoop.net.DNSToSwitchMapping;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.net.NetworkTopology;
+import org.apache.hadoop.net.*;
 import org.apache.hadoop.net.NetworkTopology.InvalidTopologyException;
-import org.apache.hadoop.net.Node;
-import org.apache.hadoop.net.NodeBase;
-import org.apache.hadoop.net.ScriptBasedMapping;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.Time;
 
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.common.net.InetAddresses;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+import java.util.*;
+
+import static org.apache.hadoop.util.Time.now;
 
 /**
  * Manage datanodes, include decommission and other activities.
@@ -127,6 +101,8 @@ public class DatanodeManager {
 
   private final int defaultInfoPort;
 
+  private final int defaultInfoSecurePort;
+
   private final int defaultIpcPort;
 
   /** Read include/exclude files*/
@@ -188,7 +164,10 @@ public class DatanodeManager {
         DFSConfigKeys.DFS_DATANODE_ADDRESS_DEFAULT)).getPort();
     this.defaultInfoPort = NetUtils.createSocketAddr(
         conf.get(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY,
-        DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_DEFAULT)).getPort();
+        DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_DEFAULT)).getPort();
+    this.defaultInfoSecurePort = NetUtils.createSocketAddr(
+        conf.get(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY,
+        DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_DEFAULT)).getPort();
     this.defaultIpcPort = NetUtils.createSocketAddr(
         conf.get(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY,
         DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_DEFAULT)).getPort();
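
Note (not part of the patch): besides introducing defaultInfoSecurePort, this hunk also corrects defaultInfoPort, which previously fell back to the HTTPS address default even though it was read from the HTTP address key. A hedged sketch of the intended key/default pairing, assuming the usual DFSConfigKeys constants; the literal values in the comments are only the defaults shipped in that release.

    // Assumed key -> default pairs:
    //   dfs.datanode.http.address   -> 0.0.0.0:50075  (defaultInfoPort)
    //   dfs.datanode.https.address  -> 0.0.0.0:50475  (defaultInfoSecurePort)
    int httpPort = NetUtils.createSocketAddr(
        conf.get(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY,
            DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_DEFAULT)).getPort();   // 50075
    int httpsPort = NetUtils.createSocketAddr(
        conf.get(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY,
            DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_DEFAULT)).getPort();  // 50475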
@@ -1128,6 +1107,7 @@ public class DatanodeManager {
       // The IP:port is sufficient for listing in a report
       dnId = new DatanodeID(hostStr, "", "", port,
           DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
+          DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
           DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT);
     } else {
       String ipAddr = "";
@@ -1138,6 +1118,7 @@ public class DatanodeManager {
       }
       dnId = new DatanodeID(ipAddr, hostStr, "", port,
           DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
+          DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
           DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT);
     }
     return dnId;
@@ -1185,7 +1166,7 @@ public class DatanodeManager {
           new DatanodeDescriptor(new DatanodeID(entry.getIpAddress(),
               entry.getPrefix(), "",
               entry.getPort() == 0 ? defaultXferPort : entry.getPort(),
-              defaultInfoPort, defaultIpcPort));
+              defaultInfoPort, defaultInfoSecurePort, defaultIpcPort));
       dn.setLastUpdate(0); // Consider this node dead for reporting
       nodes.add(dn);
     }
@@ -18,25 +18,7 @@
 
 package org.apache.hadoop.hdfs.server.common;
 
-import java.io.ByteArrayInputStream;
-import java.io.DataInputStream;
-import java.io.IOException;
-import java.io.UnsupportedEncodingException;
-import java.net.InetSocketAddress;
-import java.net.Socket;
-import java.net.URL;
-import java.net.URLEncoder;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.HashMap;
-import java.util.List;
-import java.util.TreeSet;
-
-import javax.servlet.ServletContext;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.jsp.JspWriter;
-
+import com.google.common.base.Charsets;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -47,13 +29,9 @@ import org.apache.hadoop.hdfs.BlockReaderFactory;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.net.TcpPeerServer;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
+import org.apache.hadoop.hdfs.protocol.*;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
+import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
@@ -74,10 +52,21 @@ import org.apache.hadoop.security.authorize.ProxyUsers;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.VersionInfo;
 
-import com.google.common.base.Charsets;
+import javax.servlet.ServletContext;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.jsp.JspWriter;
+import java.io.ByteArrayInputStream;
+import java.io.DataInputStream;
+import java.io.IOException;
+import java.io.UnsupportedEncodingException;
+import java.net.InetSocketAddress;
+import java.net.Socket;
+import java.net.URL;
+import java.net.URLEncoder;
+import java.util.*;
 
-import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_HTTP_STATIC_USER;
 import static org.apache.hadoop.fs.CommonConfigurationKeys.DEFAULT_HADOOP_HTTP_STATIC_USER;
+import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_HTTP_STATIC_USER;
 
 @InterfaceAudience.Private
 public class JspHelper {
@@ -217,7 +206,7 @@ public class JspHelper {
         offsetIntoBlock, amtToRead, true,
         "JspHelper", TcpPeerServer.peerFromSocketAndKey(s, encryptionKey),
         new DatanodeID(addr.getAddress().getHostAddress(),
-            addr.getHostName(), poolId, addr.getPort(), 0, 0), null,
+            addr.getHostName(), poolId, addr.getPort(), 0, 0, 0), null,
             null, null, false, CachingStrategy.newDefaultStrategy());
 
     final byte[] buf = new byte[amtToRead];
@@ -18,66 +18,10 @@
 package org.apache.hadoop.hdfs.server.datanode;
 
 
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ADMIN;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_ADDRESS_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DNS_INTERFACE_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DNS_INTERFACE_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DNS_NAMESERVER_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DNS_NAMESERVER_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_PLUGINS_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_STARTUP_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HTTPS_ENABLE_KEY;
-import static org.apache.hadoop.util.ExitUtil.terminate;
-
-import java.io.BufferedOutputStream;
-import java.io.ByteArrayInputStream;
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.io.PrintStream;
-import java.net.InetSocketAddress;
-import java.net.Socket;
-import java.net.SocketException;
-import java.net.SocketTimeoutException;
-import java.net.URI;
-import java.net.UnknownHostException;
-import java.nio.channels.ClosedByInterruptException;
-import java.nio.channels.SocketChannel;
-import java.security.PrivilegedExceptionAction;
-import java.util.AbstractList;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.atomic.AtomicInteger;
-
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Joiner;
+import com.google.common.base.Preconditions;
+import com.google.protobuf.BlockingService;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -94,37 +38,15 @@ import org.apache.hadoop.hdfs.HDFSPolicyProvider;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.net.DomainPeerServer;
 import org.apache.hadoop.hdfs.net.TcpPeerServer;
-import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
-import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.HdfsBlocksMetadata;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
-import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
-import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferEncryptor;
-import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
-import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
-import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
+import org.apache.hadoop.hdfs.protocol.*;
+import org.apache.hadoop.hdfs.protocol.datatransfer.*;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ClientDatanodeProtocolService;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
 import org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InterDatanodeProtocolService;
-import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolPB;
-import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolServerSideTranslatorPB;
-import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolPB;
-import org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolServerSideTranslatorPB;
-import org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB;
-import org.apache.hadoop.hdfs.protocolPB.PBHelper;
-import org.apache.hadoop.hdfs.security.token.block.BlockPoolTokenSecretManager;
-import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
-import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
+import org.apache.hadoop.hdfs.protocolPB.*;
+import org.apache.hadoop.hdfs.security.token.block.*;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode;
-import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
-import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
@@ -139,11 +61,7 @@ import org.apache.hadoop.hdfs.server.datanode.web.resources.DatanodeWebHdfsMethods;
 import org.apache.hadoop.hdfs.server.namenode.FileChecksumServlets;
 import org.apache.hadoop.hdfs.server.namenode.StreamFile;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
-import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
-import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
-import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
+import org.apache.hadoop.hdfs.server.protocol.*;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.hdfs.web.resources.Param;
 import org.apache.hadoop.http.HttpServer;
@@ -164,22 +82,21 @@ import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
-import org.apache.hadoop.util.Daemon;
-import org.apache.hadoop.util.DiskChecker;
+import org.apache.hadoop.util.*;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
 import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
-import org.apache.hadoop.util.GenericOptionsParser;
-import org.apache.hadoop.util.JvmPauseMonitor;
-import org.apache.hadoop.util.ServicePlugin;
-import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.util.Time;
-import org.apache.hadoop.util.VersionInfo;
 import org.mortbay.util.ajax.JSON;
 
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Joiner;
-import com.google.common.base.Preconditions;
-import com.google.protobuf.BlockingService;
+import java.io.*;
+import java.net.*;
+import java.nio.channels.ClosedByInterruptException;
+import java.nio.channels.SocketChannel;
+import java.security.PrivilegedExceptionAction;
+import java.util.*;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
+import static org.apache.hadoop.util.ExitUtil.terminate;
 
 /**********************************************************
  * DataNode is a class (and program) that stores a set of
@@ -261,6 +178,7 @@ public class DataNode extends Configured
   private volatile boolean heartbeatsDisabledForTests = false;
   private DataStorage storage = null;
   private HttpServer infoServer = null;
+  private int infoSecurePort;
   DataNodeMetrics metrics;
   private InetSocketAddress streamingAddr;
 
@@ -384,16 +302,13 @@ public class DataNode extends Configured
     InetSocketAddress infoSocAddr = DataNode.getInfoAddr(conf);
     String infoHost = infoSocAddr.getHostName();
     int tmpInfoPort = infoSocAddr.getPort();
-    this.infoServer = (secureResources == null)
-        ? new HttpServer.Builder().setName("datanode")
-            .setBindAddress(infoHost).setPort(tmpInfoPort)
-            .setFindPort(tmpInfoPort == 0).setConf(conf)
-            .setACL(new AccessControlList(conf.get(DFS_ADMIN, " "))).build()
-        : new HttpServer.Builder().setName("datanode")
-            .setBindAddress(infoHost).setPort(tmpInfoPort)
-            .setFindPort(tmpInfoPort == 0).setConf(conf)
-            .setACL(new AccessControlList(conf.get(DFS_ADMIN, " ")))
-            .setConnector(secureResources.getListener()).build();
+    HttpServer.Builder builder = new HttpServer.Builder().setName("datanode")
+        .setBindAddress(infoHost).setPort(tmpInfoPort)
+        .setFindPort(tmpInfoPort == 0).setConf(conf)
+        .setACL(new AccessControlList(conf.get(DFS_ADMIN, " ")));
+    this.infoServer = (secureResources == null) ? builder.build() :
+        builder.setConnector(secureResources.getListener()).build();
     LOG.info("Opened info server at " + infoHost + ":" + tmpInfoPort);
     if (conf.getBoolean(DFS_HTTPS_ENABLE_KEY, false)) {
       boolean needClientAuth = conf.getBoolean(DFS_CLIENT_HTTPS_NEED_AUTH_KEY,
@@ -407,6 +322,7 @@ public class DataNode extends Configured
       if(LOG.isDebugEnabled()) {
         LOG.debug("Datanode listening for SSL on " + secInfoSocAddr);
       }
+      infoSecurePort = secInfoSocAddr.getPort();
     }
     this.infoServer.addInternalServlet(null, "/streamFile/*", StreamFile.class);
     this.infoServer.addInternalServlet(null, "/getFileChecksum/*",
@@ -775,7 +691,8 @@ public class DataNode extends Configured
     }
     DatanodeID dnId = new DatanodeID(
         streamingAddr.getAddress().getHostAddress(), hostName,
-        getStorageId(), getXferPort(), getInfoPort(), getIpcPort());
+        getStorageId(), getXferPort(), getInfoPort(),
+        infoSecurePort, getIpcPort());
     return new DatanodeRegistration(dnId, storageInfo,
         new ExportedBlockKeys(), VersionInfo.getVersion());
   }
@@ -873,7 +790,7 @@ public class DataNode extends Configured
    * If this is the first block pool to register, this also initializes
    * the datanode-scoped storage.
    *
-   * @param nsInfo the handshake response from the NN.
+   * @param bpos Block pool offer service
    * @throws IOException if the NN is inconsistent with the local storage.
    */
   void initBlockPool(BPOfferService bpos) throws IOException {
@@ -2330,6 +2247,13 @@ public class DataNode extends Configured
     return infoServer.getPort();
   }
 
+  /**
+   * @return the datanode's https port
+   */
+  public int getInfoSecurePort() {
+    return infoSecurePort;
+  }
+
   /**
    * Returned information is a JSON representation of a map with
    * name node host name as the key and block pool Id as the value.
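
Note (not part of the patch): with the https port carried in the datanode's registration, a caller on the namenode or client side could pick the web endpoint roughly as sketched below. chooseWebUrl is a hypothetical helper, not an API in this patch, and it assumes infoSecurePort is 0 whenever no HTTPS listener was started (the protobuf default used in this change).

    // Hypothetical helper: prefer HTTPS when the datanode reported a secure info port.
    static String chooseWebUrl(DatanodeID dn, boolean useHttps) {
      if (useHttps && dn.getInfoSecurePort() != 0) {
        return "https://" + dn.getInfoSecureAddr();
      }
      return "http://" + dn.getIpAddr() + ":" + dn.getInfoPort();
    }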
@@ -17,29 +17,11 @@
  */
 package org.apache.hadoop.hdfs.web;
 
-import java.io.ByteArrayInputStream;
-import java.io.DataInputStream;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-import java.util.TreeMap;
-
-import org.apache.hadoop.fs.ContentSummary;
-import org.apache.hadoop.fs.FileChecksum;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.MD5MD5CRC32CastagnoliFileChecksum;
-import org.apache.hadoop.fs.MD5MD5CRC32GzipFileChecksum;
-import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
+import org.apache.hadoop.fs.*;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.*;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
-import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.namenode.INodeId;
@@ -50,6 +32,11 @@ import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.StringUtils;
 import org.mortbay.util.ajax.JSON;
 
+import java.io.ByteArrayInputStream;
+import java.io.DataInputStream;
+import java.io.IOException;
+import java.util.*;
+
 /** JSON Utilities */
 public class JsonUtil {
   private static final Object[] EMPTY_OBJECT_ARRAY = {};
@@ -295,6 +282,7 @@ public class JsonUtil {
     m.put("storageID", datanodeinfo.getStorageID());
     m.put("xferPort", datanodeinfo.getXferPort());
     m.put("infoPort", datanodeinfo.getInfoPort());
+    m.put("infoSecurePort", datanodeinfo.getInfoSecurePort());
     m.put("ipcPort", datanodeinfo.getIpcPort());
 
     m.put("capacity", datanodeinfo.getCapacity());
@@ -320,6 +308,7 @@ public class JsonUtil {
         (String)m.get("storageID"),
         (int)(long)(Long)m.get("xferPort"),
         (int)(long)(Long)m.get("infoPort"),
+        (int)(long)(Long)m.get("infoSecurePort"),
         (int)(long)(Long)m.get("ipcPort"),
 
         (Long)m.get("capacity"),
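
Note (not part of the patch): both directions of the WebHDFS JSON conversion are updated, so a DatanodeInfo serialized by an upgraded server round-trips its https port. A small sketch of the relevant slice of the map (field names follow the m.put() calls above, types follow the casts in toDatanodeInfo; values and the java.util.Map/TreeMap usage are illustrative only):

    Map<String, Object> m = new TreeMap<String, Object>();
    m.put("xferPort", 50010L);
    m.put("infoPort", 50075L);
    m.put("infoSecurePort", 50475L);
    m.put("ipcPort", 50020L);
    int infoSecurePort = (int)(long)(Long) m.get("infoSecurePort");  // 50475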
@@ -52,8 +52,9 @@ message DatanodeIDProto {
   required string hostName = 2;    // hostname
   required string storageID = 3;   // unique storage id
   required uint32 xferPort = 4;    // data streaming port
-  required uint32 infoPort = 5;    // info server port
+  required uint32 infoPort = 5;    // datanode http port
   required uint32 ipcPort = 6;     // ipc server port
+  optional uint32 infoSecurePort = 7 [default = 0]; // datanode https port
 }
 
 /**
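
Note (not part of the patch): declaring infoSecurePort as an optional field with [default = 0] keeps the wire format compatible — a registration sent by a datanode built before this change simply omits field 7, and readers see 0. The PBHelper conversion earlier in this patch already guards for that case; a minimal Java sketch of the same idiom, assuming the generated DatanodeIDProto accessors:

    // 0 is treated as "no HTTPS info server"; hasInfoSecurePort() is false for old senders.
    int infoSecurePort = proto.hasInfoSecurePort() ? proto.getInfoSecurePort() : 0;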
@@ -18,60 +18,20 @@
 
 package org.apache.hadoop.hdfs;
 
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY;
-import static org.junit.Assert.assertEquals;
-
-import java.io.BufferedOutputStream;
-import java.io.BufferedReader;
-import java.io.ByteArrayInputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
-import java.io.FileReader;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.net.HttpURLConnection;
-import java.net.InetSocketAddress;
-import java.net.Socket;
-import java.net.URL;
-import java.net.URLConnection;
-import java.security.PrivilegedExceptionAction;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Random;
-import java.util.Set;
-import java.util.concurrent.TimeoutException;
-
+import com.google.common.base.Charsets;
+import com.google.common.base.Joiner;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.BlockLocation;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.*;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystem.Statistics;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.MiniDFSCluster.NameNodeInfo;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.*;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
-import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
@@ -93,8 +53,15 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.VersionInfo;
 
-import com.google.common.base.Charsets;
-import com.google.common.base.Joiner;
+import java.io.*;
+import java.net.*;
+import java.security.PrivilegedExceptionAction;
+import java.util.*;
+import java.util.concurrent.TimeoutException;
+
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY;
+import static org.junit.Assert.assertEquals;
 
 /** Utilities for HDFS tests */
 public class DFSTestUtil {
@@ -114,10 +81,10 @@ public class DFSTestUtil {
 
   /** Creates a new instance of DFSTestUtil
    *
-   * @param testName Name of the test from where this utility is used
    * @param nFiles Number of files to be created
    * @param maxLevels Maximum number of directory levels
    * @param maxSize Maximum size for file
+   * @param minSize Minimum size for file
    */
   private DFSTestUtil(int nFiles, int maxLevels, int maxSize, int minSize) {
     this.nFiles = nFiles;
@@ -143,7 +110,7 @@ public class DFSTestUtil {
   }
 
   /**
-   * when formating a namenode - we must provide clusterid.
+   * when formatting a namenode - we must provide clusterid.
    * @param conf
    * @throws IOException
    */
@@ -806,6 +773,7 @@ public class DFSTestUtil {
     return new DatanodeID(ipAddr, "localhost", "",
         DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT,
         DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
+        DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
        DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT);
   }
 
@@ -815,7 +783,7 @@ public class DFSTestUtil {
 
   public static DatanodeID getLocalDatanodeID(int port) {
     return new DatanodeID("127.0.0.1", "localhost", "",
-        port, port, port);
+        port, port, port, port);
   }
 
   public static DatanodeDescriptor getLocalDatanodeDescriptor() {
@@ -838,6 +806,7 @@ public class DFSTestUtil {
       String host, int port) {
     return new DatanodeInfo(new DatanodeID(ipAddr, host, "",
         port, DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
+        DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
         DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT));
   }
 
@@ -846,6 +815,7 @@ public class DFSTestUtil {
     return new DatanodeInfo(ipAddr, hostname, "",
         DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT,
         DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
+        DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
         DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT,
         1, 2, 3, 4, 5, 6, "local", adminState);
   }
@@ -860,6 +830,7 @@ public class DFSTestUtil {
       int port, String rackLocation) {
     DatanodeID dnId = new DatanodeID(ipAddr, "host", "", port,
         DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
+        DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
         DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT);
     return new DatanodeDescriptor(dnId, rackLocation);
   }
@@ -17,13 +17,6 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.*;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.mock;
-
-import java.net.InetSocketAddress;
-import java.security.Permission;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -40,6 +33,13 @@ import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.VersionInfo;
 import org.junit.Test;
 
+import java.net.InetSocketAddress;
+import java.security.Permission;
+
+import static org.junit.Assert.*;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+
 /**
  * This class tests data node registration.
  */
@@ -157,7 +157,8 @@ public class TestDatanodeRegistration {
     final String DN_HOSTNAME = "localhost";
     final int DN_XFER_PORT = 12345;
     final int DN_INFO_PORT = 12346;
-    final int DN_IPC_PORT = 12347;
+    final int DN_INFO_SECURE_PORT = 12347;
+    final int DN_IPC_PORT = 12348;
     Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = null;
     try {
|
@ -172,7 +173,8 @@ public class TestDatanodeRegistration {
|
||||||
|
|
||||||
// register a datanode
|
// register a datanode
|
||||||
DatanodeID dnId = new DatanodeID(DN_IP_ADDR, DN_HOSTNAME,
|
DatanodeID dnId = new DatanodeID(DN_IP_ADDR, DN_HOSTNAME,
|
||||||
"fake-storage-id", DN_XFER_PORT, DN_INFO_PORT, DN_IPC_PORT);
|
"fake-storage-id", DN_XFER_PORT, DN_INFO_PORT, DN_INFO_SECURE_PORT,
|
||||||
|
DN_IPC_PORT);
|
||||||
long nnCTime = cluster.getNamesystem().getFSImage().getStorage()
|
long nnCTime = cluster.getNamesystem().getFSImage().getStorage()
|
||||||
.getCTime();
|
.getCTime();
|
||||||
StorageInfo mockStorageInfo = mock(StorageInfo.class);
|
StorageInfo mockStorageInfo = mock(StorageInfo.class);
|
||||||
|
@ -188,7 +190,8 @@ public class TestDatanodeRegistration {
|
||||||
|
|
||||||
// register the same datanode again with a different storage ID
|
// register the same datanode again with a different storage ID
|
||||||
dnId = new DatanodeID(DN_IP_ADDR, DN_HOSTNAME,
|
dnId = new DatanodeID(DN_IP_ADDR, DN_HOSTNAME,
|
||||||
"changed-fake-storage-id", DN_XFER_PORT, DN_INFO_PORT, DN_IPC_PORT);
|
"changed-fake-storage-id", DN_XFER_PORT, DN_INFO_PORT,
|
||||||
|
DN_INFO_SECURE_PORT, DN_IPC_PORT);
|
||||||
dnReg = new DatanodeRegistration(dnId,
|
dnReg = new DatanodeRegistration(dnId,
|
||||||
mockStorageInfo, null, VersionInfo.getVersion());
|
mockStorageInfo, null, VersionInfo.getVersion());
|
||||||
rpcServer.registerDatanode(dnReg);
|
rpcServer.registerDatanode(dnReg);
|
||||||
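
Because the ports are plain ints passed positionally, it is easy to drop the new argument into the wrong slot. A quick sanity check in the style of the test above, assuming the getInfoPort/getInfoSecurePort/getIpcPort accessors that accompany this change (the address, storage ID, and literal port values are illustrative only):

    DatanodeID dnId = new DatanodeID("127.0.0.1", "localhost",
        "fake-storage-id", 12345, 12346, 12347, 12348);
    assert dnId.getInfoPort() == 12346;        // HTTP info server port
    assert dnId.getInfoSecurePort() == 12347;  // HTTPS info server port (new)
    assert dnId.getIpcPort() == 12348;         // IPC server port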

@@ -17,12 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
-import java.io.IOException;
-
 import junit.framework.Assert;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
@@ -31,6 +26,10 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.unix.TemporarySocketDirectory;
 import org.junit.Test;
 
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+
 public class TestFileInputStreamCache {
   static final Log LOG = LogFactory.getLog(TestFileInputStreamCache.class);
 
@@ -80,7 +79,7 @@ public class TestFileInputStreamCache {
   public void testAddAndRetrieve() throws Exception {
     FileInputStreamCache cache = new FileInputStreamCache(1, 1000000);
     DatanodeID dnId = new DatanodeID("127.0.0.1", "localhost",
-        "xyzzy", 8080, 9090, 7070);
+        "xyzzy", 8080, 9090, 7070, 6060);
     ExtendedBlock block = new ExtendedBlock("poolid", 123);
     TestFileDescriptorPair pair = new TestFileDescriptorPair();
     cache.put(dnId, block, pair.getFileInputStreams());
@@ -94,7 +93,7 @@ public class TestFileInputStreamCache {
   public void testExpiry() throws Exception {
     FileInputStreamCache cache = new FileInputStreamCache(1, 10);
     DatanodeID dnId = new DatanodeID("127.0.0.1", "localhost",
-        "xyzzy", 8080, 9090, 7070);
+        "xyzzy", 8080, 9090, 7070, 6060);
     ExtendedBlock block = new ExtendedBlock("poolid", 123);
     TestFileDescriptorPair pair = new TestFileDescriptorPair();
     cache.put(dnId, block, pair.getFileInputStreams());
@@ -109,12 +108,12 @@ public class TestFileInputStreamCache {
   public void testEviction() throws Exception {
     FileInputStreamCache cache = new FileInputStreamCache(1, 10000000);
     DatanodeID dnId = new DatanodeID("127.0.0.1", "localhost",
-        "xyzzy", 8080, 9090, 7070);
+        "xyzzy", 8080, 9090, 7070, 6060);
     ExtendedBlock block = new ExtendedBlock("poolid", 123);
     TestFileDescriptorPair pair = new TestFileDescriptorPair();
     cache.put(dnId, block, pair.getFileInputStreams());
     DatanodeID dnId2 = new DatanodeID("127.0.0.1", "localhost",
-        "xyzzy", 8081, 9091, 7071);
+        "xyzzy", 8081, 9091, 7071, 6061);
     TestFileDescriptorPair pair2 = new TestFileDescriptorPair();
     cache.put(dnId2, block, pair2.getFileInputStreams());
     FileInputStream fis[] = cache.get(dnId, block);

@@ -17,26 +17,23 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertSame;
-import static org.junit.Assert.assertTrue;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.nio.channels.ReadableByteChannel;
-
+import com.google.common.collect.HashMultiset;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.net.Peer;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.net.unix.DomainSocket;
 import org.junit.Test;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
-import com.google.common.collect.HashMultiset;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.nio.channels.ReadableByteChannel;
+
+import static org.junit.Assert.*;
 
 public class TestPeerCache {
   static final Log LOG = LogFactory.getLog(TestPeerCache.class);
@@ -150,7 +147,7 @@ public class TestPeerCache {
     PeerCache cache = new PeerCache(3, 100000);
     DatanodeID dnId = new DatanodeID("192.168.0.1",
           "fakehostname", "fake_storage_id",
-          100, 101, 102);
+          100, 101, 102, 103);
     FakePeer peer = new FakePeer(dnId, false);
     cache.put(dnId, peer);
     assertTrue(!peer.isClosed());
@@ -170,7 +167,7 @@ public class TestPeerCache {
     for (int i = 0; i < CAPACITY; ++i) {
       dnIds[i] = new DatanodeID("192.168.0.1",
           "fakehostname_" + i, "fake_storage_id",
-          100, 101, 102);
+          100, 101, 102, 103);
       peers[i] = new FakePeer(dnIds[i], false);
     }
     for (int i = 0; i < CAPACITY; ++i) {
@@ -201,7 +198,7 @@ public class TestPeerCache {
     for (int i = 0; i < dnIds.length; ++i) {
       dnIds[i] = new DatanodeID("192.168.0.1",
           "fakehostname_" + i, "fake_storage_id_" + i,
-          100, 101, 102);
+          100, 101, 102, 103);
       peers[i] = new FakePeer(dnIds[i], false);
     }
     for (int i = 0; i < CAPACITY; ++i) {
@@ -232,7 +229,7 @@ public class TestPeerCache {
     PeerCache cache = new PeerCache(CAPACITY, 100000);
     DatanodeID dnId = new DatanodeID("192.168.0.1",
           "fakehostname", "fake_storage_id",
-          100, 101, 102);
+          100, 101, 102, 103);
     HashMultiset<FakePeer> peers = HashMultiset.create(CAPACITY);
     for (int i = 0; i < CAPACITY; ++i) {
       FakePeer peer = new FakePeer(dnId, false);
@@ -257,7 +254,7 @@ public class TestPeerCache {
     PeerCache cache = new PeerCache(CAPACITY, 100000);
     DatanodeID dnId = new DatanodeID("192.168.0.1",
           "fakehostname", "fake_storage_id",
-          100, 101, 102);
+          100, 101, 102, 103);
     HashMultiset<FakePeer> peers = HashMultiset.create(CAPACITY);
     for (int i = 0; i < CAPACITY; ++i) {
       FakePeer peer = new FakePeer(dnId, i == CAPACITY - 1);

@@ -17,31 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.common;
 
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
-import static org.mockito.Mockito.doAnswer;
-
-import java.io.IOException;
-import java.io.StringReader;
-import java.net.InetSocketAddress;
-import java.text.MessageFormat;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-
-import javax.servlet.ServletContext;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.jsp.JspWriter;
-import javax.xml.parsers.DocumentBuilder;
-import javax.xml.parsers.DocumentBuilderFactory;
-import javax.xml.parsers.ParserConfigurationException;
-
-
+import com.google.common.base.Strings;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -70,7 +46,20 @@ import org.mockito.stubbing.Answer;
 import org.xml.sax.InputSource;
 import org.xml.sax.SAXException;
 
-import com.google.common.base.Strings;
+import javax.servlet.ServletContext;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.jsp.JspWriter;
+import javax.xml.parsers.DocumentBuilder;
+import javax.xml.parsers.DocumentBuilderFactory;
+import javax.xml.parsers.ParserConfigurationException;
+import java.io.IOException;
+import java.io.StringReader;
+import java.net.InetSocketAddress;
+import java.text.MessageFormat;
+import java.util.ArrayList;
+
+import static org.junit.Assert.*;
+import static org.mockito.Mockito.*;
 
 
 public class TestJspHelper {
@@ -459,9 +448,9 @@ public class TestJspHelper {
   @Test
   public void testSortNodeByFields() throws Exception {
     DatanodeID dnId1 = new DatanodeID("127.0.0.1", "localhost1", "storage1",
-        1234, 2345, 3456);
+        1234, 2345, 3456, 4567);
     DatanodeID dnId2 = new DatanodeID("127.0.0.2", "localhost2", "storage2",
-        1235, 2346, 3457);
+        1235, 2346, 3457, 4568);
     DatanodeDescriptor dnDesc1 = new DatanodeDescriptor(dnId1, "rack1", 1024,
         100, 924, 100, 10, 2);
     DatanodeDescriptor dnDesc2 = new DatanodeDescriptor(dnId2, "rack2", 2500,

@@ -17,14 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import java.io.File;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.EnumSet;
-import java.util.List;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
@@ -33,40 +25,28 @@ import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.*;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataStorage;
-import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
-import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
-import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
-import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
-import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
-import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
-import org.apache.hadoop.hdfs.server.protocol.StorageReport;
+import org.apache.hadoop.hdfs.server.protocol.*;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.net.DNS;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.security.Groups;
-import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.util.Time;
-import org.apache.hadoop.util.Tool;
-import org.apache.hadoop.util.ToolRunner;
-import org.apache.hadoop.util.VersionInfo;
+import org.apache.hadoop.util.*;
 import org.apache.log4j.Level;
 import org.apache.log4j.LogManager;
 
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.EnumSet;
+import java.util.List;
+
 /**
  * Main class for a series of name-node benchmarks.
  *
@@ -839,6 +819,7 @@ public class NNThroughputBenchmark implements Tool {
           DNS.getDefaultHost("default", "default"),
           "", getNodePort(dnIdx),
           DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
+          DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
           DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
           new DataStorage(nsInfo, ""),
           new ExportedBlockKeys(), VersionInfo.getVersion());
@@ -1305,7 +1286,7 @@ public class NNThroughputBenchmark implements Tool {
 
   /**
    * Main method of the benchmark.
-   * @param args command line parameters
+   * @param aArgs command line parameters
    */
   @Override // Tool
   public int run(String[] aArgs) throws Exception {

@@ -18,15 +18,6 @@
 
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.fail;
-import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.anyBoolean;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.spy;
-
-import java.io.IOException;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
@@ -37,6 +28,13 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.junit.Test;
 
+import java.io.IOException;
+
+import static org.junit.Assert.fail;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyBoolean;
+import static org.mockito.Mockito.*;
+
 /**
  * Verify that TestCommitBlockSynchronization is idempotent.
  */
@@ -177,7 +175,7 @@ public class TestCommitBlockSynchronization {
     Block block = new Block(blockId, length, genStamp);
     FSNamesystem namesystemSpy = makeNameSystemSpy(block, file);
     DatanodeID[] newTargets = new DatanodeID[]{
-        new DatanodeID("0.0.0.0", "nonexistantHost", "1", 0, 0, 0)};
+        new DatanodeID("0.0.0.0", "nonexistantHost", "1", 0, 0, 0, 0)};
 
     ExtendedBlock lastBlock = new ExtendedBlock();
     namesystemSpy.commitBlockSynchronization(