svn merge -c 1396798 FIXES: HDFS-3224. Bug in check for DN re-registration with different storage ID. Contributed by Jason Lowe
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1396807 13f79535-47bb-0310-9956-ffa450edef68
commit eb50c308b8 (parent 70e7ec1f47)
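Note: judging from the hunks below, the datanode registration check in DatanodeManager looked up the existing node with getDatanodeByHost(nodeReg.getXferAddr()), i.e. it handed the full ip:port transfer address to a host-keyed lookup, so a datanode that re-registered with a different storage ID was never matched against its earlier registration. The fix resolves the node by IP address and transfer port through a new Host2NodesMap.getDatanodeByXferAddr() helper and adds a regression test that re-registers the same datanode with a changed storage ID. A minimal standalone sketch of the lookup difference follows the DatanodeManager hunk below.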
@@ -1550,6 +1550,9 @@ Release 0.23.5 - UNRELEASED
     HDFS-3824. TestHftpDelegationToken fails intermittently with JDK7 (Trevor
     Robinson via tgraves)
 
+    HDFS-3224. Bug in check for DN re-registration with different storage ID
+    (jlowe)
+
 Release 0.23.4 - UNRELEASED
 
   INCOMPATIBLE CHANGES

@@ -594,7 +594,8 @@ public class DatanodeManager {
         + " storage " + nodeReg.getStorageID());
 
     DatanodeDescriptor nodeS = datanodeMap.get(nodeReg.getStorageID());
-    DatanodeDescriptor nodeN = getDatanodeByHost(nodeReg.getXferAddr());
+    DatanodeDescriptor nodeN = host2DatanodeMap.getDatanodeByXferAddr(
+        nodeReg.getIpAddr(), nodeReg.getXferPort());
 
     if (nodeN != null && nodeN != nodeS) {
       NameNode.LOG.info("BLOCK* NameSystem.registerDatanode: "

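Illustrative sketch (not part of the commit): the removed line passes the full "ip:port" transfer address to a lookup whose map appears to be keyed by bare IP address (see map.get(ipAddr) in the Host2NodesMap hunk), so it could never find the existing entry; the replacement resolves by IP first and then compares the transfer port. The standalone Java below shows the difference; all class and variable names are invented for illustration only.

import java.util.HashMap;
import java.util.Map;

// Illustration only -- not Hadoop code. All names here are invented.
// Shows why a lookup keyed by bare IP address cannot match a key that
// also carries the transfer port, and how matching on (ip, xferPort)
// does find the previously registered node.
public class XferAddrLookupSketch {

  static class Node {
    final String ip;
    final int xferPort;
    final String storageId;
    Node(String ip, int xferPort, String storageId) {
      this.ip = ip;
      this.xferPort = xferPort;
      this.storageId = storageId;
    }
  }

  public static void main(String[] args) {
    Map<String, Node> byIp = new HashMap<String, Node>();
    byIp.put("127.0.0.1", new Node("127.0.0.1", 12345, "fake-storage-id"));

    // Old-style lookup: "127.0.0.1:12345" is not a key in an IP-keyed map.
    System.out.println(byIp.get("127.0.0.1:12345"));      // prints: null

    // New-style lookup: resolve by IP first, then compare the transfer port.
    Node candidate = byIp.get("127.0.0.1");
    boolean found = candidate != null && candidate.xferPort == 12345;
    System.out.println(found);                            // prints: true
  }
}
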
@@ -159,6 +159,35 @@ class Host2NodesMap {
     }
   }
 
+  /**
+   * Find data node by its transfer address
+   *
+   * @return DatanodeDescriptor if found or null otherwise
+   */
+  public DatanodeDescriptor getDatanodeByXferAddr(String ipAddr,
+      int xferPort) {
+    if (ipAddr==null) {
+      return null;
+    }
+
+    hostmapLock.readLock().lock();
+    try {
+      DatanodeDescriptor[] nodes = map.get(ipAddr);
+      // no entry
+      if (nodes== null) {
+        return null;
+      }
+      for(DatanodeDescriptor containedNode:nodes) {
+        if (xferPort == containedNode.getXferPort()) {
+          return containedNode;
+        }
+      }
+      return null;
+    } finally {
+      hostmapLock.readLock().unlock();
+    }
+  }
+
   @Override
   public String toString() {
     final StringBuilder b = new StringBuilder(getClass().getSimpleName())

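Note: the new helper guards against a null IP, takes the hostmap read lock, and scans the per-IP node array for one serving the given transfer port, returning null when the IP is unknown or no node on that IP matches. The explicit port comparison is what lets it distinguish several datanodes sharing one IP address, e.g. MiniDFSCluster datanodes on localhost.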
@@ -27,6 +27,7 @@ import java.net.InetSocketAddress;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;

@@ -92,6 +93,58 @@ public class TestDatanodeRegistration {
     }
   }
 
+  @Test
+  public void testChangeStorageID() throws Exception {
+    final String DN_IP_ADDR = "127.0.0.1";
+    final String DN_HOSTNAME = "localhost";
+    final int DN_XFER_PORT = 12345;
+    final int DN_INFO_PORT = 12346;
+    final int DN_IPC_PORT = 12347;
+    Configuration conf = new HdfsConfiguration();
+    MiniDFSCluster cluster = null;
+    try {
+      cluster = new MiniDFSCluster.Builder(conf)
+          .numDataNodes(0)
+          .build();
+      InetSocketAddress addr = new InetSocketAddress(
+          "localhost",
+          cluster.getNameNodePort());
+      DFSClient client = new DFSClient(addr, conf);
+      NamenodeProtocols rpcServer = cluster.getNameNodeRpc();
+
+      // register a datanode
+      DatanodeID dnId = new DatanodeID(DN_IP_ADDR, DN_HOSTNAME,
+          "fake-storage-id", DN_XFER_PORT, DN_INFO_PORT, DN_IPC_PORT);
+      long nnCTime = cluster.getNamesystem().getFSImage().getStorage()
+          .getCTime();
+      StorageInfo mockStorageInfo = mock(StorageInfo.class);
+      doReturn(nnCTime).when(mockStorageInfo).getCTime();
+      doReturn(HdfsConstants.LAYOUT_VERSION).when(mockStorageInfo)
+          .getLayoutVersion();
+      DatanodeRegistration dnReg = new DatanodeRegistration(dnId,
+          mockStorageInfo, null, VersionInfo.getVersion());
+      rpcServer.registerDatanode(dnReg);
+
+      DatanodeInfo[] report = client.datanodeReport(DatanodeReportType.ALL);
+      assertEquals("Expected a registered datanode", 1, report.length);
+
+      // register the same datanode again with a different storage ID
+      dnId = new DatanodeID(DN_IP_ADDR, DN_HOSTNAME,
+          "changed-fake-storage-id", DN_XFER_PORT, DN_INFO_PORT, DN_IPC_PORT);
+      dnReg = new DatanodeRegistration(dnId,
+          mockStorageInfo, null, VersionInfo.getVersion());
+      rpcServer.registerDatanode(dnReg);
+
+      report = client.datanodeReport(DatanodeReportType.ALL);
+      assertEquals("Datanode with changed storage ID not recognized",
+          1, report.length);
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
   @Test
   public void testRegistrationWithDifferentSoftwareVersions() throws Exception {
     Configuration conf = new HdfsConfiguration();

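Note: the regression test drives the NameNode RPC interface directly (the MiniDFSCluster is built with numDataNodes(0), so no real datanodes start), registers a DatanodeID with a fake storage ID, re-registers the same IP and transfer port with a changed storage ID, and asserts that the datanode report still lists exactly one node. To run only this test from the HDFS module, something like mvn test -Dtest=TestDatanodeRegistration should work, assuming the usual Maven Surefire setup.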