From f85530f649bd7c16bd7c1d4a3447863563d24c03 Mon Sep 17 00:00:00 2001
From: Kihwal Lee
Date: Wed, 4 Mar 2015 17:23:00 -0600
Subject: [PATCH] HDFS-7434. DatanodeID hashCode should not be mutable.
 Contributed by Daryn Sharp.

(cherry picked from commit 722b4794693d8bad1dee0ca5c2f99030a08402f9)
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt      |  2 +
 .../hadoop/hdfs/protocol/DatanodeID.java         | 48 +++++++------------
 .../server/protocol/DatanodeRegistration.java    | 10 ++++
 .../blockmanagement/TestBlockManager.java        |  7 ---
 .../TestComputeInvalidateWork.java               | 16 +++++--
 .../TestDatanodeProtocolRetryPolicy.java         |  3 +-
 6 files changed, 43 insertions(+), 43 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 42f7c8ca10d..6d2ec9931e1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -785,6 +785,8 @@ Release 2.7.0 - UNRELEASED
     HDFS-7879. hdfs.dll does not export functions of the public libhdfs API.
     (Chris Nauroth via wheat9)
 
+    HDFS-7434. DatanodeID hashCode should not be mutable. (daryn via kihwal)
+
     BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
       HDFS-7720. Quota by Storage Type API, tools and ClientNameNode
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
index 779e3b905f1..f91696fb284 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
@@ -47,19 +47,23 @@ public class DatanodeID implements Comparable<DatanodeID> {
   private int infoSecurePort; // info server port
   private int ipcPort;        // IPC server port
   private String xferAddr;
-  private int hashCode = -1;
 
   /**
    * UUID identifying a given datanode. For upgraded Datanodes this is the
    * same as the StorageID that was previously used by this Datanode.
    * For newly formatted Datanodes it is a UUID.
    */
-  private String datanodeUuid = null;
+  private final String datanodeUuid;
 
   public DatanodeID(DatanodeID from) {
+    this(from.getDatanodeUuid(), from);
+  }
+
+  @VisibleForTesting
+  public DatanodeID(String datanodeUuid, DatanodeID from) {
     this(from.getIpAddr(),
         from.getHostName(),
-        from.getDatanodeUuid(),
+        datanodeUuid,
         from.getXferPort(),
         from.getInfoPort(),
         from.getInfoSecurePort(),
@@ -81,19 +85,24 @@ public class DatanodeID implements Comparable<DatanodeID> {
    */
   public DatanodeID(String ipAddr, String hostName, String datanodeUuid,
       int xferPort, int infoPort, int infoSecurePort, int ipcPort) {
-    this.ipAddr = ipAddr;
+    setIpAndXferPort(ipAddr, xferPort);
     this.hostName = hostName;
     this.datanodeUuid = checkDatanodeUuid(datanodeUuid);
-    this.xferPort = xferPort;
     this.infoPort = infoPort;
     this.infoSecurePort = infoSecurePort;
     this.ipcPort = ipcPort;
-    updateXferAddrAndInvalidateHashCode();
   }
 
   public void setIpAddr(String ipAddr) {
+    //updated during registration, preserve former xferPort
+    setIpAndXferPort(ipAddr, xferPort);
+  }
+
+  private void setIpAndXferPort(String ipAddr, int xferPort) {
+    // build xferAddr string to reduce cost of frequent use
     this.ipAddr = ipAddr;
-    updateXferAddrAndInvalidateHashCode();
+    this.xferPort = xferPort;
+    this.xferAddr = ipAddr + ":" + xferPort;
   }
 
   public void setPeerHostName(String peerHostName) {
@@ -107,12 +116,6 @@ public class DatanodeID implements Comparable<DatanodeID> {
     return datanodeUuid;
   }
 
-  @VisibleForTesting
-  public void setDatanodeUuidForTesting(String datanodeUuid) {
-    this.datanodeUuid = datanodeUuid;
-    updateXferAddrAndInvalidateHashCode();
-  }
-
   private String checkDatanodeUuid(String uuid) {
     if (uuid == null || uuid.isEmpty()) {
       return null;
@@ -242,11 +245,7 @@ public class DatanodeID implements Comparable<DatanodeID> {
 
   @Override
   public int hashCode() {
-    if (hashCode == -1) {
-      int newHashCode = xferAddr.hashCode() ^ datanodeUuid.hashCode();
-      hashCode = newHashCode & Integer.MAX_VALUE;
-    }
-    return hashCode;
+    return datanodeUuid.hashCode();
   }
 
   @Override
@@ -259,14 +258,12 @@ public class DatanodeID implements Comparable<DatanodeID> {
    * Note that this does not update storageID.
    */
   public void updateRegInfo(DatanodeID nodeReg) {
-    ipAddr = nodeReg.getIpAddr();
+    setIpAndXferPort(nodeReg.getIpAddr(), nodeReg.getXferPort());
     hostName = nodeReg.getHostName();
     peerHostName = nodeReg.getPeerHostName();
-    xferPort = nodeReg.getXferPort();
     infoPort = nodeReg.getInfoPort();
     infoSecurePort = nodeReg.getInfoSecurePort();
     ipcPort = nodeReg.getIpcPort();
-    updateXferAddrAndInvalidateHashCode();
   }
 
   /**
@@ -279,13 +276,4 @@ public class DatanodeID implements Comparable<DatanodeID> {
   public int compareTo(DatanodeID that) {
     return getXferAddr().compareTo(that.getXferAddr());
   }
-
-  // NOTE: mutable hash codes are dangerous, however this class chooses to
-  // use them. this method must be called when a value mutates that is used
-  // to compute the hash, equality, or comparison of instances.
-  private void updateXferAddrAndInvalidateHashCode() {
-    xferAddr = ipAddr + ":" + xferPort;
-    // can't compute new hash yet because uuid might still null...
-    hashCode = -1;
-  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
index aaa18c666a7..9db2fca2471 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
@@ -25,6 +25,8 @@ import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 
+import com.google.common.annotations.VisibleForTesting;
+
 /**
  * DatanodeRegistration class contains all information the name-node needs
  * to identify and verify a data-node when it contacts the name-node.
@@ -39,6 +41,14 @@ public class DatanodeRegistration extends DatanodeID
   private ExportedBlockKeys exportedKeys;
   private final String softwareVersion;
 
+  @VisibleForTesting
+  public DatanodeRegistration(String uuid, DatanodeRegistration dnr) {
+    this(new DatanodeID(uuid, dnr),
+         dnr.getStorageInfo(),
+         dnr.getExportedKeys(),
+         dnr.getSoftwareVersion());
+  }
+
   public DatanodeRegistration(DatanodeID dn, StorageInfo info,
       ExportedBlockKeys keys, String softwareVersion) {
     super(dn);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index f9c2bdce22a..6d67c7d6399 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -538,10 +538,6 @@ public class TestBlockManager {
   public void testSafeModeIBR() throws Exception {
     DatanodeDescriptor node = spy(nodes.get(0));
     DatanodeStorageInfo ds = node.getStorageInfos()[0];
-
-    // TODO: Needs to be fixed. DatanodeUuid is not storageID.
-    node.setDatanodeUuidForTesting(ds.getStorageID());
-
     node.isAlive = true;
 
     DatanodeRegistration nodeReg =
@@ -587,9 +583,6 @@ public class TestBlockManager {
     DatanodeDescriptor node = spy(nodes.get(0));
     DatanodeStorageInfo ds = node.getStorageInfos()[0];
 
-    // TODO: Needs to be fixed. DatanodeUuid is not storageID.
-    node.setDatanodeUuidForTesting(ds.getStorageID());
-
     node.isAlive = true;
 
     DatanodeRegistration nodeReg =
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java
index fecca4e915c..5b08f5302f0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java
@@ -38,7 +38,6 @@ import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.util.VersionInfo;
 import org.junit.After;
-import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.internal.util.reflection.Whitebox;
@@ -119,10 +118,17 @@ public class TestComputeInvalidateWork {
   public void testDatanodeReformat() throws Exception {
     namesystem.writeLock();
     try {
+      // Change the datanode UUID to emulate a reformat
+      String poolId = cluster.getNamesystem().getBlockPoolId();
+      DatanodeRegistration dnr = cluster.getDataNode(nodes[0].getIpcPort())
+                                 .getDNRegistrationForBP(poolId);
+      dnr = new DatanodeRegistration(UUID.randomUUID().toString(), dnr);
+      cluster.stopDataNode(nodes[0].getXferAddr());
+
       Block block = new Block(0, 0, GenerationStamp.LAST_RESERVED_STAMP);
       bm.addToInvalidates(block, nodes[0]);
 
-      // Change the datanode UUID to emulate a reformation
-      nodes[0].setDatanodeUuidForTesting("fortesting");
+      bm.getDatanodeManager().registerDatanode(dnr);
+      // Since UUID has changed, the invalidation work should be skipped
       assertEquals(0, bm.computeInvalidateWork(1));
       assertEquals(0, bm.getPendingDeletionBlocksCount());
@@ -158,8 +164,8 @@ public class TestComputeInvalidateWork {
     // Re-register each DN and see that it wipes the invalidation work
     for (DataNode dn : cluster.getDataNodes()) {
       DatanodeID did = dn.getDatanodeId();
-      did.setDatanodeUuidForTesting(UUID.randomUUID().toString());
-      DatanodeRegistration reg = new DatanodeRegistration(did,
+      DatanodeRegistration reg = new DatanodeRegistration(
+          new DatanodeID(UUID.randomUUID().toString(), did),
           new StorageInfo(HdfsServerConstants.NodeType.DATA_NODE),
           new ExportedBlockKeys(),
           VersionInfo.getVersion());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeProtocolRetryPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeProtocolRetryPolicy.java
index da858cd662c..ac7ebc05eba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeProtocolRetryPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeProtocolRetryPolicy.java
@@ -173,7 +173,8 @@ public class TestDatanodeProtocolRetryPolicy {
           } else {
             DatanodeRegistration dr =
                 (DatanodeRegistration) invocation.getArguments()[0];
-            datanodeRegistration.setDatanodeUuidForTesting(dr.getDatanodeUuid());
+            datanodeRegistration =
+                new DatanodeRegistration(dr.getDatanodeUuid(), dr);
            LOG.info("mockito succeeded " + datanodeRegistration);
            return datanodeRegistration;
          }
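
Note for reviewers (illustration only; the sketch below is not part of the patch and uses hypothetical MutableId / MutableHashCodeDemo classes rather than HDFS code): a HashSet or HashMap files each entry under the bucket computed from hashCode() at insertion time. The old DatanodeID hashed xferAddr, which setIpAddr() and updateRegInfo() mutate on re-registration, so a node already stored in a hash-based collection could become impossible to find or remove. Hashing only the immutable datanodeUuid, as this patch does, keeps the hash stable for the lifetime of the object.

    import java.util.HashSet;
    import java.util.Set;

    // Hypothetical stand-in for a class whose hashCode() depends on a mutable
    // field, loosely modeled on the pre-patch DatanodeID.
    class MutableId {
      private String xferAddr;

      MutableId(String xferAddr) {
        this.xferAddr = xferAddr;
      }

      // Mutates the value the hash is derived from (compare setIpAddr/updateRegInfo).
      void setXferAddr(String xferAddr) {
        this.xferAddr = xferAddr;
      }

      @Override
      public int hashCode() {
        return xferAddr.hashCode();
      }

      @Override
      public boolean equals(Object o) {
        return o instanceof MutableId && ((MutableId) o).xferAddr.equals(xferAddr);
      }
    }

    public class MutableHashCodeDemo {
      public static void main(String[] args) {
        Set<MutableId> live = new HashSet<>();
        MutableId id = new MutableId("10.0.0.1:50010");
        live.add(id);                      // filed under the hash of the old address

        id.setXferAddr("10.0.0.2:50010");  // address changes, e.g. on re-registration

        // Lookups now probe the bucket for the new hash, so (unless the old and new
        // hashes happen to share a bucket) the stored entry cannot be found or removed.
        System.out.println(live.contains(id)); // almost always false
        System.out.println(live.remove(id));   // almost always false; entry is stranded
      }
    }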