diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
index 1e6d88249e2..78371f5b0f8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
@@ -26,7 +26,7 @@ import com.google.protobuf.ByteString;
 
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
-import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAServiceStateProto;
+import org.apache.hadoop.ha.proto.HAServiceProtocolProtos;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
@@ -338,8 +338,7 @@ public class PBHelper {
     StorageInfoProto storage = info.getStorageInfo();
     return new NamespaceInfo(storage.getNamespceID(), storage.getClusterID(),
         info.getBlockPoolID(), storage.getCTime(), info.getBuildVersion(),
-        info.getSoftwareVersion(), info.getCapabilities(),
-        convert(info.getState()));
+        info.getSoftwareVersion(), info.getCapabilities());
   }
 
   public static NamenodeCommand convert(NamenodeCommandProto cmd) {
@@ -745,68 +744,43 @@ public class PBHelper {
   }
 
   public static NamespaceInfoProto convert(NamespaceInfo info) {
-    NamespaceInfoProto.Builder builder = NamespaceInfoProto.newBuilder();
-    builder.setBlockPoolID(info.getBlockPoolID())
+    return NamespaceInfoProto.newBuilder()
+        .setBlockPoolID(info.getBlockPoolID())
         .setBuildVersion(info.getBuildVersion())
         .setUnused(0)
         .setStorageInfo(PBHelper.convert((StorageInfo)info))
         .setSoftwareVersion(info.getSoftwareVersion())
-        .setCapabilities(info.getCapabilities());
-    HAServiceState state = info.getState();
-    if(state != null) {
-      builder.setState(convert(info.getState()));
-    }
-    return builder.build();
-  }
-
-  public static HAServiceState convert(HAServiceStateProto s) {
-    if (s == null) {
-      return null;
-    }
-    switch (s) {
-    case INITIALIZING:
-      return HAServiceState.INITIALIZING;
-    case ACTIVE:
-      return HAServiceState.ACTIVE;
-    case STANDBY:
-      return HAServiceState.STANDBY;
-    default:
-      throw new IllegalArgumentException("Unexpected HAServiceStateProto:"
-          + s);
-    }
-  }
-
-  public static HAServiceStateProto convert(HAServiceState s) {
-    if (s == null) {
-      return null;
-    }
-    switch (s) {
-    case INITIALIZING:
-      return HAServiceStateProto.INITIALIZING;
-    case ACTIVE:
-      return HAServiceStateProto.ACTIVE;
-    case STANDBY:
-      return HAServiceStateProto.STANDBY;
-    default:
-      throw new IllegalArgumentException("Unexpected HAServiceState:"
-          + s);
-    }
+        .setCapabilities(info.getCapabilities())
+        .build();
   }
 
   public static NNHAStatusHeartbeat convert(NNHAStatusHeartbeatProto s) {
-    if (s == null) {
-      return null;
+    if (s == null) return null;
+    switch (s.getState()) {
+    case ACTIVE:
+      return new NNHAStatusHeartbeat(HAServiceState.ACTIVE, s.getTxid());
+    case STANDBY:
+      return new NNHAStatusHeartbeat(HAServiceState.STANDBY, s.getTxid());
+    default:
+      throw new IllegalArgumentException("Unexpected NNHAStatusHeartbeat.State:" + s.getState());
     }
-    return new NNHAStatusHeartbeat(convert(s.getState()), s.getTxid());
   }
 
   public static NNHAStatusHeartbeatProto convert(NNHAStatusHeartbeat hb) {
-    if (hb == null) {
-      return null;
-    }
+    if (hb == null) return null;
     NNHAStatusHeartbeatProto.Builder builder =
-        NNHAStatusHeartbeatProto.newBuilder();
-    builder.setState(convert(hb.getState()));
+      NNHAStatusHeartbeatProto.newBuilder();
+    switch (hb.getState()) {
+    case ACTIVE:
+      builder.setState(HAServiceProtocolProtos.HAServiceStateProto.ACTIVE);
+      break;
+    case STANDBY:
+      builder.setState(HAServiceProtocolProtos.HAServiceStateProto.STANDBY);
+      break;
+    default:
+      throw new IllegalArgumentException("Unexpected NNHAStatusHeartbeat.State:" +
+          hb.getState());
+    }
     builder.setTxid(hb.getTxId());
     return builder.build();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
index 00e6b3ecb01..00102eb56d9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
@@ -307,16 +307,8 @@ class BPOfferService {
    * verifies that this namespace matches (eg to prevent a misconfiguration
    * where a StandbyNode from a different cluster is specified)
    */
-  void verifyAndSetNamespaceInfo(BPServiceActor actor, NamespaceInfo nsInfo)
-      throws IOException {
+  void verifyAndSetNamespaceInfo(NamespaceInfo nsInfo) throws IOException {
     writeLock();
-
-    if(nsInfo.getState() == HAServiceState.ACTIVE
-        && bpServiceToActive == null) {
-      LOG.info("Acknowledging ACTIVE Namenode during handshake" + actor);
-      bpServiceToActive = actor;
-    }
-
     try {
       if (this.bpNSInfo == null) {
         this.bpNSInfo = nsInfo;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index dffe14f4575..f3247fca27d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -269,11 +269,11 @@ class BPServiceActor implements Runnable {
     // First phase of the handshake with NN - get the namespace
     // info.
     NamespaceInfo nsInfo = retrieveNamespaceInfo();
-
+    
     // Verify that this matches the other NN in this HA pair.
     // This also initializes our block pool in the DN if we are
     // the first NN connection for this BP.
-    bpos.verifyAndSetNamespaceInfo(this, nsInfo);
+    bpos.verifyAndSetNamespaceInfo(nsInfo);
 
     // Second phase of the handshake with the NN.
     register(nsInfo);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 90fb924ee91..8a750a06ce0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -1594,7 +1594,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   NamespaceInfo unprotectedGetNamespaceInfo() {
     return new NamespaceInfo(getFSImage().getStorage().getNamespaceID(),
         getClusterId(), getBlockPoolId(),
-        getFSImage().getStorage().getCTime(), getState());
+        getFSImage().getStorage().getCTime());
   }
 
   /**
@@ -4531,16 +4531,12 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       return 0;
     }
   }
-
+  
   @Metric
   public int getBlockCapacity() {
     return blockManager.getCapacity();
   }
 
-  public HAServiceState getState() {
-    return haContext == null ? null : haContext.getState().getServiceState();
-  }
-
   @Override // FSNamesystemMBean
   public String getFSState() {
     return isInSafeMode() ? "safeMode" : "Operational";
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java
index 66ce9ee52e4..90d0aacc4fc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java
@@ -22,7 +22,6 @@ import java.io.IOException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
@@ -45,7 +44,6 @@ public class NamespaceInfo extends StorageInfo {
   String blockPoolID = "";    // id of the block pool
   String softwareVersion;
   long capabilities;
-  HAServiceState state;
 
   // only authoritative on the server-side to determine advertisement to
  // clients.  enum will update the supported values
@@ -90,14 +88,6 @@ public class NamespaceInfo extends StorageInfo {
         CAPABILITIES_SUPPORTED);
   }
 
-  public NamespaceInfo(int nsID, String clusterID, String bpID,
-      long cT, String buildVersion, String softwareVersion,
-      long capabilities, HAServiceState st) {
-    this(nsID, clusterID, bpID, cT, buildVersion, softwareVersion,
-        capabilities);
-    this.state = st;
-  }
-
   // for use by server and/or client
   public NamespaceInfo(int nsID, String clusterID, String bpID,
       long cT, String buildVersion, String softwareVersion,
@@ -115,13 +105,6 @@ public class NamespaceInfo extends StorageInfo {
     this(nsID, clusterID, bpID, cT, Storage.getBuildVersion(),
         VersionInfo.getVersion());
   }
-  
-  public NamespaceInfo(int nsID, String clusterID, String bpID,
-      long cT, HAServiceState st) {
-    this(nsID, clusterID, bpID, cT, Storage.getBuildVersion(),
-        VersionInfo.getVersion());
-    this.state = st;
-  }
 
   public long getCapabilities() {
     return capabilities;
@@ -132,11 +115,6 @@ public class NamespaceInfo extends StorageInfo {
     this.capabilities = capabilities;
   }
 
-  @VisibleForTesting
-  public void setState(HAServiceState state) {
-    this.state = state;
-  }
-
   public boolean isCapabilitySupported(Capability capability) {
     Preconditions.checkArgument(capability != Capability.UNKNOWN,
         "cannot test for unknown capability");
@@ -156,10 +134,6 @@ public class NamespaceInfo extends StorageInfo {
     return softwareVersion;
   }
 
-  public HAServiceState getState() {
-    return state;
-  }
-
   @Override
   public String toString(){
     return super.toString() + ";bpid=" + blockPoolID;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/HdfsServer.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/HdfsServer.proto
index d7deebf1670..910e03bef57 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/HdfsServer.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/HdfsServer.proto
@@ -32,7 +32,6 @@ option java_generate_equals_and_hash = true;
 package hadoop.hdfs;
 
 import "hdfs.proto";
-import "HAServiceProtocol.proto";
 
 /**
  * Block access token information
@@ -102,7 +101,6 @@ message NamespaceInfoProto {
   required StorageInfoProto storageInfo = 4;// Node information
   required string softwareVersion = 5;      // Software version number (e.g. 2.0.0)
   optional uint64 capabilities = 6 [default = 0]; // feature flags
-  optional hadoop.common.HAServiceStateProto state = 7;
 }
 
 /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
index f8f0a3c460c..2d50c757633 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hdfs.server.datanode;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertSame;
 import static org.junit.Assert.assertTrue;
@@ -800,34 +799,4 @@ public class TestBPOfferService {
     }
     return -1;
   }
-
-  /*
-   *
-   */
-  @Test
-  public void testNNHAStateUpdateFromVersionRequest() throws Exception {
-    final BPOfferService bpos = setupBPOSForNNs(mockNN1, mockNN2);
-    BPServiceActor actor = bpos.getBPServiceActors().get(0);
-    bpos.start();
-    waitForInitialization(bpos);
-    // Should start with neither NN as active.
-    assertNull(bpos.getActiveNN());
-
-    // getNamespaceInfo() will not include HAServiceState
-    NamespaceInfo nsInfo = mockNN1.versionRequest();
-    bpos.verifyAndSetNamespaceInfo(actor, nsInfo);
-
-    assertNull(bpos.getActiveNN());
-
-    // Change mock so getNamespaceInfo() will include HAServiceState
-    Mockito.doReturn(new NamespaceInfo(1, FAKE_CLUSTERID, FAKE_BPID, 0,
-        HAServiceState.ACTIVE)).when(mockNN1).versionRequest();
-
-    // Update the bpos NamespaceInfo
-    nsInfo = mockNN1.versionRequest();
-    bpos.verifyAndSetNamespaceInfo(actor, nsInfo);
-
-    assertNotNull(bpos.getActiveNN());
-
-  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
index 6a0dd6fd9db..f02c679f388 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
@@ -33,7 +33,6 @@ import java.util.Collection;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.ha.HAServiceProtocol;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -44,7 +43,6 @@ import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAState;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.server.namenode.top.TopAuditLogger;
-import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.junit.After;
 import org.junit.Test;
 import org.mockito.Mockito;
@@ -156,25 +154,6 @@ public class TestFSNamesystem {
         + "safemode 2nd time", bm.isPopulatingReplQueues());
   }
 
-  @Test
-  public void testHAStateInNamespaceInfo() throws IOException {
-    Configuration conf = new Configuration();
-
-    FSEditLog fsEditLog = Mockito.mock(FSEditLog.class);
-    FSImage fsImage = Mockito.mock(FSImage.class);
-    Mockito.when(fsImage.getEditLog()).thenReturn(fsEditLog);
-    NNStorage nnStorage = Mockito.mock(NNStorage.class);
-    Mockito.when(fsImage.getStorage()).thenReturn(nnStorage);
-
-    FSNamesystem fsNamesystem = new FSNamesystem(conf, fsImage);
-    FSNamesystem fsn = Mockito.spy(fsNamesystem);
-    Mockito.when(fsn.getState()).thenReturn(
-        HAServiceProtocol.HAServiceState.ACTIVE);
-
-    NamespaceInfo nsInfo = fsn.unprotectedGetNamespaceInfo();
-    assertNotNull(nsInfo.getState());
-  }
-
   @Test
   public void testReset() throws Exception {
     Configuration conf = new Configuration();
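
Note (illustrative, not part of the patch): with this change applied, the DataNode no longer learns the NameNode's HA state from the versionRequest handshake (NamespaceInfo); the state travels only in NNHAStatusHeartbeat via the switch-based PBHelper conversions restored above. The sketch below shows the round trip those restored conversions are expected to preserve. The harness class and its main() are hypothetical; PBHelper, NNHAStatusHeartbeat, HAServiceState and NNHAStatusHeartbeatProto are the types touched by this patch, with import paths assumed from the Hadoop source tree.

import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat;

// Hypothetical harness, not part of the patch.
public class NNHAStatusHeartbeatRoundTrip {
  public static void main(String[] args) {
    // Heartbeat status as the NameNode would report it: HA state plus last transaction id.
    NNHAStatusHeartbeat hb = new NNHAStatusHeartbeat(HAServiceState.ACTIVE, 42L);

    // To the wire: the restored switch maps ACTIVE/STANDBY to HAServiceStateProto and
    // copies the txid (any other state falls through to IllegalArgumentException).
    NNHAStatusHeartbeatProto proto = PBHelper.convert(hb);

    // From the wire: the companion switch rebuilds the same state/txid pair.
    NNHAStatusHeartbeat back = PBHelper.convert(proto);
    System.out.println(back.getState() + " @ txid " + back.getTxId());
  }
}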