HDFS-4268. Remove redundant enum NNHAStatusHeartbeat.State. Contributed by Konstantin Shvachko.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1417753 13f79535-47bb-0310-9956-ffa450edef68
Konstantin Shvachko 2012-12-06 07:23:41 +00:00
parent 31e97a7757
commit f87b3c68bf
8 changed files with 38 additions and 39 deletions
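
For context, the commit is a pure type consolidation: NNHAStatusHeartbeat previously declared its own two-value State enum that duplicated org.apache.hadoop.ha.HAServiceProtocol.HAServiceState, so every boundary between the two had to translate one enum into the other. A minimal, self-contained sketch of that anti-pattern (hypothetical names, not the Hadoop classes):

// Hypothetical mini-example (not Hadoop code) of the redundancy this
// commit removes: two enums with identical meaning force a mapping
// step at every boundary between them.
enum HAServiceState { ACTIVE, STANDBY }
enum HeartbeatState { ACTIVE, STANDBY }

class StateTranslator {
  // Every producer had to map one enum onto the other, with an
  // error branch for states that cannot actually occur.
  static HeartbeatState toHeartbeat(HAServiceState s) {
    switch (s) {
      case ACTIVE:  return HeartbeatState.ACTIVE;
      case STANDBY: return HeartbeatState.STANDBY;
      default: throw new AssertionError("Invalid state: " + s);
    }
  }
}

With a single shared enum, the mapping and its dead error branch disappear, which is exactly what the hunks below delete.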

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -143,6 +143,8 @@ Release 2.0.3-alpha - Unreleased
 
     HDFS-3935. Add JournalNode to the start/stop scripts (Andy Isaacson via todd)
 
+    HDFS-4268. Remove redundant enum NNHAStatusHeartbeat.State. (shv)
+
   OPTIMIZATIONS
 
   BUG FIXES

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java

@@ -26,6 +26,7 @@ import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
@@ -1232,9 +1233,9 @@ public class PBHelper {
     if (s == null) return null;
     switch (s.getState()) {
     case ACTIVE:
-      return new NNHAStatusHeartbeat(NNHAStatusHeartbeat.State.ACTIVE, s.getTxid());
+      return new NNHAStatusHeartbeat(HAServiceState.ACTIVE, s.getTxid());
     case STANDBY:
-      return new NNHAStatusHeartbeat(NNHAStatusHeartbeat.State.STANDBY, s.getTxid());
+      return new NNHAStatusHeartbeat(HAServiceState.STANDBY, s.getTxid());
     default:
       throw new IllegalArgumentException("Unexpected NNHAStatusHeartbeat.State:" + s.getState());
     }
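
At the wire boundary the defensive shape is kept: null in, null out, and an unrecognized value still fails fast. A compilable distillation of the pattern above (WireState is a hypothetical stand-in for the generated protobuf enum):

import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;

// Hypothetical stand-in for the generated protobuf state enum.
enum WireState { ACTIVE, STANDBY }

class HeartbeatStateConverter {
  // Null maps to null; anything a newer or older peer might send that
  // we do not understand is rejected, mirroring the PBHelper switch.
  static HAServiceState convert(WireState s) {
    if (s == null) return null;
    switch (s) {
      case ACTIVE:  return HAServiceState.ACTIVE;
      case STANDBY: return HAServiceState.STANDBY;
      default:
        throw new IllegalArgumentException("Unexpected state: " + s);
    }
  }
}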

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java

@@ -26,6 +26,7 @@ import java.util.concurrent.CopyOnWriteArrayList;
 
 import org.apache.commons.logging.Log;
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -412,7 +413,7 @@ class BPOfferService {
     final long txid = nnHaState.getTxId();
 
     final boolean nnClaimsActive =
-        nnHaState.getState() == NNHAStatusHeartbeat.State.ACTIVE;
+        nnHaState.getState() == HAServiceState.ACTIVE;
     final boolean bposThinksActive = bpServiceToActive == actor;
     final boolean isMoreRecentClaim = txid > lastActiveClaimTxId;
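
This comparison feeds the datanode's choice of which NameNode to treat as active: an ACTIVE claim wins only if it carries a higher transaction id than the last accepted claim. A simplified, hypothetical sketch of that arbitration (not the actual BPOfferService code), consistent with the test scenarios further below:

import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;

// Hypothetical, simplified arbitration: accept an ACTIVE claim only
// when it is newer than the last accepted one; a stand-down by the
// currently chosen NN leaves no active NN.
class ActiveClaimArbiter {
  private long lastActiveClaimTxId = Long.MIN_VALUE;
  private Object activeNn;                 // opaque handle to the chosen NN

  void onHeartbeat(Object nn, HAServiceState state, long txid) {
    boolean nnClaimsActive = state == HAServiceState.ACTIVE;
    boolean isMoreRecentClaim = txid > lastActiveClaimTxId;
    if (nnClaimsActive && isMoreRecentClaim) {
      activeNn = nn;                       // newer claim wins
      lastActiveClaimTxId = txid;
    } else if (!nnClaimsActive && nn == activeNn) {
      activeNn = null;                     // the active NN stood down
    }
  }
}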

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupState.java

@@ -2,6 +2,7 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.IOException;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.ha.ServiceFailedException;
 import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
@@ -9,6 +10,7 @@ import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAState;
 import org.apache.hadoop.ipc.StandbyException;
 
+@InterfaceAudience.Private
 public class BackupState extends HAState {
 
   public BackupState() {
@@ -26,7 +28,7 @@ public class BackupState extends HAState {
     return false;
   }
 
-  @Override
+  @Override // HAState
   public void enterState(HAContext context) throws ServiceFailedException {
     try {
       context.startActiveServices();
@@ -35,7 +37,7 @@ public class BackupState extends HAState {
     }
   }
 
-  @Override
+  @Override // HAState
   public void exitState(HAContext context) throws ServiceFailedException {
     try {
       context.stopActiveServices();
@@ -44,7 +46,7 @@ public class BackupState extends HAState {
     }
   }
 
-  @Override
+  @Override // HAState
   public void prepareToExitState(HAContext context) throws ServiceFailedException {
     context.prepareToStopStandbyServices();
   }

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -166,7 +166,6 @@ import org.apache.hadoop.hdfs.server.namenode.ha.EditLogTailer;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAState;
 import org.apache.hadoop.hdfs.server.namenode.ha.StandbyCheckpointer;
-import org.apache.hadoop.hdfs.server.namenode.ha.StandbyState;
 import org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
 import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
@@ -986,8 +985,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       // start in active.
       return haEnabled;
     }
-    return haContext.getState() instanceof StandbyState;
+    return HAServiceState.STANDBY == haContext.getState().getServiceState();
   }
 
   /**
@@ -3412,15 +3411,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
 
   private NNHAStatusHeartbeat createHaStatusHeartbeat() {
     HAState state = haContext.getState();
-    NNHAStatusHeartbeat.State hbState;
-    if (state.getServiceState() == HAServiceState.ACTIVE) {
-      hbState = NNHAStatusHeartbeat.State.ACTIVE;
-    } else if (state.getServiceState() == HAServiceState.STANDBY) {
-      hbState = NNHAStatusHeartbeat.State.STANDBY;
-    } else {
-      throw new AssertionError("Invalid state: " + state.getClass());
-    }
-    return new NNHAStatusHeartbeat(hbState,
+    return new NNHAStatusHeartbeat(state.getServiceState(),
         getFSImage().getLastAppliedOrWrittenTxId());
   }
 
@@ -3849,7 +3840,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     private synchronized void leave() {
       // if not done yet, initialize replication queues.
      // In the standby, do not populate repl queues
-      if (!isPopulatingReplQueues() && !isInStandbyState()) {
+      if (!isPopulatingReplQueues() && shouldPopulateReplQueues()) {
        initializeReplQueues();
      }
      long timeInSafemode = now() - startTime;
@@ -3892,7 +3883,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
      * initializing replication queues.
      */
     private synchronized boolean canInitializeReplQueues() {
-      return !isInStandbyState() && blockSafe >= blockReplQueueThreshold;
+      return shouldPopulateReplQueues()
+          && blockSafe >= blockReplQueueThreshold;
     }
 
   /**
@@ -4232,7 +4224,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
 
   @Override
   public boolean isPopulatingReplQueues() {
-    if (isInStandbyState()) {
+    if (!shouldPopulateReplQueues()) {
       return false;
     }
     // safeMode is volatile, and may be set to null at any time
@@ -4241,7 +4233,13 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       return true;
     return safeMode.isPopulatingReplQueues();
   }
 
+  private boolean shouldPopulateReplQueues() {
+    if(haContext == null || haContext.getState() == null)
+      return false;
+    return haContext.getState().shouldPopulateReplQueues();
+  }
+
   @Override
   public void incrementSafeBlockCount(int replication) {
     // safeMode is volatile, and may be set to null at any time
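
The new shouldPopulateReplQueues() helper both delegates the policy question to the current HA state object and stays null-safe before the HA context exists. A standalone model of the guard (hypothetical types, not the Hadoop classes):

// Hypothetical, simplified model of the new guard: the replication-queue
// policy is asked of the current HA state object, and defaults to false
// while the HA context is still uninitialized (e.g. during startup).
class ReplQueuePolicy {
  interface State { boolean shouldPopulateReplQueues(); }
  static class Context { State state; }

  private Context haContext;          // may be null early in startup

  boolean shouldPopulateReplQueues() {
    if (haContext == null || haContext.state == null) {
      return false;                   // no HA context yet: do not populate
    }
    return haContext.state.shouldPopulateReplQueues();
  }
}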

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NNHAStatusHeartbeat.java

@@ -19,31 +19,26 @@ package org.apache.hadoop.hdfs.server.protocol;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class NNHAStatusHeartbeat {
 
-  private State state;
+  private HAServiceState state;
   private long txid = HdfsConstants.INVALID_TXID;
 
-  public NNHAStatusHeartbeat(State state, long txid) {
+  public NNHAStatusHeartbeat(HAServiceState state, long txid) {
     this.state = state;
     this.txid = txid;
   }
 
-  public State getState() {
+  public HAServiceState getState() {
     return state;
   }
 
   public long getTxId() {
     return txid;
   }
-
-  @InterfaceAudience.Private
-  public enum State {
-    ACTIVE,
-    STANDBY;
-  }
 }
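
After the change, producers and consumers use HAServiceState directly; a short usage sketch against the constructor and getters shown above (the txid value is arbitrary):

import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat;

class HeartbeatUsageSketch {                 // illustrative only
  static boolean nnClaimsActive() {
    // Construct a heartbeat with the shared HA enum (txid 42 is arbitrary).
    NNHAStatusHeartbeat hb = new NNHAStatusHeartbeat(HAServiceState.ACTIVE, 42L);
    // Consumers now compare against HAServiceState directly, as
    // BPOfferService does above.
    return hb.getState() == HAServiceState.ACTIVE && hb.getTxId() > 0;
  }
}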

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java

@@ -29,6 +29,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -41,7 +42,6 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
 import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat;
-import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat.State;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
 import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
@@ -123,7 +123,7 @@ public class TestBPOfferService {
         Mockito.anyInt(),
         Mockito.anyInt(),
         Mockito.anyInt());
-    mockHaStatuses[nnIdx] = new NNHAStatusHeartbeat(State.STANDBY, 0);
+    mockHaStatuses[nnIdx] = new NNHAStatusHeartbeat(HAServiceState.STANDBY, 0);
     return mock;
   }
@@ -255,12 +255,12 @@ public class TestBPOfferService {
     assertNull(bpos.getActiveNN());
 
     // Have NN1 claim active at txid 1
-    mockHaStatuses[0] = new NNHAStatusHeartbeat(State.ACTIVE, 1);
+    mockHaStatuses[0] = new NNHAStatusHeartbeat(HAServiceState.ACTIVE, 1);
     bpos.triggerHeartbeatForTests();
     assertSame(mockNN1, bpos.getActiveNN());
 
     // NN2 claims active at a higher txid
-    mockHaStatuses[1] = new NNHAStatusHeartbeat(State.ACTIVE, 2);
+    mockHaStatuses[1] = new NNHAStatusHeartbeat(HAServiceState.ACTIVE, 2);
     bpos.triggerHeartbeatForTests();
     assertSame(mockNN2, bpos.getActiveNN());
@@ -272,12 +272,12 @@ public class TestBPOfferService {
     // Even if NN2 goes to standby, DN shouldn't reset to talking to NN1,
     // because NN1's txid is lower than the last active txid. Instead,
     // it should consider neither active.
-    mockHaStatuses[1] = new NNHAStatusHeartbeat(State.STANDBY, 2);
+    mockHaStatuses[1] = new NNHAStatusHeartbeat(HAServiceState.STANDBY, 2);
     bpos.triggerHeartbeatForTests();
     assertNull(bpos.getActiveNN());
 
     // Now if NN1 goes back to a higher txid, it should be considered active
-    mockHaStatuses[0] = new NNHAStatusHeartbeat(State.ACTIVE, 3);
+    mockHaStatuses[0] = new NNHAStatusHeartbeat(HAServiceState.ACTIVE, 3);
     bpos.triggerHeartbeatForTests();
     assertSame(mockNN1, bpos.getActiveNN());

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java

@@ -49,6 +49,7 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
@@ -72,7 +73,6 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
 import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat;
-import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat.State;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
@@ -157,7 +157,7 @@ public class TestBlockRecovery {
         Mockito.anyInt()))
         .thenReturn(new HeartbeatResponse(
             new DatanodeCommand[0],
-            new NNHAStatusHeartbeat(State.ACTIVE, 1)));
+            new NNHAStatusHeartbeat(HAServiceState.ACTIVE, 1)));
     dn = new DataNode(conf, dirs, null) {
       @Override