svn merge -c 1229877 from trunk for HDFS-2739.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23-PB@1231837 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze 2012-01-16 04:54:16 +00:00
parent 7c39f9b4f1
commit de5dd8ad89
6 changed files with 15 additions and 53 deletions

CHANGES.txt

@@ -102,6 +102,8 @@ Release 0.23-PB - Unreleased
     HDFS-2700. Fix failing TestDataNodeMultipleRegistrations in trunk
     (Uma Maheswara Rao G via todd)
 
+    HDFS-2739. SecondaryNameNode doesn't start up. (jitendra)
+
 Release 0.23.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

NamenodeProtocolServerSideTranslatorPB.java

@@ -97,7 +97,7 @@ public class NamenodeProtocolServerSideTranslatorPB implements
   }
 
   @Override
-  public GetTransactionIdResponseProto getTransationId(RpcController unused,
+  public GetTransactionIdResponseProto getTransactionId(RpcController unused,
       GetTransactionIdRequestProto request) throws ServiceException {
     long txid;
     try {

NamenodeProtocolTranslatorPB.java

@@ -92,46 +92,13 @@ public class NamenodeProtocolTranslatorPB implements NamenodeProtocol,
   final private NamenodeProtocolPB rpcProxy;
 
-  private static NamenodeProtocolPB createNamenode(
-      InetSocketAddress nameNodeAddr, Configuration conf,
-      UserGroupInformation ugi) throws IOException {
-    RPC.setProtocolEngine(conf, NamenodeProtocolPB.class,
-        ProtobufRpcEngine.class);
-    return RPC.getProxy(NamenodeProtocolPB.class,
-        RPC.getProtocolVersion(NamenodeProtocolPB.class), nameNodeAddr, ugi,
-        conf, NetUtils.getSocketFactory(conf, NamenodeProtocolPB.class));
-  }
-
-  /** Create a {@link NameNode} proxy */
-  static NamenodeProtocolPB createNamenodeWithRetry(
-      NamenodeProtocolPB rpcNamenode) {
-    RetryPolicy createPolicy = RetryPolicies
-        .retryUpToMaximumCountWithFixedSleep(5,
-            HdfsConstants.LEASE_SOFTLIMIT_PERIOD, TimeUnit.MILLISECONDS);
-    Map<Class<? extends Exception>, RetryPolicy> remoteExceptionToPolicyMap =
-        new HashMap<Class<? extends Exception>, RetryPolicy>();
-    remoteExceptionToPolicyMap.put(AlreadyBeingCreatedException.class,
-        createPolicy);
-
-    Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap =
-        new HashMap<Class<? extends Exception>, RetryPolicy>();
-    exceptionToPolicyMap.put(RemoteException.class, RetryPolicies
-        .retryByRemoteException(RetryPolicies.TRY_ONCE_THEN_FAIL,
-            remoteExceptionToPolicyMap));
-    RetryPolicy methodPolicy = RetryPolicies.retryByException(
-        RetryPolicies.TRY_ONCE_THEN_FAIL, exceptionToPolicyMap);
-    Map<String, RetryPolicy> methodNameToPolicyMap =
-        new HashMap<String, RetryPolicy>();
-    methodNameToPolicyMap.put("create", methodPolicy);
-
-    return (NamenodeProtocolPB) RetryProxy.create(NamenodeProtocolPB.class,
-        rpcNamenode, methodNameToPolicyMap);
-  }
-
   public NamenodeProtocolTranslatorPB(InetSocketAddress nameNodeAddr,
       Configuration conf, UserGroupInformation ugi) throws IOException {
-    rpcProxy = createNamenodeWithRetry(createNamenode(nameNodeAddr, conf, ugi));
+    RPC.setProtocolEngine(conf, NamenodeProtocolPB.class,
+        ProtobufRpcEngine.class);
+    rpcProxy = RPC.getProxy(NamenodeProtocolPB.class,
+        RPC.getProtocolVersion(NamenodeProtocolPB.class), nameNodeAddr, ugi,
+        conf, NetUtils.getSocketFactory(conf, NamenodeProtocolPB.class));
   }
 
   public NamenodeProtocolTranslatorPB(NamenodeProtocolPB rpcProxy) {
@@ -182,7 +149,7 @@ public class NamenodeProtocolTranslatorPB implements NamenodeProtocol,
   @Override
   public long getTransactionID() throws IOException {
     try {
-      return rpcProxy.getTransationId(NULL_CONTROLLER, GET_TRANSACTIONID)
+      return rpcProxy.getTransactionId(NULL_CONTROLLER, GET_TRANSACTIONID)
          .getTxId();
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
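
Two things change here, as the diff suggests: the constructor now registers ProtobufRpcEngine itself before building the proxy, and the createNamenodeWithRetry wrapper is dropped entirely; its retry policy was keyed to a "create" method that NamenodeProtocol does not expose, so it appears to have been copied from the client-side translator and did nothing here. A minimal sketch of the surviving pattern, assuming the 0.23-era org.apache.hadoop.ipc API used in this diff (NamenodeProxyFactory is a hypothetical name):

import java.io.IOException;
import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolPB;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;

// Hypothetical helper mirroring the constructor added above.
class NamenodeProxyFactory {
  static NamenodeProtocolPB createProxy(InetSocketAddress nameNodeAddr,
      Configuration conf, UserGroupInformation ugi) throws IOException {
    // Register the protobuf engine for this protocol first; RPC.getProxy
    // consults this setting to decide how calls are serialized.
    RPC.setProtocolEngine(conf, NamenodeProtocolPB.class,
        ProtobufRpcEngine.class);
    return RPC.getProxy(NamenodeProtocolPB.class,
        RPC.getProtocolVersion(NamenodeProtocolPB.class), nameNodeAddr, ugi,
        conf, NetUtils.getSocketFactory(conf, NamenodeProtocolPB.class));
  }
}

One behavioral note: the callers below previously used RPC.waitForProxy, which blocks and retries until the NameNode is reachable, whereas RPC.getProxy attempts the connection once.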

BackupNode.java

@@ -31,7 +31,6 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalProtocolService;
 import org.apache.hadoop.hdfs.protocolPB.JournalProtocolPB;
 import org.apache.hadoop.hdfs.protocolPB.JournalProtocolServerSideTranslatorPB;
-import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolPB;
 import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolTranslatorPB;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.Storage;
@@ -43,6 +42,7 @@ import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.UserGroupInformation;
 
 import com.google.protobuf.BlockingService;
@@ -294,11 +294,8 @@ public class BackupNode extends NameNode {
   private NamespaceInfo handshake(Configuration conf) throws IOException {
     // connect to name node
     InetSocketAddress nnAddress = NameNode.getServiceAddress(conf, true);
-    NamenodeProtocolPB proxy =
-        RPC.waitForProxy(NamenodeProtocolPB.class,
-            RPC.getProtocolVersion(NamenodeProtocolPB.class),
-            nnAddress, conf);
-    this.namenode = new NamenodeProtocolTranslatorPB(proxy);
+    this.namenode = new NamenodeProtocolTranslatorPB(nnAddress, conf,
+        UserGroupInformation.getCurrentUser());
     this.nnRpcAddress = getHostPortString(nnAddress);
     this.nnHttpAddress = getHostPortString(super.getHttpServerAddress(conf));
     // get version and id info from the name-node

SecondaryNameNode.java

@@ -48,7 +48,6 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DFSUtil.ErrorSimulator;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolPB;
 import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolTranslatorPB;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
@@ -60,7 +59,6 @@ import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
 import org.apache.hadoop.http.HttpServer;
 import org.apache.hadoop.io.MD5Hash;
-import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.source.JvmMetrics;
@@ -219,10 +217,8 @@ public class SecondaryNameNode implements Runnable {
     nameNodeAddr = NameNode.getServiceAddress(conf, true);
 
     this.conf = conf;
-    NamenodeProtocolPB proxy =
-        RPC.waitForProxy(NamenodeProtocolPB.class,
-            RPC.getProtocolVersion(NamenodeProtocolPB.class), nameNodeAddr, conf);
-    this.namenode = new NamenodeProtocolTranslatorPB(proxy);
+    this.namenode = new NamenodeProtocolTranslatorPB(nameNodeAddr, conf,
+        UserGroupInformation.getCurrentUser());
 
     // initialize checkpoint directories
     fsName = getInfoServer();
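
This is the actual HDFS-2739 fix: SecondaryNameNode (and BackupNode, above) had been creating a raw NamenodeProtocolPB proxy via RPC.waitForProxy without ever selecting ProtobufRpcEngine for that protocol, which is why startup failed. Handing the address, conf, and UGI to the translator lets it do the engine setup shown earlier, and both files can shed their NamenodeProtocolPB and RPC imports. A caller-side sketch (HandshakeExample is a hypothetical name; the APIs are the ones visible in this diff):

import java.io.IOException;
import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolTranslatorPB;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.security.UserGroupInformation;

// Hypothetical caller, following BackupNode.handshake() and the
// SecondaryNameNode constructor after this patch.
class HandshakeExample {
  static long currentTxId(InetSocketAddress nnAddr, Configuration conf)
      throws IOException {
    // The translator registers ProtobufRpcEngine and builds the proxy itself.
    NamenodeProtocol namenode = new NamenodeProtocolTranslatorPB(
        nnAddr, conf, UserGroupInformation.getCurrentUser());
    // Round-trips through the renamed getTransactionId rpc below.
    return namenode.getTransactionID();
  }
}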

NamenodeProtocol.proto

@@ -185,7 +185,7 @@ service NamenodeProtocolService {
   /**
    * Get the transaction ID of the most recently persisted editlog record
    */
-  rpc getTransationId(GetTransactionIdRequestProto)
+  rpc getTransactionId(GetTransactionIdRequestProto)
       returns(GetTransactionIdResponseProto);
 
   /**
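
protoc derives the Java service stub names from the rpc declarations, which is why this one-character proto fix has to land in the same commit as the renames in both translator classes above. A hedged sketch of the affected slice of the generated code (assuming protobuf 2.x java_generic_services output; the real file is generated, not hand-written):

// Sketch of the protoc-generated blocking interface inside
// NamenodeProtocolProtos.NamenodeProtocolService; the method name
// follows the rpc name, so the typo fix ripples into both translators.
public interface BlockingInterface {
  GetTransactionIdResponseProto getTransactionId(
      com.google.protobuf.RpcController controller,
      GetTransactionIdRequestProto request)
      throws com.google.protobuf.ServiceException;
}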