Merge trunk into HA branch.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-1623@1210547 13f79535-47bb-0310-9956-ffa450edef68
Author: Aaron Myers
Date: 2011-12-05 18:00:44 +00:00
Commit: 8a4db4d331
90 changed files with 4229 additions and 629 deletions

CHANGES.txt (hadoop-common)

@@ -5,6 +5,11 @@ Trunk (unreleased changes)
INCOMPATIBLE CHANGES
NEW FEATURES
HADOOP-7773. Add support for protocol buffer based RPC engine.
(suresh)
HADOOP-7875. Add helper class to unwrap protobuf ServiceException.
(suresh)
IMPROVEMENTS
@@ -61,6 +66,12 @@ Trunk (unreleased changes)
HADOOP-7590. Mavenize streaming and MR examples. (tucu)
HADOOP-7862. Move the support for multiple protocols to lower layer so
that Writable, PB and Avro can all use it (Sanjay)
HADOOP-7876. Provided access to encoded key in DelegationKey for
use in protobuf based RPCs. (suresh)
BUGS
HADOOP-7606. Upgrade Jackson to version 1.7.1 to match the version required
@@ -102,13 +113,12 @@ Trunk (unreleased changes)
HDFS-2614. hadoop dist tarball is missing hdfs headers. (tucu)
HADOOP-7874. native libs should be under lib/native/ dir. (tucu)
OPTIMIZATIONS
HADOOP-7761. Improve the performance of raw comparisons. (todd)
HADOOP-7773. Add support for protocol buffer based RPC engine.
(suresh)
Release 0.23.1 - Unreleased
INCOMPATIBLE CHANGES
@@ -134,6 +144,9 @@ Release 0.23.1 - Unreleased
HADOOP-7804. Enable hadoop config generator to set configurations to enable
short circuit read. (Arpit Gupta via jitendra)
HADOOP-7877. Update balancer CLI usage documentation to include the new
-policy option. (szetszwo)
OPTIMIZATIONS
BUG FIXES

hadoop-config.sh

@@ -186,29 +186,16 @@ fi
# setup 'java.library.path' for native-hadoop code if necessary
if [ -d "${HADOOP_PREFIX}/build/native" -o -d "${HADOOP_PREFIX}/lib/native" ]; then
JAVA_PLATFORM=`CLASSPATH=${CLASSPATH} ${JAVA} -Xmx32m ${HADOOP_JAVA_PLATFORM_OPTS} org.apache.hadoop.util.PlatformName | sed -e "s/ /_/g"`
if [ -d "$HADOOP_PREFIX/build/native" ]; then
if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:${HADOOP_PREFIX}/build/native/${JAVA_PLATFORM}/lib
else
JAVA_LIBRARY_PATH=${HADOOP_PREFIX}/build/native/${JAVA_PLATFORM}/lib
fi
fi
if [ -d "${HADOOP_PREFIX}/lib/native" ]; then
if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:${HADOOP_PREFIX}/lib/native
else
JAVA_LIBRARY_PATH=${HADOOP_PREFIX}/lib/native
fi
fi
fi
if [ -e "${HADOOP_PREFIX}/lib/libhadoop.a" ]; then
JAVA_LIBRARY_PATH=${HADOOP_PREFIX}/lib
fi
# cygwin path translation
if $cygwin; then
JAVA_LIBRARY_PATH=`cygpath -p "$JAVA_LIBRARY_PATH"`

commands_manual.xml

@@ -445,14 +445,22 @@
<a href="http://hadoop.apache.org/hdfs/docs/current/hdfs_user_guide.html#Rebalancer">Rebalancer</a>.
</p>
<p>
<code>Usage: hadoop balancer [-policy &lt;blockpool|datanode&gt;] [-threshold &lt;threshold&gt;]</code>
</p>
<table>
<tr><th> COMMAND_OPTION </th><th> Description </th></tr>
<tr>
<td><code>-policy &lt;blockpool|datanode&gt;</code></td>
<td>The balancing policy.
<br /><code>datanode</code>: The cluster is balanced if each datanode is balanced.
<br /><code>blockpool</code>: The cluster is balanced if each block pool in each datanode is balanced.
<br />Note that <code>blockpool</code> is a stronger condition than <code>datanode</code>.
The default policy is <code>datanode</code>.
</td>
</tr>
<tr>
<td><code>-threshold &lt;threshold&gt;</code></td>
<td>Percentage of disk capacity. The default threshold is 10%.</td>
</tr>
</table>
</section>
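A worked example of the option documented above (the threshold value here is illustrative, not part of this commit): running

    hadoop balancer -policy blockpool -threshold 5

asks the balancer to even out usage per block pool on every datanode, stopping once each block pool is within 5% of the average utilization; omitting -policy keeps the default datanode policy.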

AvroRpcEngine.java

@@ -44,6 +44,7 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.SecretManager;
import org.apache.hadoop.security.token.TokenIdentifier;
@@ -237,14 +238,15 @@ public class AvroRpcEngine implements RpcEngine {
super((Class)null, new Object(), conf,
bindAddress, port, numHandlers, numReaders,
queueSizePerHandler, verbose, secretManager);
// RpcKind is WRITABLE since Avro is tunneled through WRITABLE
super.addProtocol(RpcKind.RPC_WRITABLE, TunnelProtocol.class, responder);
responder.addProtocol(iface, impl);
}
@Override
public Server
addProtocol(RpcKind rpcKind, Class<?> protocolClass, Object protocolImpl)
throws IOException {
responder.addProtocol(protocolClass, protocolImpl);
return this;

Client.java

@@ -1002,17 +1002,19 @@ public class Client {
}
/**
* Same as {@link #call(RpcPayloadHeader.RpcKind, Writable, ConnectionId)}
* for RPC_BUILTIN
*/
public Writable call(Writable param, InetSocketAddress address)
throws InterruptedException, IOException {
return call(RpcKind.RPC_BUILTIN, param, address);
}
/** Make a call, passing <code>param</code>, to the IPC server running at
* <code>address</code>, returning the value. Throws exceptions if there are
* network problems or if the remote code threw an exception.
* @deprecated Use {@link #call(RpcPayloadHeader.RpcKind, Writable,
* ConnectionId)} instead
*/
@Deprecated
public Writable call(RpcKind rpcKind, Writable param, InetSocketAddress address)
@@ -1025,7 +1027,8 @@ public class Client {
* the value.
* Throws exceptions if there are network problems or if the remote code
* threw an exception.
* @deprecated Use {@link #call(RpcPayloadHeader.RpcKind, Writable,
* ConnectionId)} instead
*/
@Deprecated
public Writable call(RpcKind rpcKind, Writable param, InetSocketAddress addr,
@@ -1042,7 +1045,8 @@ public class Client {
* timeout, returning the value.
* Throws exceptions if there are network problems or if the remote code
* threw an exception.
* @deprecated Use {@link #call(RpcPayloadHeader.RpcKind, Writable,
* ConnectionId)} instead
*/
@Deprecated
public Writable call(RpcKind rpcKind, Writable param, InetSocketAddress addr,
@@ -1056,7 +1060,7 @@ public class Client {
/**
* Same as {@link #call(RpcPayloadHeader.RpcKind, Writable, InetSocketAddress,
* Class, UserGroupInformation, int, Configuration)}
* except that rpcKind is RPC_BUILTIN.
*/
@@ -1066,7 +1070,7 @@ public class Client {
throws InterruptedException, IOException {
ConnectionId remoteId = ConnectionId.getConnectionId(addr, protocol,
ticket, rpcTimeout, conf);
return call(RpcKind.RPC_BUILTIN, param, remoteId);
}
/**
@@ -1087,21 +1091,28 @@ public class Client {
}
/**
* Same as {@link #call(RpcPayloadHeader.RpcKind, Writable, ConnectionId)}
* except the rpcKind is RPC_BUILTIN
*/
public Writable call(Writable param, ConnectionId remoteId)
throws InterruptedException, IOException {
return call(RpcKind.RPC_BUILTIN, param, remoteId);
}
/**
* Make a call, passing <code>rpcRequest</code>, to the IPC server defined by
* <code>remoteId</code>, returning the rpc response.
*
* @param rpcKind
* @param rpcRequest - contains serialized method and method parameters
* @param remoteId - the target rpc server
* @return the rpc response
* Throws exceptions if there are network problems or if the remote code
* threw an exception.
*/
public Writable call(RpcKind rpcKind, Writable rpcRequest,
ConnectionId remoteId) throws InterruptedException, IOException {
Call call = new Call(rpcKind, rpcRequest);
Connection connection = getConnection(remoteId, call);
connection.sendParam(call); // send the parameter
boolean interrupted = false;
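A minimal client-side sketch of the call(RpcKind, Writable, ConnectionId) overload documented above. Everything here is illustrative and not part of this commit: the protocol class, ticket and request value are placeholders, and it assumes the existing Client(valueClass, conf) constructor and the Client.ConnectionId.getConnectionId(...) factory shown earlier in this hunk are visible to the caller.

    // Hypothetical helper, for illustration only.
    public static Writable callOnce(Configuration conf, InetSocketAddress addr,
        Class<?> protocol, UserGroupInformation ticket, Writable request)
        throws IOException, InterruptedException {
      // The value class tells the client how to deserialize responses.
      Client client = new Client(ObjectWritable.class, conf);
      try {
        Client.ConnectionId remoteId =
            Client.ConnectionId.getConnectionId(addr, protocol, ticket, 0, conf);
        // RPC_WRITABLE here mirrors what WritableRpcEngine would send.
        return client.call(RpcKind.RPC_WRITABLE, request, remoteId);
      } finally {
        client.stop(); // close idle connections and stop the client threads
      }
    }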

ProtobufHelper.java

@@ -0,0 +1,46 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import com.google.protobuf.ServiceException;
/**
* Helper methods for protobuf related RPC implementation
*/
@InterfaceAudience.Private
public class ProtobufHelper {
private ProtobufHelper() {
// Hidden constructor for class with only static helper methods
}
/**
* Return the RemoteException wrapped in ServiceException as cause.
* @param se ServiceException that wraps RemoteException
* @return RemoteException wrapped in ServiceException or
* a new IOException that wraps unexpected ServiceException.
*/
public static IOException getRemoteException(ServiceException se) {
Throwable e = se.getCause();
return ((e instanceof RemoteException) ? (IOException) e :
new IOException(se));
}
}
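A hedged sketch of the calling pattern this helper is intended for; ExampleProtocolPB and its request/response message types are hypothetical stand-ins for a protobuf-generated BlockingInterface stub, not anything defined in this commit.

    // Hypothetical client-side translator, for illustration only.
    public class ExampleClientTranslator {
      private final ExampleProtocolPB.BlockingInterface rpcProxy; // generated stub (hypothetical)

      public ExampleClientTranslator(ExampleProtocolPB.BlockingInterface rpcProxy) {
        this.rpcProxy = rpcProxy;
      }

      public EchoResponseProto echo(EchoRequestProto request) throws IOException {
        try {
          // Protobuf blocking stubs take an RpcController, which this engine ignores.
          return rpcProxy.echo(null, request);
        } catch (ServiceException se) {
          // Unwrap the server's RemoteException, or wrap anything unexpected.
          throw ProtobufHelper.getRemoteException(se);
        }
      }
    }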

ProtobufRpcEngine.java

@@ -37,6 +37,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.ipc.RPC.RpcInvoker;
import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
import org.apache.hadoop.ipc.protobuf.HadoopRpcProtos.HadoopRpcExceptionProto;
import org.apache.hadoop.ipc.protobuf.HadoopRpcProtos.HadoopRpcRequestProto;
@@ -61,6 +62,12 @@ import com.google.protobuf.ServiceException;
public class ProtobufRpcEngine implements RpcEngine {
private static final Log LOG = LogFactory.getLog(ProtobufRpcEngine.class);
static { // Register the rpcRequest deserializer for ProtobufRpcEngine
org.apache.hadoop.ipc.Server.registerProtocolEngine(
RpcKind.RPC_PROTOCOL_BUFFER, RpcRequestWritable.class,
new Server.ProtoBufRpcInvoker());
}
private static final ClientCache CLIENTS = new ClientCache();
@Override
@@ -75,10 +82,13 @@ public class ProtobufRpcEngine implements RpcEngine {
}
private static class Invoker implements InvocationHandler, Closeable {
private final Map<String, Message> returnTypes =
new ConcurrentHashMap<String, Message>();
private boolean isClosed = false;
private final Client.ConnectionId remoteId;
private final Client client;
private final long clientProtocolVersion;
private final String protocolName;
public Invoker(Class<?> protocol, InetSocketAddress addr,
UserGroupInformation ticket, Configuration conf, SocketFactory factory,
@@ -87,6 +97,8 @@ public class ProtobufRpcEngine implements RpcEngine {
ticket, rpcTimeout, conf);
this.client = CLIENTS.getClient(conf, factory,
RpcResponseWritable.class);
this.clientProtocolVersion = RPC.getProtocolVersion(protocol);
this.protocolName = RPC.getProtocolName(protocol);
}
private HadoopRpcRequestProto constructRpcRequest(Method method,
@@ -108,6 +120,19 @@ public class ProtobufRpcEngine implements RpcEngine {
Message param = (Message) params[1];
builder.setRequest(param.toByteString());
// For protobuf, {@code protocol} used when creating client side proxy is
// the interface extending BlockingInterface, which has the annotations
// such as ProtocolName etc.
//
// Using Method.getDeclaringClass(), as in WritableEngine to get at
// the protocol interface will return BlockingInterface, from where
// the annotation ProtocolName and Version cannot be
// obtained.
//
// Hence we simply use the protocol class used to create the proxy.
// For PB this may limit the use of mixins on client side.
builder.setDeclaringClassProtocolName(protocolName);
builder.setClientProtocolVersion(clientProtocolVersion);
rpcRequest = builder.build();
return rpcRequest;
}
@@ -273,14 +298,15 @@ public class ProtobufRpcEngine implements RpcEngine {
}
@Override
public RPC.Server getServer(Class<?> protocol, Object protocolImpl,
String bindAddress, int port, int numHandlers, int numReaders,
int queueSizePerHandler, boolean verbose, Configuration conf,
SecretManager<? extends TokenIdentifier> secretManager)
throws IOException {
return new Server(protocol, protocolImpl, conf, bindAddress, port,
numHandlers, numReaders, queueSizePerHandler, verbose, secretManager);
}
private static RemoteException getRemoteException(Exception e) {
@@ -289,87 +315,31 @@ public class ProtobufRpcEngine implements RpcEngine {
}
public static class Server extends RPC.Server {
private BlockingService service;
private boolean verbose;
private static String classNameBase(String className) {
String[] names = className.split("\\.", -1);
if (names == null || names.length == 0) {
return className;
}
return names[names.length - 1];
}
/**
* Construct an RPC server.
*
* @param protocolClass the class of protocol
* @param protocolImpl the protocolImpl whose methods will be called
* @param conf the configuration to use
* @param bindAddress the address to bind on to listen for connection
* @param port the port to listen for connections on
* @param numHandlers the number of method handler threads to run
* @param verbose whether each call should be logged
*/
public Server(Class<?> protocolClass, Object protocolImpl,
Configuration conf, String bindAddress, int port, int numHandlers,
int numReaders, int queueSizePerHandler, boolean verbose,
SecretManager<? extends TokenIdentifier> secretManager)
throws IOException {
super(bindAddress, port, RpcRequestWritable.class, numHandlers,
numReaders, queueSizePerHandler, conf, classNameBase(protocolImpl
.getClass().getName()), secretManager);
this.service = (BlockingService) instance;
this.verbose = verbose;
registerProtocolAndImpl(RpcKind.RPC_PROTOCOL_BUFFER,
protocolClass, protocolImpl);
}
private static RpcResponseWritable handleException(Throwable e) {
* This is a server side method, which is invoked over RPC. On success
* the return response has protobuf response payload. On failure, the
* exception name and the stack trace are return in the resposne. See {@link HadoopRpcResponseProto}
*
* In this method there three types of exceptions possible and they are
* returned in response as follows.
* <ol>
* <li> Exceptions encountered in this method that are returned as {@link RpcServerException} </li>
* <li> Exceptions thrown by the service is wrapped in ServiceException. In that
* this method returns in response the exception thrown by the service.</li>
* <li> Other exceptions thrown by the service. They are returned as
* it is.</li>
* </ol>
*/
@Override
public Writable call(String protocol, Writable writableRequest,
long receiveTime) throws IOException {
RpcRequestWritable request = (RpcRequestWritable) writableRequest;
HadoopRpcRequestProto rpcRequest = request.message;
String methodName = rpcRequest.getMethodName();
if (verbose)
LOG.info("Call: protocol=" + protocol + ", method=" + methodName);
MethodDescriptor methodDescriptor = service.getDescriptorForType()
.findMethodByName(methodName);
if (methodDescriptor == null) {
String msg = "Unknown method " + methodName + " called on " + protocol
+ " protocol.";
LOG.warn(msg);
return handleException(new RpcServerException(msg));
}
Message prototype = service.getRequestPrototype(methodDescriptor);
Message param = prototype.newBuilderForType()
.mergeFrom(rpcRequest.getRequest()).build();
Message result;
try {
result = service.callBlockingMethod(methodDescriptor, null, param);
} catch (ServiceException e) {
Throwable cause = e.getCause();
return handleException(cause != null ? cause : e);
} catch (Exception e) {
return handleException(e);
}
HadoopRpcResponseProto response = constructProtoSpecificRpcSuccessResponse(result);
return new RpcResponseWritable(response);
}
private RpcResponseWritable handleException(Throwable e) {
HadoopRpcExceptionProto exception = HadoopRpcExceptionProto.newBuilder()
.setExceptionName(e.getClass().getName())
.setStackTrace(StringUtils.stringifyException(e)).build();
@@ -378,7 +348,7 @@ public class ProtobufRpcEngine implements RpcEngine {
return new RpcResponseWritable(response);
}
private static HadoopRpcResponseProto constructProtoSpecificRpcSuccessResponse(
Message message) {
HadoopRpcResponseProto res = HadoopRpcResponseProto.newBuilder()
.setResponse(message.toByteString())
@@ -386,5 +356,81 @@ public class ProtobufRpcEngine implements RpcEngine {
.build();
return res;
}
/**
* Protobuf invoker for {@link RpcInvoker}
*/
static class ProtoBufRpcInvoker implements RpcInvoker {
@Override
/**
* This is a server side method, which is invoked over RPC. On success
* the return response has a protobuf response payload. On failure, the
* exception name and the stack trace are returned in the response.
* See {@link HadoopRpcResponseProto}
*
* In this method there are three types of exceptions possible and they are
* returned in the response as follows.
* <ol>
* <li> Exceptions encountered in this method that are returned
* as {@link RpcServerException} </li>
* <li> Exceptions thrown by the service and wrapped in ServiceException.
* In that case this method returns the exception thrown by the
* service in the response.</li>
* <li> Other exceptions thrown by the service. They are returned
* as is.</li>
* </ol>
*/
public Writable call(RPC.Server server, String protocol,
Writable writableRequest, long receiveTime) throws IOException {
RpcRequestWritable request = (RpcRequestWritable) writableRequest;
HadoopRpcRequestProto rpcRequest = request.message;
String methodName = rpcRequest.getMethodName();
String protoName = rpcRequest.getDeclaringClassProtocolName();
long clientVersion = rpcRequest.getClientProtocolVersion();
if (server.verbose)
LOG.info("Call: protocol=" + protocol + ", method=" + methodName);
ProtoNameVer pv = new ProtoNameVer(protoName, clientVersion);
ProtoClassProtoImpl protocolImpl =
server.getProtocolImplMap(RpcKind.RPC_PROTOCOL_BUFFER).get(pv);
if (protocolImpl == null) { // no match for Protocol AND Version
VerProtocolImpl highest =
server.getHighestSupportedProtocol(RpcKind.RPC_PROTOCOL_BUFFER,
protoName);
if (highest == null) {
throw new IOException("Unknown protocol: " + protoName);
}
// protocol supported but not the version that client wants
throw new RPC.VersionMismatch(protoName, clientVersion,
highest.version);
}
BlockingService service = (BlockingService) protocolImpl.protocolImpl;
MethodDescriptor methodDescriptor = service.getDescriptorForType()
.findMethodByName(methodName);
if (methodDescriptor == null) {
String msg = "Unknown method " + methodName + " called on " + protocol
+ " protocol.";
LOG.warn(msg);
return handleException(new RpcServerException(msg));
}
Message prototype = service.getRequestPrototype(methodDescriptor);
Message param = prototype.newBuilderForType()
.mergeFrom(rpcRequest.getRequest()).build();
Message result;
try {
result = service.callBlockingMethod(methodDescriptor, null, param);
} catch (ServiceException e) {
Throwable cause = e.getCause();
return handleException(cause != null ? cause : e);
} catch (Exception e) {
return handleException(e);
}
HadoopRpcResponseProto response = constructProtoSpecificRpcSuccessResponse(result);
return new RpcResponseWritable(response);
}
}
}
}

ProtocolInfo.java

@@ -35,4 +35,5 @@ import java.lang.annotation.RetentionPolicy;
@Retention(RetentionPolicy.RUNTIME)
public @interface ProtocolInfo {
String protocolName(); // the name of the protocol (i.e. rpc service)
long protocolVersion() default -1; // default means not defined use old way
}
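A hedged sketch of how a protocol interface might carry this annotation; the interface name and version are made up for illustration. If protocolVersion is left at its default of -1, the new RPC.getProtocolVersion added later in this commit falls back to the interface's static versionID field.

    // Hypothetical protocol declaration, for illustration only.
    @ProtocolInfo(protocolName = "org.example.PingProtocol", protocolVersion = 1)
    public interface PingProtocol extends VersionedProtocol {
      // Kept so older code that resolves the version reflectively still works.
      public static final long versionID = 1L;

      String ping(String message) throws IOException;
    }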

ProtocolProxy.java

@@ -57,19 +57,11 @@ public class ProtocolProxy<T> {
private void fetchServerMethods(Method method) throws IOException {
long clientVersion;
clientVersion = RPC.getProtocolVersion(method.getDeclaringClass());
Field versionField = method.getDeclaringClass().getField("versionID");
versionField.setAccessible(true);
clientVersion = versionField.getLong(method.getDeclaringClass());
} catch (NoSuchFieldException ex) {
throw new RuntimeException(ex);
} catch (IllegalAccessException ex) {
throw new RuntimeException(ex);
}
int clientMethodsHash = ProtocolSignature.getFingerprint(method
.getDeclaringClass().getMethods());
ProtocolSignature serverInfo = ((VersionedProtocol) proxy)
.getProtocolSignature(RPC.getProtocolName(protocol), clientVersion,
clientMethodsHash);
long serverVersion = serverInfo.getVersion();
if (serverVersion != clientVersion) {

ProtocolSignature.java

@@ -29,6 +29,8 @@ import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableFactories;
import org.apache.hadoop.io.WritableFactory;
import com.google.common.annotations.VisibleForTesting;
public class ProtocolSignature implements Writable {
static { // register a ctor
WritableFactories.setFactory
@@ -164,10 +166,15 @@ public class ProtocolSignature implements Writable {
/**
* A cache that maps a protocol's name to its signature & finger print
*/
private final static HashMap<String, ProtocolSigFingerprint>
PROTOCOL_FINGERPRINT_CACHE =
new HashMap<String, ProtocolSigFingerprint>();
@VisibleForTesting
public static void resetCache() {
PROTOCOL_FINGERPRINT_CACHE.clear();
}
/**
* Return a protocol's signature and finger print from cache
*
@@ -177,7 +184,7 @@ public class ProtocolSignature implements Writable {
*/
private static ProtocolSigFingerprint getSigFingerprint(
Class <? extends VersionedProtocol> protocol, long serverVersion) {
String protocolName = RPC.getProtocolName(protocol);
synchronized (PROTOCOL_FINGERPRINT_CACHE) {
ProtocolSigFingerprint sig = PROTOCOL_FINGERPRINT_CACHE.get(protocolName);
if (sig == null) {

RPC.java

@@ -18,6 +18,7 @@
package org.apache.hadoop.ipc;
import java.lang.reflect.Field;
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Proxy;
import java.lang.reflect.Method;
@@ -28,6 +29,9 @@ import java.net.NoRouteToHostException;
import java.net.SocketTimeoutException;
import java.io.*;
import java.io.Closeable;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.HashMap;
@@ -36,6 +40,7 @@ import javax.net.SocketFactory;
import org.apache.commons.logging.*;
import org.apache.hadoop.io.*;
import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.SaslRpcServer;
import org.apache.hadoop.security.UserGroupInformation;
@@ -63,8 +68,54 @@ import org.apache.hadoop.util.ReflectionUtils;
* the protocol instance is transmitted.
*/
public class RPC {
interface RpcInvoker {
/**
* Process a client call on the server side
* @param server the server within whose context this rpc call is made
* @param protocol - the protocol name (the class of the client proxy
* used to make calls to the rpc server).
* @param rpcRequest - deserialized
* @param receiveTime time at which the call received (for metrics)
* @return the call's return
* @throws IOException
**/
public Writable call(Server server, String protocol,
Writable rpcRequest, long receiveTime) throws IOException ;
}
static final Log LOG = LogFactory.getLog(RPC.class);
/**
* Get all superInterfaces that extend VersionedProtocol
* @param childInterfaces
* @return the super interfaces that extend VersionedProtocol
*/
static Class<?>[] getSuperInterfaces(Class<?>[] childInterfaces) {
List<Class<?>> allInterfaces = new ArrayList<Class<?>>();
for (Class<?> childInterface : childInterfaces) {
if (VersionedProtocol.class.isAssignableFrom(childInterface)) {
allInterfaces.add(childInterface);
allInterfaces.addAll(
Arrays.asList(
getSuperInterfaces(childInterface.getInterfaces())));
} else {
LOG.warn("Interface " + childInterface +
" ignored because it does not extend VersionedProtocol");
}
}
return allInterfaces.toArray(new Class[allInterfaces.size()]);
}
/**
* Get all interfaces that the given protocol implements or extends
* which are assignable from VersionedProtocol.
*/
static Class<?>[] getProtocolInterfaces(Class<?> protocol) {
Class<?>[] interfaces = protocol.getInterfaces();
return getSuperInterfaces(interfaces);
}
/**
* Get the protocol name.
@@ -75,10 +126,37 @@ public class RPC {
if (protocol == null) {
return null;
}
ProtocolInfo anno = protocol.getAnnotation(ProtocolInfo.class);
return (anno == null) ? protocol.getName() : anno.protocolName();
}
/**
* Get the protocol version from protocol class.
* If the protocol class has a ProtocolInfo annotation with a defined
* version, return it; otherwise fall back to the static versionID field.
*/
static public long getProtocolVersion(Class<?> protocol) {
if (protocol == null) {
throw new IllegalArgumentException("Null protocol");
}
long version;
ProtocolInfo anno = protocol.getAnnotation(ProtocolInfo.class);
if (anno != null) {
version = anno.protocolVersion();
if (version != -1)
return version;
}
try {
Field versionField = protocol.getField("versionID");
versionField.setAccessible(true);
return versionField.getLong(protocol);
} catch (NoSuchFieldException ex) {
throw new RuntimeException(ex);
} catch (IllegalAccessException ex) {
throw new RuntimeException(ex);
}
}
private RPC() {} // no public ctor
// cache of RpcEngines by protocol
@@ -590,6 +668,144 @@ public class RPC {
/** An RPC Server. */
public abstract static class Server extends org.apache.hadoop.ipc.Server {
boolean verbose;
static String classNameBase(String className) {
String[] names = className.split("\\.", -1);
if (names == null || names.length == 0) {
return className;
}
return names[names.length-1];
}
/**
* Store a map of protocol and version to its implementation
*/
/**
* The key in Map
*/
static class ProtoNameVer {
final String protocol;
final long version;
ProtoNameVer(String protocol, long ver) {
this.protocol = protocol;
this.version = ver;
}
@Override
public boolean equals(Object o) {
if (o == null)
return false;
if (this == o)
return true;
if (! (o instanceof ProtoNameVer))
return false;
ProtoNameVer pv = (ProtoNameVer) o;
return ((pv.protocol.equals(this.protocol)) &&
(pv.version == this.version));
}
@Override
public int hashCode() {
return protocol.hashCode() * 37 + (int) version;
}
}
/**
* The value in map
*/
static class ProtoClassProtoImpl {
final Class<?> protocolClass;
final Object protocolImpl;
ProtoClassProtoImpl(Class<?> protocolClass, Object protocolImpl) {
this.protocolClass = protocolClass;
this.protocolImpl = protocolImpl;
}
}
ArrayList<Map<ProtoNameVer, ProtoClassProtoImpl>> protocolImplMapArray =
new ArrayList<Map<ProtoNameVer, ProtoClassProtoImpl>>(RpcKind.MAX_INDEX);
Map<ProtoNameVer, ProtoClassProtoImpl> getProtocolImplMap(RpcKind rpcKind) {
if (protocolImplMapArray.size() == 0) {// initialize for all rpc kinds
for (int i=0; i <= RpcKind.MAX_INDEX; ++i) {
protocolImplMapArray.add(
new HashMap<ProtoNameVer, ProtoClassProtoImpl>(10));
}
}
return protocolImplMapArray.get(rpcKind.ordinal());
}
// Register protocol and its impl for rpc calls
void registerProtocolAndImpl(RpcKind rpcKind, Class<?> protocolClass,
Object protocolImpl) throws IOException {
String protocolName = RPC.getProtocolName(protocolClass);
long version;
try {
version = RPC.getProtocolVersion(protocolClass);
} catch (Exception ex) {
LOG.warn("Protocol " + protocolClass +
" NOT registered as cannot get protocol version ");
return;
}
getProtocolImplMap(rpcKind).put(new ProtoNameVer(protocolName, version),
new ProtoClassProtoImpl(protocolClass, protocolImpl));
LOG.debug("RpcKind = " + rpcKind + " Protocol Name = " + protocolName + " version=" + version +
" ProtocolImpl=" + protocolImpl.getClass().getName() +
" protocolClass=" + protocolClass.getName());
}
static class VerProtocolImpl {
final long version;
final ProtoClassProtoImpl protocolTarget;
VerProtocolImpl(long ver, ProtoClassProtoImpl protocolTarget) {
this.version = ver;
this.protocolTarget = protocolTarget;
}
}
@SuppressWarnings("unused") // will be useful later.
VerProtocolImpl[] getSupportedProtocolVersions(RpcKind rpcKind,
String protocolName) {
VerProtocolImpl[] resultk =
new VerProtocolImpl[getProtocolImplMap(rpcKind).size()];
int i = 0;
for (Map.Entry<ProtoNameVer, ProtoClassProtoImpl> pv :
getProtocolImplMap(rpcKind).entrySet()) {
if (pv.getKey().protocol.equals(protocolName)) {
resultk[i++] =
new VerProtocolImpl(pv.getKey().version, pv.getValue());
}
}
if (i == 0) {
return null;
}
VerProtocolImpl[] result = new VerProtocolImpl[i];
System.arraycopy(resultk, 0, result, 0, i);
return result;
}
VerProtocolImpl getHighestSupportedProtocol(RpcKind rpcKind,
String protocolName) {
Long highestVersion = 0L;
ProtoClassProtoImpl highest = null;
System.out.println("Size of protoMap for " + rpcKind + " =" + getProtocolImplMap(rpcKind).size());
for (Map.Entry<ProtoNameVer, ProtoClassProtoImpl> pv :
getProtocolImplMap(rpcKind).entrySet()) {
if (pv.getKey().protocol.equals(protocolName)) {
if ((highest == null) || (pv.getKey().version > highestVersion)) {
highest = pv.getValue();
highestVersion = pv.getKey().version;
}
}
}
if (highest == null) {
return null;
}
return new VerProtocolImpl(highestVersion, highest);
}
protected Server(String bindAddress, int port,
Class<? extends Writable> paramClass, int handlerCount,
@@ -606,11 +822,17 @@ public class RPC {
* @param protocolImpl - the impl of the protocol that will be called
* @return the server (for convenience)
*/
public Server addProtocol(RpcKind rpcKind, Class<?> protocolClass,
Object protocolImpl) throws IOException {
registerProtocolAndImpl(rpcKind, protocolClass, protocolImpl);
return this;
}
@Override
public Writable call(RpcKind rpcKind, String protocol,
Writable rpcRequest, long receiveTime) throws IOException {
return getRpcInvoker(rpcKind).call(this, protocol, rpcRequest,
receiveTime);
}
}
}
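A hedged sketch of how the registration API above might be used to serve more than one protocol from a single IPC endpoint. The server variable, EchoProtocol and EchoProtocolImpl are placeholders; the initial server is assumed to have been created for its primary protocol by one of the RPC engines touched in this commit.

    // Hypothetical wiring, for illustration only.
    server.addProtocol(RpcKind.RPC_WRITABLE, EchoProtocol.class, new EchoProtocolImpl());
    server.start(); // both protocols now share the same port and handler threads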

RpcPayloadHeader.java

@@ -54,13 +54,14 @@ public class RpcPayloadHeader implements Writable {
}
public enum RpcKind {
RPC_BUILTIN ((short) 1), // Used for built in calls by tests
RPC_WRITABLE ((short) 2), // Use WritableRpcEngine
RPC_PROTOCOL_BUFFER ((short) 3), // Use ProtobufRpcEngine
RPC_AVRO ((short) 4); // Use AvroRpcEngine
static final short MAX_INDEX = RPC_AVRO.value; // used for array size
private final short value;
private static final short FIRST_INDEX = RPC_BUILTIN.value;
private final short value;
RpcKind(short val) {
this.value = val;
}

Server.java

@@ -43,6 +43,7 @@ import java.nio.channels.WritableByteChannel;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
@@ -66,6 +67,7 @@ import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.ipc.RPC.RpcInvoker;
import org.apache.hadoop.ipc.RPC.VersionMismatch;
import org.apache.hadoop.ipc.RpcPayloadHeader.RpcPayloadOperation;
import org.apache.hadoop.ipc.metrics.RpcDetailedMetrics;
@@ -134,6 +136,59 @@ public abstract class Server {
*/
static int INITIAL_RESP_BUF_SIZE = 10240;
static class RpcKindMapValue {
final Class<? extends Writable> rpcRequestWrapperClass;
final RpcInvoker rpcInvoker;
RpcKindMapValue (Class<? extends Writable> rpcRequestWrapperClass,
RpcInvoker rpcInvoker) {
this.rpcInvoker = rpcInvoker;
this.rpcRequestWrapperClass = rpcRequestWrapperClass;
}
}
static Map<RpcKind, RpcKindMapValue> rpcKindMap = new
HashMap<RpcKind, RpcKindMapValue>(4);
/**
* Register an RPC kind and the class to deserialize the rpc request.
*
* Called by static initializers of rpcKind Engines
* @param rpcKind
* @param rpcRequestWrapperClass - this class is used to deserialize
* the rpc request.
* @param rpcInvoker - used to process the calls on the server side.
*/
public static void registerProtocolEngine(RpcKind rpcKind,
Class<? extends Writable> rpcRequestWrapperClass,
RpcInvoker rpcInvoker) {
RpcKindMapValue old =
rpcKindMap.put(rpcKind, new RpcKindMapValue(rpcRequestWrapperClass, rpcInvoker));
if (old != null) {
rpcKindMap.put(rpcKind, old);
throw new IllegalArgumentException("ReRegistration of rpcKind: " +
rpcKind);
}
LOG.info("rpcKind=" + rpcKind +
", rpcRequestWrapperClass=" + rpcRequestWrapperClass +
", rpcInvoker=" + rpcInvoker);
}
public Class<? extends Writable> getRpcRequestWrapper(
RpcKind rpcKind) {
if (rpcRequestClass != null)
return rpcRequestClass;
RpcKindMapValue val = rpcKindMap.get(rpcKind);
return (val == null) ? null : val.rpcRequestWrapperClass;
}
public static RpcInvoker getRpcInvoker(RpcKind rpcKind) {
RpcKindMapValue val = rpcKindMap.get(rpcKind);
return (val == null) ? null : val.rpcInvoker;
}
public static final Log LOG = LogFactory.getLog(Server.class);
public static final Log AUDITLOG =
LogFactory.getLog("SecurityLogger."+Server.class.getName());
@@ -197,7 +252,7 @@ public abstract class Server {
private int port; // port we listen on
private int handlerCount; // number of handler threads
private int readThreads; // number of read threads
private Class<? extends Writable> rpcRequestClass; // class used for deserializing the rpc request
private int maxIdleTime; // the maximum idle time after
// which a client may be disconnected
private int thresholdIdleConnections; // the number of idle connections
@@ -1425,9 +1480,27 @@ public abstract class Server {
throw new IOException("IPC Server does not implement operation" +
header.getOperation());
}
// If we know the rpc kind, get its class so that we can deserialize
// (Note it would make more sense to have the handler deserialize but
// we continue with this original design.)
Class<? extends Writable> rpcRequestClass =
getRpcRequestWrapper(header.getkind());
if (rpcRequestClass == null) {
LOG.warn("Unknown rpc kind " + header.getkind() +
" from client " + getHostAddress());
final Call readParamsFailedCall =
new Call(header.getCallId(), null, this);
ByteArrayOutputStream responseBuffer = new ByteArrayOutputStream();
setupResponse(responseBuffer, readParamsFailedCall, Status.FATAL, null,
IOException.class.getName(),
"Unknown rpc kind " + header.getkind());
responder.doRespond(readParamsFailedCall);
return;
}
Writable rpcRequest;
try { //Read the rpc request
rpcRequest = ReflectionUtils.newInstance(rpcRequestClass, conf);
rpcRequest.readFields(dis);
} catch (Throwable t) {
LOG.warn("Unable to read call parameters for client " +
@@ -1519,7 +1592,7 @@ public abstract class Server {
// Make the call as the user via Subject.doAs, thus associating
// the call with the Subject
if (call.connection.user == null) {
value = call(call.rpcKind, call.connection.protocolName, call.rpcRequest,
call.timestamp);
} else {
value =
@@ -1528,7 +1601,7 @@ public abstract class Server {
@Override
public Writable run() throws Exception {
// make the call
return call(call.rpcKind, call.connection.protocolName,
call.rpcRequest, call.timestamp);
}
@@ -1590,24 +1663,33 @@ public abstract class Server {
Configuration conf)
throws IOException
{
this(bindAddress, port, paramClass, handlerCount, -1, -1, conf, Integer
.toString(port), null);
}
/**
* Constructs a server listening on the named port and address. Parameters passed must
* be of the named class. The <code>handlerCount</code> determines
* the number of handler threads that will be used to process calls.
* If queueSizePerHandler or numReaders are not -1 they will be used instead of parameters
* from configuration. Otherwise the configuration will be picked up.
*
* If rpcRequestClass is null then the rpcRequestClass must have been
* registered via {@link #registerProtocolEngine(RpcPayloadHeader.RpcKind,
* Class, RPC.RpcInvoker)}
* This parameter has been retained for compatibility with existing tests
* and usage.
*/
@SuppressWarnings("unchecked") @SuppressWarnings("unchecked")
protected Server(String bindAddress, int port, protected Server(String bindAddress, int port,
Class<? extends Writable> paramClass, int handlerCount, int numReaders, int queueSizePerHandler, Class<? extends Writable> rpcRequestClass, int handlerCount,
Configuration conf, String serverName, SecretManager<? extends TokenIdentifier> secretManager) int numReaders, int queueSizePerHandler, Configuration conf,
String serverName, SecretManager<? extends TokenIdentifier> secretManager)
throws IOException { throws IOException {
this.bindAddress = bindAddress; this.bindAddress = bindAddress;
this.conf = conf; this.conf = conf;
this.port = port; this.port = port;
this.paramClass = paramClass; this.rpcRequestClass = rpcRequestClass;
this.handlerCount = handlerCount; this.handlerCount = handlerCount;
this.socketSendBufferSize = 0; this.socketSendBufferSize = 0;
if (queueSizePerHandler != -1) { if (queueSizePerHandler != -1) {
@@ -1797,17 +1879,17 @@ public abstract class Server {
/**
* Called for each call.
* @deprecated Use {@link #call(RpcPayloadHeader.RpcKind, String,
* Writable, long)} instead
*/
@Deprecated
public Writable call(Writable param, long receiveTime) throws IOException {
return call(RpcKind.RPC_BUILTIN, null, param, receiveTime);
}
/** Called for each call. */
public abstract Writable call(RpcKind rpcKind, String protocol,
Writable param, long receiveTime) throws IOException;
/**
* Authorize the incoming client connection.

WritableRpcEngine.java

@@ -18,7 +18,6 @@
package org.apache.hadoop.ipc;
import java.lang.reflect.Field;
import java.lang.reflect.Proxy;
import java.lang.reflect.Method;
import java.lang.reflect.Array;
@@ -27,18 +26,14 @@ import java.lang.reflect.InvocationTargetException;
import java.net.InetSocketAddress;
import java.io.*;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.io.Closeable;
import java.util.Map;
import java.util.HashMap;
import javax.net.SocketFactory;
import org.apache.commons.logging.*;
import org.apache.hadoop.io.*;
import org.apache.hadoop.ipc.RPC.RpcInvoker;
import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
import org.apache.hadoop.ipc.VersionedProtocol;
import org.apache.hadoop.security.UserGroupInformation;
@@ -53,36 +48,9 @@ import org.apache.hadoop.conf.*;
public class WritableRpcEngine implements RpcEngine {
private static final Log LOG = LogFactory.getLog(RPC.class);
static { // Register the rpcRequest deserializer for WritableRpcEngine
org.apache.hadoop.ipc.Server.registerProtocolEngine(RpcKind.RPC_WRITABLE,
Invocation.class, new Server.WritableRpcInvoker());
* @param childInterfaces
* @return the super interfaces that extend VersionedProtocol
*/
private static Class<?>[] getSuperInterfaces(Class<?>[] childInterfaces) {
List<Class<?>> allInterfaces = new ArrayList<Class<?>>();
for (Class<?> childInterface : childInterfaces) {
if (VersionedProtocol.class.isAssignableFrom(childInterface)) {
allInterfaces.add(childInterface);
allInterfaces.addAll(
Arrays.asList(
getSuperInterfaces(childInterface.getInterfaces())));
} else {
LOG.warn("Interface " + childInterface +
" ignored because it does not extend VersionedProtocol");
}
}
return (Class<?>[]) allInterfaces.toArray(new Class[allInterfaces.size()]);
}
/**
* Get all interfaces that the given protocol implements or extends
* which are assignable from VersionedProtocol.
*/
private static Class<?>[] getProtocolInterfaces(Class<?> protocol) {
Class<?>[] interfaces = protocol.getInterfaces();
return getSuperInterfaces(interfaces);
}
@@ -120,15 +88,7 @@ public class WritableRpcEngine implements RpcEngine {
clientVersion = 0;
clientMethodsHash = 0;
} else {
this.clientVersion = RPC.getProtocolVersion(method.getDeclaringClass());
Field versionField = method.getDeclaringClass().getField("versionID");
versionField.setAccessible(true);
this.clientVersion = versionField.getLong(method.getDeclaringClass());
} catch (NoSuchFieldException ex) {
throw new RuntimeException(ex);
} catch (IllegalAccessException ex) {
throw new RuntimeException(ex);
}
this.clientMethodsHash = ProtocolSignature.getFingerprint(method
.getDeclaringClass().getMethods());
}
@@ -329,140 +289,25 @@ public class WritableRpcEngine implements RpcEngine {
/** An RPC Server. */
public static class Server extends RPC.Server {
private boolean verbose;
/**
* Construct an RPC server.
*/
static class ProtoNameVer {
final String protocol;
final long version;
ProtoNameVer(String protocol, long ver) {
this.protocol = protocol;
this.version = ver;
}
@Override
public boolean equals(Object o) {
if (o == null)
return false;
if (this == o)
return true;
if (! (o instanceof ProtoNameVer))
return false;
ProtoNameVer pv = (ProtoNameVer) o;
return ((pv.protocol.equals(this.protocol)) &&
(pv.version == this.version));
}
@Override
public int hashCode() {
return protocol.hashCode() * 37 + (int) version;
}
}
/**
* The value in map
*/
static class ProtoClassProtoImpl {
final Class<?> protocolClass;
final Object protocolImpl;
ProtoClassProtoImpl(Class<?> protocolClass, Object protocolImpl) {
this.protocolClass = protocolClass;
this.protocolImpl = protocolImpl;
}
}
private Map<ProtoNameVer, ProtoClassProtoImpl> protocolImplMap =
new HashMap<ProtoNameVer, ProtoClassProtoImpl>(10);
// Register protocol and its impl for rpc calls
private void registerProtocolAndImpl(Class<?> protocolClass,
Object protocolImpl) throws IOException {
String protocolName = RPC.getProtocolName(protocolClass);
VersionedProtocol vp = (VersionedProtocol) protocolImpl;
long version;
try {
version = vp.getProtocolVersion(protocolName, 0);
} catch (Exception ex) {
LOG.warn("Protocol " + protocolClass +
" NOT registered as getProtocolVersion throws exception ");
return;
}
protocolImplMap.put(new ProtoNameVer(protocolName, version),
new ProtoClassProtoImpl(protocolClass, protocolImpl));
LOG.debug("Protocol Name = " + protocolName + " version=" + version +
" ProtocolImpl=" + protocolImpl.getClass().getName() +
" protocolClass=" + protocolClass.getName());
}
private static class VerProtocolImpl {
final long version;
final ProtoClassProtoImpl protocolTarget;
VerProtocolImpl(long ver, ProtoClassProtoImpl protocolTarget) {
this.version = ver;
this.protocolTarget = protocolTarget;
}
}
@SuppressWarnings("unused") // will be useful later.
private VerProtocolImpl[] getSupportedProtocolVersions(
String protocolName) {
VerProtocolImpl[] resultk = new VerProtocolImpl[protocolImplMap.size()];
int i = 0;
for (Map.Entry<ProtoNameVer, ProtoClassProtoImpl> pv :
protocolImplMap.entrySet()) {
if (pv.getKey().protocol.equals(protocolName)) {
resultk[i++] =
new VerProtocolImpl(pv.getKey().version, pv.getValue());
}
}
if (i == 0) {
return null;
}
VerProtocolImpl[] result = new VerProtocolImpl[i];
System.arraycopy(resultk, 0, result, 0, i);
return result;
}
private VerProtocolImpl getHighestSupportedProtocol(String protocolName) {
Long highestVersion = 0L;
ProtoClassProtoImpl highest = null;
for (Map.Entry<ProtoNameVer, ProtoClassProtoImpl> pv : protocolImplMap
.entrySet()) {
if (pv.getKey().protocol.equals(protocolName)) {
if ((highest == null) || (pv.getKey().version > highestVersion)) {
highest = pv.getValue();
highestVersion = pv.getKey().version;
}
}
}
if (highest == null) {
return null;
}
return new VerProtocolImpl(highestVersion, highest);
}
/** Construct an RPC server.
* @param instance the instance whose methods will be called
* @param conf the configuration to use
* @param bindAddress the address to bind on to listen for connection
* @param port the port to listen for connections on
*
* @deprecated Use #Server(Class, Object, Configuration, String, int)
*
*/
@Deprecated
public Server(Object instance, Configuration conf, String bindAddress,
int port) throws IOException {
this(null, instance, conf, bindAddress, port);
}
/** Construct an RPC server.
* @param protocolClass class
* @param protocolImpl the instance whose methods will be called
* @param conf the configuration to use
* @param bindAddress the address to bind on to listen for connection
* @param port the port to listen for connections on
@@ -474,16 +319,8 @@ public class WritableRpcEngine implements RpcEngine {
false, null);
}
private static String classNameBase(String className) { /**
String[] names = className.split("\\.", -1); * Construct an RPC server.
if (names == null || names.length == 0) {
return className;
}
return names[names.length-1];
}
/** Construct an RPC server.
* @param protocolImpl the instance whose methods will be called * @param protocolImpl the instance whose methods will be called
* @param conf the configuration to use * @param conf the configuration to use
* @param bindAddress the address to bind on to listen for connection * @param bindAddress the address to bind on to listen for connection
@ -505,7 +342,8 @@ public class WritableRpcEngine implements RpcEngine {
} }
/** Construct an RPC server. /**
* Construct an RPC server.
* @param protocolClass - the protocol being registered * @param protocolClass - the protocol being registered
* can be null for compatibility with old usage (see below for details) * can be null for compatibility with old usage (see below for details)
* @param protocolImpl the protocol impl that will be called * @param protocolImpl the protocol impl that will be called
@ -520,7 +358,7 @@ public class WritableRpcEngine implements RpcEngine {
int numHandlers, int numReaders, int queueSizePerHandler, int numHandlers, int numReaders, int queueSizePerHandler,
boolean verbose, SecretManager<? extends TokenIdentifier> secretManager) boolean verbose, SecretManager<? extends TokenIdentifier> secretManager)
throws IOException { throws IOException {
super(bindAddress, port, Invocation.class, numHandlers, numReaders, super(bindAddress, port, null, numHandlers, numReaders,
queueSizePerHandler, conf, queueSizePerHandler, conf,
classNameBase(protocolImpl.getClass().getName()), secretManager); classNameBase(protocolImpl.getClass().getName()), secretManager);
@ -535,7 +373,7 @@ public class WritableRpcEngine implements RpcEngine {
* the protocolImpl is derived from the protocolClass(es) * the protocolImpl is derived from the protocolClass(es)
* we register all interfaces extended by the protocolImpl * we register all interfaces extended by the protocolImpl
*/ */
protocols = getProtocolInterfaces(protocolImpl.getClass()); protocols = RPC.getProtocolInterfaces(protocolImpl.getClass());
} else { } else {
if (!protocolClass.isAssignableFrom(protocolImpl.getClass())) { if (!protocolClass.isAssignableFrom(protocolImpl.getClass())) {
@ -544,132 +382,125 @@ public class WritableRpcEngine implements RpcEngine {
protocolImpl.getClass()); protocolImpl.getClass());
} }
// register protocol class and its super interfaces // register protocol class and its super interfaces
registerProtocolAndImpl(protocolClass, protocolImpl); registerProtocolAndImpl(RpcKind.RPC_WRITABLE, protocolClass, protocolImpl);
protocols = getProtocolInterfaces(protocolClass); protocols = RPC.getProtocolInterfaces(protocolClass);
} }
for (Class<?> p : protocols) { for (Class<?> p : protocols) {
if (!p.equals(VersionedProtocol.class)) { if (!p.equals(VersionedProtocol.class)) {
registerProtocolAndImpl(p, protocolImpl); registerProtocolAndImpl(RpcKind.RPC_WRITABLE, p, protocolImpl);
} }
} }
} }
private static void log(String value) {
@Override if (value!= null && value.length() > 55)
public <PROTO, IMPL extends PROTO> Server value = value.substring(0, 55)+"...";
addProtocol( LOG.info(value);
Class<PROTO> protocolClass, IMPL protocolImpl) throws IOException {
registerProtocolAndImpl(protocolClass, protocolImpl);
return this;
} }
/** static class WritableRpcInvoker implements RpcInvoker {
* Process a client call
* @param protocolName - the protocol name (the class of the client proxy
* used to make calls to the rpc server).
* @param param parameters
* @param receivedTime time at which the call was received (for metrics)
* @return the call's return
* @throws IOException
*/
public Writable call(String protocolName, Writable param, long receivedTime)
throws IOException {
try {
Invocation call = (Invocation)param;
if (verbose) log("Call: " + call);
// Verify rpc version @Override
if (call.getRpcVersion() != writableRpcVersion) { public Writable call(org.apache.hadoop.ipc.RPC.Server server,
// Client is using a different version of WritableRpc String protocolName, Writable rpcRequest, long receivedTime)
throw new IOException( throws IOException {
"WritableRpc version mismatch, client side version=" try {
+ call.getRpcVersion() + ", server side version=" Invocation call = (Invocation)rpcRequest;
+ writableRpcVersion); if (server.verbose) log("Call: " + call);
}
long clientVersion = call.getProtocolVersion(); // Verify rpc version
final String protoName; if (call.getRpcVersion() != writableRpcVersion) {
ProtoClassProtoImpl protocolImpl; // Client is using a different version of WritableRpc
if (call.declaringClassProtocolName.equals(VersionedProtocol.class.getName())) { throw new IOException(
// VersionedProtocol methods are often used by clients to figure out "WritableRpc version mismatch, client side version="
// which version of protocol to use. + call.getRpcVersion() + ", server side version="
// + writableRpcVersion);
// Versioned protocol methods should go to the protocolName protocol }
// rather than the declaring class of the method since
// the declaring class is VersionedProtocol which is not
// registered directly.
// Send the call to the highest protocol version
protocolImpl =
getHighestSupportedProtocol(protocolName).protocolTarget;
} else {
protoName = call.declaringClassProtocolName;
// Find the right impl for the protocol based on client version. long clientVersion = call.getProtocolVersion();
ProtoNameVer pv = final String protoName;
new ProtoNameVer(call.declaringClassProtocolName, clientVersion); ProtoClassProtoImpl protocolImpl;
protocolImpl = protocolImplMap.get(pv); if (call.declaringClassProtocolName.equals(VersionedProtocol.class.getName())) {
if (protocolImpl == null) { // no match for Protocol AND Version // VersionedProtocol methods are often used by clients to figure out
VerProtocolImpl highest = // which version of protocol to use.
getHighestSupportedProtocol(protoName); //
// Versioned protocol methods should go to the protocolName protocol
// rather than the declaring class of the method since
// the declaring class is VersionedProtocol which is not
// registered directly.
// Send the call to the highest protocol version
VerProtocolImpl highest = server.getHighestSupportedProtocol(
RpcKind.RPC_WRITABLE, protocolName);
if (highest == null) { if (highest == null) {
throw new IOException("Unknown protocol: " + protoName); throw new IOException("Unknown protocol: " + protocolName);
} else { // protocol supported but not the version that client wants }
throw new RPC.VersionMismatch(protoName, clientVersion, protocolImpl = highest.protocolTarget;
highest.version); } else {
protoName = call.declaringClassProtocolName;
// Find the right impl for the protocol based on client version.
ProtoNameVer pv =
new ProtoNameVer(call.declaringClassProtocolName, clientVersion);
protocolImpl =
server.getProtocolImplMap(RpcKind.RPC_WRITABLE).get(pv);
if (protocolImpl == null) { // no match for Protocol AND Version
VerProtocolImpl highest =
server.getHighestSupportedProtocol(RpcKind.RPC_WRITABLE,
protoName);
if (highest == null) {
throw new IOException("Unknown protocol: " + protoName);
} else { // protocol supported but not the version that client wants
throw new RPC.VersionMismatch(protoName, clientVersion,
highest.version);
}
} }
} }
}
// Invoke the protocol method // Invoke the protocol method
long startTime = System.currentTimeMillis(); long startTime = System.currentTimeMillis();
Method method = Method method =
protocolImpl.protocolClass.getMethod(call.getMethodName(), protocolImpl.protocolClass.getMethod(call.getMethodName(),
call.getParameterClasses()); call.getParameterClasses());
method.setAccessible(true); method.setAccessible(true);
rpcDetailedMetrics.init(protocolImpl.protocolClass); server.rpcDetailedMetrics.init(protocolImpl.protocolClass);
Object value = Object value =
method.invoke(protocolImpl.protocolImpl, call.getParameters()); method.invoke(protocolImpl.protocolImpl, call.getParameters());
int processingTime = (int) (System.currentTimeMillis() - startTime); int processingTime = (int) (System.currentTimeMillis() - startTime);
int qTime = (int) (startTime-receivedTime); int qTime = (int) (startTime-receivedTime);
if (LOG.isDebugEnabled()) { if (LOG.isDebugEnabled()) {
LOG.debug("Served: " + call.getMethodName() + LOG.debug("Served: " + call.getMethodName() +
" queueTime= " + qTime + " queueTime= " + qTime +
" procesingTime= " + processingTime); " procesingTime= " + processingTime);
} }
rpcMetrics.addRpcQueueTime(qTime); server.rpcMetrics.addRpcQueueTime(qTime);
rpcMetrics.addRpcProcessingTime(processingTime); server.rpcMetrics.addRpcProcessingTime(processingTime);
rpcDetailedMetrics.addProcessingTime(call.getMethodName(), server.rpcDetailedMetrics.addProcessingTime(call.getMethodName(),
processingTime); processingTime);
if (verbose) log("Return: "+value); if (server.verbose) log("Return: "+value);
return new ObjectWritable(method.getReturnType(), value); return new ObjectWritable(method.getReturnType(), value);
} catch (InvocationTargetException e) { } catch (InvocationTargetException e) {
Throwable target = e.getTargetException(); Throwable target = e.getTargetException();
if (target instanceof IOException) { if (target instanceof IOException) {
throw (IOException)target; throw (IOException)target;
} else { } else {
IOException ioe = new IOException(target.toString()); IOException ioe = new IOException(target.toString());
ioe.setStackTrace(target.getStackTrace()); ioe.setStackTrace(target.getStackTrace());
throw ioe;
}
} catch (Throwable e) {
if (!(e instanceof IOException)) {
LOG.error("Unexpected throwable object ", e);
}
IOException ioe = new IOException(e.toString());
ioe.setStackTrace(e.getStackTrace());
throw ioe; throw ioe;
} }
} catch (Throwable e) {
if (!(e instanceof IOException)) {
LOG.error("Unexpected throwable object ", e);
}
IOException ioe = new IOException(e.toString());
ioe.setStackTrace(e.getStackTrace());
throw ioe;
} }
} }
} }
private static void log(String value) {
if (value!= null && value.length() > 55)
value = value.substring(0, 55)+"...";
LOG.info(value);
}
} }
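The registration and dispatch logic above keys every protocol implementation on an (RpcKind, protocol name, version) tuple instead of assuming Writable. As a rough illustration of the resulting server-side API, the sketch below registers two hypothetical Writable protocols on one server; PingProtocol, EchoProtocol and Impl are invented for this example, while the RPC.getServer and addProtocol calls follow the signatures exercised by the tests later in this patch.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.ProtocolSignature;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
import org.apache.hadoop.ipc.VersionedProtocol;

// Sketch only: both protocol interfaces and the implementation are hypothetical.
public class MultiProtocolWritableServerSketch {
  public interface PingProtocol extends VersionedProtocol {
    long versionID = 1L;
    String ping() throws IOException;
  }

  public interface EchoProtocol extends VersionedProtocol {
    long versionID = 1L;
    String echo(String value) throws IOException;
  }

  public static class Impl implements PingProtocol, EchoProtocol {
    public String ping() { return "pong"; }
    public String echo(String value) { return value; }
    public long getProtocolVersion(String protocol, long clientVersion) { return 1L; }
    public ProtocolSignature getProtocolSignature(String protocol, long clientVersion,
        int clientMethodsHash) throws IOException {
      return ProtocolSignature.getProtocolSignature(this, protocol, clientVersion,
          clientMethodsHash);
    }
  }

  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    Impl impl = new Impl();
    // The constructor path registers PingProtocol (and its super-interfaces).
    RPC.Server server = RPC.getServer(PingProtocol.class, impl,
        "0.0.0.0", 0, 2, false, conf, null);
    // Additional protocols are now registered per RpcKind.
    server.addProtocol(RpcKind.RPC_WRITABLE, EchoProtocol.class, impl);
    server.start();
  }
}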

View File

@ -1,3 +1,20 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Generated by the protocol buffer compiler. DO NOT EDIT! // Generated by the protocol buffer compiler. DO NOT EDIT!
// source: hadoop_rpc.proto // source: hadoop_rpc.proto
@ -18,6 +35,14 @@ public final class HadoopRpcProtos {
// optional bytes request = 2; // optional bytes request = 2;
boolean hasRequest(); boolean hasRequest();
com.google.protobuf.ByteString getRequest(); com.google.protobuf.ByteString getRequest();
// required string declaringClassProtocolName = 3;
boolean hasDeclaringClassProtocolName();
String getDeclaringClassProtocolName();
// required uint64 clientProtocolVersion = 4;
boolean hasClientProtocolVersion();
long getClientProtocolVersion();
} }
public static final class HadoopRpcRequestProto extends public static final class HadoopRpcRequestProto extends
com.google.protobuf.GeneratedMessage com.google.protobuf.GeneratedMessage
@ -90,9 +115,53 @@ public final class HadoopRpcProtos {
return request_; return request_;
} }
// required string declaringClassProtocolName = 3;
public static final int DECLARINGCLASSPROTOCOLNAME_FIELD_NUMBER = 3;
private java.lang.Object declaringClassProtocolName_;
public boolean hasDeclaringClassProtocolName() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
public String getDeclaringClassProtocolName() {
java.lang.Object ref = declaringClassProtocolName_;
if (ref instanceof String) {
return (String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
String s = bs.toStringUtf8();
if (com.google.protobuf.Internal.isValidUtf8(bs)) {
declaringClassProtocolName_ = s;
}
return s;
}
}
private com.google.protobuf.ByteString getDeclaringClassProtocolNameBytes() {
java.lang.Object ref = declaringClassProtocolName_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8((String) ref);
declaringClassProtocolName_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// required uint64 clientProtocolVersion = 4;
public static final int CLIENTPROTOCOLVERSION_FIELD_NUMBER = 4;
private long clientProtocolVersion_;
public boolean hasClientProtocolVersion() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
public long getClientProtocolVersion() {
return clientProtocolVersion_;
}
private void initFields() { private void initFields() {
methodName_ = ""; methodName_ = "";
request_ = com.google.protobuf.ByteString.EMPTY; request_ = com.google.protobuf.ByteString.EMPTY;
declaringClassProtocolName_ = "";
clientProtocolVersion_ = 0L;
} }
private byte memoizedIsInitialized = -1; private byte memoizedIsInitialized = -1;
public final boolean isInitialized() { public final boolean isInitialized() {
@ -103,6 +172,14 @@ public final class HadoopRpcProtos {
memoizedIsInitialized = 0; memoizedIsInitialized = 0;
return false; return false;
} }
if (!hasDeclaringClassProtocolName()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasClientProtocolVersion()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1; memoizedIsInitialized = 1;
return true; return true;
} }
@ -116,6 +193,12 @@ public final class HadoopRpcProtos {
if (((bitField0_ & 0x00000002) == 0x00000002)) { if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeBytes(2, request_); output.writeBytes(2, request_);
} }
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeBytes(3, getDeclaringClassProtocolNameBytes());
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeUInt64(4, clientProtocolVersion_);
}
getUnknownFields().writeTo(output); getUnknownFields().writeTo(output);
} }
@ -133,6 +216,14 @@ public final class HadoopRpcProtos {
size += com.google.protobuf.CodedOutputStream size += com.google.protobuf.CodedOutputStream
.computeBytesSize(2, request_); .computeBytesSize(2, request_);
} }
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(3, getDeclaringClassProtocolNameBytes());
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(4, clientProtocolVersion_);
}
size += getUnknownFields().getSerializedSize(); size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size; memoizedSerializedSize = size;
return size; return size;
@ -166,6 +257,16 @@ public final class HadoopRpcProtos {
result = result && getRequest() result = result && getRequest()
.equals(other.getRequest()); .equals(other.getRequest());
} }
result = result && (hasDeclaringClassProtocolName() == other.hasDeclaringClassProtocolName());
if (hasDeclaringClassProtocolName()) {
result = result && getDeclaringClassProtocolName()
.equals(other.getDeclaringClassProtocolName());
}
result = result && (hasClientProtocolVersion() == other.hasClientProtocolVersion());
if (hasClientProtocolVersion()) {
result = result && (getClientProtocolVersion()
== other.getClientProtocolVersion());
}
result = result && result = result &&
getUnknownFields().equals(other.getUnknownFields()); getUnknownFields().equals(other.getUnknownFields());
return result; return result;
@ -183,6 +284,14 @@ public final class HadoopRpcProtos {
hash = (37 * hash) + REQUEST_FIELD_NUMBER; hash = (37 * hash) + REQUEST_FIELD_NUMBER;
hash = (53 * hash) + getRequest().hashCode(); hash = (53 * hash) + getRequest().hashCode();
} }
if (hasDeclaringClassProtocolName()) {
hash = (37 * hash) + DECLARINGCLASSPROTOCOLNAME_FIELD_NUMBER;
hash = (53 * hash) + getDeclaringClassProtocolName().hashCode();
}
if (hasClientProtocolVersion()) {
hash = (37 * hash) + CLIENTPROTOCOLVERSION_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getClientProtocolVersion());
}
hash = (29 * hash) + getUnknownFields().hashCode(); hash = (29 * hash) + getUnknownFields().hashCode();
return hash; return hash;
} }
@ -303,6 +412,10 @@ public final class HadoopRpcProtos {
bitField0_ = (bitField0_ & ~0x00000001); bitField0_ = (bitField0_ & ~0x00000001);
request_ = com.google.protobuf.ByteString.EMPTY; request_ = com.google.protobuf.ByteString.EMPTY;
bitField0_ = (bitField0_ & ~0x00000002); bitField0_ = (bitField0_ & ~0x00000002);
declaringClassProtocolName_ = "";
bitField0_ = (bitField0_ & ~0x00000004);
clientProtocolVersion_ = 0L;
bitField0_ = (bitField0_ & ~0x00000008);
return this; return this;
} }
@ -349,6 +462,14 @@ public final class HadoopRpcProtos {
to_bitField0_ |= 0x00000002; to_bitField0_ |= 0x00000002;
} }
result.request_ = request_; result.request_ = request_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.declaringClassProtocolName_ = declaringClassProtocolName_;
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000008;
}
result.clientProtocolVersion_ = clientProtocolVersion_;
result.bitField0_ = to_bitField0_; result.bitField0_ = to_bitField0_;
onBuilt(); onBuilt();
return result; return result;
@ -371,6 +492,12 @@ public final class HadoopRpcProtos {
if (other.hasRequest()) { if (other.hasRequest()) {
setRequest(other.getRequest()); setRequest(other.getRequest());
} }
if (other.hasDeclaringClassProtocolName()) {
setDeclaringClassProtocolName(other.getDeclaringClassProtocolName());
}
if (other.hasClientProtocolVersion()) {
setClientProtocolVersion(other.getClientProtocolVersion());
}
this.mergeUnknownFields(other.getUnknownFields()); this.mergeUnknownFields(other.getUnknownFields());
return this; return this;
} }
@ -380,6 +507,14 @@ public final class HadoopRpcProtos {
return false; return false;
} }
if (!hasDeclaringClassProtocolName()) {
return false;
}
if (!hasClientProtocolVersion()) {
return false;
}
return true; return true;
} }
@ -416,6 +551,16 @@ public final class HadoopRpcProtos {
request_ = input.readBytes(); request_ = input.readBytes();
break; break;
} }
case 26: {
bitField0_ |= 0x00000004;
declaringClassProtocolName_ = input.readBytes();
break;
}
case 32: {
bitField0_ |= 0x00000008;
clientProtocolVersion_ = input.readUInt64();
break;
}
} }
} }
} }
@ -482,6 +627,63 @@ public final class HadoopRpcProtos {
return this; return this;
} }
// required string declaringClassProtocolName = 3;
private java.lang.Object declaringClassProtocolName_ = "";
public boolean hasDeclaringClassProtocolName() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
public String getDeclaringClassProtocolName() {
java.lang.Object ref = declaringClassProtocolName_;
if (!(ref instanceof String)) {
String s = ((com.google.protobuf.ByteString) ref).toStringUtf8();
declaringClassProtocolName_ = s;
return s;
} else {
return (String) ref;
}
}
public Builder setDeclaringClassProtocolName(String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000004;
declaringClassProtocolName_ = value;
onChanged();
return this;
}
public Builder clearDeclaringClassProtocolName() {
bitField0_ = (bitField0_ & ~0x00000004);
declaringClassProtocolName_ = getDefaultInstance().getDeclaringClassProtocolName();
onChanged();
return this;
}
void setDeclaringClassProtocolName(com.google.protobuf.ByteString value) {
bitField0_ |= 0x00000004;
declaringClassProtocolName_ = value;
onChanged();
}
// required uint64 clientProtocolVersion = 4;
private long clientProtocolVersion_ ;
public boolean hasClientProtocolVersion() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
public long getClientProtocolVersion() {
return clientProtocolVersion_;
}
public Builder setClientProtocolVersion(long value) {
bitField0_ |= 0x00000008;
clientProtocolVersion_ = value;
onChanged();
return this;
}
public Builder clearClientProtocolVersion() {
bitField0_ = (bitField0_ & ~0x00000008);
clientProtocolVersion_ = 0L;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:HadoopRpcRequestProto) // @@protoc_insertion_point(builder_scope:HadoopRpcRequestProto)
} }
@ -1706,16 +1908,18 @@ public final class HadoopRpcProtos {
descriptor; descriptor;
static { static {
java.lang.String[] descriptorData = { java.lang.String[] descriptorData = {
"\n\020hadoop_rpc.proto\"<\n\025HadoopRpcRequestPr" + "\n\020hadoop_rpc.proto\"\177\n\025HadoopRpcRequestPr" +
"oto\022\022\n\nmethodName\030\001 \002(\t\022\017\n\007request\030\002 \001(\014" + "oto\022\022\n\nmethodName\030\001 \002(\t\022\017\n\007request\030\002 \001(\014" +
"\"D\n\027HadoopRpcExceptionProto\022\025\n\rexception" + "\022\"\n\032declaringClassProtocolName\030\003 \002(\t\022\035\n\025" +
"Name\030\001 \001(\t\022\022\n\nstackTrace\030\002 \001(\t\"\272\001\n\026Hadoo" + "clientProtocolVersion\030\004 \002(\004\"D\n\027HadoopRpc" +
"pRpcResponseProto\0226\n\006status\030\001 \002(\0162&.Hado" + "ExceptionProto\022\025\n\rexceptionName\030\001 \001(\t\022\022\n" +
"opRpcResponseProto.ResponseStatus\022\020\n\010res" + "\nstackTrace\030\002 \001(\t\"\272\001\n\026HadoopRpcResponseP" +
"ponse\030\002 \001(\014\022+\n\texception\030\003 \001(\0132\030.HadoopR" + "roto\0226\n\006status\030\001 \002(\0162&.HadoopRpcResponse" +
"pcExceptionProto\")\n\016ResponseStatus\022\013\n\007SU" + "Proto.ResponseStatus\022\020\n\010response\030\002 \001(\014\022+" +
"CCESS\020\001\022\n\n\006ERRROR\020\002B4\n\036org.apache.hadoop" + "\n\texception\030\003 \001(\0132\030.HadoopRpcExceptionPr" +
".ipc.protobufB\017HadoopRpcProtos\240\001\001" "oto\")\n\016ResponseStatus\022\013\n\007SUCCESS\020\001\022\n\n\006ER",
"RROR\020\002B4\n\036org.apache.hadoop.ipc.protobuf" +
"B\017HadoopRpcProtos\240\001\001"
}; };
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@ -1727,7 +1931,7 @@ public final class HadoopRpcProtos {
internal_static_HadoopRpcRequestProto_fieldAccessorTable = new internal_static_HadoopRpcRequestProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable( com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_HadoopRpcRequestProto_descriptor, internal_static_HadoopRpcRequestProto_descriptor,
new java.lang.String[] { "MethodName", "Request", }, new java.lang.String[] { "MethodName", "Request", "DeclaringClassProtocolName", "ClientProtocolVersion", },
org.apache.hadoop.ipc.protobuf.HadoopRpcProtos.HadoopRpcRequestProto.class, org.apache.hadoop.ipc.protobuf.HadoopRpcProtos.HadoopRpcRequestProto.class,
org.apache.hadoop.ipc.protobuf.HadoopRpcProtos.HadoopRpcRequestProto.Builder.class); org.apache.hadoop.ipc.protobuf.HadoopRpcProtos.HadoopRpcRequestProto.Builder.class);
internal_static_HadoopRpcExceptionProto_descriptor = internal_static_HadoopRpcExceptionProto_descriptor =

View File

@ -42,15 +42,20 @@ public class DelegationKey implements Writable {
@Nullable @Nullable
private byte[] keyBytes = null; private byte[] keyBytes = null;
/** Default constructor required for Writable */
public DelegationKey() { public DelegationKey() {
this(0, 0L, null); this(0, 0L, (SecretKey)null);
} }
public DelegationKey(int keyId, long expiryDate, SecretKey key) { public DelegationKey(int keyId, long expiryDate, SecretKey key) {
this(keyId, expiryDate, key != null ? key.getEncoded() : null);
}
public DelegationKey(int keyId, long expiryDate, byte[] encodedKey) {
this.keyId = keyId; this.keyId = keyId;
this.expiryDate = expiryDate; this.expiryDate = expiryDate;
if (key!=null) { if (encodedKey != null) {
this.keyBytes = key.getEncoded(); this.keyBytes = encodedKey;
} }
} }
@ -71,6 +76,10 @@ public class DelegationKey implements Writable {
} }
} }
public byte[] getEncodedKey() {
return keyBytes;
}
public void setExpiryDate(long expiryDate) { public void setExpiryDate(long expiryDate) {
this.expiryDate = expiryDate; this.expiryDate = expiryDate;
} }
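With the encoded-bytes constructor and accessor above, a delegation key can be carried around without going through javax.crypto types, which is what the protobuf-based RPCs need. A minimal round-trip sketch; the HmacSHA1 algorithm and the key id used here are placeholders for illustration only.

import javax.crypto.KeyGenerator;
import javax.crypto.SecretKey;

import org.apache.hadoop.security.token.delegation.DelegationKey;

public class DelegationKeyRoundTripSketch {
  public static void main(String[] args) throws Exception {
    // Placeholder key material; any SecretKey works here.
    SecretKey secret = KeyGenerator.getInstance("HmacSHA1").generateKey();

    // Existing path: construct from a SecretKey.
    DelegationKey original = new DelegationKey(1, System.currentTimeMillis(), secret);

    // New path: read back the raw encoded bytes (e.g. to embed them in a
    // protobuf message) and rebuild an equivalent key on the other side.
    byte[] encoded = original.getEncodedKey();
    DelegationKey copy = new DelegationKey(1, original.getExpiryDate(), encoded);

    System.out.println(encoded.length + " key bytes, expiry " + copy.getExpiryDate());
  }
}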

View File

@ -34,6 +34,12 @@ message HadoopRpcRequestProto {
/** Bytes corresponding to the client protobuf request */ /** Bytes corresponding to the client protobuf request */
optional bytes request = 2; optional bytes request = 2;
/** protocol name of class declaring the called method */
required string declaringClassProtocolName = 3;
/** protocol version of class declaring the called method */
required uint64 clientProtocolVersion = 4;
} }
/** /**
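For reference, this is roughly how a request carrying the two new required fields is assembled through the generated builder shown earlier, assuming the usual generated setters for the existing methodName and request fields; the method name, payload, protocol name and version below are placeholders, not values taken from this patch.

import com.google.protobuf.ByteString;

import org.apache.hadoop.ipc.protobuf.HadoopRpcProtos.HadoopRpcRequestProto;

public class HadoopRpcRequestBuilderSketch {
  public static void main(String[] args) {
    HadoopRpcRequestProto request = HadoopRpcRequestProto.newBuilder()
        .setMethodName("echo")                          // placeholder method name
        .setRequest(ByteString.copyFromUtf8("payload")) // placeholder serialized request
        .setDeclaringClassProtocolName("testProto")     // placeholder protocol name
        .setClientProtocolVersion(1L)
        .build();

    // Both new fields are required, so build() enforces their presence.
    System.out.println(request.getDeclaringClassProtocolName()
        + " v" + request.getClientProtocolVersion());
  }
}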

View File

@ -34,6 +34,7 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text; import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
import org.apache.hadoop.ipc.TestSaslRPC.CustomSecurityInfo; import org.apache.hadoop.ipc.TestSaslRPC.CustomSecurityInfo;
import org.apache.hadoop.ipc.TestSaslRPC.TestTokenIdentifier; import org.apache.hadoop.ipc.TestSaslRPC.TestTokenIdentifier;
import org.apache.hadoop.ipc.TestSaslRPC.TestTokenSecretManager; import org.apache.hadoop.ipc.TestSaslRPC.TestTokenSecretManager;
@ -101,7 +102,8 @@ public class TestAvroRpc extends TestCase {
RPC.setProtocolEngine(conf, AvroTestProtocol.class, AvroRpcEngine.class); RPC.setProtocolEngine(conf, AvroTestProtocol.class, AvroRpcEngine.class);
RPC.Server server = RPC.getServer(EmptyProtocol.class, new EmptyImpl(), RPC.Server server = RPC.getServer(EmptyProtocol.class, new EmptyImpl(),
ADDRESS, 0, 5, true, conf, sm); ADDRESS, 0, 5, true, conf, sm);
server.addProtocol(AvroTestProtocol.class, new TestImpl()); server.addProtocol(RpcKind.RPC_WRITABLE,
AvroTestProtocol.class, new TestImpl());
try { try {
server.start(); server.start();

View File

@ -23,6 +23,7 @@ import org.apache.commons.logging.*;
import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.Writable; import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.net.NetUtils;
@ -96,8 +97,8 @@ public class TestIPC {
} }
@Override @Override
public Writable call(String protocol, Writable param, long receiveTime) public Writable call(RpcKind rpcKind, String protocol, Writable param,
throws IOException { long receiveTime) throws IOException {
if (sleep) { if (sleep) {
// sleep a bit // sleep a bit
try { try {

View File

@ -30,6 +30,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.io.BytesWritable; import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.Writable; import org.apache.hadoop.io.Writable;
import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.net.NetUtils;
/** /**
@ -72,8 +73,8 @@ public class TestIPCServerResponder extends TestCase {
} }
@Override @Override
public Writable call(String protocol, Writable param, long receiveTime) public Writable call(RpcKind rpcKind, String protocol, Writable param,
throws IOException { long receiveTime) throws IOException {
if (sleep) { if (sleep) {
try { try {
Thread.sleep(RANDOM.nextInt(20)); // sleep a bit Thread.sleep(RANDOM.nextInt(20)); // sleep a bit

View File

@ -23,10 +23,15 @@ import java.net.InetSocketAddress;
import org.junit.Assert; import org.junit.Assert;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
import org.apache.hadoop.ipc.TestProtoBufRpc.PBServerImpl;
import org.apache.hadoop.ipc.TestProtoBufRpc.TestRpcService;
import org.apache.hadoop.ipc.protobuf.TestRpcServiceProtos.TestProtobufRpcProto;
import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.net.NetUtils;
import org.junit.Before; import org.junit.Before;
import org.junit.After; import org.junit.After;
import org.junit.Test; import org.junit.Test;
import com.google.protobuf.BlockingService;
public class TestMultipleProtocolServer { public class TestMultipleProtocolServer {
private static final String ADDRESS = "0.0.0.0"; private static final String ADDRESS = "0.0.0.0";
@ -173,9 +178,19 @@ public class TestMultipleProtocolServer {
// create a server with two handlers // create a server with two handlers
server = RPC.getServer(Foo0.class, server = RPC.getServer(Foo0.class,
new Foo0Impl(), ADDRESS, 0, 2, false, conf, null); new Foo0Impl(), ADDRESS, 0, 2, false, conf, null);
server.addProtocol(Foo1.class, new Foo1Impl()); server.addProtocol(RpcKind.RPC_WRITABLE, Foo1.class, new Foo1Impl());
server.addProtocol(Bar.class, new BarImpl()); server.addProtocol(RpcKind.RPC_WRITABLE, Bar.class, new BarImpl());
server.addProtocol(Mixin.class, new BarImpl()); server.addProtocol(RpcKind.RPC_WRITABLE, Mixin.class, new BarImpl());
// Add Protobuf server
// Create server side implementation
PBServerImpl pbServerImpl =
new PBServerImpl();
BlockingService service = TestProtobufRpcProto
.newReflectiveBlockingService(pbServerImpl);
server.addProtocol(RpcKind.RPC_PROTOCOL_BUFFER, TestRpcService.class,
service);
server.start(); server.start();
addr = NetUtils.getConnectAddress(server); addr = NetUtils.getConnectAddress(server);
} }
@ -252,4 +267,15 @@ public class TestMultipleProtocolServer {
RPC.getServer(Foo1.class, RPC.getServer(Foo1.class,
new Foo0Impl(), ADDRESS, 0, 2, false, conf, null); new Foo0Impl(), ADDRESS, 0, 2, false, conf, null);
} }
// Now test a PB service - a server hosts both PB and Writable Rpcs.
@Test
public void testPBService() throws Exception {
// Set RPC engine to protobuf RPC engine
Configuration conf2 = new Configuration();
RPC.setProtocolEngine(conf2, TestRpcService.class,
ProtobufRpcEngine.class);
TestRpcService client = RPC.getProxy(TestRpcService.class, 0, addr, conf2);
TestProtoBufRpc.testProtoBufRpc(client);
}
} }

View File

@ -21,14 +21,18 @@ import java.io.IOException;
import java.net.InetSocketAddress; import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
import org.apache.hadoop.ipc.protobuf.TestProtos.EchoRequestProto; import org.apache.hadoop.ipc.protobuf.TestProtos.EchoRequestProto;
import org.apache.hadoop.ipc.protobuf.TestProtos.EchoResponseProto; import org.apache.hadoop.ipc.protobuf.TestProtos.EchoResponseProto;
import org.apache.hadoop.ipc.protobuf.TestProtos.EmptyRequestProto; import org.apache.hadoop.ipc.protobuf.TestProtos.EmptyRequestProto;
import org.apache.hadoop.ipc.protobuf.TestProtos.EmptyResponseProto; import org.apache.hadoop.ipc.protobuf.TestProtos.EmptyResponseProto;
import org.apache.hadoop.ipc.protobuf.TestRpcServiceProtos.TestProtobufRpcProto; import org.apache.hadoop.ipc.protobuf.TestRpcServiceProtos.TestProtobufRpcProto;
import org.apache.hadoop.ipc.protobuf.TestRpcServiceProtos.TestProtobufRpcProto.BlockingInterface; import org.apache.hadoop.ipc.protobuf.TestRpcServiceProtos.TestProtobufRpc2Proto;
import org.apache.hadoop.net.NetUtils;
import org.junit.Assert; import org.junit.Assert;
import org.junit.Test; import org.junit.Test;
import org.junit.Before;
import org.junit.After;
import com.google.protobuf.BlockingService; import com.google.protobuf.BlockingService;
import com.google.protobuf.RpcController; import com.google.protobuf.RpcController;
@ -42,8 +46,21 @@ import com.google.protobuf.ServiceException;
public class TestProtoBufRpc { public class TestProtoBufRpc {
public final static String ADDRESS = "0.0.0.0"; public final static String ADDRESS = "0.0.0.0";
public final static int PORT = 0; public final static int PORT = 0;
private static InetSocketAddress addr;
private static Configuration conf;
private static RPC.Server server;
public static class ServerImpl implements BlockingInterface { @ProtocolInfo(protocolName = "testProto", protocolVersion = 1)
public interface TestRpcService
extends TestProtobufRpcProto.BlockingInterface {
}
@ProtocolInfo(protocolName = "testProto2", protocolVersion = 1)
public interface TestRpcService2 extends
TestProtobufRpc2Proto.BlockingInterface {
}
public static class PBServerImpl implements TestRpcService {
@Override @Override
public EmptyResponseProto ping(RpcController unused, public EmptyResponseProto ping(RpcController unused,
@ -65,39 +82,77 @@ public class TestProtoBufRpc {
} }
} }
private static RPC.Server startRPCServer(Configuration conf) public static class PBServer2Impl implements TestRpcService2 {
throws IOException {
@Override
public EmptyResponseProto ping2(RpcController unused,
EmptyRequestProto request) throws ServiceException {
return EmptyResponseProto.newBuilder().build();
}
@Override
public EchoResponseProto echo2(RpcController unused, EchoRequestProto request)
throws ServiceException {
return EchoResponseProto.newBuilder().setMessage(request.getMessage())
.build();
}
}
@Before
public void setUp() throws IOException { // Setup server for both protocols
conf = new Configuration();
// Set RPC engine to protobuf RPC engine // Set RPC engine to protobuf RPC engine
RPC.setProtocolEngine(conf, BlockingService.class, ProtobufRpcEngine.class); RPC.setProtocolEngine(conf, TestRpcService.class, ProtobufRpcEngine.class);
// Create server side implementation // Create server side implementation
ServerImpl serverImpl = new ServerImpl(); PBServerImpl serverImpl = new PBServerImpl();
BlockingService service = TestProtobufRpcProto BlockingService service = TestProtobufRpcProto
.newReflectiveBlockingService(serverImpl); .newReflectiveBlockingService(serverImpl);
// Get RPC server for serer side implementation // Get RPC server for server side implementation
RPC.Server server = RPC.getServer(BlockingService.class, service, ADDRESS, server = RPC.getServer(TestRpcService.class, service, ADDRESS, PORT, conf);
PORT, conf); addr = NetUtils.getConnectAddress(server);
// now the second protocol
PBServer2Impl server2Impl = new PBServer2Impl();
BlockingService service2 = TestProtobufRpc2Proto
.newReflectiveBlockingService(server2Impl);
server.addProtocol(RpcKind.RPC_PROTOCOL_BUFFER, TestRpcService2.class,
service2);
server.start(); server.start();
return server;
} }
private static BlockingInterface getClient(Configuration conf,
InetSocketAddress addr) throws IOException { @After
public void tearDown() throws Exception {
server.stop();
}
private static TestRpcService getClient() throws IOException {
// Set RPC engine to protobuf RPC engine // Set RPC engine to protobuf RPC engine
RPC.setProtocolEngine(conf, BlockingInterface.class, RPC.setProtocolEngine(conf, TestRpcService.class,
ProtobufRpcEngine.class); ProtobufRpcEngine.class);
BlockingInterface client = RPC.getProxy(BlockingInterface.class, 0, addr, return RPC.getProxy(TestRpcService.class, 0, addr,
conf);
}
private static TestRpcService2 getClient2() throws IOException {
// Set RPC engine to protobuf RPC engine
RPC.setProtocolEngine(conf, TestRpcService2.class,
ProtobufRpcEngine.class);
return RPC.getProxy(TestRpcService2.class, 0, addr,
conf); conf);
return client;
} }
@Test @Test
public void testProtoBufRpc() throws Exception { public void testProtoBufRpc() throws Exception {
Configuration conf = new Configuration(); TestRpcService client = getClient();
RPC.Server server = startRPCServer(conf); testProtoBufRpc(client);
BlockingInterface client = getClient(conf, server.getListenerAddress()); }
// separated test out so that other tests can call it.
public static void testProtoBufRpc(TestRpcService client) throws Exception {
// Test ping method // Test ping method
EmptyRequestProto emptyRequest = EmptyRequestProto.newBuilder().build(); EmptyRequestProto emptyRequest = EmptyRequestProto.newBuilder().build();
client.ping(null, emptyRequest); client.ping(null, emptyRequest);
@ -108,16 +163,29 @@ public class TestProtoBufRpc {
EchoResponseProto echoResponse = client.echo(null, echoRequest); EchoResponseProto echoResponse = client.echo(null, echoRequest);
Assert.assertEquals(echoResponse.getMessage(), "hello"); Assert.assertEquals(echoResponse.getMessage(), "hello");
// Test error method - it should be thrown as RemoteException // Test error method - error should be thrown as RemoteException
try { try {
client.error(null, emptyRequest); client.error(null, emptyRequest);
Assert.fail("Expected exception is not thrown"); Assert.fail("Expected exception is not thrown");
} catch (ServiceException e) { } catch (ServiceException e) {
RemoteException re = (RemoteException)e.getCause(); RemoteException re = (RemoteException)e.getCause();
re.printStackTrace();
RpcServerException rse = (RpcServerException) re RpcServerException rse = (RpcServerException) re
.unwrapRemoteException(RpcServerException.class); .unwrapRemoteException(RpcServerException.class);
rse.printStackTrace();
} }
} }
@Test
public void testProtoBufRpc2() throws Exception {
TestRpcService2 client = getClient2();
// Test ping method
EmptyRequestProto emptyRequest = EmptyRequestProto.newBuilder().build();
client.ping2(null, emptyRequest);
// Test echo method
EchoRequestProto echoRequest = EchoRequestProto.newBuilder()
.setMessage("hello").build();
EchoResponseProto echoResponse = client.echo2(null, echoRequest);
Assert.assertEquals(echoResponse.getMessage(), "hello");
}
} }
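Condensing the setUp, getClient and testProtoBufRpc pieces above into one flow, a protobuf RPC round trip against TestRpcService looks roughly like this; the bind address and port are the same placeholders the test uses, and error handling is omitted.

import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.TestProtoBufRpc.PBServerImpl;
import org.apache.hadoop.ipc.TestProtoBufRpc.TestRpcService;
import org.apache.hadoop.ipc.protobuf.TestProtos.EchoRequestProto;
import org.apache.hadoop.ipc.protobuf.TestRpcServiceProtos.TestProtobufRpcProto;
import org.apache.hadoop.net.NetUtils;

import com.google.protobuf.BlockingService;

public class ProtoBufRpcRoundTripSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    RPC.setProtocolEngine(conf, TestRpcService.class, ProtobufRpcEngine.class);

    // Server side: wrap the implementation in a BlockingService and expose it.
    BlockingService service =
        TestProtobufRpcProto.newReflectiveBlockingService(new PBServerImpl());
    RPC.Server server = RPC.getServer(TestRpcService.class, service, "0.0.0.0", 0, conf);
    server.start();

    // Client side: a proxy typed by the annotated TestRpcService interface.
    InetSocketAddress addr = NetUtils.getConnectAddress(server);
    TestRpcService client = RPC.getProxy(TestRpcService.class, 0, addr, conf);

    EchoRequestProto echoRequest =
        EchoRequestProto.newBuilder().setMessage("hello").build();
    System.out.println(client.echo(null, echoRequest).getMessage());

    server.stop();
  }
}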

View File

@ -31,6 +31,7 @@ import junit.framework.Assert;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.net.NetUtils;
import org.junit.After; import org.junit.After;
import org.junit.Test; import org.junit.Test;
@ -56,6 +57,8 @@ public class TestRPCCompatibility {
String echo(String value) throws IOException; String echo(String value) throws IOException;
} }
// TestProtocol2 is a compatible impl of TestProtocol1 - hence use its name
@ProtocolInfo(protocolName= @ProtocolInfo(protocolName=
"org.apache.hadoop.ipc.TestRPCCompatibility$TestProtocol1") "org.apache.hadoop.ipc.TestRPCCompatibility$TestProtocol1")
public interface TestProtocol2 extends TestProtocol1 { public interface TestProtocol2 extends TestProtocol1 {
@ -114,9 +117,11 @@ public class TestRPCCompatibility {
public void tearDown() throws IOException { public void tearDown() throws IOException {
if (proxy != null) { if (proxy != null) {
RPC.stopProxy(proxy.getProxy()); RPC.stopProxy(proxy.getProxy());
proxy = null;
} }
if (server != null) { if (server != null) {
server.stop(); server.stop();
server = null;
} }
} }
@ -126,7 +131,7 @@ public class TestRPCCompatibility {
TestImpl1 impl = new TestImpl1(); TestImpl1 impl = new TestImpl1();
server = RPC.getServer(TestProtocol1.class, server = RPC.getServer(TestProtocol1.class,
impl, ADDRESS, 0, 2, false, conf, null); impl, ADDRESS, 0, 2, false, conf, null);
server.addProtocol(TestProtocol0.class, impl); server.addProtocol(RpcKind.RPC_WRITABLE, TestProtocol0.class, impl);
server.start(); server.start();
addr = NetUtils.getConnectAddress(server); addr = NetUtils.getConnectAddress(server);
@ -170,8 +175,10 @@ public class TestRPCCompatibility {
public int echo(int value) throws IOException, NumberFormatException { public int echo(int value) throws IOException, NumberFormatException {
if (serverInfo.isMethodSupported("echo", int.class)) { if (serverInfo.isMethodSupported("echo", int.class)) {
System.out.println("echo int is supported");
return -value; // use version 3 echo long return -value; // use version 3 echo long
} else { // server is version 2 } else { // server is version 2
System.out.println("echo int is NOT supported");
return Integer.parseInt(proxy2.echo(String.valueOf(value))); return Integer.parseInt(proxy2.echo(String.valueOf(value)));
} }
} }
@ -191,7 +198,7 @@ public class TestRPCCompatibility {
TestImpl1 impl = new TestImpl1(); TestImpl1 impl = new TestImpl1();
server = RPC.getServer(TestProtocol1.class, server = RPC.getServer(TestProtocol1.class,
impl, ADDRESS, 0, 2, false, conf, null); impl, ADDRESS, 0, 2, false, conf, null);
server.addProtocol(TestProtocol0.class, impl); server.addProtocol(RpcKind.RPC_WRITABLE, TestProtocol0.class, impl);
server.start(); server.start();
addr = NetUtils.getConnectAddress(server); addr = NetUtils.getConnectAddress(server);
@ -207,11 +214,12 @@ public class TestRPCCompatibility {
@Test // equal version client and server @Test // equal version client and server
public void testVersion2ClientVersion2Server() throws Exception { public void testVersion2ClientVersion2Server() throws Exception {
ProtocolSignature.resetCache();
// create a server with two handlers // create a server with two handlers
TestImpl2 impl = new TestImpl2(); TestImpl2 impl = new TestImpl2();
server = RPC.getServer(TestProtocol2.class, server = RPC.getServer(TestProtocol2.class,
impl, ADDRESS, 0, 2, false, conf, null); impl, ADDRESS, 0, 2, false, conf, null);
server.addProtocol(TestProtocol0.class, impl); server.addProtocol(RpcKind.RPC_WRITABLE, TestProtocol0.class, impl);
server.start(); server.start();
addr = NetUtils.getConnectAddress(server); addr = NetUtils.getConnectAddress(server);

View File

@ -359,6 +359,292 @@ public final class TestRpcServiceProtos {
} }
} }
public static abstract class TestProtobufRpc2Proto
implements com.google.protobuf.Service {
protected TestProtobufRpc2Proto() {}
public interface Interface {
public abstract void ping2(
com.google.protobuf.RpcController controller,
org.apache.hadoop.ipc.protobuf.TestProtos.EmptyRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.ipc.protobuf.TestProtos.EmptyResponseProto> done);
public abstract void echo2(
com.google.protobuf.RpcController controller,
org.apache.hadoop.ipc.protobuf.TestProtos.EchoRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.ipc.protobuf.TestProtos.EchoResponseProto> done);
}
public static com.google.protobuf.Service newReflectiveService(
final Interface impl) {
return new TestProtobufRpc2Proto() {
@java.lang.Override
public void ping2(
com.google.protobuf.RpcController controller,
org.apache.hadoop.ipc.protobuf.TestProtos.EmptyRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.ipc.protobuf.TestProtos.EmptyResponseProto> done) {
impl.ping2(controller, request, done);
}
@java.lang.Override
public void echo2(
com.google.protobuf.RpcController controller,
org.apache.hadoop.ipc.protobuf.TestProtos.EchoRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.ipc.protobuf.TestProtos.EchoResponseProto> done) {
impl.echo2(controller, request, done);
}
};
}
public static com.google.protobuf.BlockingService
newReflectiveBlockingService(final BlockingInterface impl) {
return new com.google.protobuf.BlockingService() {
public final com.google.protobuf.Descriptors.ServiceDescriptor
getDescriptorForType() {
return getDescriptor();
}
public final com.google.protobuf.Message callBlockingMethod(
com.google.protobuf.Descriptors.MethodDescriptor method,
com.google.protobuf.RpcController controller,
com.google.protobuf.Message request)
throws com.google.protobuf.ServiceException {
if (method.getService() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"Service.callBlockingMethod() given method descriptor for " +
"wrong service type.");
}
switch(method.getIndex()) {
case 0:
return impl.ping2(controller, (org.apache.hadoop.ipc.protobuf.TestProtos.EmptyRequestProto)request);
case 1:
return impl.echo2(controller, (org.apache.hadoop.ipc.protobuf.TestProtos.EchoRequestProto)request);
default:
throw new java.lang.AssertionError("Can't get here.");
}
}
public final com.google.protobuf.Message
getRequestPrototype(
com.google.protobuf.Descriptors.MethodDescriptor method) {
if (method.getService() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"Service.getRequestPrototype() given method " +
"descriptor for wrong service type.");
}
switch(method.getIndex()) {
case 0:
return org.apache.hadoop.ipc.protobuf.TestProtos.EmptyRequestProto.getDefaultInstance();
case 1:
return org.apache.hadoop.ipc.protobuf.TestProtos.EchoRequestProto.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
}
public final com.google.protobuf.Message
getResponsePrototype(
com.google.protobuf.Descriptors.MethodDescriptor method) {
if (method.getService() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"Service.getResponsePrototype() given method " +
"descriptor for wrong service type.");
}
switch(method.getIndex()) {
case 0:
return org.apache.hadoop.ipc.protobuf.TestProtos.EmptyResponseProto.getDefaultInstance();
case 1:
return org.apache.hadoop.ipc.protobuf.TestProtos.EchoResponseProto.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
}
};
}
public abstract void ping2(
com.google.protobuf.RpcController controller,
org.apache.hadoop.ipc.protobuf.TestProtos.EmptyRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.ipc.protobuf.TestProtos.EmptyResponseProto> done);
public abstract void echo2(
com.google.protobuf.RpcController controller,
org.apache.hadoop.ipc.protobuf.TestProtos.EchoRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.ipc.protobuf.TestProtos.EchoResponseProto> done);
public static final
com.google.protobuf.Descriptors.ServiceDescriptor
getDescriptor() {
return org.apache.hadoop.ipc.protobuf.TestRpcServiceProtos.getDescriptor().getServices().get(1);
}
public final com.google.protobuf.Descriptors.ServiceDescriptor
getDescriptorForType() {
return getDescriptor();
}
public final void callMethod(
com.google.protobuf.Descriptors.MethodDescriptor method,
com.google.protobuf.RpcController controller,
com.google.protobuf.Message request,
com.google.protobuf.RpcCallback<
com.google.protobuf.Message> done) {
if (method.getService() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"Service.callMethod() given method descriptor for wrong " +
"service type.");
}
switch(method.getIndex()) {
case 0:
this.ping2(controller, (org.apache.hadoop.ipc.protobuf.TestProtos.EmptyRequestProto)request,
com.google.protobuf.RpcUtil.<org.apache.hadoop.ipc.protobuf.TestProtos.EmptyResponseProto>specializeCallback(
done));
return;
case 1:
this.echo2(controller, (org.apache.hadoop.ipc.protobuf.TestProtos.EchoRequestProto)request,
com.google.protobuf.RpcUtil.<org.apache.hadoop.ipc.protobuf.TestProtos.EchoResponseProto>specializeCallback(
done));
return;
default:
throw new java.lang.AssertionError("Can't get here.");
}
}
public final com.google.protobuf.Message
getRequestPrototype(
com.google.protobuf.Descriptors.MethodDescriptor method) {
if (method.getService() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"Service.getRequestPrototype() given method " +
"descriptor for wrong service type.");
}
switch(method.getIndex()) {
case 0:
return org.apache.hadoop.ipc.protobuf.TestProtos.EmptyRequestProto.getDefaultInstance();
case 1:
return org.apache.hadoop.ipc.protobuf.TestProtos.EchoRequestProto.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
}
public final com.google.protobuf.Message
getResponsePrototype(
com.google.protobuf.Descriptors.MethodDescriptor method) {
if (method.getService() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"Service.getResponsePrototype() given method " +
"descriptor for wrong service type.");
}
switch(method.getIndex()) {
case 0:
return org.apache.hadoop.ipc.protobuf.TestProtos.EmptyResponseProto.getDefaultInstance();
case 1:
return org.apache.hadoop.ipc.protobuf.TestProtos.EchoResponseProto.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
}
public static Stub newStub(
com.google.protobuf.RpcChannel channel) {
return new Stub(channel);
}
public static final class Stub extends org.apache.hadoop.ipc.protobuf.TestRpcServiceProtos.TestProtobufRpc2Proto implements Interface {
private Stub(com.google.protobuf.RpcChannel channel) {
this.channel = channel;
}
private final com.google.protobuf.RpcChannel channel;
public com.google.protobuf.RpcChannel getChannel() {
return channel;
}
public void ping2(
com.google.protobuf.RpcController controller,
org.apache.hadoop.ipc.protobuf.TestProtos.EmptyRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.ipc.protobuf.TestProtos.EmptyResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(0),
controller,
request,
org.apache.hadoop.ipc.protobuf.TestProtos.EmptyResponseProto.getDefaultInstance(),
com.google.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.ipc.protobuf.TestProtos.EmptyResponseProto.class,
org.apache.hadoop.ipc.protobuf.TestProtos.EmptyResponseProto.getDefaultInstance()));
}
public void echo2(
com.google.protobuf.RpcController controller,
org.apache.hadoop.ipc.protobuf.TestProtos.EchoRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.ipc.protobuf.TestProtos.EchoResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(1),
controller,
request,
org.apache.hadoop.ipc.protobuf.TestProtos.EchoResponseProto.getDefaultInstance(),
com.google.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.ipc.protobuf.TestProtos.EchoResponseProto.class,
org.apache.hadoop.ipc.protobuf.TestProtos.EchoResponseProto.getDefaultInstance()));
}
}
public static BlockingInterface newBlockingStub(
com.google.protobuf.BlockingRpcChannel channel) {
return new BlockingStub(channel);
}
public interface BlockingInterface {
public org.apache.hadoop.ipc.protobuf.TestProtos.EmptyResponseProto ping2(
com.google.protobuf.RpcController controller,
org.apache.hadoop.ipc.protobuf.TestProtos.EmptyRequestProto request)
throws com.google.protobuf.ServiceException;
public org.apache.hadoop.ipc.protobuf.TestProtos.EchoResponseProto echo2(
com.google.protobuf.RpcController controller,
org.apache.hadoop.ipc.protobuf.TestProtos.EchoRequestProto request)
throws com.google.protobuf.ServiceException;
}
private static final class BlockingStub implements BlockingInterface {
private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) {
this.channel = channel;
}
private final com.google.protobuf.BlockingRpcChannel channel;
public org.apache.hadoop.ipc.protobuf.TestProtos.EmptyResponseProto ping2(
com.google.protobuf.RpcController controller,
org.apache.hadoop.ipc.protobuf.TestProtos.EmptyRequestProto request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.ipc.protobuf.TestProtos.EmptyResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(0),
controller,
request,
org.apache.hadoop.ipc.protobuf.TestProtos.EmptyResponseProto.getDefaultInstance());
}
public org.apache.hadoop.ipc.protobuf.TestProtos.EchoResponseProto echo2(
com.google.protobuf.RpcController controller,
org.apache.hadoop.ipc.protobuf.TestProtos.EchoRequestProto request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.ipc.protobuf.TestProtos.EchoResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(1),
controller,
request,
org.apache.hadoop.ipc.protobuf.TestProtos.EchoResponseProto.getDefaultInstance());
}
}
}
public static com.google.protobuf.Descriptors.FileDescriptor public static com.google.protobuf.Descriptors.FileDescriptor
getDescriptor() { getDescriptor() {
@ -373,8 +659,11 @@ public final class TestRpcServiceProtos {
"uestProto\032\023.EmptyResponseProto\022-\n\004echo\022\021" + "uestProto\032\023.EmptyResponseProto\022-\n\004echo\022\021" +
".EchoRequestProto\032\022.EchoResponseProto\0220\n" + ".EchoRequestProto\032\022.EchoResponseProto\0220\n" +
"\005error\022\022.EmptyRequestProto\032\023.EmptyRespon" + "\005error\022\022.EmptyRequestProto\032\023.EmptyRespon" +
"seProtoB<\n\036org.apache.hadoop.ipc.protobu" + "seProto2y\n\025TestProtobufRpc2Proto\0220\n\005ping" +
"fB\024TestRpcServiceProtos\210\001\001\240\001\001" "2\022\022.EmptyRequestProto\032\023.EmptyResponsePro" +
"to\022.\n\005echo2\022\021.EchoRequestProto\032\022.EchoRes" +
"ponseProtoB<\n\036org.apache.hadoop.ipc.prot" +
"obufB\024TestRpcServiceProtos\210\001\001\240\001\001"
}; };
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {

View File

@ -31,3 +31,8 @@ service TestProtobufRpcProto {
rpc echo(EchoRequestProto) returns (EchoResponseProto); rpc echo(EchoRequestProto) returns (EchoResponseProto);
rpc error(EmptyRequestProto) returns (EmptyResponseProto); rpc error(EmptyRequestProto) returns (EmptyResponseProto);
} }
service TestProtobufRpc2Proto {
rpc ping2(EmptyRequestProto) returns (EmptyResponseProto);
rpc echo2(EchoRequestProto) returns (EchoResponseProto);
}

View File

@ -68,6 +68,10 @@ Trunk (unreleased changes)
HDFS-2410. Further cleanup of hardcoded configuration keys and values. HDFS-2410. Further cleanup of hardcoded configuration keys and values.
(suresh) (suresh)
HADOOP-7862. HDFS changes to work with HADOOP-7862:
Move the support for multiple protocols to lower layer so that Writable,
PB and Avro can all use it (Sanjay)
OPTIMIZATIONS OPTIMIZATIONS
HDFS-2477. Optimize computing the diff between a block report and the HDFS-2477. Optimize computing the diff between a block report and the
namenode state. (Tomasz Nykiel via hairong) namenode state. (Tomasz Nykiel via hairong)

View File

@ -142,6 +142,7 @@ import org.apache.hadoop.ipc.ProtocolSignature;
import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.Server; import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.util.MBeans; import org.apache.hadoop.metrics2.util.MBeans;
import org.apache.hadoop.net.DNS; import org.apache.hadoop.net.DNS;
@ -394,7 +395,7 @@ public class DataNode extends Configured
InterDatanodeProtocolServerSideTranslatorR23 InterDatanodeProtocolServerSideTranslatorR23
interDatanodeProtocolServerTranslator = interDatanodeProtocolServerTranslator =
new InterDatanodeProtocolServerSideTranslatorR23(this); new InterDatanodeProtocolServerSideTranslatorR23(this);
ipcServer.addProtocol(InterDatanodeWireProtocol.class, ipcServer.addProtocol(RpcKind.RPC_WRITABLE, InterDatanodeWireProtocol.class,
interDatanodeProtocolServerTranslator); interDatanodeProtocolServerTranslator);
// set service-level authorization security policy // set service-level authorization security policy


@ -40,6 +40,7 @@ import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration; import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.net.NetUtils;
/** /**
@ -211,7 +212,7 @@ public class BackupNode extends NameNode {
super(conf, nn); super(conf, nn);
JournalProtocolServerSideTranslatorR23 journalProtocolTranslator = JournalProtocolServerSideTranslatorR23 journalProtocolTranslator =
new JournalProtocolServerSideTranslatorR23(this); new JournalProtocolServerSideTranslatorR23(this);
this.clientRpcServer.addProtocol(JournalWireProtocol.class, this.clientRpcServer.addProtocol(RpcKind.RPC_WRITABLE, JournalWireProtocol.class,
journalProtocolTranslator); journalProtocolTranslator);
nnRpcAddress = nn.nnRpcAddress; nnRpcAddress = nn.nnRpcAddress;
} }


@ -90,6 +90,7 @@ import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.ProtocolSignature; import org.apache.hadoop.ipc.ProtocolSignature;
import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.Server; import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
import org.apache.hadoop.net.Node; import org.apache.hadoop.net.Node;
import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.Groups; import org.apache.hadoop.security.Groups;
@ -152,13 +153,16 @@ class NameNodeRpcServer implements NamenodeProtocols {
dnSocketAddr.getHostName(), dnSocketAddr.getPort(), dnSocketAddr.getHostName(), dnSocketAddr.getPort(),
serviceHandlerCount, serviceHandlerCount,
false, conf, namesystem.getDelegationTokenSecretManager()); false, conf, namesystem.getDelegationTokenSecretManager());
this.serviceRpcServer.addProtocol(DatanodeProtocol.class, this); this.serviceRpcServer.addProtocol(RpcKind.RPC_WRITABLE,
this.serviceRpcServer.addProtocol(NamenodeProtocol.class, this); DatanodeProtocol.class, this);
this.serviceRpcServer.addProtocol( this.serviceRpcServer.addProtocol(RpcKind.RPC_WRITABLE,
NamenodeProtocol.class, this);
this.serviceRpcServer.addProtocol(RpcKind.RPC_WRITABLE,
RefreshAuthorizationPolicyProtocol.class, this); RefreshAuthorizationPolicyProtocol.class, this);
this.serviceRpcServer.addProtocol( this.serviceRpcServer.addProtocol(RpcKind.RPC_WRITABLE,
RefreshUserMappingsProtocol.class, this); RefreshUserMappingsProtocol.class, this);
this.serviceRpcServer.addProtocol(GetUserMappingsProtocol.class, this); this.serviceRpcServer.addProtocol(RpcKind.RPC_WRITABLE,
GetUserMappingsProtocol.class, this);
this.serviceRpcServer.addProtocol(HAServiceProtocol.class, this); this.serviceRpcServer.addProtocol(HAServiceProtocol.class, this);
this.serviceRPCAddress = this.serviceRpcServer.getListenerAddress(); this.serviceRPCAddress = this.serviceRpcServer.getListenerAddress();
@ -174,12 +178,16 @@ class NameNodeRpcServer implements NamenodeProtocols {
clientProtocolServerTranslator, socAddr.getHostName(), clientProtocolServerTranslator, socAddr.getHostName(),
socAddr.getPort(), handlerCount, false, conf, socAddr.getPort(), handlerCount, false, conf,
namesystem.getDelegationTokenSecretManager()); namesystem.getDelegationTokenSecretManager());
this.clientRpcServer.addProtocol(DatanodeProtocol.class, this); this.clientRpcServer.addProtocol(RpcKind.RPC_WRITABLE,
this.clientRpcServer.addProtocol(NamenodeProtocol.class, this); DatanodeProtocol.class, this);
this.clientRpcServer.addProtocol( this.clientRpcServer.addProtocol(RpcKind.RPC_WRITABLE,
NamenodeProtocol.class, this);
this.clientRpcServer.addProtocol(RpcKind.RPC_WRITABLE,
RefreshAuthorizationPolicyProtocol.class, this); RefreshAuthorizationPolicyProtocol.class, this);
this.clientRpcServer.addProtocol(RefreshUserMappingsProtocol.class, this); this.clientRpcServer.addProtocol(RpcKind.RPC_WRITABLE,
this.clientRpcServer.addProtocol(GetUserMappingsProtocol.class, this); RefreshUserMappingsProtocol.class, this);
this.clientRpcServer.addProtocol(RpcKind.RPC_WRITABLE,
GetUserMappingsProtocol.class, this);
this.clientRpcServer.addProtocol(HAServiceProtocol.class, this); this.clientRpcServer.addProtocol(HAServiceProtocol.class, this);
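The repeated edit above is mechanical: every additional protocol registered on an RPC server now also names the RPC engine (RpcKind) that decodes calls for it. A minimal sketch of the new call shape, assuming an existing org.apache.hadoop.ipc.RPC.Server instance; the wrapper class and the protocolClass/protocolImpl parameters are illustrative placeholders, not part of the patch:

// Sketch only: registering a secondary Writable-based protocol with the
// updated addProtocol signature introduced by HADOOP-7862.
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;

public class ProtocolRegistrationSketch {
  public static void register(RPC.Server rpcServer,
      Class<?> protocolClass, Object protocolImpl) {
    // Old form: rpcServer.addProtocol(protocolClass, protocolImpl);
    // New form: the RpcKind tells the server which engine handles the calls.
    rpcServer.addProtocol(RpcKind.RPC_WRITABLE, protocolClass, protocolImpl);
  }
}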


@ -62,6 +62,7 @@ import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.Server; import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.net.NetUtils;
import org.mockito.internal.stubbing.answers.ThrowsException; import org.mockito.internal.stubbing.answers.ThrowsException;
import org.mockito.invocation.InvocationOnMock; import org.mockito.invocation.InvocationOnMock;
@ -97,7 +98,7 @@ public class TestDFSClientRetries extends TestCase {
} }
@Override @Override
public Writable call(String protocol, Writable param, long receiveTime) public Writable call(RpcKind rpcKind, String protocol, Writable param, long receiveTime)
throws IOException { throws IOException {
if (sleep) { if (sleep) {
// sleep a bit // sleep a bit


@ -28,6 +28,7 @@ import java.net.SocketTimeoutException;
import org.apache.hadoop.io.Writable; import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
import org.apache.hadoop.ipc.Server; import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.net.NetUtils;
@ -81,7 +82,7 @@ public class TestInterDatanodeProtocol {
} }
@Override @Override
public Writable call(String protocol, Writable param, long receiveTime) public Writable call(RpcKind rpcKind, String protocol, Writable param, long receiveTime)
throws IOException { throws IOException {
if (sleep) { if (sleep) {
// sleep a bit // sleep a bit


@ -35,11 +35,15 @@ Trunk (unreleased changes)
uri with no authority. (John George via jitendra) uri with no authority. (John George via jitendra)
MAPREDUCE-3169. Create a new MiniMRCluster equivalent which only provides MAPREDUCE-3169. Create a new MiniMRCluster equivalent which only provides
client APIs cross MR1 and MR2. (Ahmed via tucu) client APIs cross MR1 and MR2 (Ahmed via tucu)
MAPREDUCE-3415. improve MiniMRYarnCluster & DistributedShell JAR resolution. MAPREDUCE-3415. improve MiniMRYarnCluster & DistributedShell JAR resolution.
(tucu) (tucu)
HADOOP-7862 MR changes to work with HADOOP 7862:
Move the support for multiple protocols to lower layer so that Writable,
PB and Avro can all use it (Sanjay)
BUG FIXES BUG FIXES
MAPREDUCE-3412. Fix 'ant docs'. (amarrk) MAPREDUCE-3412. Fix 'ant docs'. (amarrk)
@ -68,6 +72,8 @@ Trunk (unreleased changes)
MAPREDUCE-3477. Hadoop site documentation cannot be built anymore. (jeagles via tucu) MAPREDUCE-3477. Hadoop site documentation cannot be built anymore. (jeagles via tucu)
MAPREDUCE-3500. MRJobConfig creates an LD_LIBRARY_PATH using the platform ARCH. (tucu)
Release 0.23.1 - Unreleased Release 0.23.1 - Unreleased
INCOMPATIBLE CHANGES INCOMPATIBLE CHANGES
@ -129,6 +135,12 @@ Release 0.23.1 - Unreleased
MAPREDUCE-3448. TestCombineOutputCollector javac unchecked warning on mocked MAPREDUCE-3448. TestCombineOutputCollector javac unchecked warning on mocked
generics (Jonathan Eagles via mahadev) generics (Jonathan Eagles via mahadev)
MAPREDUCE-3169 amendment. Deprecate MiniMRCluster. (Ahmed Radwan via
sseth)
MAPREDUCE-3369. Migrate MR1 tests to run on MR2 using the new interfaces
introduced in MAPREDUCE-3169. (Ahmed Radwan via tomwhite)
OPTIMIZATIONS OPTIMIZATIONS
BUG FIXES BUG FIXES
@ -211,6 +223,26 @@ Release 0.23.1 - Unreleased
MAPREDUCE-3452. fifoscheduler web ui page always shows 0% used for the queue. MAPREDUCE-3452. fifoscheduler web ui page always shows 0% used for the queue.
(Jonathan Eagles via mahadev) (Jonathan Eagles via mahadev)
MAPREDUCE-3443. JobClient and Job should function in the context of the
UGI which created them. (Mahadev Konar via sseth)
MAPREDUCE-3460. MR AM can hang if containers are allocated on a node
blacklisted by the AM. (Hitesh Shah and Robert Joseph Evans via sseth)
MAPREDUCE-3453. RM web ui application details page shows RM cluster about
information. (Jonathan Eagles via sseth)
MAPREDUCE-3479. JobClient#getJob cannot find local jobs. (tomwhite)
MAPREDUCE-3456. $HADOOP_PREFIX/bin/yarn should set defaults for
$HADOOP_*_HOME (Eric Payne via mahadev)
MAPREDUCE-3458. Fix findbugs warnings in hadoop-examples. (Devaraj K
via mahadev)
MAPREDUCE-3485. DISKS_FAILED -101 error code should be defined in same location as
ABORTED_CONTAINER_EXIT_STATUS. (Ravi Gummadi via mahadev)
Release 0.23.0 - 2011-11-01 Release 0.23.0 - 2011-11-01
INCOMPATIBLE CHANGES INCOMPATIBLE CHANGES


@ -87,7 +87,7 @@ public class RMContainerAllocator extends RMContainerRequestor
} }
/* /*
Vocabulory Used: Vocabulary Used:
pending -> requests which are NOT yet sent to RM pending -> requests which are NOT yet sent to RM
scheduled -> requests which are sent to RM but not yet assigned scheduled -> requests which are sent to RM but not yet assigned
assigned -> requests which are assigned to a container assigned -> requests which are assigned to a container
@ -565,6 +565,7 @@ public class RMContainerAllocator extends RMContainerRequestor
if (event.getEarlierAttemptFailed()) { if (event.getEarlierAttemptFailed()) {
earlierFailedMaps.add(event.getAttemptID()); earlierFailedMaps.add(event.getAttemptID());
request = new ContainerRequest(event, PRIORITY_FAST_FAIL_MAP); request = new ContainerRequest(event, PRIORITY_FAST_FAIL_MAP);
LOG.info("Added "+event.getAttemptID()+" to list of failed maps");
} else { } else {
for (String host : event.getHosts()) { for (String host : event.getHosts()) {
LinkedList<TaskAttemptId> list = mapsHostMapping.get(host); LinkedList<TaskAttemptId> list = mapsHostMapping.get(host);
@ -603,7 +604,9 @@ public class RMContainerAllocator extends RMContainerRequestor
containersAllocated += allocatedContainers.size(); containersAllocated += allocatedContainers.size();
while (it.hasNext()) { while (it.hasNext()) {
Container allocated = it.next(); Container allocated = it.next();
LOG.info("Assigning container " + allocated); LOG.info("Assigning container " + allocated.getId() +
" with priority " + allocated.getPriority() +
" to NM " + allocated.getNodeId());
// check if allocated container meets memory requirements // check if allocated container meets memory requirements
// and whether we have any scheduled tasks that need // and whether we have any scheduled tasks that need
@ -645,7 +648,8 @@ public class RMContainerAllocator extends RMContainerRequestor
// we need to request for a new container // we need to request for a new container
// and release the current one // and release the current one
LOG.info("Got allocated container on a blacklisted " LOG.info("Got allocated container on a blacklisted "
+ " host. Releasing container " + allocated); + " host "+allocated.getNodeId().getHost()
+". Releasing container " + allocated);
// find the request matching this allocated container // find the request matching this allocated container
// and replace it with a new one // and replace it with a new one
@ -727,10 +731,20 @@ public class RMContainerAllocator extends RMContainerRequestor
} }
private ContainerRequest getContainerReqToReplace(Container allocated) { private ContainerRequest getContainerReqToReplace(Container allocated) {
LOG.info("Finding containerReq for allocated container: " + allocated);
Priority priority = allocated.getPriority(); Priority priority = allocated.getPriority();
ContainerRequest toBeReplaced = null; ContainerRequest toBeReplaced = null;
if (PRIORITY_FAST_FAIL_MAP.equals(priority) if (PRIORITY_FAST_FAIL_MAP.equals(priority)) {
|| PRIORITY_MAP.equals(priority)) { LOG.info("Replacing FAST_FAIL_MAP container " + allocated.getId());
Iterator<TaskAttemptId> iter = earlierFailedMaps.iterator();
while (toBeReplaced == null && iter.hasNext()) {
toBeReplaced = maps.get(iter.next());
}
LOG.info("Found replacement: " + toBeReplaced);
return toBeReplaced;
}
else if (PRIORITY_MAP.equals(priority)) {
LOG.info("Replacing MAP container " + allocated.getId());
// allocated container was for a map // allocated container was for a map
String host = allocated.getNodeId().getHost(); String host = allocated.getNodeId().getHost();
LinkedList<TaskAttemptId> list = mapsHostMapping.get(host); LinkedList<TaskAttemptId> list = mapsHostMapping.get(host);
@ -749,6 +763,7 @@ public class RMContainerAllocator extends RMContainerRequestor
TaskAttemptId tId = reduces.keySet().iterator().next(); TaskAttemptId tId = reduces.keySet().iterator().next();
toBeReplaced = reduces.remove(tId); toBeReplaced = reduces.remove(tId);
} }
LOG.info("Found replacement: " + toBeReplaced);
return toBeReplaced; return toBeReplaced;
} }


@ -105,6 +105,13 @@ public abstract class RMContainerRequestor extends RMCommunicator {
this.priority = priority; this.priority = priority;
} }
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("AttemptId[").append(attemptID).append("]");
sb.append("Capability[").append(capability).append("]");
sb.append("Priority[").append(priority).append("]");
return sb.toString();
}
} }
@Override @Override


@ -580,6 +580,135 @@ public class TestRMContainerAllocator {
} }
} }
@Test
public void testBlackListedNodesWithSchedulingToThatNode() throws Exception {
LOG.info("Running testBlackListedNodesWithSchedulingToThatNode");
Configuration conf = new Configuration();
conf.setBoolean(MRJobConfig.MR_AM_JOB_NODE_BLACKLISTING_ENABLE, true);
conf.setInt(MRJobConfig.MAX_TASK_FAILURES_PER_TRACKER, 1);
MyResourceManager rm = new MyResourceManager(conf);
rm.start();
DrainDispatcher dispatcher = (DrainDispatcher) rm.getRMContext()
.getDispatcher();
// Submit the application
RMApp app = rm.submitApp(1024);
dispatcher.await();
MockNM amNodeManager = rm.registerNode("amNM:1234", 2048);
amNodeManager.nodeHeartbeat(true);
dispatcher.await();
ApplicationAttemptId appAttemptId = app.getCurrentAppAttempt()
.getAppAttemptId();
rm.sendAMLaunched(appAttemptId);
dispatcher.await();
JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
Job mockJob = mock(Job.class);
when(mockJob.getReport()).thenReturn(
MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING, 0,
0, 0, 0, 0, 0, 0, "jobfile", null));
MyContainerAllocator allocator = new MyContainerAllocator(rm, conf,
appAttemptId, mockJob);
// add resources to scheduler
MockNM nodeManager1 = rm.registerNode("h1:1234", 10240);
MockNM nodeManager3 = rm.registerNode("h3:1234", 10240);
dispatcher.await();
LOG.info("Requesting 1 Containers _1 on H1");
// create the container request
ContainerRequestEvent event1 = createReq(jobId, 1, 1024,
new String[] { "h1" });
allocator.sendRequest(event1);
LOG.info("RM Heartbeat (to send the container requests)");
// this tells the scheduler about the requests
// as nodes are not added, no allocations
List<TaskAttemptContainerAssignedEvent> assigned = allocator.schedule();
dispatcher.await();
Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
LOG.info("h1 Heartbeat (To actually schedule the containers)");
// update resources in scheduler
nodeManager1.nodeHeartbeat(true); // Node heartbeat
dispatcher.await();
LOG.info("RM Heartbeat (To process the scheduled containers)");
assigned = allocator.schedule();
dispatcher.await();
Assert.assertEquals("No of assignments must be 1", 1, assigned.size());
LOG.info("Failing container _1 on H1 (should blacklist the node)");
// Send events to blacklist nodes h1 and h2
ContainerFailedEvent f1 = createFailEvent(jobId, 1, "h1", false);
allocator.sendFailure(f1);
//At this stage, a request should be created for a fast fail map
//Create a FAST_FAIL request for a previously failed map.
ContainerRequestEvent event1f = createReq(jobId, 1, 1024,
new String[] { "h1" }, true, false);
allocator.sendRequest(event1f);
//Update the Scheduler with the new requests.
assigned = allocator.schedule();
dispatcher.await();
Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
// send another request with different resource and priority
ContainerRequestEvent event3 = createReq(jobId, 3, 1024,
new String[] { "h1", "h3" });
allocator.sendRequest(event3);
//Allocator is aware of prio:5 container, and prio:20 (h1+h3) container.
//RM is only aware of the prio:5 container
LOG.info("h1 Heartbeat (To actually schedule the containers)");
// update resources in scheduler
nodeManager1.nodeHeartbeat(true); // Node heartbeat
dispatcher.await();
LOG.info("RM Heartbeat (To process the scheduled containers)");
assigned = allocator.schedule();
dispatcher.await();
Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
//RMContainerAllocator gets assigned a p:5 on a blacklisted node.
//Send a release for the p:5 container + another request.
LOG.info("RM Heartbeat (To process the re-scheduled containers)");
assigned = allocator.schedule();
dispatcher.await();
Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
//Heartbeat from H3 to schedule on this host.
LOG.info("h3 Heartbeat (To re-schedule the containers)");
nodeManager3.nodeHeartbeat(true); // Node heartbeat
dispatcher.await();
LOG.info("RM Heartbeat (To process the re-scheduled containers for H3)");
assigned = allocator.schedule();
dispatcher.await();
// For debugging
for (TaskAttemptContainerAssignedEvent assig : assigned) {
LOG.info(assig.getTaskAttemptID() +
" assigned to " + assig.getContainer().getId() +
" with priority " + assig.getContainer().getPriority());
}
Assert.assertEquals("No of assignments must be 2", 2, assigned.size());
// validate that all containers are assigned to h3
for (TaskAttemptContainerAssignedEvent assig : assigned) {
Assert.assertEquals("Assigned container " + assig.getContainer().getId()
+ " host not correct", "h3", assig.getContainer().getNodeId().getHost());
}
}
private static class MyFifoScheduler extends FifoScheduler { private static class MyFifoScheduler extends FifoScheduler {
public MyFifoScheduler(RMContext rmContext) { public MyFifoScheduler(RMContext rmContext) {


@ -0,0 +1,62 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import static junit.framework.Assert.assertNotNull;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.junit.Test;
public class TestJobClientGetJob {
private static Path TEST_ROOT_DIR =
new Path(System.getProperty("test.build.data","/tmp"));
private Path createTempFile(String filename, String contents)
throws IOException {
Path path = new Path(TEST_ROOT_DIR, filename);
Configuration conf = new Configuration();
FSDataOutputStream os = FileSystem.getLocal(conf).create(path);
os.writeBytes(contents);
os.close();
return path;
}
@SuppressWarnings("deprecation")
@Test
public void testGetRunningJobFromJobClient() throws Exception {
JobConf conf = new JobConf();
conf.set("mapreduce.framework.name", "local");
FileInputFormat.addInputPath(conf, createTempFile("in", "hello"));
FileOutputFormat.setOutputPath(conf,
new Path(TEST_ROOT_DIR, getClass().getSimpleName()));
JobClient jc = new JobClient(conf);
RunningJob runningJob = jc.submitJob(conf);
assertNotNull("Running job", runningJob);
// Check that the running job can be retrieved by ID
RunningJob newRunningJob = jc.getJob(runningJob.getID());
assertNotNull("New running job", newRunningJob);
}
}


@ -21,6 +21,7 @@ import java.io.FileNotFoundException;
import java.io.IOException; import java.io.IOException;
import java.net.InetSocketAddress; import java.net.InetSocketAddress;
import java.net.URL; import java.net.URL;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Collection; import java.util.Collection;
import java.util.List; import java.util.List;
@ -42,6 +43,7 @@ import org.apache.hadoop.mapreduce.util.ConfigUtil;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text; import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenRenewer; import org.apache.hadoop.security.token.TokenRenewer;
import org.apache.hadoop.security.token.SecretManager.InvalidToken; import org.apache.hadoop.security.token.SecretManager.InvalidToken;
@ -421,6 +423,11 @@ public class JobClient extends CLI {
} }
Cluster cluster; Cluster cluster;
/**
* Ugi of the client. We store this ugi when the client is created and
* then make sure that the same ugi is used to run the various protocols.
*/
UserGroupInformation clientUgi;
/** /**
* Create a job client. * Create a job client.
@ -458,6 +465,7 @@ public class JobClient extends CLI {
public void init(JobConf conf) throws IOException { public void init(JobConf conf) throws IOException {
setConf(conf); setConf(conf);
cluster = new Cluster(conf); cluster = new Cluster(conf);
clientUgi = UserGroupInformation.getCurrentUser();
} }
@InterfaceAudience.Private @InterfaceAudience.Private
@ -488,7 +496,6 @@ public class JobClient extends CLI {
public boolean isManaged(Token<?> token) throws IOException { public boolean isManaged(Token<?> token) throws IOException {
return true; return true;
} }
} }
/** /**
@ -500,6 +507,7 @@ public class JobClient extends CLI {
public JobClient(InetSocketAddress jobTrackAddr, public JobClient(InetSocketAddress jobTrackAddr,
Configuration conf) throws IOException { Configuration conf) throws IOException {
cluster = new Cluster(jobTrackAddr, conf); cluster = new Cluster(jobTrackAddr, conf);
clientUgi = UserGroupInformation.getCurrentUser();
} }
/** /**
@ -562,21 +570,38 @@ public class JobClient extends CLI {
* @throws FileNotFoundException * @throws FileNotFoundException
* @throws IOException * @throws IOException
*/ */
public RunningJob submitJob(JobConf conf) throws FileNotFoundException, public RunningJob submitJob(final JobConf conf) throws FileNotFoundException,
IOException { IOException {
try { try {
conf.setBooleanIfUnset("mapred.mapper.new-api", false); conf.setBooleanIfUnset("mapred.mapper.new-api", false);
conf.setBooleanIfUnset("mapred.reducer.new-api", false); conf.setBooleanIfUnset("mapred.reducer.new-api", false);
Job job = Job.getInstance(conf); Job job = clientUgi.doAs(new PrivilegedExceptionAction<Job> () {
job.submit(); @Override
public Job run() throws IOException, ClassNotFoundException,
InterruptedException {
Job job = Job.getInstance(conf);
job.submit();
return job;
}
});
// update our Cluster instance with the one created by Job for submission
// (we can't pass our Cluster instance to Job, since Job wraps the config
// instance, and the two configs would then diverge)
cluster = job.getCluster();
return new NetworkedJob(job); return new NetworkedJob(job);
} catch (InterruptedException ie) { } catch (InterruptedException ie) {
throw new IOException("interrupted", ie); throw new IOException("interrupted", ie);
} catch (ClassNotFoundException cnfe) {
throw new IOException("class not found", cnfe);
} }
} }
private Job getJobUsingCluster(final JobID jobid) throws IOException,
InterruptedException {
return clientUgi.doAs(new PrivilegedExceptionAction<Job>() {
public Job run() throws IOException, InterruptedException {
return cluster.getJob(jobid);
}
});
}
/** /**
* Get an {@link RunningJob} object to track an ongoing job. Returns * Get an {@link RunningJob} object to track an ongoing job. Returns
* null if the id does not correspond to any known job. * null if the id does not correspond to any known job.
@ -586,9 +611,10 @@ public class JobClient extends CLI {
* <code>jobid</code> doesn't correspond to any known job. * <code>jobid</code> doesn't correspond to any known job.
* @throws IOException * @throws IOException
*/ */
public RunningJob getJob(JobID jobid) throws IOException { public RunningJob getJob(final JobID jobid) throws IOException {
try { try {
Job job = cluster.getJob(jobid);
Job job = getJobUsingCluster(jobid);
if (job != null) { if (job != null) {
JobStatus status = JobStatus.downgrade(job.getStatus()); JobStatus status = JobStatus.downgrade(job.getStatus());
if (status != null) { if (status != null) {
@ -621,9 +647,10 @@ public class JobClient extends CLI {
return getTaskReports(jobId, TaskType.MAP); return getTaskReports(jobId, TaskType.MAP);
} }
private TaskReport[] getTaskReports(JobID jobId, TaskType type) throws IOException { private TaskReport[] getTaskReports(final JobID jobId, TaskType type) throws
IOException {
try { try {
Job j = cluster.getJob(jobId); Job j = getJobUsingCluster(jobId);
if(j == null) { if(j == null) {
return EMPTY_TASK_REPORTS; return EMPTY_TASK_REPORTS;
} }
@ -688,10 +715,11 @@ public class JobClient extends CLI {
* @param state the state of the task * @param state the state of the task
* (pending/running/completed/failed/killed) * (pending/running/completed/failed/killed)
*/ */
public void displayTasks(JobID jobId, String type, String state) public void displayTasks(final JobID jobId, String type, String state)
throws IOException { throws IOException {
try { try {
super.displayTasks(cluster.getJob(jobId), type, state); Job job = getJobUsingCluster(jobId);
super.displayTasks(job, type, state);
} catch (InterruptedException ie) { } catch (InterruptedException ie) {
throw new IOException(ie); throw new IOException(ie);
} }
@ -706,15 +734,20 @@ public class JobClient extends CLI {
*/ */
public ClusterStatus getClusterStatus() throws IOException { public ClusterStatus getClusterStatus() throws IOException {
try { try {
ClusterMetrics metrics = cluster.getClusterStatus(); return clientUgi.doAs(new PrivilegedExceptionAction<ClusterStatus>() {
return new ClusterStatus(metrics.getTaskTrackerCount(), public ClusterStatus run() throws IOException, InterruptedException {
metrics.getBlackListedTaskTrackerCount(), cluster.getTaskTrackerExpiryInterval(), ClusterMetrics metrics = cluster.getClusterStatus();
metrics.getOccupiedMapSlots(), return new ClusterStatus(metrics.getTaskTrackerCount(),
metrics.getOccupiedReduceSlots(), metrics.getMapSlotCapacity(), metrics.getBlackListedTaskTrackerCount(), cluster.getTaskTrackerExpiryInterval(),
metrics.getReduceSlotCapacity(), metrics.getOccupiedMapSlots(),
cluster.getJobTrackerStatus(), metrics.getOccupiedReduceSlots(), metrics.getMapSlotCapacity(),
metrics.getDecommissionedTaskTrackerCount()); metrics.getReduceSlotCapacity(),
} catch (InterruptedException ie) { cluster.getJobTrackerStatus(),
metrics.getDecommissionedTaskTrackerCount());
}
});
}
catch (InterruptedException ie) {
throw new IOException(ie); throw new IOException(ie);
} }
} }
@ -750,13 +783,17 @@ public class JobClient extends CLI {
*/ */
public ClusterStatus getClusterStatus(boolean detailed) throws IOException { public ClusterStatus getClusterStatus(boolean detailed) throws IOException {
try { try {
ClusterMetrics metrics = cluster.getClusterStatus(); return clientUgi.doAs(new PrivilegedExceptionAction<ClusterStatus>() {
return new ClusterStatus(arrayToStringList(cluster.getActiveTaskTrackers()), public ClusterStatus run() throws IOException, InterruptedException {
arrayToBlackListInfo(cluster.getBlackListedTaskTrackers()), ClusterMetrics metrics = cluster.getClusterStatus();
cluster.getTaskTrackerExpiryInterval(), metrics.getOccupiedMapSlots(), return new ClusterStatus(arrayToStringList(cluster.getActiveTaskTrackers()),
metrics.getOccupiedReduceSlots(), metrics.getMapSlotCapacity(), arrayToBlackListInfo(cluster.getBlackListedTaskTrackers()),
metrics.getReduceSlotCapacity(), cluster.getTaskTrackerExpiryInterval(), metrics.getOccupiedMapSlots(),
cluster.getJobTrackerStatus()); metrics.getOccupiedReduceSlots(), metrics.getMapSlotCapacity(),
metrics.getReduceSlotCapacity(),
cluster.getJobTrackerStatus());
}
});
} catch (InterruptedException ie) { } catch (InterruptedException ie) {
throw new IOException(ie); throw new IOException(ie);
} }
@ -787,7 +824,14 @@ public class JobClient extends CLI {
*/ */
public JobStatus[] getAllJobs() throws IOException { public JobStatus[] getAllJobs() throws IOException {
try { try {
org.apache.hadoop.mapreduce.JobStatus[] jobs = cluster.getAllJobStatuses(); org.apache.hadoop.mapreduce.JobStatus[] jobs =
clientUgi.doAs(new PrivilegedExceptionAction<
org.apache.hadoop.mapreduce.JobStatus[]> () {
public org.apache.hadoop.mapreduce.JobStatus[] run()
throws IOException, InterruptedException {
return cluster.getAllJobStatuses();
}
});
JobStatus[] stats = new JobStatus[jobs.length]; JobStatus[] stats = new JobStatus[jobs.length];
for (int i = 0; i < jobs.length; i++) { for (int i = 0; i < jobs.length; i++) {
stats[i] = JobStatus.downgrade(jobs[i]); stats[i] = JobStatus.downgrade(jobs[i]);
@ -909,7 +953,12 @@ public class JobClient extends CLI {
*/ */
public int getDefaultMaps() throws IOException { public int getDefaultMaps() throws IOException {
try { try {
return cluster.getClusterStatus().getMapSlotCapacity(); return clientUgi.doAs(new PrivilegedExceptionAction<Integer>() {
@Override
public Integer run() throws IOException, InterruptedException {
return cluster.getClusterStatus().getMapSlotCapacity();
}
});
} catch (InterruptedException ie) { } catch (InterruptedException ie) {
throw new IOException(ie); throw new IOException(ie);
} }
@ -923,7 +972,12 @@ public class JobClient extends CLI {
*/ */
public int getDefaultReduces() throws IOException { public int getDefaultReduces() throws IOException {
try { try {
return cluster.getClusterStatus().getReduceSlotCapacity(); return clientUgi.doAs(new PrivilegedExceptionAction<Integer>() {
@Override
public Integer run() throws IOException, InterruptedException {
return cluster.getClusterStatus().getReduceSlotCapacity();
}
});
} catch (InterruptedException ie) { } catch (InterruptedException ie) {
throw new IOException(ie); throw new IOException(ie);
} }
@ -936,8 +990,13 @@ public class JobClient extends CLI {
*/ */
public Path getSystemDir() { public Path getSystemDir() {
try { try {
return cluster.getSystemDir(); return clientUgi.doAs(new PrivilegedExceptionAction<Path>() {
} catch (IOException ioe) { @Override
public Path run() throws IOException, InterruptedException {
return cluster.getSystemDir();
}
});
} catch (IOException ioe) {
return null; return null;
} catch (InterruptedException ie) { } catch (InterruptedException ie) {
return null; return null;
@ -962,7 +1021,11 @@ public class JobClient extends CLI {
*/ */
public JobQueueInfo[] getRootQueues() throws IOException { public JobQueueInfo[] getRootQueues() throws IOException {
try { try {
return getJobQueueInfoArray(cluster.getRootQueues()); return clientUgi.doAs(new PrivilegedExceptionAction<JobQueueInfo[]>() {
public JobQueueInfo[] run() throws IOException, InterruptedException {
return getJobQueueInfoArray(cluster.getRootQueues());
}
});
} catch (InterruptedException ie) { } catch (InterruptedException ie) {
throw new IOException(ie); throw new IOException(ie);
} }
@ -976,9 +1039,13 @@ public class JobClient extends CLI {
* @return the array of immediate children JobQueueInfo objects * @return the array of immediate children JobQueueInfo objects
* @throws IOException * @throws IOException
*/ */
public JobQueueInfo[] getChildQueues(String queueName) throws IOException { public JobQueueInfo[] getChildQueues(final String queueName) throws IOException {
try { try {
return getJobQueueInfoArray(cluster.getChildQueues(queueName)); return clientUgi.doAs(new PrivilegedExceptionAction<JobQueueInfo[]>() {
public JobQueueInfo[] run() throws IOException, InterruptedException {
return getJobQueueInfoArray(cluster.getChildQueues(queueName));
}
});
} catch (InterruptedException ie) { } catch (InterruptedException ie) {
throw new IOException(ie); throw new IOException(ie);
} }
@ -993,7 +1060,11 @@ public class JobClient extends CLI {
*/ */
public JobQueueInfo[] getQueues() throws IOException { public JobQueueInfo[] getQueues() throws IOException {
try { try {
return getJobQueueInfoArray(cluster.getQueues()); return clientUgi.doAs(new PrivilegedExceptionAction<JobQueueInfo[]>() {
public JobQueueInfo[] run() throws IOException, InterruptedException {
return getJobQueueInfoArray(cluster.getQueues());
}
});
} catch (InterruptedException ie) { } catch (InterruptedException ie) {
throw new IOException(ie); throw new IOException(ie);
} }
@ -1007,9 +1078,14 @@ public class JobClient extends CLI {
* @throws IOException * @throws IOException
*/ */
public JobStatus[] getJobsFromQueue(String queueName) throws IOException { public JobStatus[] getJobsFromQueue(final String queueName) throws IOException {
try { try {
QueueInfo queue = cluster.getQueue(queueName); QueueInfo queue = clientUgi.doAs(new PrivilegedExceptionAction<QueueInfo>() {
@Override
public QueueInfo run() throws IOException, InterruptedException {
return cluster.getQueue(queueName);
}
});
if (queue == null) { if (queue == null) {
return null; return null;
} }
@ -1032,9 +1108,14 @@ public class JobClient extends CLI {
* @return Queue information associated to particular queue. * @return Queue information associated to particular queue.
* @throws IOException * @throws IOException
*/ */
public JobQueueInfo getQueueInfo(String queueName) throws IOException { public JobQueueInfo getQueueInfo(final String queueName) throws IOException {
try { try {
QueueInfo queueInfo = cluster.getQueue(queueName); QueueInfo queueInfo = clientUgi.doAs(new
PrivilegedExceptionAction<QueueInfo>() {
public QueueInfo run() throws IOException, InterruptedException {
return cluster.getQueue(queueName);
}
});
if (queueInfo != null) { if (queueInfo != null) {
return new JobQueueInfo(queueInfo); return new JobQueueInfo(queueInfo);
} }
@ -1052,7 +1133,14 @@ public class JobClient extends CLI {
public QueueAclsInfo[] getQueueAclsForCurrentUser() throws IOException { public QueueAclsInfo[] getQueueAclsForCurrentUser() throws IOException {
try { try {
org.apache.hadoop.mapreduce.QueueAclsInfo[] acls = org.apache.hadoop.mapreduce.QueueAclsInfo[] acls =
cluster.getQueueAclsForCurrentUser(); clientUgi.doAs(new
PrivilegedExceptionAction
<org.apache.hadoop.mapreduce.QueueAclsInfo[]>() {
public org.apache.hadoop.mapreduce.QueueAclsInfo[] run()
throws IOException, InterruptedException {
return cluster.getQueueAclsForCurrentUser();
}
});
QueueAclsInfo[] ret = new QueueAclsInfo[acls.length]; QueueAclsInfo[] ret = new QueueAclsInfo[acls.length];
for (int i = 0 ; i < acls.length; i++ ) { for (int i = 0 ; i < acls.length; i++ ) {
ret[i] = QueueAclsInfo.downgrade(acls[i]); ret[i] = QueueAclsInfo.downgrade(acls[i]);
@ -1070,8 +1158,14 @@ public class JobClient extends CLI {
* @throws IOException * @throws IOException
*/ */
public Token<DelegationTokenIdentifier> public Token<DelegationTokenIdentifier>
getDelegationToken(Text renewer) throws IOException, InterruptedException { getDelegationToken(final Text renewer) throws IOException, InterruptedException {
return cluster.getDelegationToken(renewer); return clientUgi.doAs(new
PrivilegedExceptionAction<Token<DelegationTokenIdentifier>>() {
public Token<DelegationTokenIdentifier> run() throws IOException,
InterruptedException {
return cluster.getDelegationToken(renewer);
}
});
} }
/** /**
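All of the JobClient changes above follow the pattern described by the new clientUgi javadoc: capture the UGI that created the client and run every later cluster call inside clientUgi.doAs(...). A condensed sketch of that pattern; the class, the fetchStatus method, and the lookupStatus stand-in are illustrative, not part of the patch:

// Sketch only: run remote calls as the UGI that constructed the client,
// mirroring the clientUgi.doAs(...) wrappers added throughout JobClient.
import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.security.UserGroupInformation;

class UgiScopedClientSketch {
  private final UserGroupInformation clientUgi;

  UgiScopedClientSketch() throws IOException {
    // Capture the user that constructed the client...
    clientUgi = UserGroupInformation.getCurrentUser();
  }

  String fetchStatus(final String jobId) throws IOException {
    try {
      // ...and run every later call as that same user, even if the
      // surrounding code is executing under a different UGI by then.
      return clientUgi.doAs(new PrivilegedExceptionAction<String>() {
        @Override
        public String run() throws IOException {
          return lookupStatus(jobId); // stand-in for a real cluster/RPC call
        }
      });
    } catch (InterruptedException ie) {
      throw new IOException(ie);
    }
  }

  private String lookupStatus(String jobId) {
    return "RUNNING"; // placeholder, no real RPC here
  }
}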


@ -30,6 +30,7 @@ import java.net.URL;
import java.net.URLConnection; import java.net.URLConnection;
import java.net.URI; import java.net.URI;
import java.security.PrivilegedExceptionAction; import java.security.PrivilegedExceptionAction;
import java.util.List;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
@ -315,7 +316,12 @@ public class Job extends JobContextImpl implements JobContext {
* @throws IOException * @throws IOException
*/ */
synchronized void updateStatus() throws IOException, InterruptedException { synchronized void updateStatus() throws IOException, InterruptedException {
this.status = cluster.getClient().getJobStatus(status.getJobID()); this.status = ugi.doAs(new PrivilegedExceptionAction<JobStatus>() {
@Override
public JobStatus run() throws IOException, InterruptedException {
return cluster.getClient().getJobStatus(status.getJobID());
}
});
if (this.status == null) { if (this.status == null) {
throw new IOException("Job status not available "); throw new IOException("Job status not available ");
} }
@ -431,6 +437,11 @@ public class Job extends JobContextImpl implements JobContext {
return status.isRetired(); return status.isRetired();
} }
@Private
public Cluster getCluster() {
return cluster;
}
/** Only for mocks in unit tests. */ /** Only for mocks in unit tests. */
@Private @Private
private void setCluster(Cluster cluster) { private void setCluster(Cluster cluster) {
@ -476,8 +487,16 @@ public class Job extends JobContextImpl implements JobContext {
InterruptedException { InterruptedException {
int failCount = 1; int failCount = 1;
TaskCompletionEvent lastEvent = null; TaskCompletionEvent lastEvent = null;
for (TaskCompletionEvent event : cluster.getClient().getTaskCompletionEvents( TaskCompletionEvent[] events = ugi.doAs(new
status.getJobID(), 0, 10)) { PrivilegedExceptionAction<TaskCompletionEvent[]>() {
@Override
public TaskCompletionEvent[] run() throws IOException,
InterruptedException {
return cluster.getClient().getTaskCompletionEvents(
status.getJobID(), 0, 10);
}
});
for (TaskCompletionEvent event : events) {
if (event.getStatus().equals(TaskCompletionEvent.Status.FAILED)) { if (event.getStatus().equals(TaskCompletionEvent.Status.FAILED)) {
failCount++; failCount++;
lastEvent = event; lastEvent = event;
@ -500,7 +519,12 @@ public class Job extends JobContextImpl implements JobContext {
public TaskReport[] getTaskReports(TaskType type) public TaskReport[] getTaskReports(TaskType type)
throws IOException, InterruptedException { throws IOException, InterruptedException {
ensureState(JobState.RUNNING); ensureState(JobState.RUNNING);
return cluster.getClient().getTaskReports(getJobID(), type); final TaskType tmpType = type;
return ugi.doAs(new PrivilegedExceptionAction<TaskReport[]>() {
public TaskReport[] run() throws IOException, InterruptedException {
return cluster.getClient().getTaskReports(getJobID(), tmpType);
}
});
} }
/** /**
@ -603,7 +627,14 @@ public class Job extends JobContextImpl implements JobContext {
org.apache.hadoop.mapred.JobPriority.valueOf(priority.name())); org.apache.hadoop.mapred.JobPriority.valueOf(priority.name()));
} else { } else {
ensureState(JobState.RUNNING); ensureState(JobState.RUNNING);
cluster.getClient().setJobPriority(getJobID(), priority.toString()); final JobPriority tmpPriority = priority;
ugi.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws IOException, InterruptedException {
cluster.getClient().setJobPriority(getJobID(), tmpPriority.toString());
return null;
}
});
} }
} }
@ -615,12 +646,17 @@ public class Job extends JobContextImpl implements JobContext {
* @return an array of {@link TaskCompletionEvent}s * @return an array of {@link TaskCompletionEvent}s
* @throws IOException * @throws IOException
*/ */
public TaskCompletionEvent[] getTaskCompletionEvents(int startFrom, public TaskCompletionEvent[] getTaskCompletionEvents(final int startFrom,
int numEvents) throws IOException, InterruptedException { final int numEvents) throws IOException, InterruptedException {
ensureState(JobState.RUNNING); ensureState(JobState.RUNNING);
return cluster.getClient().getTaskCompletionEvents(getJobID(), return ugi.doAs(new PrivilegedExceptionAction<TaskCompletionEvent[]>() {
startFrom, numEvents); @Override
} public TaskCompletionEvent[] run() throws IOException, InterruptedException {
return cluster.getClient().getTaskCompletionEvents(getJobID(),
startFrom, numEvents);
}
});
}
/** /**
* Kill indicated task attempt. * Kill indicated task attempt.
@ -628,10 +664,14 @@ public class Job extends JobContextImpl implements JobContext {
* @param taskId the id of the task to be terminated. * @param taskId the id of the task to be terminated.
* @throws IOException * @throws IOException
*/ */
public boolean killTask(TaskAttemptID taskId) public boolean killTask(final TaskAttemptID taskId)
throws IOException, InterruptedException { throws IOException, InterruptedException {
ensureState(JobState.RUNNING); ensureState(JobState.RUNNING);
return cluster.getClient().killTask(taskId, false); return ugi.doAs(new PrivilegedExceptionAction<Boolean>() {
public Boolean run() throws IOException, InterruptedException {
return cluster.getClient().killTask(taskId, false);
}
});
} }
/** /**
@ -640,10 +680,15 @@ public class Job extends JobContextImpl implements JobContext {
* @param taskId the id of the task to be terminated. * @param taskId the id of the task to be terminated.
* @throws IOException * @throws IOException
*/ */
public boolean failTask(TaskAttemptID taskId) public boolean failTask(final TaskAttemptID taskId)
throws IOException, InterruptedException { throws IOException, InterruptedException {
ensureState(JobState.RUNNING); ensureState(JobState.RUNNING);
return cluster.getClient().killTask(taskId, true); return ugi.doAs(new PrivilegedExceptionAction<Boolean>() {
@Override
public Boolean run() throws IOException, InterruptedException {
return cluster.getClient().killTask(taskId, true);
}
});
} }
/** /**
@ -656,7 +701,12 @@ public class Job extends JobContextImpl implements JobContext {
public Counters getCounters() public Counters getCounters()
throws IOException, InterruptedException { throws IOException, InterruptedException {
ensureState(JobState.RUNNING); ensureState(JobState.RUNNING);
return cluster.getClient().getJobCounters(getJobID()); return ugi.doAs(new PrivilegedExceptionAction<Counters>() {
@Override
public Counters run() throws IOException, InterruptedException {
return cluster.getClient().getJobCounters(getJobID());
}
});
} }
/** /**
@ -665,10 +715,15 @@ public class Job extends JobContextImpl implements JobContext {
* @return the list of diagnostic messages for the task * @return the list of diagnostic messages for the task
* @throws IOException * @throws IOException
*/ */
public String[] getTaskDiagnostics(TaskAttemptID taskid) public String[] getTaskDiagnostics(final TaskAttemptID taskid)
throws IOException, InterruptedException { throws IOException, InterruptedException {
ensureState(JobState.RUNNING); ensureState(JobState.RUNNING);
return cluster.getClient().getTaskDiagnostics(taskid); return ugi.doAs(new PrivilegedExceptionAction<String[]>() {
@Override
public String[] run() throws IOException, InterruptedException {
return cluster.getClient().getTaskDiagnostics(taskid);
}
});
} }
/** /**


@ -17,7 +17,6 @@
*/ */
package org.apache.hadoop.mapreduce; package org.apache.hadoop.mapreduce;
import org.apache.hadoop.util.PlatformName;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.classification.InterfaceStability;
@ -439,7 +438,7 @@ public interface MRJobConfig {
"mapreduce.admin.user.env"; "mapreduce.admin.user.env";
public static final String DEFAULT_MAPRED_ADMIN_USER_ENV = public static final String DEFAULT_MAPRED_ADMIN_USER_ENV =
"LD_LIBRARY_PATH=$HADOOP_COMMON_HOME/lib/native/" + PlatformName.getPlatformName(); "LD_LIBRARY_PATH=$HADOOP_COMMON_HOME/lib/native";
public static final String WORKDIR = "work"; public static final String WORKDIR = "work";


@ -22,7 +22,7 @@ import java.io.IOException;
import java.lang.reflect.InvocationTargetException; import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method; import java.lang.reflect.Method;
import java.net.InetSocketAddress; import java.net.InetSocketAddress;
import java.security.PrivilegedAction; import java.security.PrivilegedExceptionAction;
import java.util.EnumSet; import java.util.EnumSet;
import java.util.HashMap; import java.util.HashMap;
import java.util.List; import java.util.List;
@ -156,6 +156,8 @@ public class ClientServiceDelegate {
application = rm.getApplicationReport(appId); application = rm.getApplicationReport(appId);
continue; continue;
} }
UserGroupInformation newUgi = UserGroupInformation.createRemoteUser(
UserGroupInformation.getCurrentUser().getUserName());
serviceAddr = application.getHost() + ":" + application.getRpcPort(); serviceAddr = application.getHost() + ":" + application.getRpcPort();
if (UserGroupInformation.isSecurityEnabled()) { if (UserGroupInformation.isSecurityEnabled()) {
String clientTokenEncoded = application.getClientToken(); String clientTokenEncoded = application.getClientToken();
@ -167,11 +169,17 @@ public class ClientServiceDelegate {
.getHost(), application.getRpcPort()); .getHost(), application.getRpcPort());
clientToken.setService(new Text(addr.getAddress().getHostAddress() clientToken.setService(new Text(addr.getAddress().getHostAddress()
+ ":" + addr.getPort())); + ":" + addr.getPort()));
UserGroupInformation.getCurrentUser().addToken(clientToken); newUgi.addToken(clientToken);
} }
LOG.info("The url to track the job: " + application.getTrackingUrl()); LOG.info("The url to track the job: " + application.getTrackingUrl());
LOG.debug("Connecting to " + serviceAddr); LOG.debug("Connecting to " + serviceAddr);
realProxy = instantiateAMProxy(serviceAddr); final String tempStr = serviceAddr;
realProxy = newUgi.doAs(new PrivilegedExceptionAction<MRClientProtocol>() {
@Override
public MRClientProtocol run() throws IOException {
return instantiateAMProxy(tempStr);
}
});
return realProxy; return realProxy;
} catch (IOException e) { } catch (IOException e) {
//possibly the AM has crashed //possibly the AM has crashed
@ -243,17 +251,11 @@ public class ClientServiceDelegate {
MRClientProtocol instantiateAMProxy(final String serviceAddr) MRClientProtocol instantiateAMProxy(final String serviceAddr)
throws IOException { throws IOException {
UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
LOG.trace("Connecting to ApplicationMaster at: " + serviceAddr); LOG.trace("Connecting to ApplicationMaster at: " + serviceAddr);
MRClientProtocol proxy = currentUser YarnRPC rpc = YarnRPC.create(conf);
.doAs(new PrivilegedAction<MRClientProtocol>() { MRClientProtocol proxy =
@Override (MRClientProtocol) rpc.getProxy(MRClientProtocol.class,
public MRClientProtocol run() {
YarnRPC rpc = YarnRPC.create(conf);
return (MRClientProtocol) rpc.getProxy(MRClientProtocol.class,
NetUtils.createSocketAddr(serviceAddr), conf); NetUtils.createSocketAddr(serviceAddr), conf);
}
});
LOG.trace("Connected to ApplicationMaster at: " + serviceAddr); LOG.trace("Connected to ApplicationMaster at: " + serviceAddr);
return proxy; return proxy;
} }


@ -59,19 +59,6 @@ public class TestNoDefaultsJobConf extends HadoopTestCase {
JobConf conf = new JobConf(false); JobConf conf = new JobConf(false);
//seeding JT and NN info into non-defaults (empty jobconf)
String jobTrackerAddress = createJobConf().get(JTConfig.JT_IPC_ADDRESS);
if (jobTrackerAddress == null) {
jobTrackerAddress = "local";
}
conf.set(JTConfig.JT_IPC_ADDRESS, jobTrackerAddress);
if (jobTrackerAddress == "local") {
conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.LOCAL_FRAMEWORK_NAME);
}
else {
conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.CLASSIC_FRAMEWORK_NAME);
}
conf.set("fs.default.name", createJobConf().get("fs.default.name")); conf.set("fs.default.name", createJobConf().get("fs.default.name"));
conf.setJobName("mr"); conf.setJobName("mr");


@ -0,0 +1,214 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import junit.framework.TestCase;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.MRConfig;
import java.io.File;
import java.io.IOException;
/**
* Abstract Test case class to run MR in local or cluster mode and in local FS
* or DFS.
*
* The Hadoop instance is started and stopped on each test method.
*
* If using DFS the filesystem is reformatted at each start (test method).
*
* Job Configurations should be created using a configuration returned by the
* 'createJobConf()' method.
*/
public abstract class HadoopTestCase extends TestCase {
public static final int LOCAL_MR = 1;
public static final int CLUSTER_MR = 2;
public static final int LOCAL_FS = 4;
public static final int DFS_FS = 8;
private boolean localMR;
private boolean localFS;
private int taskTrackers;
private int dataNodes;
/**
* Creates a testcase for local or cluster MR using DFS.
*
* The DFS will be formatted regardless of whether one existed before in the
* given location.
*
* @param mrMode indicates if the MR should be local (LOCAL_MR) or cluster
* (CLUSTER_MR)
* @param fsMode indicates if the FS should be local (LOCAL_FS) or DFS (DFS_FS)
*
* @param taskTrackers number of task trackers to start when using cluster
*
* @param dataNodes number of data nodes to start when using DFS
*
* @throws IOException thrown if the base directory cannot be set.
*/
public HadoopTestCase(int mrMode, int fsMode, int taskTrackers, int dataNodes)
throws IOException {
if (mrMode != LOCAL_MR && mrMode != CLUSTER_MR) {
throw new IllegalArgumentException(
"Invalid MapRed mode, must be LOCAL_MR or CLUSTER_MR");
}
if (fsMode != LOCAL_FS && fsMode != DFS_FS) {
throw new IllegalArgumentException(
"Invalid FileSystem mode, must be LOCAL_FS or DFS_FS");
}
if (taskTrackers < 1) {
throw new IllegalArgumentException(
"Invalid taskTrackers value, must be greater than 0");
}
if (dataNodes < 1) {
throw new IllegalArgumentException(
"Invalid dataNodes value, must be greater than 0");
}
localMR = (mrMode == LOCAL_MR);
localFS = (fsMode == LOCAL_FS);
/*
JobConf conf = new JobConf();
fsRoot = conf.get("hadoop.tmp.dir");
if (fsRoot == null) {
throw new IllegalArgumentException(
"hadoop.tmp.dir is not defined");
}
fsRoot = fsRoot.replace(' ', '+') + "/fs";
File file = new File(fsRoot);
if (!file.exists()) {
if (!file.mkdirs()) {
throw new RuntimeException("Could not create FS base path: " + file);
}
}
*/
this.taskTrackers = taskTrackers;
this.dataNodes = dataNodes;
}
/**
* Indicates if the MR is running in local or cluster mode.
*
* @return returns TRUE if the MR is running locally, FALSE if running in
* cluster mode.
*/
public boolean isLocalMR() {
return localMR;
}
/**
* Indicates if the filesystem is local or DFS.
*
* @return returns TRUE if the filesystem is local, FALSE if it is DFS.
*/
public boolean isLocalFS() {
return localFS;
}
private MiniDFSCluster dfsCluster = null;
private MiniMRCluster mrCluster = null;
private FileSystem fileSystem = null;
/**
* Creates Hadoop instance based on constructor configuration before
* a test case is run.
*
* @throws Exception
*/
protected void setUp() throws Exception {
super.setUp();
if (localFS) {
fileSystem = FileSystem.getLocal(new JobConf());
}
else {
dfsCluster = new MiniDFSCluster(new JobConf(), dataNodes, true, null);
fileSystem = dfsCluster.getFileSystem();
}
if (localMR) {
}
else {
//noinspection deprecation
mrCluster = new MiniMRCluster(taskTrackers, fileSystem.getUri().toString(), 1);
}
}
/**
* Destroys Hadoop instance based on constructor configuration after
* a test case is run.
*
* @throws Exception
*/
protected void tearDown() throws Exception {
try {
if (mrCluster != null) {
mrCluster.shutdown();
}
}
catch (Exception ex) {
System.out.println(ex);
}
try {
if (dfsCluster != null) {
dfsCluster.shutdown();
}
}
catch (Exception ex) {
System.out.println(ex);
}
super.tearDown();
}
/**
* Returns the Filesystem in use.
*
* TestCases should use this Filesystem as it
* is properly configured with the workingDir for relative PATHs.
*
* @return the filesystem used by Hadoop.
*/
protected FileSystem getFileSystem() {
return fileSystem;
}
/**
* Returns a job configuration preconfigured to run against the Hadoop
* managed by the testcase.
* @return configuration that works on the testcase Hadoop instance
*/
protected JobConf createJobConf() {
if (localMR) {
JobConf conf = new JobConf();
conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.LOCAL_FRAMEWORK_NAME);
return conf;
}
else {
return mrCluster.createJobConf();
}
}
}
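As the HadoopTestCase javadoc above notes, subclasses choose local or cluster MR plus local FS or DFS in the constructor and must build job configurations through createJobConf(). A minimal local-mode usage sketch; the test class itself is illustrative, not part of the patch:

// Sketch only: a HadoopTestCase subclass running MR locally against the
// local FS and obtaining its job configuration from createJobConf().
package org.apache.hadoop.mapred;

import java.io.IOException;
import org.apache.hadoop.mapreduce.MRConfig;

public class ExampleLocalHadoopTest extends HadoopTestCase {

  public ExampleLocalHadoopTest() throws IOException {
    // local MR, local FS; the tracker/datanode counts are required but
    // no mini clusters are started in local mode
    super(HadoopTestCase.LOCAL_MR, HadoopTestCase.LOCAL_FS, 1, 1);
  }

  public void testLocalJobConf() throws Exception {
    JobConf conf = createJobConf();   // the documented entry point
    assertTrue(isLocalMR());
    assertTrue(isLocalFS());
    assertNotNull(getFileSystem());
    assertEquals(MRConfig.LOCAL_FRAMEWORK_NAME,
        conf.get(MRConfig.FRAMEWORK_NAME));
  }
}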


@ -34,7 +34,11 @@ import org.apache.hadoop.security.UserGroupInformation;
* Due to major differences between MR1 and MR2, a number of methods are either * Due to major differences between MR1 and MR2, a number of methods are either
* unimplemented/unsupported or were re-implemented to provide wrappers around * unimplemented/unsupported or were re-implemented to provide wrappers around
* MR2 functionality. * MR2 functionality.
*
* @deprecated Use {@link org.apache.hadoop.mapred.MiniMRClientClusterFactory}
* instead
*/ */
@Deprecated
public class MiniMRCluster { public class MiniMRCluster {
private static final Log LOG = LogFactory.getLog(MiniMRCluster.class); private static final Log LOG = LogFactory.getLog(MiniMRCluster.class);
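The deprecation note above points at MiniMRClientClusterFactory from MAPREDUCE-3169. A rough replacement sketch follows, assuming the factory exposes a create(caller, numNodeManagers, conf) method and that MiniMRClientCluster provides getConfig() and stop(); treat these names as assumptions rather than a verified API:

// Sketch only: the MiniMRClientCluster route suggested by the deprecation
// note. Method and class names here are assumed from MAPREDUCE-3169.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.MiniMRClientCluster;
import org.apache.hadoop.mapred.MiniMRClientClusterFactory;

public class MiniClusterSketch {
  public static void runAgainstMiniCluster() throws Exception {
    Configuration conf = new Configuration();
    // The factory is meant to hide whether MR1 or MR2 backs the cluster.
    MiniMRClientCluster cluster =
        MiniMRClientClusterFactory.create(MiniClusterSketch.class, 1, conf);
    try {
      Configuration clusterConf = cluster.getConfig();
      // ... submit test jobs using clusterConf ...
    } finally {
      cluster.stop();
    }
  }
}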


@ -0,0 +1,224 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import org.mortbay.jetty.Server;
import org.mortbay.jetty.servlet.Context;
import org.mortbay.jetty.servlet.ServletHolder;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.mapreduce.MapReduceTestUtil;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.servlet.http.HttpServlet;
import javax.servlet.ServletException;
import java.io.IOException;
import java.io.DataOutputStream;
/**
* Base class to test Job end notification in local and cluster mode.
*
* Starts up Hadoop in local or cluster mode (by extending the
* HadoopTestCase class) and starts a servlet engine that hosts
* a servlet which receives the notification of job finalization.
*
* The notification servlet returns an HTTP 400 the first time it is called
* and an HTTP 200 the second time, thus exercising the retry logic.
*
* In both cases the local file system is used (this is irrelevant to
* the functionality under test).
*
*
*/
public abstract class NotificationTestCase extends HadoopTestCase {
protected NotificationTestCase(int mode) throws IOException {
super(mode, HadoopTestCase.LOCAL_FS, 1, 1);
}
private int port;
private String contextPath = "/notification";
private String servletPath = "/mapred";
private Server webServer;
private void startHttpServer() throws Exception {
// Create the webServer
if (webServer != null) {
webServer.stop();
webServer = null;
}
webServer = new Server(0);
Context context = new Context(webServer, contextPath);
// create servlet handler
context.addServlet(new ServletHolder(new NotificationServlet()),
servletPath);
// Start webServer
webServer.start();
port = webServer.getConnectors()[0].getLocalPort();
}
private void stopHttpServer() throws Exception {
if (webServer != null) {
webServer.stop();
webServer.destroy();
webServer = null;
}
}
public static class NotificationServlet extends HttpServlet {
public static int counter = 0;
private static final long serialVersionUID = 1L;
protected void doGet(HttpServletRequest req, HttpServletResponse res)
throws ServletException, IOException {
switch (counter) {
case 0:
{
assertTrue(req.getQueryString().contains("SUCCEEDED"));
}
break;
case 2:
{
assertTrue(req.getQueryString().contains("KILLED"));
}
break;
case 4:
{
assertTrue(req.getQueryString().contains("FAILED"));
}
break;
}
if (counter % 2 == 0) {
res.sendError(HttpServletResponse.SC_BAD_REQUEST, "forcing error");
}
else {
res.setStatus(HttpServletResponse.SC_OK);
}
counter++;
}
}
private String getNotificationUrlTemplate() {
return "http://localhost:" + port + contextPath + servletPath +
"?jobId=$jobId&amp;jobStatus=$jobStatus";
}
protected JobConf createJobConf() {
JobConf conf = super.createJobConf();
conf.setJobEndNotificationURI(getNotificationUrlTemplate());
conf.setInt(JobContext.MR_JOB_END_RETRY_ATTEMPTS, 3);
conf.setInt(JobContext.MR_JOB_END_RETRY_INTERVAL, 200);
return conf;
}
protected void setUp() throws Exception {
super.setUp();
startHttpServer();
}
protected void tearDown() throws Exception {
stopHttpServer();
super.tearDown();
}
public void testMR() throws Exception {
System.out.println(launchWordCount(this.createJobConf(),
"a b c d e f g h", 1, 1));
Thread.sleep(2000);
assertEquals(2, NotificationServlet.counter);
Path inDir = new Path("notificationjob/input");
Path outDir = new Path("notificationjob/output");
// Hack for local FS that does not have the concept of a 'mounting point'
if (isLocalFS()) {
String localPathRoot = System.getProperty("test.build.data","/tmp")
.toString().replace(' ', '+');
inDir = new Path(localPathRoot, inDir);
outDir = new Path(localPathRoot, outDir);
}
// run a job with KILLED status
System.out.println(UtilsForTests.runJobKill(this.createJobConf(), inDir,
outDir).getID());
Thread.sleep(2000);
assertEquals(4, NotificationServlet.counter);
// run a job with FAILED status
System.out.println(UtilsForTests.runJobFail(this.createJobConf(), inDir,
outDir).getID());
Thread.sleep(2000);
assertEquals(6, NotificationServlet.counter);
}
private String launchWordCount(JobConf conf,
String input,
int numMaps,
int numReduces) throws IOException {
Path inDir = new Path("testing/wc/input");
Path outDir = new Path("testing/wc/output");
// Hack for local FS that does not have the concept of a 'mounting point'
if (isLocalFS()) {
String localPathRoot = System.getProperty("test.build.data","/tmp")
.toString().replace(' ', '+');
inDir = new Path(localPathRoot, inDir);
outDir = new Path(localPathRoot, outDir);
}
FileSystem fs = FileSystem.get(conf);
fs.delete(outDir, true);
if (!fs.mkdirs(inDir)) {
throw new IOException("Mkdirs failed to create " + inDir.toString());
}
{
DataOutputStream file = fs.create(new Path(inDir, "part-0"));
file.writeBytes(input);
file.close();
}
conf.setJobName("wordcount");
conf.setInputFormat(TextInputFormat.class);
// the keys are words (strings)
conf.setOutputKeyClass(Text.class);
// the values are counts (ints)
conf.setOutputValueClass(IntWritable.class);
conf.setMapperClass(WordCount.MapClass.class);
conf.setCombinerClass(WordCount.Reduce.class);
conf.setReducerClass(WordCount.Reduce.class);
FileInputFormat.setInputPaths(conf, inDir);
FileOutputFormat.setOutputPath(conf, outDir);
conf.setNumMapTasks(numMaps);
conf.setNumReduceTasks(numReduces);
JobClient.runJob(conf);
return MapReduceTestUtil.readOutput(outDir, conf);
}
}
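
A hedged sketch of a concrete subclass; the class name is hypothetical and it assumes the HadoopTestCase.LOCAL_MR / CLUSTER_MR mode constants:

package org.apache.hadoop.mapred;
import java.io.IOException;
// Hypothetical concrete test: inherits testMR() and runs it against the local runner.
public class TestLocalJobEndNotification extends NotificationTestCase {
  public TestLocalJobEndNotification() throws IOException {
    super(HadoopTestCase.LOCAL_MR); // pass CLUSTER_MR to exercise a MiniMRCluster instead
  }
}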


@ -0,0 +1,584 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.*;
import java.net.URI;
import java.util.*;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.mapred.lib.HashPartitioner;
import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.apache.hadoop.fs.*;
/**
* A set of utilities to validate the <b>sort</b> of the map-reduce framework.
* This utility program has 2 main parts:
* 1. Checking the records' statistics
* a) Validates the no. of bytes and records in sort's input & output.
* b) Validates the xor of the md5's of each key/value pair.
* c) Ensures same key/value is present in both input and output.
* 2. Check individual records to ensure each record is present in both
* the input and the output of the sort (expensive on large data-sets).
*
* To run: bin/hadoop jar build/hadoop-examples.jar sortvalidate
* [-m <i>maps</i>] [-r <i>reduces</i>] [-deep]
* -sortInput <i>sort-in-dir</i> -sortOutput <i>sort-out-dir</i>
*/
public class SortValidator extends Configured implements Tool {
static private final IntWritable sortInput = new IntWritable(1);
static private final IntWritable sortOutput = new IntWritable(2);
static public String SORT_REDUCES =
"mapreduce.sortvalidator.sort.reduce.tasks";
static public String MAPS_PER_HOST = "mapreduce.sortvalidator.mapsperhost";
static public String REDUCES_PER_HOST =
"mapreduce.sortvalidator.reducesperhost";
static void printUsage() {
System.err.println("sortvalidate [-m <maps>] [-r <reduces>] [-deep] " +
"-sortInput <sort-input-dir> -sortOutput <sort-output-dir>");
System.exit(1);
}
static private IntWritable deduceInputFile(JobConf job) {
Path[] inputPaths = FileInputFormat.getInputPaths(job);
Path inputFile = new Path(job.get(JobContext.MAP_INPUT_FILE));
// value == one for sort-input; value == two for sort-output
return (inputFile.getParent().equals(inputPaths[0])) ?
sortInput : sortOutput;
}
static private byte[] pair(BytesWritable a, BytesWritable b) {
byte[] pairData = new byte[a.getLength()+ b.getLength()];
System.arraycopy(a.getBytes(), 0, pairData, 0, a.getLength());
System.arraycopy(b.getBytes(), 0, pairData, a.getLength(), b.getLength());
return pairData;
}
private static final PathFilter sortPathsFilter = new PathFilter() {
public boolean accept(Path path) {
return (path.getName().startsWith("part-"));
}
};
/**
* A simple map-reduce job which checks consistency of the
* MapReduce framework's sort by checking:
* a) Records are sorted correctly
* b) Keys are partitioned correctly
* c) The input and output have same no. of bytes and records.
* d) The input and output have the correct 'checksum' by xor'ing
* the md5 of each record.
*
*/
public static class RecordStatsChecker {
/**
* Generic way to get <b>raw</b> data from a {@link Writable}.
*/
static class Raw {
/**
* Get raw data bytes from a {@link Writable}
* @param writable {@link Writable} object from whom to get the raw data
* @return raw data of the writable
*/
public byte[] getRawBytes(Writable writable) {
return writable.toString().getBytes();
}
/**
* Get number of raw data bytes of the {@link Writable}
* @param writable {@link Writable} object from whom to get the raw data
* length
* @return number of raw data bytes
*/
public int getRawBytesLength(Writable writable) {
return writable.toString().getBytes().length;
}
}
/**
* Specialization of {@link Raw} for {@link BytesWritable}.
*/
static class RawBytesWritable extends Raw {
public byte[] getRawBytes(Writable bw) {
return ((BytesWritable)bw).getBytes();
}
public int getRawBytesLength(Writable bw) {
return ((BytesWritable)bw).getLength();
}
}
/**
* Specialization of {@link Raw} for {@link Text}.
*/
static class RawText extends Raw {
public byte[] getRawBytes(Writable text) {
return ((Text)text).getBytes();
}
public int getRawBytesLength(Writable text) {
return ((Text)text).getLength();
}
}
private static Raw createRaw(Class rawClass) {
if (rawClass == Text.class) {
return new RawText();
} else if (rawClass == BytesWritable.class) {
System.err.println("Returning " + RawBytesWritable.class);
return new RawBytesWritable();
}
return new Raw();
}
public static class RecordStatsWritable implements Writable {
private long bytes = 0;
private long records = 0;
private int checksum = 0;
public RecordStatsWritable() {}
public RecordStatsWritable(long bytes, long records, int checksum) {
this.bytes = bytes;
this.records = records;
this.checksum = checksum;
}
public void write(DataOutput out) throws IOException {
WritableUtils.writeVLong(out, bytes);
WritableUtils.writeVLong(out, records);
WritableUtils.writeVInt(out, checksum);
}
public void readFields(DataInput in) throws IOException {
bytes = WritableUtils.readVLong(in);
records = WritableUtils.readVLong(in);
checksum = WritableUtils.readVInt(in);
}
public long getBytes() { return bytes; }
public long getRecords() { return records; }
public int getChecksum() { return checksum; }
}
public static class Map extends MapReduceBase
implements Mapper<WritableComparable, Writable,
IntWritable, RecordStatsWritable> {
private IntWritable key = null;
private WritableComparable prevKey = null;
private Class<? extends WritableComparable> keyClass;
private Partitioner<WritableComparable, Writable> partitioner = null;
private int partition = -1;
private int noSortReducers = -1;
private long recordId = -1;
private Raw rawKey;
private Raw rawValue;
public void configure(JobConf job) {
// 'key' == sortInput for sort-input; key == sortOutput for sort-output
key = deduceInputFile(job);
if (key == sortOutput) {
partitioner = new HashPartitioner<WritableComparable, Writable>();
// Figure the 'current' partition and no. of reduces of the 'sort'
try {
URI inputURI = new URI(job.get(JobContext.MAP_INPUT_FILE));
String inputFile = inputURI.getPath();
// part file is of the form part-r-xxxxx
partition = Integer.valueOf(inputFile.substring(
inputFile.lastIndexOf("part") + 7)).intValue();
noSortReducers = job.getInt(SORT_REDUCES, -1);
} catch (Exception e) {
System.err.println("Caught: " + e);
System.exit(-1);
}
}
}
@SuppressWarnings("unchecked")
public void map(WritableComparable key, Writable value,
OutputCollector<IntWritable, RecordStatsWritable> output,
Reporter reporter) throws IOException {
// Set up rawKey and rawValue on the first call to 'map'
if (recordId == -1) {
rawKey = createRaw(key.getClass());
rawValue = createRaw(value.getClass());
}
++recordId;
if (this.key == sortOutput) {
// Check if keys are 'sorted' if this
// record is from sort's output
if (prevKey == null) {
prevKey = key;
keyClass = prevKey.getClass();
} else {
// Sanity check
if (keyClass != key.getClass()) {
throw new IOException("Type mismatch in key: expected " +
keyClass.getName() + ", received " +
key.getClass().getName());
}
// Check if they were sorted correctly
if (prevKey.compareTo(key) > 0) {
throw new IOException("The 'map-reduce' framework wrongly" +
" classified (" + prevKey + ") > (" +
key + ") "+ "for record# " + recordId);
}
prevKey = key;
}
// Check if the sorted output is 'partitioned' right
int keyPartition =
partitioner.getPartition(key, value, noSortReducers);
if (partition != keyPartition) {
throw new IOException("Partitions do not match for record# " +
recordId + " ! - '" + partition + "' v/s '" +
keyPartition + "'");
}
}
// Construct the record-stats and output (this.key, record-stats)
byte[] keyBytes = rawKey.getRawBytes(key);
int keyBytesLen = rawKey.getRawBytesLength(key);
byte[] valueBytes = rawValue.getRawBytes(value);
int valueBytesLen = rawValue.getRawBytesLength(value);
int keyValueChecksum =
(WritableComparator.hashBytes(keyBytes, keyBytesLen) ^
WritableComparator.hashBytes(valueBytes, valueBytesLen));
output.collect(this.key,
new RecordStatsWritable((keyBytesLen+valueBytesLen),
1, keyValueChecksum)
);
}
}
public static class Reduce extends MapReduceBase
implements Reducer<IntWritable, RecordStatsWritable,
IntWritable, RecordStatsWritable> {
public void reduce(IntWritable key, Iterator<RecordStatsWritable> values,
OutputCollector<IntWritable,
RecordStatsWritable> output,
Reporter reporter) throws IOException {
long bytes = 0;
long records = 0;
int xor = 0;
while (values.hasNext()) {
RecordStatsWritable stats = values.next();
bytes += stats.getBytes();
records += stats.getRecords();
xor ^= stats.getChecksum();
}
output.collect(key, new RecordStatsWritable(bytes, records, xor));
}
}
public static class NonSplitableSequenceFileInputFormat
extends SequenceFileInputFormat {
protected boolean isSplitable(FileSystem fs, Path filename) {
return false;
}
}
static void checkRecords(Configuration defaults,
Path sortInput, Path sortOutput) throws IOException {
FileSystem inputfs = sortInput.getFileSystem(defaults);
FileSystem outputfs = sortOutput.getFileSystem(defaults);
FileSystem defaultfs = FileSystem.get(defaults);
JobConf jobConf = new JobConf(defaults, RecordStatsChecker.class);
jobConf.setJobName("sortvalidate-recordstats-checker");
int noSortReduceTasks =
outputfs.listStatus(sortOutput, sortPathsFilter).length;
jobConf.setInt(SORT_REDUCES, noSortReduceTasks);
int noSortInputpaths = inputfs.listStatus(sortInput).length;
jobConf.setInputFormat(NonSplitableSequenceFileInputFormat.class);
jobConf.setOutputFormat(SequenceFileOutputFormat.class);
jobConf.setOutputKeyClass(IntWritable.class);
jobConf.setOutputValueClass(RecordStatsChecker.RecordStatsWritable.class);
jobConf.setMapperClass(Map.class);
jobConf.setCombinerClass(Reduce.class);
jobConf.setReducerClass(Reduce.class);
jobConf.setNumMapTasks(noSortReduceTasks);
jobConf.setNumReduceTasks(1);
FileInputFormat.setInputPaths(jobConf, sortInput);
FileInputFormat.addInputPath(jobConf, sortOutput);
Path outputPath = new Path("/tmp/sortvalidate/recordstatschecker");
if (defaultfs.exists(outputPath)) {
defaultfs.delete(outputPath, true);
}
FileOutputFormat.setOutputPath(jobConf, outputPath);
// Uncomment to run locally in a single process
//job_conf.set(JTConfig.JT, "local");
Path[] inputPaths = FileInputFormat.getInputPaths(jobConf);
System.out.println("\nSortValidator.RecordStatsChecker: Validate sort " +
"from " + inputPaths[0] + " (" +
noSortInputpaths + " files), " +
inputPaths[1] + " (" +
noSortReduceTasks +
" files) into " +
FileOutputFormat.getOutputPath(jobConf) +
" with 1 reducer.");
Date startTime = new Date();
System.out.println("Job started: " + startTime);
JobClient.runJob(jobConf);
Date end_time = new Date();
System.out.println("Job ended: " + end_time);
System.out.println("The job took " +
(end_time.getTime() - startTime.getTime()) /1000 + " seconds.");
// Check to ensure that the statistics of the
// framework's sort-input and sort-output match
SequenceFile.Reader stats = new SequenceFile.Reader(defaultfs,
new Path(outputPath, "part-00000"), defaults);
IntWritable k1 = new IntWritable();
IntWritable k2 = new IntWritable();
RecordStatsWritable v1 = new RecordStatsWritable();
RecordStatsWritable v2 = new RecordStatsWritable();
if (!stats.next(k1, v1)) {
throw new IOException("Failed to read record #1 from reduce's output");
}
if (!stats.next(k2, v2)) {
throw new IOException("Failed to read record #2 from reduce's output");
}
if ((v1.getBytes() != v2.getBytes()) || (v1.getRecords() != v2.getRecords()) ||
v1.getChecksum() != v2.getChecksum()) {
throw new IOException("(" +
v1.getBytes() + ", " + v1.getRecords() + ", " + v1.getChecksum() + ") v/s (" +
v2.getBytes() + ", " + v2.getRecords() + ", " + v2.getChecksum() + ")");
}
}
}
/**
* A simple map-reduce task to check if the input and the output
* of the framework's sort are consistent by ensuring each record
* is present in both the input and the output.
*
*/
public static class RecordChecker {
public static class Map extends MapReduceBase
implements Mapper<BytesWritable, BytesWritable,
BytesWritable, IntWritable> {
private IntWritable value = null;
public void configure(JobConf job) {
// value == one for sort-input; value == two for sort-output
value = deduceInputFile(job);
}
public void map(BytesWritable key,
BytesWritable value,
OutputCollector<BytesWritable, IntWritable> output,
Reporter reporter) throws IOException {
// newKey = (key, value)
BytesWritable keyValue = new BytesWritable(pair(key, value));
// output (newKey, value)
output.collect(keyValue, this.value);
}
}
public static class Reduce extends MapReduceBase
implements Reducer<BytesWritable, IntWritable,
BytesWritable, IntWritable> {
public void reduce(BytesWritable key, Iterator<IntWritable> values,
OutputCollector<BytesWritable, IntWritable> output,
Reporter reporter) throws IOException {
int ones = 0;
int twos = 0;
while (values.hasNext()) {
IntWritable count = values.next();
if (count.equals(sortInput)) {
++ones;
} else if (count.equals(sortOutput)) {
++twos;
} else {
throw new IOException("Invalid 'value' of " + count.get() +
" for (key,value): " + key.toString());
}
}
// Check to ensure there are equal no. of ones and twos
if (ones != twos) {
throw new IOException("Illegal ('one', 'two'): (" + ones + ", " + twos +
") for (key, value): " + key.toString());
}
}
}
static void checkRecords(Configuration defaults, int noMaps, int noReduces,
Path sortInput, Path sortOutput) throws IOException {
JobConf jobConf = new JobConf(defaults, RecordChecker.class);
jobConf.setJobName("sortvalidate-record-checker");
jobConf.setInputFormat(SequenceFileInputFormat.class);
jobConf.setOutputFormat(SequenceFileOutputFormat.class);
jobConf.setOutputKeyClass(BytesWritable.class);
jobConf.setOutputValueClass(IntWritable.class);
jobConf.setMapperClass(Map.class);
jobConf.setReducerClass(Reduce.class);
JobClient client = new JobClient(jobConf);
ClusterStatus cluster = client.getClusterStatus();
if (noMaps == -1) {
noMaps = cluster.getTaskTrackers() *
jobConf.getInt(MAPS_PER_HOST, 10);
}
if (noReduces == -1) {
noReduces = (int) (cluster.getMaxReduceTasks() * 0.9);
String sortReduces = jobConf.get(REDUCES_PER_HOST);
if (sortReduces != null) {
noReduces = cluster.getTaskTrackers() *
Integer.parseInt(sortReduces);
}
}
jobConf.setNumMapTasks(noMaps);
jobConf.setNumReduceTasks(noReduces);
FileInputFormat.setInputPaths(jobConf, sortInput);
FileInputFormat.addInputPath(jobConf, sortOutput);
Path outputPath = new Path("/tmp/sortvalidate/recordchecker");
FileSystem fs = FileSystem.get(defaults);
if (fs.exists(outputPath)) {
fs.delete(outputPath, true);
}
FileOutputFormat.setOutputPath(jobConf, outputPath);
// Uncomment to run locally in a single process
//job_conf.set(JTConfig.JT, "local");
Path[] inputPaths = FileInputFormat.getInputPaths(jobConf);
System.out.println("\nSortValidator.RecordChecker: Running on " +
cluster.getTaskTrackers() +
" nodes to validate sort from " +
inputPaths[0] + ", " +
inputPaths[1] + " into " +
FileOutputFormat.getOutputPath(jobConf) +
" with " + noReduces + " reduces.");
Date startTime = new Date();
System.out.println("Job started: " + startTime);
JobClient.runJob(jobConf);
Date end_time = new Date();
System.out.println("Job ended: " + end_time);
System.out.println("The job took " +
(end_time.getTime() - startTime.getTime()) /1000 + " seconds.");
}
}
/**
* The main driver for sort-validator program.
* Invoke this method to submit the map/reduce job.
* @throws IOException When there are communication problems with the
* job tracker.
*/
public int run(String[] args) throws Exception {
Configuration defaults = getConf();
int noMaps = -1, noReduces = -1;
Path sortInput = null, sortOutput = null;
boolean deepTest = false;
for(int i=0; i < args.length; ++i) {
try {
if ("-m".equals(args[i])) {
noMaps = Integer.parseInt(args[++i]);
} else if ("-r".equals(args[i])) {
noReduces = Integer.parseInt(args[++i]);
} else if ("-sortInput".equals(args[i])){
sortInput = new Path(args[++i]);
} else if ("-sortOutput".equals(args[i])){
sortOutput = new Path(args[++i]);
} else if ("-deep".equals(args[i])) {
deepTest = true;
} else {
printUsage();
return -1;
}
} catch (NumberFormatException except) {
System.err.println("ERROR: Integer expected instead of " + args[i]);
printUsage();
return -1;
} catch (ArrayIndexOutOfBoundsException except) {
System.err.println("ERROR: Required parameter missing from " +
args[i-1]);
printUsage();
return -1;
}
}
// Sanity check
if (sortInput == null || sortOutput == null) {
printUsage();
return -2;
}
// Check if the records are consistent and sorted correctly
RecordStatsChecker.checkRecords(defaults, sortInput, sortOutput);
// Check if the same records are present in sort's inputs & outputs
if (deepTest) {
RecordChecker.checkRecords(defaults, noMaps, noReduces, sortInput,
sortOutput);
}
System.out.println("\nSUCCESS! Validated the MapReduce framework's 'sort'" +
" successfully.");
return 0;
}
public static void main(String[] args) throws Exception {
int res = ToolRunner.run(new Configuration(), new SortValidator(), args);
System.exit(res);
}
}
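
For illustration, the usage string above can equally be driven from code; this sketch assumes placeholder input/output paths and a hypothetical driver class name:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.SortValidator;
import org.apache.hadoop.util.ToolRunner;
// Hypothetical driver: programmatic equivalent of
//   bin/hadoop jar build/hadoop-examples.jar sortvalidate -deep \
//       -sortInput /sort/input -sortOutput /sort/output
public class SortValidatorDriver {
  public static void main(String[] args) throws Exception {
    int rc = ToolRunner.run(new Configuration(), new SortValidator(),
        new String[] { "-m", "4", "-r", "2", "-deep",
                       "-sortInput", "/sort/input", "-sortOutput", "/sort/output" });
    System.exit(rc);
  }
}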


@ -48,7 +48,6 @@ public class TestSpecialCharactersInOutputPath extends TestCase {
 private static final String OUTPUT_FILENAME = "result[0]";
 public static boolean launchJob(URI fileSys,
-String jobTracker,
 JobConf conf,
 int numMaps,
 int numReduces) throws IOException {
@ -68,8 +67,6 @@ public class TestSpecialCharactersInOutputPath extends TestCase {
 // use WordCount example
 FileSystem.setDefaultUri(conf, fileSys);
-conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.CLASSIC_FRAMEWORK_NAME);
-conf.set(JTConfig.JT_IPC_ADDRESS, jobTracker);
 conf.setJobName("foo");
 conf.setInputFormat(TextInputFormat.class);
@ -113,11 +110,9 @@ public class TestSpecialCharactersInOutputPath extends TestCase {
 fileSys = dfs.getFileSystem();
 namenode = fileSys.getUri().toString();
 mr = new MiniMRCluster(taskTrackers, namenode, 2);
-final String jobTrackerName = "localhost:" + mr.getJobTrackerPort();
 JobConf jobConf = new JobConf();
 boolean result;
-result = launchJob(fileSys.getUri(), jobTrackerName, jobConf,
-3, 1);
+result = launchJob(fileSys.getUri(), jobConf, 3, 1);
 assertTrue(result);
 } finally {


@ -0,0 +1,787 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.File;
import java.io.FileInputStream;
import java.io.DataOutputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.text.DecimalFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Enumeration;
import java.util.Iterator;
import java.util.List;
import java.util.Properties;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.mapred.SortValidator.RecordStatsChecker.NonSplitableSequenceFileInputFormat;
import org.apache.hadoop.mapred.lib.IdentityMapper;
import org.apache.hadoop.mapred.lib.IdentityReducer;
import org.apache.hadoop.mapreduce.Cluster.JobTrackerStatus;
import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
import org.apache.hadoop.util.StringUtils;
import org.apache.commons.logging.Log;
/**
* Utilities used in unit test.
*
*/
public class UtilsForTests {
static final Log LOG = LogFactory.getLog(UtilsForTests.class);
final static long KB = 1024L * 1;
final static long MB = 1024L * KB;
final static long GB = 1024L * MB;
final static long TB = 1024L * GB;
final static long PB = 1024L * TB;
final static Object waitLock = new Object();
static DecimalFormat dfm = new DecimalFormat("####.000");
static DecimalFormat ifm = new DecimalFormat("###,###,###,###,###");
public static String dfmt(double d) {
return dfm.format(d);
}
public static String ifmt(double d) {
return ifm.format(d);
}
public static String formatBytes(long numBytes) {
StringBuffer buf = new StringBuffer();
boolean bDetails = true;
double num = numBytes;
if (numBytes < KB) {
buf.append(numBytes + " B");
bDetails = false;
} else if (numBytes < MB) {
buf.append(dfmt(num / KB) + " KB");
} else if (numBytes < GB) {
buf.append(dfmt(num / MB) + " MB");
} else if (numBytes < TB) {
buf.append(dfmt(num / GB) + " GB");
} else if (numBytes < PB) {
buf.append(dfmt(num / TB) + " TB");
} else {
buf.append(dfmt(num / PB) + " PB");
}
if (bDetails) {
buf.append(" (" + ifmt(numBytes) + " bytes)");
}
return buf.toString();
}
public static String formatBytes2(long numBytes) {
StringBuffer buf = new StringBuffer();
long u = 0;
if (numBytes >= TB) {
u = numBytes / TB;
numBytes -= u * TB;
buf.append(u + " TB ");
}
if (numBytes >= GB) {
u = numBytes / GB;
numBytes -= u * GB;
buf.append(u + " GB ");
}
if (numBytes >= MB) {
u = numBytes / MB;
numBytes -= u * MB;
buf.append(u + " MB ");
}
if (numBytes >= KB) {
u = numBytes / KB;
numBytes -= u * KB;
buf.append(u + " KB ");
}
buf.append(u + " B"); //even if zero
return buf.toString();
}
static final String regexpSpecials = "[]()?*+|.!^-\\~@";
public static String regexpEscape(String plain) {
StringBuffer buf = new StringBuffer();
char[] ch = plain.toCharArray();
int csup = ch.length;
for (int c = 0; c < csup; c++) {
if (regexpSpecials.indexOf(ch[c]) != -1) {
buf.append("\\");
}
buf.append(ch[c]);
}
return buf.toString();
}
public static String safeGetCanonicalPath(File f) {
try {
String s = f.getCanonicalPath();
return (s == null) ? f.toString() : s;
} catch (IOException io) {
return f.toString();
}
}
public static String slurp(File f) throws IOException {
int len = (int) f.length();
byte[] buf = new byte[len];
FileInputStream in = new FileInputStream(f);
String contents = null;
try {
in.read(buf, 0, len);
contents = new String(buf, "UTF-8");
} finally {
in.close();
}
return contents;
}
public static String slurpHadoop(Path p, FileSystem fs) throws IOException {
int len = (int) fs.getFileStatus(p).getLen();
byte[] buf = new byte[len];
InputStream in = fs.open(p);
String contents = null;
try {
in.read(buf, 0, len);
contents = new String(buf, "UTF-8");
} finally {
in.close();
}
return contents;
}
public static String rjustify(String s, int width) {
if (s == null) s = "null";
if (width > s.length()) {
s = getSpace(width - s.length()) + s;
}
return s;
}
public static String ljustify(String s, int width) {
if (s == null) s = "null";
if (width > s.length()) {
s = s + getSpace(width - s.length());
}
return s;
}
static char[] space;
static {
space = new char[300];
Arrays.fill(space, '\u0020');
}
public static String getSpace(int len) {
if (len > space.length) {
space = new char[Math.max(len, 2 * space.length)];
Arrays.fill(space, '\u0020');
}
return new String(space, 0, len);
}
/**
* Gets job status from the jobtracker given the jobclient and the job id
*/
static JobStatus getJobStatus(JobClient jc, JobID id) throws IOException {
JobStatus[] statuses = jc.getAllJobs();
for (JobStatus jobStatus : statuses) {
if (jobStatus.getJobID().equals(id)) {
return jobStatus;
}
}
return null;
}
/**
* A utility that waits for a specified amount of time
*/
public static void waitFor(long duration) {
try {
synchronized (waitLock) {
waitLock.wait(duration);
}
} catch (InterruptedException ie) {}
}
/**
* Wait for the jobtracker to be RUNNING.
*/
static void waitForJobTracker(JobClient jobClient) {
while (true) {
try {
ClusterStatus status = jobClient.getClusterStatus();
while (status.getJobTrackerStatus() != JobTrackerStatus.RUNNING) {
waitFor(100);
status = jobClient.getClusterStatus();
}
break; // means that the jt is ready
} catch (IOException ioe) {}
}
}
/**
* Waits until all the jobs at the jobtracker complete.
*/
static void waitTillDone(JobClient jobClient) throws IOException {
// Wait for the last job to complete
while (true) {
boolean shouldWait = false;
for (JobStatus jobStatuses : jobClient.getAllJobs()) {
if (jobStatuses.getRunState() != JobStatus.SUCCEEDED
&& jobStatuses.getRunState() != JobStatus.FAILED
&& jobStatuses.getRunState() != JobStatus.KILLED) {
shouldWait = true;
break;
}
}
if (shouldWait) {
waitFor(100);
} else {
break;
}
}
}
/**
* Configure a waiting job
*/
static void configureWaitingJobConf(JobConf jobConf, Path inDir,
Path outputPath, int numMaps, int numRed,
String jobName, String mapSignalFilename,
String redSignalFilename)
throws IOException {
jobConf.setJobName(jobName);
jobConf.setInputFormat(NonSplitableSequenceFileInputFormat.class);
jobConf.setOutputFormat(SequenceFileOutputFormat.class);
FileInputFormat.setInputPaths(jobConf, inDir);
FileOutputFormat.setOutputPath(jobConf, outputPath);
jobConf.setMapperClass(UtilsForTests.HalfWaitingMapper.class);
jobConf.setReducerClass(IdentityReducer.class);
jobConf.setOutputKeyClass(BytesWritable.class);
jobConf.setOutputValueClass(BytesWritable.class);
jobConf.setInputFormat(RandomInputFormat.class);
jobConf.setNumMapTasks(numMaps);
jobConf.setNumReduceTasks(numRed);
jobConf.setJar("build/test/mapred/testjar/testjob.jar");
jobConf.set(getTaskSignalParameter(true), mapSignalFilename);
jobConf.set(getTaskSignalParameter(false), redSignalFilename);
}
/**
* Commonly used map and reduce classes
*/
/**
* Map is a Mapper that just waits for a file to be created on the dfs. The
* file creation is the signal to the mappers, so the job acts as a waiting job.
*/
static class WaitingMapper
extends MapReduceBase
implements Mapper<WritableComparable, Writable,
WritableComparable, Writable> {
FileSystem fs = null;
Path signal;
int id = 0;
int totalMaps = 0;
/**
* Checks if the map task needs to wait. By default all the maps will wait.
* This method needs to be overridden to make a custom waiting mapper.
*/
public boolean shouldWait(int id) {
return true;
}
/**
* Returns a signal file on which the map task should wait. By default all
* the maps wait on a single file passed as test.mapred.map.waiting.target.
* This method needs to be overridden to make a custom waiting mapper
*/
public Path getSignalFile(int id) {
return signal;
}
/** The waiting function. The map exits once it gets a signal. Here the
* signal is the file existence.
*/
public void map(WritableComparable key, Writable val,
OutputCollector<WritableComparable, Writable> output,
Reporter reporter)
throws IOException {
if (shouldWait(id)) {
if (fs != null) {
while (!fs.exists(getSignalFile(id))) {
try {
reporter.progress();
synchronized (this) {
this.wait(1000); // wait for 1 sec
}
} catch (InterruptedException ie) {
System.out.println("Interrupted while the map was waiting for "
+ "the signal.");
break;
}
}
} else {
throw new IOException("Could not get the DFS!!");
}
}
}
public void configure(JobConf conf) {
try {
String taskId = conf.get(JobContext.TASK_ATTEMPT_ID);
id = Integer.parseInt(taskId.split("_")[4]);
totalMaps = Integer.parseInt(conf.get(JobContext.NUM_MAPS));
fs = FileSystem.get(conf);
signal = new Path(conf.get(getTaskSignalParameter(true)));
} catch (IOException ioe) {
System.out.println("Got an exception while obtaining the filesystem");
}
}
}
/** Only the later half of the maps wait for the signal while the rest
* complete immediately.
*/
static class HalfWaitingMapper extends WaitingMapper {
@Override
public boolean shouldWait(int id) {
return id >= (totalMaps / 2);
}
}
/**
* Reduce that just waits for a file to be created on the dfs. The
* file creation is a signal to the reduce.
*/
static class WaitingReducer extends MapReduceBase
implements Reducer<WritableComparable, Writable,
WritableComparable, Writable> {
FileSystem fs = null;
Path signal;
/** The waiting function. The reduce exits once it gets a signal. Here the
* signal is the file existence.
*/
public void reduce(WritableComparable key, Iterator<Writable> val,
OutputCollector<WritableComparable, Writable> output,
Reporter reporter)
throws IOException {
if (fs != null) {
while (!fs.exists(signal)) {
try {
reporter.progress();
synchronized (this) {
this.wait(1000); // wait for 1 sec
}
} catch (InterruptedException ie) {
System.out.println("Interrupted while the reduce was waiting for the"
+ " signal.");
break;
}
}
} else {
throw new IOException("Could not get the DFS!!");
}
}
public void configure(JobConf conf) {
try {
fs = FileSystem.get(conf);
signal = new Path(conf.get(getTaskSignalParameter(false)));
} catch (IOException ioe) {
System.out.println("Got an exception while obtaining the filesystem");
}
}
}
static String getTaskSignalParameter(boolean isMap) {
return isMap
? "test.mapred.map.waiting.target"
: "test.mapred.reduce.waiting.target";
}
/**
* Signal the maps/reduces to start.
*/
static void signalTasks(MiniDFSCluster dfs, FileSystem fileSys,
String mapSignalFile,
String reduceSignalFile, int replication)
throws IOException {
writeFile(dfs.getNameNode(), fileSys.getConf(), new Path(mapSignalFile),
(short)replication);
writeFile(dfs.getNameNode(), fileSys.getConf(), new Path(reduceSignalFile),
(short)replication);
}
/**
* Signal the maps/reduces to start.
*/
static void signalTasks(MiniDFSCluster dfs, FileSystem fileSys,
boolean isMap, String mapSignalFile,
String reduceSignalFile)
throws IOException {
// signal the maps to complete
writeFile(dfs.getNameNode(), fileSys.getConf(),
isMap
? new Path(mapSignalFile)
: new Path(reduceSignalFile), (short)1);
}
static String getSignalFile(Path dir) {
return (new Path(dir, "signal")).toString();
}
static String getMapSignalFile(Path dir) {
return (new Path(dir, "map-signal")).toString();
}
static String getReduceSignalFile(Path dir) {
return (new Path(dir, "reduce-signal")).toString();
}
static void writeFile(NameNode namenode, Configuration conf, Path name,
short replication) throws IOException {
FileSystem fileSys = FileSystem.get(conf);
SequenceFile.Writer writer =
SequenceFile.createWriter(fileSys, conf, name,
BytesWritable.class, BytesWritable.class,
CompressionType.NONE);
writer.append(new BytesWritable(), new BytesWritable());
writer.close();
fileSys.setReplication(name, replication);
DFSTestUtil.waitReplication(fileSys, name, replication);
}
// Input formats
/**
* A custom input format that creates virtual inputs of a single string
* for each map.
*/
public static class RandomInputFormat implements InputFormat<Text, Text> {
public InputSplit[] getSplits(JobConf job,
int numSplits) throws IOException {
InputSplit[] result = new InputSplit[numSplits];
Path outDir = FileOutputFormat.getOutputPath(job);
for(int i=0; i < result.length; ++i) {
result[i] = new FileSplit(new Path(outDir, "dummy-split-" + i),
0, 1, (String[])null);
}
return result;
}
static class RandomRecordReader implements RecordReader<Text, Text> {
Path name;
public RandomRecordReader(Path p) {
name = p;
}
public boolean next(Text key, Text value) {
if (name != null) {
key.set(name.getName());
name = null;
return true;
}
return false;
}
public Text createKey() {
return new Text();
}
public Text createValue() {
return new Text();
}
public long getPos() {
return 0;
}
public void close() {}
public float getProgress() {
return 0.0f;
}
}
public RecordReader<Text, Text> getRecordReader(InputSplit split,
JobConf job,
Reporter reporter)
throws IOException {
return new RandomRecordReader(((FileSplit) split).getPath());
}
}
// Start a job and return its RunningJob object
static RunningJob runJob(JobConf conf, Path inDir, Path outDir)
throws IOException {
return runJob(conf, inDir, outDir, conf.getNumMapTasks(), conf.getNumReduceTasks());
}
// Start a job and return its RunningJob object
static RunningJob runJob(JobConf conf, Path inDir, Path outDir, int numMaps,
int numReds) throws IOException {
String input = "The quick brown fox\n" + "has many silly\n"
+ "red fox sox\n";
// submit the job and wait for it to complete
return runJob(conf, inDir, outDir, numMaps, numReds, input);
}
// Start a job with the specified input and return its RunningJob object
static RunningJob runJob(JobConf conf, Path inDir, Path outDir, int numMaps,
int numReds, String input) throws IOException {
FileSystem fs = FileSystem.get(conf);
if (fs.exists(outDir)) {
fs.delete(outDir, true);
}
if (!fs.exists(inDir)) {
fs.mkdirs(inDir);
}
for (int i = 0; i < numMaps; ++i) {
DataOutputStream file = fs.create(new Path(inDir, "part-" + i));
file.writeBytes(input);
file.close();
}
conf.setInputFormat(TextInputFormat.class);
conf.setOutputKeyClass(LongWritable.class);
conf.setOutputValueClass(Text.class);
FileInputFormat.setInputPaths(conf, inDir);
FileOutputFormat.setOutputPath(conf, outDir);
conf.setNumMapTasks(numMaps);
conf.setNumReduceTasks(numReds);
JobClient jobClient = new JobClient(conf);
RunningJob job = jobClient.submitJob(conf);
return job;
}
// Run a job that will be succeeded and wait until it completes
public static RunningJob runJobSucceed(JobConf conf, Path inDir, Path outDir)
throws IOException {
conf.setJobName("test-job-succeed");
conf.setMapperClass(IdentityMapper.class);
conf.setReducerClass(IdentityReducer.class);
RunningJob job = UtilsForTests.runJob(conf, inDir, outDir);
while (!job.isComplete()) {
try {
Thread.sleep(100);
} catch (InterruptedException e) {
break;
}
}
return job;
}
// Run a job that will be failed and wait until it completes
public static RunningJob runJobFail(JobConf conf, Path inDir, Path outDir)
throws IOException {
conf.setJobName("test-job-fail");
conf.setMapperClass(FailMapper.class);
conf.setReducerClass(IdentityReducer.class);
conf.setMaxMapAttempts(1);
RunningJob job = UtilsForTests.runJob(conf, inDir, outDir);
while (!job.isComplete()) {
try {
Thread.sleep(100);
} catch (InterruptedException e) {
break;
}
}
return job;
}
// Run a job that will be killed and wait until it completes
public static RunningJob runJobKill(JobConf conf, Path inDir, Path outDir)
throws IOException {
conf.setJobName("test-job-kill");
conf.setMapperClass(KillMapper.class);
conf.setReducerClass(IdentityReducer.class);
RunningJob job = UtilsForTests.runJob(conf, inDir, outDir);
while (job.getJobState() != JobStatus.RUNNING) {
try {
Thread.sleep(100);
} catch (InterruptedException e) {
break;
}
}
job.killJob();
while (job.cleanupProgress() == 0.0f) {
try {
Thread.sleep(10);
} catch (InterruptedException ie) {
break;
}
}
return job;
}
/**
* Cleans up files/dirs inline. CleanupQueue deletes in a separate thread
* asynchronously.
*/
public static class InlineCleanupQueue extends CleanupQueue {
List<String> stalePaths = new ArrayList<String>();
public InlineCleanupQueue() {
// do nothing
}
@Override
public void addToQueue(PathDeletionContext... contexts) {
// delete paths in-line
for (PathDeletionContext context : contexts) {
try {
if (!deletePath(context)) {
LOG.warn("Stale path " + context.fullPath);
stalePaths.add(context.fullPath);
}
} catch (IOException e) {
LOG.warn("Caught exception while deleting path "
+ context.fullPath);
LOG.info(StringUtils.stringifyException(e));
stalePaths.add(context.fullPath);
}
}
}
}
static class FakeClock extends Clock {
long time = 0;
public void advance(long millis) {
time += millis;
}
@Override
long getTime() {
return time;
}
}
// Mapper that fails
static class FailMapper extends MapReduceBase implements
Mapper<WritableComparable, Writable, WritableComparable, Writable> {
public void map(WritableComparable key, Writable value,
OutputCollector<WritableComparable, Writable> out, Reporter reporter)
throws IOException {
//NOTE- the next line is required for the TestDebugScript test to succeed
System.err.println("failing map");
throw new RuntimeException("failing map");
}
}
// Mapper that sleeps for a long time.
// Used for running a job that will be killed
static class KillMapper extends MapReduceBase implements
Mapper<WritableComparable, Writable, WritableComparable, Writable> {
public void map(WritableComparable key, Writable value,
OutputCollector<WritableComparable, Writable> out, Reporter reporter)
throws IOException {
try {
Thread.sleep(1000000);
} catch (InterruptedException e) {
// Do nothing
}
}
}
static void setUpConfigFile(Properties confProps, File configFile)
throws IOException {
Configuration config = new Configuration(false);
FileOutputStream fos = new FileOutputStream(configFile);
for (Enumeration<?> e = confProps.propertyNames(); e.hasMoreElements();) {
String key = (String) e.nextElement();
config.set(key, confProps.getProperty(key));
}
config.writeXml(fos);
fos.close();
}
/**
* Creates a file on the given file system and writes the given input into it.
* @param dfs FileSystem on which the file is created
* @param URIPATH Path at which the file is created
* @param permission FsPermission applied to the created file
* @param input String content written into the file
* @return the (already closed) DataOutputStream used to write the file
*/
public static DataOutputStream
createTmpFileDFS(FileSystem dfs, Path URIPATH,
FsPermission permission, String input) throws Exception {
//Creating the path with the file
DataOutputStream file =
FileSystem.create(dfs, URIPATH, permission);
file.writeBytes(input);
file.close();
return file;
}
/**
* This formats the long tasktracker name to just the FQDN
* @param taskTrackerLong String The long format of the tasktracker string
* @return String The FQDN of the tasktracker
* @throws Exception
*/
public static String getFQDNofTT (String taskTrackerLong) throws Exception {
//Getting the exact FQDN of the tasktracker from the tasktracker string.
String[] firstSplit = taskTrackerLong.split("_");
String tmpOutput = firstSplit[1];
String[] secondSplit = tmpOutput.split(":");
String tmpTaskTracker = secondSplit[0];
return tmpTaskTracker;
}
}
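
A hedged sketch of how the waiting-job helpers above are typically combined: configure a job whose mappers block on a DFS signal file, submit it, then release it with signalTasks. The cluster handles, directory names, task counts and class name are placeholders, and the class sits in the same package so it can reach the package-private helpers:

package org.apache.hadoop.mapred;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;
// Illustrative only: drive a "waiting" job and then release it.
public class WaitingJobExample {
  static void runAndRelease(MiniDFSCluster dfs, MiniMRCluster mr) throws Exception {
    JobConf conf = mr.createJobConf();
    Path base = new Path("/test/wait");
    String mapSignal = UtilsForTests.getMapSignalFile(base);
    String redSignal = UtilsForTests.getReduceSignalFile(base);
    // Mappers of this job block until the map signal file appears on DFS.
    UtilsForTests.configureWaitingJobConf(conf, new Path(base, "input"),
        new Path(base, "output"), 4, 1, "waiting-job", mapSignal, redSignal);
    RunningJob job = new JobClient(conf).submitJob(conf);
    // ... the test inspects whatever state it cares about while the maps wait ...
    UtilsForTests.signalTasks(dfs, dfs.getFileSystem(), mapSignal, redSignal, 1);
    UtilsForTests.waitTillDone(new JobClient(conf));
    System.out.println("final state: " + job.getJobState());
  }
}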


@ -0,0 +1,159 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.StringTokenizer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
/**
* This is an example Hadoop Map/Reduce application.
* It reads the text input files, breaks each line into words
* and counts them. The output is a locally sorted list of words and the
* count of how often they occurred.
*
* To run: bin/hadoop jar build/hadoop-examples.jar wordcount
* [-m <i>maps</i>] [-r <i>reduces</i>] <i>in-dir</i> <i>out-dir</i>
*/
public class WordCount extends Configured implements Tool {
/**
* Counts the words in each line.
* For each line of input, break the line into words and emit them as
* (<b>word</b>, <b>1</b>).
*/
public static class MapClass extends MapReduceBase
implements Mapper<LongWritable, Text, Text, IntWritable> {
private final static IntWritable one = new IntWritable(1);
private Text word = new Text();
public void map(LongWritable key, Text value,
OutputCollector<Text, IntWritable> output,
Reporter reporter) throws IOException {
String line = value.toString();
StringTokenizer itr = new StringTokenizer(line);
while (itr.hasMoreTokens()) {
word.set(itr.nextToken());
output.collect(word, one);
}
}
}
/**
* A reducer class that just emits the sum of the input values.
*/
public static class Reduce extends MapReduceBase
implements Reducer<Text, IntWritable, Text, IntWritable> {
public void reduce(Text key, Iterator<IntWritable> values,
OutputCollector<Text, IntWritable> output,
Reporter reporter) throws IOException {
int sum = 0;
while (values.hasNext()) {
sum += values.next().get();
}
output.collect(key, new IntWritable(sum));
}
}
static int printUsage() {
System.out.println("wordcount [-m <maps>] [-r <reduces>] <input> <output>");
ToolRunner.printGenericCommandUsage(System.out);
return -1;
}
/**
* The main driver for word count map/reduce program.
* Invoke this method to submit the map/reduce job.
* @throws IOException When there are communication problems with the
* job tracker.
*/
public int run(String[] args) throws Exception {
JobConf conf = new JobConf(getConf(), WordCount.class);
conf.setJobName("wordcount");
// the keys are words (strings)
conf.setOutputKeyClass(Text.class);
// the values are counts (ints)
conf.setOutputValueClass(IntWritable.class);
conf.setMapperClass(MapClass.class);
conf.setCombinerClass(Reduce.class);
conf.setReducerClass(Reduce.class);
List<String> other_args = new ArrayList<String>();
for(int i=0; i < args.length; ++i) {
try {
if ("-m".equals(args[i])) {
conf.setNumMapTasks(Integer.parseInt(args[++i]));
} else if ("-r".equals(args[i])) {
conf.setNumReduceTasks(Integer.parseInt(args[++i]));
} else {
other_args.add(args[i]);
}
} catch (NumberFormatException except) {
System.out.println("ERROR: Integer expected instead of " + args[i]);
return printUsage();
} catch (ArrayIndexOutOfBoundsException except) {
System.out.println("ERROR: Required parameter missing from " +
args[i-1]);
return printUsage();
}
}
// Make sure there are exactly 2 parameters left.
if (other_args.size() != 2) {
System.out.println("ERROR: Wrong number of parameters: " +
other_args.size() + " instead of 2.");
return printUsage();
}
FileInputFormat.setInputPaths(conf, other_args.get(0));
FileOutputFormat.setOutputPath(conf, new Path(other_args.get(1)));
JobClient.runJob(conf);
return 0;
}
public static void main(String[] args) throws Exception {
int res = ToolRunner.run(new Configuration(), new WordCount(), args);
System.exit(res);
}
}
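
As with the sort validator, the command line shown in the class comment has a straightforward programmatic form; the paths and driver class name below are placeholders:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.WordCount;
import org.apache.hadoop.util.ToolRunner;
// Hypothetical driver: equivalent of
//   bin/hadoop jar build/hadoop-examples.jar wordcount -m 4 -r 2 /wc/input /wc/output
public class WordCountDriver {
  public static void main(String[] args) throws Exception {
    String[] wcArgs = { "-m", "4", "-r", "2", "/wc/input", "/wc/output" };
    System.exit(ToolRunner.run(new Configuration(), new WordCount(), wcArgs));
  }
}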


@ -0,0 +1,154 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.jobcontrol;
import java.io.IOException;
import java.text.NumberFormat;
import java.util.Iterator;
import java.util.List;
import java.util.Random;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;
/**
* Utility methods used in various Job Control unit tests.
*/
public class JobControlTestUtils {
static private Random rand = new Random();
private static NumberFormat idFormat = NumberFormat.getInstance();
static {
idFormat.setMinimumIntegerDigits(4);
idFormat.setGroupingUsed(false);
}
/**
* Cleans the data from the passed Path in the passed FileSystem.
*
* @param fs FileSystem to delete data from.
* @param dirPath Path to be deleted.
* @throws IOException If an error occurs cleaning the data.
*/
static void cleanData(FileSystem fs, Path dirPath) throws IOException {
fs.delete(dirPath, true);
}
/**
* Generates a string of random digits.
*
* @return A random string.
*/
private static String generateRandomWord() {
return idFormat.format(rand.nextLong());
}
/**
* Generates a line of random text.
*
* @return A line of random text.
*/
private static String generateRandomLine() {
long r = rand.nextLong() % 7;
long n = r + 20;
StringBuffer sb = new StringBuffer();
for (int i = 0; i < n; i++) {
sb.append(generateRandomWord()).append(" ");
}
sb.append("\n");
return sb.toString();
}
/**
* Generates data that can be used for Job Control tests.
*
* @param fs FileSystem to create data in.
* @param dirPath Path to create the data in.
* @throws IOException If an error occurs creating the data.
*/
static void generateData(FileSystem fs, Path dirPath) throws IOException {
FSDataOutputStream out = fs.create(new Path(dirPath, "data.txt"));
for (int i = 0; i < 10000; i++) {
String line = generateRandomLine();
out.write(line.getBytes("UTF-8"));
}
out.close();
}
/**
* Creates a simple copy job.
*
* @param indirs List of input directories.
* @param outdir Output directory.
* @return JobConf initialised for a simple copy job.
* @throws Exception If an error occurs creating job configuration.
*/
static JobConf createCopyJob(List<Path> indirs, Path outdir) throws Exception {
Configuration defaults = new Configuration();
JobConf theJob = new JobConf(defaults, TestJobControl.class);
theJob.setJobName("DataMoveJob");
FileInputFormat.setInputPaths(theJob, indirs.toArray(new Path[0]));
theJob.setMapperClass(DataCopy.class);
FileOutputFormat.setOutputPath(theJob, outdir);
theJob.setOutputKeyClass(Text.class);
theJob.setOutputValueClass(Text.class);
theJob.setReducerClass(DataCopy.class);
theJob.setNumMapTasks(12);
theJob.setNumReduceTasks(4);
return theJob;
}
/**
* Simple Mapper and Reducer implementation which copies data it reads in.
*/
public static class DataCopy extends MapReduceBase implements
Mapper<LongWritable, Text, Text, Text>, Reducer<Text, Text, Text, Text> {
public void map(LongWritable key, Text value, OutputCollector<Text, Text> output,
Reporter reporter) throws IOException {
output.collect(new Text(key.toString()), value);
}
public void reduce(Text key, Iterator<Text> values,
OutputCollector<Text, Text> output, Reporter reporter)
throws IOException {
Text dumbKey = new Text("");
while (values.hasNext()) {
Text data = values.next();
output.collect(dumbKey, data);
}
}
}
}
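
A hedged sketch of how these helpers are typically wired into the mapred JobControl API; the directory names and example class are hypothetical, and it assumes the Job(JobConf, ArrayList) and JobControl(String) constructors from org.apache.hadoop.mapred.jobcontrol:

package org.apache.hadoop.mapred.jobcontrol;
import java.util.ArrayList;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.JobConf;
// Illustrative only: generate data, build a copy job, and run it under JobControl.
public class JobControlExample {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path in = new Path("/jobctrl/in");
    Path out = new Path("/jobctrl/out");
    JobControlTestUtils.generateData(fs, in);
    ArrayList<Path> inputs = new ArrayList<Path>();
    inputs.add(in);
    JobConf copyConf = JobControlTestUtils.createCopyJob(inputs, out);
    Job copyJob = new Job(copyConf, new ArrayList<Job>()); // no depending jobs
    JobControl controller = new JobControl("copy-group");
    controller.addJob(copyJob);
    new Thread(controller).start();
    while (!controller.allFinished()) {
      Thread.sleep(500);
    }
    controller.stop();
  }
}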


@ -0,0 +1,63 @@
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<FindBugsFilter>
<!-- Ignore some irrelevant serialization warnings -->
<Match>
<Class name="org.apache.hadoop.examples.SecondarySort$FirstGroupingComparator" />
<Bug pattern="SE_COMPARATOR_SHOULD_BE_SERIALIZABLE" />
</Match>
<Match>
<Class name="org.apache.hadoop.examples.SecondarySort$IntPair$Comparator" />
<Bug pattern="SE_COMPARATOR_SHOULD_BE_SERIALIZABLE" />
</Match>
<!-- Ignore the irrelevant resource cleanup warnings-->
<Match>
<Class name="org.apache.hadoop.examples.DBCountPageView" />
<Method name="verify" />
<Bug pattern="OBL_UNSATISFIED_OBLIGATION" />
</Match>
<!-- Ignore the irrelevant closure warnings-->
<Match>
<Class name="org.apache.hadoop.examples.dancing.Pentomino$Piece" />
<Bug pattern="EI_EXPOSE_REP2" />
</Match>
<!-- Ignore the irrelevant package protection warnings-->
<Match>
<Class name="org.apache.hadoop.examples.dancing.Pentomino" />
<Or>
<Field name="fourRotations" />
<Field name="oneRotation" />
<Field name="twoRotations" />
</Or>
<Bug pattern="MS_PKGPROTECT" />
</Match>
<!-- Ignore the irrelevant right shift warnings, as only positive integers are given as input-->
<Match>
<Class name="org.apache.hadoop.examples.terasort.Unsigned16" />
<Method name="getHexDigit" />
<Bug pattern="ICAST_QUESTIONABLE_UNSIGNED_RIGHT_SHIFT" />
</Match>
</FindBugsFilter>

View File

@@ -27,6 +27,10 @@
<name>Apache Hadoop MapReduce Examples</name> <name>Apache Hadoop MapReduce Examples</name>
<packaging>jar</packaging> <packaging>jar</packaging>
<properties>
<mr.examples.basedir>${basedir}</mr.examples.basedir>
</properties>
<dependencies> <dependencies>
<dependency> <dependency>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
@@ -58,6 +62,18 @@
</archive> </archive>
</configuration> </configuration>
</plugin> </plugin>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>findbugs-maven-plugin</artifactId>
<configuration>
<findbugsXmlOutput>true</findbugsXmlOutput>
<xmlOutput>true</xmlOutput>
<excludeFilterFile>${mr.examples.basedir}/dev-support/findbugs-exclude.xml</excludeFilterFile>
<effort>Max</effort>
</configuration>
</plugin>
</plugins> </plugins>
</build> </build>
</project> </project>

View File

@@ -19,7 +19,9 @@
package org.apache.hadoop.examples; package org.apache.hadoop.examples;
import java.io.IOException; import java.io.IOException;
import java.util.*; import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured; import org.apache.hadoop.conf.Configured;
@@ -29,9 +31,14 @@ import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable; import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapred.ClusterStatus; import org.apache.hadoop.mapred.ClusterStatus;
import org.apache.hadoop.mapred.JobClient; import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapreduce.*; import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.OutputFormat;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat; import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
import org.apache.hadoop.mapreduce.lib.join.*; import org.apache.hadoop.mapreduce.lib.join.CompositeInputFormat;
import org.apache.hadoop.mapreduce.lib.join.TupleWritable;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat; import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;
import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.Tool;
@@ -52,7 +59,7 @@ import org.apache.hadoop.util.ToolRunner;
* [<i>in-dir</i>]* <i>in-dir</i> <i>out-dir</i> * [<i>in-dir</i>]* <i>in-dir</i> <i>out-dir</i>
*/ */
public class Join extends Configured implements Tool { public class Join extends Configured implements Tool {
public static String REDUCES_PER_HOST = "mapreduce.join.reduces_per_host"; public static final String REDUCES_PER_HOST = "mapreduce.join.reduces_per_host";
static int printUsage() { static int printUsage() {
System.out.println("join [-r <reduces>] " + System.out.println("join [-r <reduces>] " +
"[-inFormat <input format class>] " + "[-inFormat <input format class>] " +

View File

@@ -69,7 +69,7 @@ public class Pentomino {
} }
public int[] getRotations() { public int[] getRotations() {
return rotations; return rotations.clone();
} }
public boolean getFlippable() { public boolean getFlippable() {
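The change above stops getRotations from handing out the piece's internal array, addressing the same FindBugs "exposed internal representation" family of warnings excluded for Pentomino$Piece earlier in this patch. A generic sketch of the defensive-copy idiom follows; it is illustrative only and not the actual Pentomino code.

public class ShapeInfo {
  private final int[] rotations;

  public ShapeInfo(int[] rotations) {
    // Copy on the way in, so later changes to the caller's array
    // cannot alter this object's state.
    this.rotations = rotations.clone();
  }

  public int[] getRotations() {
    // Copy on the way out; mutating the returned array leaves the
    // internal state untouched.
    return rotations.clone();
  }
}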

View File

@@ -70,7 +70,7 @@ public class TeraGen extends Configured implements Tool {
public static enum Counters {CHECKSUM} public static enum Counters {CHECKSUM}
public static String NUM_ROWS = "mapreduce.terasort.num-rows"; public static final String NUM_ROWS = "mapreduce.terasort.num-rows";
/** /**
* An input format that assigns ranges of longs to each mapper. * An input format that assigns ranges of longs to each mapper.
*/ */

View File

@@ -156,10 +156,10 @@ public class TeraInputFormat extends FileInputFormat<Text,Text> {
* them and picks N-1 keys to generate N equally sized partitions. * them and picks N-1 keys to generate N equally sized partitions.
* @param job the job to sample * @param job the job to sample
* @param partFile where to write the output file to * @param partFile where to write the output file to
* @throws IOException if something goes wrong * @throws Throwable if something goes wrong
*/ */
public static void writePartitionFile(final JobContext job, public static void writePartitionFile(final JobContext job,
Path partFile) throws IOException, InterruptedException { Path partFile) throws Throwable {
long t1 = System.currentTimeMillis(); long t1 = System.currentTimeMillis();
Configuration conf = job.getConfiguration(); Configuration conf = job.getConfiguration();
final TeraInputFormat inFormat = new TeraInputFormat(); final TeraInputFormat inFormat = new TeraInputFormat();
@@ -174,11 +174,12 @@ public class TeraInputFormat extends FileInputFormat<Text,Text> {
final long recordsPerSample = sampleSize / samples; final long recordsPerSample = sampleSize / samples;
final int sampleStep = splits.size() / samples; final int sampleStep = splits.size() / samples;
Thread[] samplerReader = new Thread[samples]; Thread[] samplerReader = new Thread[samples];
SamplerThreadGroup threadGroup = new SamplerThreadGroup("Sampler Reader Thread Group");
// take N samples from different parts of the input // take N samples from different parts of the input
for(int i=0; i < samples; ++i) { for(int i=0; i < samples; ++i) {
final int idx = i; final int idx = i;
samplerReader[i] = samplerReader[i] =
new Thread ("Sampler Reader " + idx) { new Thread (threadGroup,"Sampler Reader " + idx) {
{ {
setDaemon(true); setDaemon(true);
} }
@@ -201,7 +202,7 @@ public class TeraInputFormat extends FileInputFormat<Text,Text> {
} catch (IOException ie){ } catch (IOException ie){
System.err.println("Got an exception while reading splits " + System.err.println("Got an exception while reading splits " +
StringUtils.stringifyException(ie)); StringUtils.stringifyException(ie));
System.exit(-1); throw new RuntimeException(ie);
} catch (InterruptedException e) { } catch (InterruptedException e) {
} }
@@ -215,6 +216,9 @@ public class TeraInputFormat extends FileInputFormat<Text,Text> {
for (int i = 0; i < samples; i++) { for (int i = 0; i < samples; i++) {
try { try {
samplerReader[i].join(); samplerReader[i].join();
if(threadGroup.getThrowable() != null){
throw threadGroup.getThrowable();
}
} catch (InterruptedException e) { } catch (InterruptedException e) {
} }
} }
@@ -226,6 +230,25 @@ public class TeraInputFormat extends FileInputFormat<Text,Text> {
System.out.println("Computing parititions took " + (t3 - t2) + "ms"); System.out.println("Computing parititions took " + (t3 - t2) + "ms");
} }
static class SamplerThreadGroup extends ThreadGroup{
private Throwable throwable;
public SamplerThreadGroup(String s) {
super(s);
}
@Override
public void uncaughtException(Thread thread, Throwable throwable) {
this.throwable = throwable;
}
public Throwable getThrowable() {
return this.throwable;
}
}
static class TeraRecordReader extends RecordReader<Text,Text> { static class TeraRecordReader extends RecordReader<Text,Text> {
private FSDataInputStream in; private FSDataInputStream in;
private long offset; private long offset;
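The SamplerThreadGroup added above lets writePartitionFile collect a failure from any sampler reader thread and rethrow it in the caller instead of calling System.exit. A stripped-down, self-contained sketch of that idiom is shown below; the class and thread names are illustrative and not taken from the patch.

public class FailureCollectingGroupDemo {

  /** A ThreadGroup that remembers the first uncaught worker failure. */
  static class FailureCollectingGroup extends ThreadGroup {
    private volatile Throwable throwable;

    FailureCollectingGroup(String name) {
      super(name);
    }

    @Override
    public void uncaughtException(Thread thread, Throwable t) {
      throwable = t; // recorded here, rethrown by the coordinating thread
    }

    Throwable getThrowable() {
      return throwable;
    }
  }

  public static void main(String[] args) throws Throwable {
    FailureCollectingGroup group = new FailureCollectingGroup("workers");
    Thread worker = new Thread(group, new Runnable() {
      @Override
      public void run() {
        throw new RuntimeException("simulated sampler failure");
      }
    });
    worker.setDaemon(true);
    worker.start();
    worker.join();
    if (group.getThrowable() != null) {
      // Surface the worker's failure to the caller, as writePartitionFile now does.
      throw group.getThrowable();
    }
  }
}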

View File

@@ -31,7 +31,6 @@ import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text; import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Cluster;
import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobContext; import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.MRJobConfig; import org.apache.hadoop.mapreduce.MRJobConfig;
@@ -300,7 +299,12 @@ public class TeraSort extends Configured implements Tool {
TeraInputFormat.PARTITION_FILENAME); TeraInputFormat.PARTITION_FILENAME);
URI partitionUri = new URI(partitionFile.toString() + URI partitionUri = new URI(partitionFile.toString() +
"#" + TeraInputFormat.PARTITION_FILENAME); "#" + TeraInputFormat.PARTITION_FILENAME);
TeraInputFormat.writePartitionFile(job, partitionFile); try {
TeraInputFormat.writePartitionFile(job, partitionFile);
} catch (Throwable e) {
LOG.error(e.getMessage());
return -1;
}
job.addCacheFile(partitionUri); job.addCacheFile(partitionUri);
job.createSymlink(); job.createSymlink();
long end = System.currentTimeMillis(); long end = System.currentTimeMillis();

View File

@@ -148,9 +148,13 @@ IFS=
# add hadoop-common libs to CLASSPATH # add hadoop-common libs to CLASSPATH
if [ ! -d "$HADOOP_COMMON_HOME" ]; then if [ ! -d "$HADOOP_COMMON_HOME" ]; then
echo No HADOOP_COMMON_HOME set. if [ -d "$HADOOP_PREFIX" ]; then
echo Please specify it either in yarn-env.sh or in the environment. export HADOOP_COMMON_HOME=$HADOOP_PREFIX
exit 1 else
echo No HADOOP_COMMON_HOME set.
echo Please specify it either in yarn-env.sh or in the environment.
exit 1
fi
fi fi
CLASSPATH=${CLASSPATH}:$HADOOP_COMMON_HOME/share/hadoop/common'/*' CLASSPATH=${CLASSPATH}:$HADOOP_COMMON_HOME/share/hadoop/common'/*'
@@ -158,9 +162,13 @@ CLASSPATH=${CLASSPATH}:$HADOOP_COMMON_HOME/share/hadoop/common/lib'/*'
# add hadoop-hdfs libs to CLASSPATH # add hadoop-hdfs libs to CLASSPATH
if [ ! -d "$HADOOP_HDFS_HOME" ]; then if [ ! -d "$HADOOP_HDFS_HOME" ]; then
echo No HADOOP_HDFS_HOME set. if [ -d "$HADOOP_PREFIX" ]; then
echo Please specify it either in yarn-env.sh or in the environment. export HADOOP_HDFS_HOME=$HADOOP_PREFIX
exit 1 else
echo No HADOOP_HDFS_HOME set.
echo Please specify it either in yarn-env.sh or in the environment.
exit 1
fi
fi fi
CLASSPATH=${CLASSPATH}:$HADOOP_HDFS_HOME/share/hadoop/hdfs'/*' CLASSPATH=${CLASSPATH}:$HADOOP_HDFS_HOME/share/hadoop/hdfs'/*'
CLASSPATH=${CLASSPATH}:$HADOOP_HDFS_HOME/share/hadoop/hdfs/lib'/*' CLASSPATH=${CLASSPATH}:$HADOOP_HDFS_HOME/share/hadoop/hdfs/lib'/*'

View File

@@ -74,6 +74,11 @@ public interface ContainerStatus {
* the application or being 'lost' due to node failures etc. have a special * the application or being 'lost' due to node failures etc. have a special
* exit code of {@literal -100}.</p> * exit code of {@literal -100}.</p>
* *
* <p>When threshold number of the nodemanager-local-directories or
* threshold number of the nodemanager-log-directories become bad, then
* container is not launched and is exited with exit status of
* {@literal -101}.</p>
*
* @return <em>exit status</em> for the container * @return <em>exit status</em> for the container
*/ */
@Public @Public

View File

@@ -340,6 +340,21 @@ public class ContainerPBImpl extends ProtoBase<ContainerProto> implements Contai
return ((ContainerStatusPBImpl)t).getProto(); return ((ContainerStatusPBImpl)t).getProto();
} }
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("Container: [");
sb.append("ContainerId: ").append(getId()).append(", ");
sb.append("NodeId: ").append(getNodeId()).append(", ");
sb.append("NodeHttpAddress: ").append(getNodeHttpAddress()).append(", ");
sb.append("Resource: ").append(getResource()).append(", ");
sb.append("Priority: ").append(getPriority()).append(", ");
sb.append("State: ").append(getState()).append(", ");
sb.append("Token: ").append(getContainerToken()).append(", ");
sb.append("Status: ").append(getContainerStatus());
sb.append("]");
return sb.toString();
}
//TODO Comparator //TODO Comparator
@Override @Override
public int compareTo(Container other) { public int compareTo(Container other) {

View File

@@ -431,6 +431,7 @@ public class YarnConfiguration extends Configuration {
public static final int INVALID_CONTAINER_EXIT_STATUS = -1000; public static final int INVALID_CONTAINER_EXIT_STATUS = -1000;
public static final int ABORTED_CONTAINER_EXIT_STATUS = -100; public static final int ABORTED_CONTAINER_EXIT_STATUS = -100;
public static final int DISKS_FAILED = -101;
//////////////////////////////// ////////////////////////////////
// Web Proxy Configs // Web Proxy Configs
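Combined with the ContainerStatus javadoc earlier in this patch, an application master or client that inspects container exit codes might special-case the two sentinel values. A minimal sketch follows; the helper class is hypothetical, and only the YarnConfiguration constants come from this change.

import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class ExitStatusText {
  /** Maps the special container exit codes to a short human-readable reason. */
  public static String describe(int exitStatus) {
    if (exitStatus == YarnConfiguration.ABORTED_CONTAINER_EXIT_STATUS) {
      return "aborted by the framework or lost due to node failure (-100)";
    }
    if (exitStatus == YarnConfiguration.DISKS_FAILED) {
      return "not launched: too many bad local/log directories on the node (-101)";
    }
    return "exited with status " + exitStatus;
  }
}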

View File

@@ -40,6 +40,7 @@ import org.apache.hadoop.ipc.ProtocolProxy;
import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RpcEngine; import org.apache.hadoop.ipc.RpcEngine;
import org.apache.hadoop.ipc.ClientCache; import org.apache.hadoop.ipc.ClientCache;
import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.SecretManager; import org.apache.hadoop.security.token.SecretManager;
import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.security.token.TokenIdentifier;
@@ -309,8 +310,8 @@ public class ProtoOverHadoopRpcEngine implements RpcEngine {
} }
@Override @Override
public Writable call(String protocol, Writable writableRequest, public Writable call(RpcKind rpcKind, String protocol,
long receiveTime) throws IOException { Writable writableRequest, long receiveTime) throws IOException {
ProtoSpecificRequestWritable request = (ProtoSpecificRequestWritable) writableRequest; ProtoSpecificRequestWritable request = (ProtoSpecificRequestWritable) writableRequest;
ProtoSpecificRpcRequest rpcRequest = request.message; ProtoSpecificRpcRequest rpcRequest = request.message;
String methodName = rpcRequest.getMethodName(); String methodName = rpcRequest.getMethodName();

View File

@@ -122,8 +122,7 @@ public abstract class ContainerExecutor implements Configurable {
public enum ExitCode { public enum ExitCode {
FORCE_KILLED(137), FORCE_KILLED(137),
TERMINATED(143), TERMINATED(143);
DISKS_FAILED(-101);
private final int code; private final int code;
private ExitCode(int exitCode) { private ExitCode(int exitCode) {

View File

@@ -181,7 +181,7 @@ public class ContainerLaunch implements Callable<Integer> {
List<String> logDirs = dirsHandler.getLogDirs(); List<String> logDirs = dirsHandler.getLogDirs();
if (!dirsHandler.areDisksHealthy()) { if (!dirsHandler.areDisksHealthy()) {
ret = ExitCode.DISKS_FAILED.getExitCode(); ret = YarnConfiguration.DISKS_FAILED;
throw new IOException("Most of the disks failed. " throw new IOException("Most of the disks failed. "
+ dirsHandler.getDisksHealthReport()); + dirsHandler.getDisksHealthReport());
} }

View File

@@ -0,0 +1,38 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.webapp;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
import org.apache.hadoop.yarn.webapp.view.InfoBlock;
import com.google.inject.Inject;
public class AppBlock extends HtmlBlock {
@Inject
AppBlock(ResourceManager rm, ViewContext ctx) {
super(ctx);
}
@Override
protected void render(Block html) {
html._(InfoBlock.class);
}
}

View File

@@ -0,0 +1,32 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.webapp;
import org.apache.hadoop.yarn.webapp.SubView;
public class AppPage extends RmView {
@Override protected void preHead(Page.HTML<_> html) {
commonPreHead(html);
}
@Override protected Class<? extends SubView> content() {
return AppBlock.class;
}
}

View File

@@ -125,7 +125,7 @@ public class RmController extends Controller {
} else { } else {
info._("AM container logs:", "AM not yet registered with RM"); info._("AM container logs:", "AM not yet registered with RM");
} }
render(AboutPage.class); render(AppPage.class);
} }
public void nodes() { public void nodes() {

View File

@@ -403,9 +403,10 @@ Hadoop MapReduce Next Generation - Cluster Setup
the health of the local disks (specifically checks nodemanager-local-dirs the health of the local disks (specifically checks nodemanager-local-dirs
and nodemanager-log-dirs) and after reaching the threshold of number of and nodemanager-log-dirs) and after reaching the threshold of number of
bad directories based on the value set for the config property bad directories based on the value set for the config property
yarn.nodemanager.disk-health-checker.min-healthy-disks. The boot disk is yarn.nodemanager.disk-health-checker.min-healthy-disks, the whole node is
either raided or a failure in the boot disk is identified by the health marked unhealthy and this info is sent to resource manager also. The boot
checker script. disk is either raided or a failure in the boot disk is identified by the
health checker script.
* {Slaves file} * {Slaves file}
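The revised passage above hinges on yarn.nodemanager.disk-health-checker.min-healthy-disks. The short sketch below reads that threshold from the YARN configuration; treating the value as a fraction and the 0.25 fallback are assumptions here, not something this patch establishes.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class DiskHealthThreshold {
  public static void main(String[] args) {
    Configuration conf = new YarnConfiguration();
    // Fraction of healthy nodemanager-local-dirs/log-dirs required before the
    // node is marked unhealthy; 0.25 is an assumed fallback, not from the patch.
    float minHealthyDisks = conf.getFloat(
        "yarn.nodemanager.disk-health-checker.min-healthy-disks", 0.25f);
    System.out.println("min-healthy-disks threshold = " + minHealthyDisks);
  }
}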

View File

@@ -349,7 +349,7 @@
UNTAR='tar xfBp -' UNTAR='tar xfBp -'
LIB_DIR="${BUILD_DIR}/native/target/usr/local/lib" LIB_DIR="${BUILD_DIR}/native/target/usr/local/lib"
if [ -d $${LIB_DIR} ] ; then if [ -d $${LIB_DIR} ] ; then
TARGET_DIR="${BUILD_DIR}/${project.artifactId}-${project.version}/lib" TARGET_DIR="${BUILD_DIR}/${project.artifactId}-${project.version}/lib/native"
mkdir -p $${TARGET_DIR} mkdir -p $${TARGET_DIR}
cd $${LIB_DIR} cd $${LIB_DIR}
$$TAR lib* | (cd $${TARGET_DIR}/; $$UNTAR) $$TAR lib* | (cd $${TARGET_DIR}/; $$UNTAR)