commit 0f73cd7ff6

    Merge branch 'trunk' into HDFS-7240
Path.java
@@ -19,6 +19,9 @@
 package org.apache.hadoop.fs;

 import java.io.IOException;
+import java.io.InvalidObjectException;
+import java.io.ObjectInputValidation;
+import java.io.Serializable;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.util.regex.Pattern;
@@ -37,7 +40,7 @@ import org.apache.hadoop.conf.Configuration;
 @Stringable
 @InterfaceAudience.Public
 @InterfaceStability.Stable
-public class Path implements Comparable {
+public class Path implements Comparable, Serializable, ObjectInputValidation {

   /**
    * The directory separator, a slash.
@@ -66,6 +69,8 @@ public class Path implements Comparable {
   private static final Pattern HAS_DRIVE_LETTER_SPECIFIER =
       Pattern.compile("^/?[a-zA-Z]:");

+  private static final long serialVersionUID = 0xad00f;
+
   private URI uri; // a hierarchical uri

   /**
@@ -565,4 +570,17 @@ public class Path implements Comparable {
     }
     return new Path(newUri);
   }

+  /**
+   * Validate the contents of a deserialized Path, so as
+   * to defend against malicious object streams.
+   * @throws InvalidObjectException if there's no URI
+   */
+  @Override
+  public void validateObject() throws InvalidObjectException {
+    if (uri == null) {
+      throw new InvalidObjectException("No URI in deserialized Path");
+    }
+  }
 }
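A usage sketch (illustrative, not part of this change set): with Path now implementing Serializable, an instance can round-trip through plain Java object streams; the host and path below are arbitrary examples.

// Sketch only: serialize and deserialize a Path with the JDK object streams.
import java.io.*;
import org.apache.hadoop.fs.Path;

public class PathSerDemo {
  public static void main(String[] args) throws Exception {
    Path p = new Path("hdfs://nn:8020/tmp/data");   // any hierarchical URI
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    try (ObjectOutputStream out = new ObjectOutputStream(bytes)) {
      out.writeObject(p);                           // possible only because Path is Serializable
    }
    try (ObjectInputStream in = new ObjectInputStream(
        new ByteArrayInputStream(bytes.toByteArray()))) {
      Path copy = (Path) in.readObject();
      System.out.println(p.equals(copy));           // prints true
    }
  }
}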
ProtobufRpcEngine.java
@@ -60,7 +60,7 @@ public class ProtobufRpcEngine implements RpcEngine {
   private static final ThreadLocal<AsyncGet<Message, Exception>>
       ASYNC_RETURN_MESSAGE = new ThreadLocal<>();

-  static { // Register the rpcRequest deserializer for ProtobufRpcEngine
+  static { // Register the rpcRequest deserializer for WritableRpcEngine
     org.apache.hadoop.ipc.Server.registerProtocolEngine(
         RPC.RpcKind.RPC_PROTOCOL_BUFFER, RpcProtobufRequest.class,
         new Server.ProtoBufRpcInvoker());
@@ -194,8 +194,7 @@ public class ProtobufRpcEngine implements RpcEngine {
       }

       if (args.length != 2) { // RpcController + Message
-        throw new ServiceException(
-            "Too many or few parameters for request. Method: ["
+        throw new ServiceException("Too many parameters for request. Method: ["
             + method.getName() + "]" + ", Expected: 2, Actual: "
             + args.length);
       }
RPC.java
@@ -18,8 +18,6 @@

 package org.apache.hadoop.ipc;

-import java.io.IOException;
-import java.io.InterruptedIOException;
 import java.lang.reflect.Field;
 import java.lang.reflect.InvocationHandler;
 import java.lang.reflect.Proxy;
@@ -28,6 +26,7 @@ import java.net.ConnectException;
 import java.net.InetSocketAddress;
 import java.net.NoRouteToHostException;
 import java.net.SocketTimeoutException;
+import java.io.*;
 import java.io.Closeable;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -38,12 +37,11 @@ import java.util.concurrent.atomic.AtomicBoolean;

 import javax.net.SocketFactory;

-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.*;
 import org.apache.hadoop.HadoopIllegalArgumentException;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.*;
 import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.ipc.Client.ConnectionId;
 import org.apache.hadoop.ipc.protobuf.ProtocolInfoProtos.ProtocolInfoService;
@@ -56,6 +54,7 @@ import org.apache.hadoop.security.token.SecretManager;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.*;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.Time;

@@ -88,7 +87,7 @@ public class RPC {
     RPC_WRITABLE ((short) 2),        // Use WritableRpcEngine
     RPC_PROTOCOL_BUFFER ((short) 3); // Use ProtobufRpcEngine
     final static short MAX_INDEX = RPC_PROTOCOL_BUFFER.value; // used for array size
-    private final short value;
+    public final short value; //TODO make it private

     RpcKind(short val) {
       this.value = val;
@@ -208,7 +207,7 @@ public class RPC {
     RpcEngine engine = PROTOCOL_ENGINES.get(protocol);
     if (engine == null) {
       Class<?> impl = conf.getClass(ENGINE_PROP+"."+protocol.getName(),
-          ProtobufRpcEngine.class);
+          WritableRpcEngine.class);
       engine = (RpcEngine)ReflectionUtils.newInstance(impl, conf);
       PROTOCOL_ENGINES.put(protocol, engine);
     }
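A brief sketch (illustrative, not part of this change set): the engine chosen in getProtocolEngine above is resolved from a configuration key built from ENGINE_PROP and the protocol class name, so callers normally pin the engine explicitly before creating a proxy or server. "MyProtocolPB" below is a placeholder protocol interface, not a real Hadoop class.

// Sketch only: pin the RPC engine for a protocol in the Configuration.
Configuration conf = new Configuration();
RPC.setProtocolEngine(conf, MyProtocolPB.class, ProtobufRpcEngine.class);
// A later getProtocolEngine(MyProtocolPB.class, conf) will then resolve
// ProtobufRpcEngine via the ENGINE_PROP + "." + protocol-name key shown above,
// instead of falling back to the default class in the second argument.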
Server.java
@@ -237,14 +237,14 @@ public abstract class Server {
   static class RpcKindMapValue {
     final Class<? extends Writable> rpcRequestWrapperClass;
     final RpcInvoker rpcInvoker;

     RpcKindMapValue (Class<? extends Writable> rpcRequestWrapperClass,
           RpcInvoker rpcInvoker) {
       this.rpcInvoker = rpcInvoker;
       this.rpcRequestWrapperClass = rpcRequestWrapperClass;
     }
   }
-  static Map<RPC.RpcKind, RpcKindMapValue> rpcKindMap = new HashMap<>(4);
+  static Map<RPC.RpcKind, RpcKindMapValue> rpcKindMap = new
+      HashMap<RPC.RpcKind, RpcKindMapValue>(4);


UserGroupInformation.java
@@ -730,7 +730,7 @@ public class UserGroupInformation {
    *
    * @param user            The principal name to load from the ticket
    *                        cache
-   * @param ticketCache     the path to the ticket cache file
+   * @param ticketCachePath the path to the ticket cache file
    *
    * @throws IOException if the kerberos login fails
    */
@@ -790,7 +790,7 @@ public class UserGroupInformation {
   /**
    * Create a UserGroupInformation from a Subject with Kerberos principal.
    *
-   * @param subject The KerberosPrincipal to use in UGI
+   * @param user    The KerberosPrincipal to use in UGI
    *
    * @throws IOException if the kerberos login fails
    */
AutoCloseableLock.java
@@ -17,22 +17,33 @@
  */
 package org.apache.hadoop.util;

+import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;

+import com.google.common.annotations.VisibleForTesting;
+
 /**
  * This is a wrap class of a ReentrantLock. Extending AutoCloseable
  * interface such that the users can use a try-with-resource syntax.
  */
 public class AutoCloseableLock implements AutoCloseable {

-  private final ReentrantLock lock;
+  private final Lock lock;

   /**
    * Creates an instance of {@code AutoCloseableLock}, initializes
-   * the underlying {@code ReentrantLock} object.
+   * the underlying lock instance with a new {@code ReentrantLock}.
    */
   public AutoCloseableLock() {
-    this.lock = new ReentrantLock();
+    this(new ReentrantLock());
+  }
+
+  /**
+   * Wrap provided Lock instance.
+   * @param lock Lock instance to wrap in AutoCloseable API.
+   */
+  public AutoCloseableLock(Lock lock) {
+    this.lock = lock;
   }

   /**
@@ -86,7 +97,7 @@ public class AutoCloseableLock implements AutoCloseable {

   /**
    * A wrapper method that makes a call to {@code tryLock()} of
-   * the underlying {@code ReentrantLock} object.
+   * the underlying {@code Lock} object.
    *
    * If the lock is not held by another thread, acquires the lock, set the
    * hold count to one and returns {@code true}.
@@ -116,7 +127,12 @@ public class AutoCloseableLock implements AutoCloseable {
    * @return {@code true} if any thread holds this lock and
    *         {@code false} otherwise
    */
-  public boolean isLocked() {
-    return lock.isLocked();
+  @VisibleForTesting
+  boolean isLocked() {
+    if (lock instanceof ReentrantLock) {
+      return ((ReentrantLock)lock).isLocked();
+    }
+    throw new UnsupportedOperationException();
   }

 }
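A usage sketch (illustrative, not part of this change set): the class javadoc above advertises try-with-resources use; assuming the rest of the file provides an acquire() that takes the lock and returns the AutoCloseableLock, and a close() that releases it, a guarded critical section looks roughly like this.

// Sketch only, under the assumptions stated above.
AutoCloseableLock lock = new AutoCloseableLock(new ReentrantLock());
try (AutoCloseableLock guard = lock.acquire()) {
  // critical section; the lock is released automatically when this block exits
}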
DataChecksum.java
@@ -304,7 +304,7 @@ public class DataChecksum implements Checksum {
           bytesPerChecksum, checksums.array(), crcsOffset, fileName, basePos);
       return;
     }
-    if (NativeCrc32.isAvailable()) {
+    if (NativeCrc32.isAvailable() && data.isDirect()) {
       NativeCrc32.verifyChunkedSums(bytesPerChecksum, type.id, checksums, data,
           fileName, basePos);
     } else {
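A short illustration (an assumption, not stated in the diff): the added data.isDirect() guard presumably keeps heap buffers on the pure-Java fallback because the native CRC path operates on direct buffers.

// Sketch only: how the two ByteBuffer kinds would route.
java.nio.ByteBuffer heap = java.nio.ByteBuffer.allocate(4096);         // isDirect() == false -> Java fallback
java.nio.ByteBuffer direct = java.nio.ByteBuffer.allocateDirect(4096); // isDirect() == true  -> NativeCrc32 path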
NodeHealthScriptRunner.java
@@ -106,7 +106,6 @@ public class NodeHealthScriptRunner extends AbstractService {
         shexec.execute();
       } catch (ExitCodeException e) {
         // ignore the exit code of the script
-        exceptionStackTrace = StringUtils.stringifyException(e);
         status = HealthCheckerExitStatus.FAILED_WITH_EXIT_CODE;
         // On Windows, we will not hit the Stream closed IOException
         // thrown by stdout buffered reader for timeout event.
@@ -163,7 +162,7 @@ public class NodeHealthScriptRunner extends AbstractService {
         setHealthStatus(false, exceptionStackTrace);
         break;
       case FAILED_WITH_EXIT_CODE:
-        setHealthStatus(false, exceptionStackTrace);
+        setHealthStatus(true, "", now);
         break;
       case FAILED:
         setHealthStatus(false, shexec.getOutput());
TestPath.java
@@ -17,9 +17,14 @@
  */

 package org.apache.hadoop.fs;
+import org.junit.Assert;
 import org.junit.Test;

+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
 import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.util.Arrays;
@@ -506,4 +511,19 @@ public class TestPath {
     assertFalse(Path.isWindowsAbsolutePath("C:test", false));
     assertFalse(Path.isWindowsAbsolutePath("/C:test", true));
   }

+  @Test(timeout = 30000)
+  public void testSerDeser() throws Throwable {
+    Path source = new Path("hdfs://localhost:4040/scratch");
+    ByteArrayOutputStream baos = new ByteArrayOutputStream(256);
+    try(ObjectOutputStream oos = new ObjectOutputStream(baos)) {
+      oos.writeObject(source);
+    }
+    ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray());
+    try (ObjectInputStream ois = new ObjectInputStream(bais)) {
+      Path deser = (Path) ois.readObject();
+      Assert.assertEquals(source, deser);
+    }
+
+  }
 }
RPCCallBenchmark.java
@@ -17,8 +17,13 @@
  */
 package org.apache.hadoop.ipc;

-import com.google.common.base.Joiner;
-import com.google.protobuf.BlockingService;
+import java.io.IOException;
+import java.lang.management.ManagementFactory;
+import java.lang.management.ThreadMXBean;
+import java.net.InetSocketAddress;
+import java.security.PrivilegedExceptionAction;
+import java.util.concurrent.atomic.AtomicLong;

 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.CommandLineParser;
 import org.apache.commons.cli.GnuParser;
@@ -29,6 +34,7 @@ import org.apache.commons.cli.ParseException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.ipc.RPC.Server;
+import org.apache.hadoop.ipc.TestRPC.TestProtocol;
 import org.apache.hadoop.ipc.protobuf.TestProtos.EchoRequestProto;
 import org.apache.hadoop.ipc.protobuf.TestProtos.EchoResponseProto;
 import org.apache.hadoop.ipc.protobuf.TestRpcServiceProtos.TestProtobufRpcProto;
@@ -39,12 +45,8 @@ import org.apache.hadoop.test.MultithreadedTestUtil.TestContext;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;

-import java.io.IOException;
-import java.lang.management.ManagementFactory;
-import java.lang.management.ThreadMXBean;
-import java.net.InetSocketAddress;
-import java.security.PrivilegedExceptionAction;
-import java.util.concurrent.atomic.AtomicLong;
+import com.google.common.base.Joiner;
+import com.google.protobuf.BlockingService;

 /**
  * Benchmark for protobuf RPC.
@@ -66,7 +68,7 @@ public class RPCCallBenchmark extends TestRpcBase implements Tool {
     public int secondsToRun = 15;
     private int msgSize = 1024;
     public Class<? extends RpcEngine> rpcEngine =
-        ProtobufRpcEngine.class;
+        WritableRpcEngine.class;

     private MyOptions(String args[]) {
       try {
@@ -133,7 +135,7 @@ public class RPCCallBenchmark extends TestRpcBase implements Tool {

       opts.addOption(
           OptionBuilder.withLongOpt("engine").hasArg(true)
-          .withArgName("protobuf")
+          .withArgName("writable|protobuf")
           .withDescription("engine to use")
           .create('e'));

@@ -182,6 +184,8 @@ public class RPCCallBenchmark extends TestRpcBase implements Tool {
         String eng = line.getOptionValue('e');
         if ("protobuf".equals(eng)) {
           rpcEngine = ProtobufRpcEngine.class;
+        } else if ("writable".equals(eng)) {
+          rpcEngine = WritableRpcEngine.class;
         } else {
           throw new ParseException("invalid engine: " + eng);
         }
@@ -233,6 +237,11 @@ public class RPCCallBenchmark extends TestRpcBase implements Tool {
       server = new RPC.Builder(conf).setProtocol(TestRpcService.class)
           .setInstance(service).setBindAddress(opts.host).setPort(opts.getPort())
           .setNumHandlers(opts.serverThreads).setVerbose(false).build();
+    } else if (opts.rpcEngine == WritableRpcEngine.class) {
+      server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
+          .setInstance(new TestRPC.TestImpl()).setBindAddress(opts.host)
+          .setPort(opts.getPort()).setNumHandlers(opts.serverThreads)
+          .setVerbose(false).build();
     } else {
       throw new RuntimeException("Bad engine: " + opts.rpcEngine);
     }
@@ -390,6 +399,15 @@ public class RPCCallBenchmark extends TestRpcBase implements Tool {
           return responseProto.getMessage();
         }
       };
+    } else if (opts.rpcEngine == WritableRpcEngine.class) {
+      final TestProtocol proxy = RPC.getProxy(
+          TestProtocol.class, TestProtocol.versionID, addr, conf);
+      return new RpcServiceWrapper() {
+        @Override
+        public String doEcho(String msg) throws Exception {
+          return proxy.echo(msg);
+        }
+      };
     } else {
       throw new RuntimeException("unsupported engine: " + opts.rpcEngine);
     }
@ -17,28 +17,252 @@
|
||||||
*/
|
*/
|
||||||
package org.apache.hadoop.ipc;
|
package org.apache.hadoop.ipc;
|
||||||
|
|
||||||
|
import java.io.IOException;
|
||||||
|
import java.net.InetSocketAddress;
|
||||||
|
|
||||||
|
import org.junit.Assert;
|
||||||
|
|
||||||
import org.apache.hadoop.conf.Configuration;
|
import org.apache.hadoop.conf.Configuration;
|
||||||
import org.junit.After;
|
import org.apache.hadoop.ipc.protobuf.TestRpcServiceProtos.TestProtobufRpcProto;
|
||||||
|
import org.apache.hadoop.net.NetUtils;
|
||||||
import org.junit.Before;
|
import org.junit.Before;
|
||||||
|
import org.junit.After;
|
||||||
import org.junit.Test;
|
import org.junit.Test;
|
||||||
|
import com.google.protobuf.BlockingService;
|
||||||
|
|
||||||
public class TestMultipleProtocolServer extends TestRpcBase {
|
public class TestMultipleProtocolServer extends TestRpcBase {
|
||||||
|
private static InetSocketAddress addr;
|
||||||
private static RPC.Server server;
|
private static RPC.Server server;
|
||||||
|
|
||||||
@Before
|
private static Configuration conf = new Configuration();
|
||||||
public void setUp() throws Exception {
|
|
||||||
super.setupConf();
|
|
||||||
|
@ProtocolInfo(protocolName="Foo")
|
||||||
server = setupTestServer(conf, 2);
|
interface Foo0 extends VersionedProtocol {
|
||||||
|
public static final long versionID = 0L;
|
||||||
|
String ping() throws IOException;
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
@ProtocolInfo(protocolName="Foo")
|
||||||
|
interface Foo1 extends VersionedProtocol {
|
||||||
|
public static final long versionID = 1L;
|
||||||
|
String ping() throws IOException;
|
||||||
|
String ping2() throws IOException;
|
||||||
|
}
|
||||||
|
|
||||||
|
@ProtocolInfo(protocolName="Foo")
|
||||||
|
interface FooUnimplemented extends VersionedProtocol {
|
||||||
|
public static final long versionID = 2L;
|
||||||
|
String ping() throws IOException;
|
||||||
|
}
|
||||||
|
|
||||||
|
interface Mixin extends VersionedProtocol{
|
||||||
|
public static final long versionID = 0L;
|
||||||
|
void hello() throws IOException;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
interface Bar extends Mixin {
|
||||||
|
public static final long versionID = 0L;
|
||||||
|
int echo(int i) throws IOException;
|
||||||
|
}
|
||||||
|
|
||||||
|
class Foo0Impl implements Foo0 {
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public long getProtocolVersion(String protocol, long clientVersion)
|
||||||
|
throws IOException {
|
||||||
|
return Foo0.versionID;
|
||||||
|
}
|
||||||
|
|
||||||
|
@SuppressWarnings("unchecked")
|
||||||
|
@Override
|
||||||
|
public ProtocolSignature getProtocolSignature(String protocol,
|
||||||
|
long clientVersion, int clientMethodsHash) throws IOException {
|
||||||
|
Class<? extends VersionedProtocol> inter;
|
||||||
|
try {
|
||||||
|
inter = (Class<? extends VersionedProtocol>)getClass().
|
||||||
|
getGenericInterfaces()[0];
|
||||||
|
} catch (Exception e) {
|
||||||
|
throw new IOException(e);
|
||||||
|
}
|
||||||
|
return ProtocolSignature.getProtocolSignature(clientMethodsHash,
|
||||||
|
getProtocolVersion(protocol, clientVersion), inter);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public String ping() {
|
||||||
|
return "Foo0";
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
class Foo1Impl implements Foo1 {
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public long getProtocolVersion(String protocol, long clientVersion)
|
||||||
|
throws IOException {
|
||||||
|
return Foo1.versionID;
|
||||||
|
}
|
||||||
|
|
||||||
|
@SuppressWarnings("unchecked")
|
||||||
|
@Override
|
||||||
|
public ProtocolSignature getProtocolSignature(String protocol,
|
||||||
|
long clientVersion, int clientMethodsHash) throws IOException {
|
||||||
|
Class<? extends VersionedProtocol> inter;
|
||||||
|
try {
|
||||||
|
inter = (Class<? extends VersionedProtocol>)getClass().
|
||||||
|
getGenericInterfaces()[0];
|
||||||
|
} catch (Exception e) {
|
||||||
|
throw new IOException(e);
|
||||||
|
}
|
||||||
|
return ProtocolSignature.getProtocolSignature(clientMethodsHash,
|
||||||
|
getProtocolVersion(protocol, clientVersion), inter);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public String ping() {
|
||||||
|
return "Foo1";
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public String ping2() {
|
||||||
|
return "Foo1";
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
class BarImpl implements Bar {
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public long getProtocolVersion(String protocol, long clientVersion)
|
||||||
|
throws IOException {
|
||||||
|
return Bar.versionID;
|
||||||
|
}
|
||||||
|
|
||||||
|
@SuppressWarnings("unchecked")
|
||||||
|
@Override
|
||||||
|
public ProtocolSignature getProtocolSignature(String protocol,
|
||||||
|
long clientVersion, int clientMethodsHash) throws IOException {
|
||||||
|
Class<? extends VersionedProtocol> inter;
|
||||||
|
try {
|
||||||
|
inter = (Class<? extends VersionedProtocol>)getClass().
|
||||||
|
getGenericInterfaces()[0];
|
||||||
|
} catch (Exception e) {
|
||||||
|
throw new IOException(e);
|
||||||
|
}
|
||||||
|
return ProtocolSignature.getProtocolSignature(clientMethodsHash,
|
||||||
|
getProtocolVersion(protocol, clientVersion), inter);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public int echo(int i) {
|
||||||
|
return i;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void hello() {
|
||||||
|
|
||||||
|
|
||||||
|
}
|
||||||
|
}
|
||||||
|
@Before
|
||||||
|
public void setUp() throws Exception {
|
||||||
|
// create a server with two handlers
|
||||||
|
server = new RPC.Builder(conf).setProtocol(Foo0.class)
|
||||||
|
.setInstance(new Foo0Impl()).setBindAddress(ADDRESS).setPort(0)
|
||||||
|
.setNumHandlers(2).setVerbose(false).build();
|
||||||
|
server.addProtocol(RPC.RpcKind.RPC_WRITABLE, Foo1.class, new Foo1Impl());
|
||||||
|
server.addProtocol(RPC.RpcKind.RPC_WRITABLE, Bar.class, new BarImpl());
|
||||||
|
server.addProtocol(RPC.RpcKind.RPC_WRITABLE, Mixin.class, new BarImpl());
|
||||||
|
|
||||||
|
|
||||||
|
// Add Protobuf server
|
||||||
|
// Create server side implementation
|
||||||
|
PBServerImpl pbServerImpl = new PBServerImpl();
|
||||||
|
BlockingService service = TestProtobufRpcProto
|
||||||
|
.newReflectiveBlockingService(pbServerImpl);
|
||||||
|
server.addProtocol(RPC.RpcKind.RPC_PROTOCOL_BUFFER, TestRpcService.class,
|
||||||
|
service);
|
||||||
|
server.start();
|
||||||
|
addr = NetUtils.getConnectAddress(server);
|
||||||
|
}
|
||||||
|
|
||||||
@After
|
@After
|
||||||
public void tearDown() throws Exception {
|
public void tearDown() throws Exception {
|
||||||
server.stop();
|
server.stop();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void test1() throws IOException {
|
||||||
|
ProtocolProxy<?> proxy;
|
||||||
|
proxy = RPC.getProtocolProxy(Foo0.class, Foo0.versionID, addr, conf);
|
||||||
|
|
||||||
|
Foo0 foo0 = (Foo0)proxy.getProxy();
|
||||||
|
Assert.assertEquals("Foo0", foo0.ping());
|
||||||
|
|
||||||
|
|
||||||
|
proxy = RPC.getProtocolProxy(Foo1.class, Foo1.versionID, addr, conf);
|
||||||
|
|
||||||
|
|
||||||
|
Foo1 foo1 = (Foo1)proxy.getProxy();
|
||||||
|
Assert.assertEquals("Foo1", foo1.ping());
|
||||||
|
Assert.assertEquals("Foo1", foo1.ping());
|
||||||
|
|
||||||
|
|
||||||
|
proxy = RPC.getProtocolProxy(Bar.class, Foo1.versionID, addr, conf);
|
||||||
|
|
||||||
|
|
||||||
|
Bar bar = (Bar)proxy.getProxy();
|
||||||
|
Assert.assertEquals(99, bar.echo(99));
|
||||||
|
|
||||||
|
// Now test Mixin class method
|
||||||
|
|
||||||
|
Mixin mixin = bar;
|
||||||
|
mixin.hello();
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
// Server does not implement the FooUnimplemented version of protocol Foo.
|
||||||
|
// See that calls to it fail.
|
||||||
|
@Test(expected=IOException.class)
|
||||||
|
public void testNonExistingProtocol() throws IOException {
|
||||||
|
ProtocolProxy<?> proxy;
|
||||||
|
proxy = RPC.getProtocolProxy(FooUnimplemented.class,
|
||||||
|
FooUnimplemented.versionID, addr, conf);
|
||||||
|
|
||||||
|
FooUnimplemented foo = (FooUnimplemented)proxy.getProxy();
|
||||||
|
foo.ping();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* getProtocolVersion of an unimplemented version should return highest version
|
||||||
|
* Similarly getProtocolSignature should work.
|
||||||
|
* @throws IOException
|
||||||
|
*/
|
||||||
|
@Test
|
||||||
|
public void testNonExistingProtocol2() throws IOException {
|
||||||
|
ProtocolProxy<?> proxy;
|
||||||
|
proxy = RPC.getProtocolProxy(FooUnimplemented.class,
|
||||||
|
FooUnimplemented.versionID, addr, conf);
|
||||||
|
|
||||||
|
FooUnimplemented foo = (FooUnimplemented)proxy.getProxy();
|
||||||
|
Assert.assertEquals(Foo1.versionID,
|
||||||
|
foo.getProtocolVersion(RPC.getProtocolName(FooUnimplemented.class),
|
||||||
|
FooUnimplemented.versionID));
|
||||||
|
foo.getProtocolSignature(RPC.getProtocolName(FooUnimplemented.class),
|
||||||
|
FooUnimplemented.versionID, 0);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test(expected=IOException.class)
|
||||||
|
public void testIncorrectServerCreation() throws IOException {
|
||||||
|
new RPC.Builder(conf).setProtocol(Foo1.class).setInstance(new Foo0Impl())
|
||||||
|
.setBindAddress(ADDRESS).setPort(0).setNumHandlers(2).setVerbose(false)
|
||||||
|
.build();
|
||||||
|
}
|
||||||
|
|
||||||
// Now test a PB service - a server hosts both PB and Writable Rpcs.
|
// Now test a PB service - a server hosts both PB and Writable Rpcs.
|
||||||
@Test
|
@Test
|
||||||
public void testPBService() throws Exception {
|
public void testPBService() throws Exception {
|
||||||
|
|
|
TestRPCCallBenchmark.java
@@ -25,6 +25,19 @@ import org.junit.Test;

 public class TestRPCCallBenchmark {

+  @Test(timeout=20000)
+  public void testBenchmarkWithWritable() throws Exception {
+    int rc = ToolRunner.run(new RPCCallBenchmark(),
+        new String[] {
+      "--clientThreads", "30",
+      "--serverThreads", "30",
+      "--time", "5",
+      "--serverReaderThreads", "4",
+      "--messageSize", "1024",
+      "--engine", "writable"});
+    assertEquals(0, rc);
+  }
+
   @Test(timeout=20000)
   public void testBenchmarkWithProto() throws Exception {
     int rc = ToolRunner.run(new RPCCallBenchmark(),
@ -18,19 +18,27 @@
|
||||||
|
|
||||||
package org.apache.hadoop.ipc;
|
package org.apache.hadoop.ipc;
|
||||||
|
|
||||||
import org.apache.commons.logging.Log;
|
import static org.junit.Assert.assertEquals;
|
||||||
import org.apache.commons.logging.LogFactory;
|
import static org.junit.Assert.assertFalse;
|
||||||
import org.apache.hadoop.conf.Configuration;
|
import static org.junit.Assert.fail;
|
||||||
import org.junit.After;
|
|
||||||
import org.junit.Before;
|
|
||||||
import org.junit.Test;
|
|
||||||
|
|
||||||
import java.io.IOException;
|
import java.io.IOException;
|
||||||
import java.lang.reflect.Method;
|
import java.lang.reflect.Method;
|
||||||
import java.net.InetSocketAddress;
|
import java.net.InetSocketAddress;
|
||||||
|
|
||||||
import static org.junit.Assert.assertEquals;
|
import org.junit.Assert;
|
||||||
import static org.junit.Assert.assertFalse;
|
|
||||||
|
import org.apache.commons.logging.Log;
|
||||||
|
import org.apache.commons.logging.LogFactory;
|
||||||
|
import org.apache.hadoop.conf.Configuration;
|
||||||
|
import org.apache.hadoop.ipc.protobuf.ProtocolInfoProtos.GetProtocolSignatureRequestProto;
|
||||||
|
import org.apache.hadoop.ipc.protobuf.ProtocolInfoProtos.GetProtocolSignatureResponseProto;
|
||||||
|
import org.apache.hadoop.ipc.protobuf.ProtocolInfoProtos.ProtocolSignatureProto;
|
||||||
|
import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto.RpcErrorCodeProto;
|
||||||
|
import org.apache.hadoop.net.NetUtils;
|
||||||
|
import org.junit.After;
|
||||||
|
import org.junit.Before;
|
||||||
|
import org.junit.Test;
|
||||||
|
|
||||||
/** Unit test for supporting method-name based compatible RPCs. */
|
/** Unit test for supporting method-name based compatible RPCs. */
|
||||||
public class TestRPCCompatibility {
|
public class TestRPCCompatibility {
|
||||||
|
@ -41,7 +49,7 @@ public class TestRPCCompatibility {
|
||||||
|
|
||||||
public static final Log LOG =
|
public static final Log LOG =
|
||||||
LogFactory.getLog(TestRPCCompatibility.class);
|
LogFactory.getLog(TestRPCCompatibility.class);
|
||||||
|
|
||||||
private static Configuration conf = new Configuration();
|
private static Configuration conf = new Configuration();
|
||||||
|
|
||||||
public interface TestProtocol0 extends VersionedProtocol {
|
public interface TestProtocol0 extends VersionedProtocol {
|
||||||
|
@ -112,21 +120,6 @@ public class TestRPCCompatibility {
|
||||||
@Before
|
@Before
|
||||||
public void setUp() {
|
public void setUp() {
|
||||||
ProtocolSignature.resetCache();
|
ProtocolSignature.resetCache();
|
||||||
|
|
||||||
RPC.setProtocolEngine(conf,
|
|
||||||
TestProtocol0.class, ProtobufRpcEngine.class);
|
|
||||||
|
|
||||||
RPC.setProtocolEngine(conf,
|
|
||||||
TestProtocol1.class, ProtobufRpcEngine.class);
|
|
||||||
|
|
||||||
RPC.setProtocolEngine(conf,
|
|
||||||
TestProtocol2.class, ProtobufRpcEngine.class);
|
|
||||||
|
|
||||||
RPC.setProtocolEngine(conf,
|
|
||||||
TestProtocol3.class, ProtobufRpcEngine.class);
|
|
||||||
|
|
||||||
RPC.setProtocolEngine(conf,
|
|
||||||
TestProtocol4.class, ProtobufRpcEngine.class);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@After
|
@After
|
||||||
|
@ -140,7 +133,117 @@ public class TestRPCCompatibility {
|
||||||
server = null;
|
server = null;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Test // old client vs new server
|
||||||
|
public void testVersion0ClientVersion1Server() throws Exception {
|
||||||
|
// create a server with two handlers
|
||||||
|
TestImpl1 impl = new TestImpl1();
|
||||||
|
server = new RPC.Builder(conf).setProtocol(TestProtocol1.class)
|
||||||
|
.setInstance(impl).setBindAddress(ADDRESS).setPort(0).setNumHandlers(2)
|
||||||
|
.setVerbose(false).build();
|
||||||
|
server.addProtocol(RPC.RpcKind.RPC_WRITABLE, TestProtocol0.class, impl);
|
||||||
|
server.start();
|
||||||
|
addr = NetUtils.getConnectAddress(server);
|
||||||
|
|
||||||
|
proxy = RPC.getProtocolProxy(
|
||||||
|
TestProtocol0.class, TestProtocol0.versionID, addr, conf);
|
||||||
|
|
||||||
|
TestProtocol0 proxy0 = (TestProtocol0)proxy.getProxy();
|
||||||
|
proxy0.ping();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test // old client vs new server
|
||||||
|
public void testVersion1ClientVersion0Server() throws Exception {
|
||||||
|
// create a server with two handlers
|
||||||
|
server = new RPC.Builder(conf).setProtocol(TestProtocol0.class)
|
||||||
|
.setInstance(new TestImpl0()).setBindAddress(ADDRESS).setPort(0)
|
||||||
|
.setNumHandlers(2).setVerbose(false).build();
|
||||||
|
server.start();
|
||||||
|
addr = NetUtils.getConnectAddress(server);
|
||||||
|
|
||||||
|
proxy = RPC.getProtocolProxy(
|
||||||
|
TestProtocol1.class, TestProtocol1.versionID, addr, conf);
|
||||||
|
|
||||||
|
TestProtocol1 proxy1 = (TestProtocol1)proxy.getProxy();
|
||||||
|
proxy1.ping();
|
||||||
|
try {
|
||||||
|
proxy1.echo("hello");
|
||||||
|
fail("Echo should fail");
|
||||||
|
} catch(IOException e) {
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private class Version2Client {
|
||||||
|
|
||||||
|
private TestProtocol2 proxy2;
|
||||||
|
private ProtocolProxy<TestProtocol2> serverInfo;
|
||||||
|
|
||||||
|
private Version2Client() throws IOException {
|
||||||
|
serverInfo = RPC.getProtocolProxy(
|
||||||
|
TestProtocol2.class, TestProtocol2.versionID, addr, conf);
|
||||||
|
proxy2 = serverInfo.getProxy();
|
||||||
|
}
|
||||||
|
|
||||||
|
public int echo(int value) throws IOException, NumberFormatException {
|
||||||
|
if (serverInfo.isMethodSupported("echo", int.class)) {
|
||||||
|
System.out.println("echo int is supported");
|
||||||
|
return -value; // use version 3 echo long
|
||||||
|
} else { // server is version 2
|
||||||
|
System.out.println("echo int is NOT supported");
|
||||||
|
return Integer.parseInt(proxy2.echo(String.valueOf(value)));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
public String echo(String value) throws IOException {
|
||||||
|
return proxy2.echo(value);
|
||||||
|
}
|
||||||
|
|
||||||
|
public void ping() throws IOException {
|
||||||
|
proxy2.ping();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test // Compatible new client & old server
|
||||||
|
public void testVersion2ClientVersion1Server() throws Exception {
|
||||||
|
// create a server with two handlers
|
||||||
|
TestImpl1 impl = new TestImpl1();
|
||||||
|
server = new RPC.Builder(conf).setProtocol(TestProtocol1.class)
|
||||||
|
.setInstance(impl).setBindAddress(ADDRESS).setPort(0).setNumHandlers(2)
|
||||||
|
.setVerbose(false).build();
|
||||||
|
server.addProtocol(RPC.RpcKind.RPC_WRITABLE, TestProtocol0.class, impl);
|
||||||
|
server.start();
|
||||||
|
addr = NetUtils.getConnectAddress(server);
|
||||||
|
|
||||||
|
|
||||||
|
Version2Client client = new Version2Client();
|
||||||
|
client.ping();
|
||||||
|
assertEquals("hello", client.echo("hello"));
|
||||||
|
|
||||||
|
// echo(int) is not supported by server, so returning 3
|
||||||
|
// This verifies that echo(int) and echo(String)'s hash codes are different
|
||||||
|
assertEquals(3, client.echo(3));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test // equal version client and server
|
||||||
|
public void testVersion2ClientVersion2Server() throws Exception {
|
||||||
|
// create a server with two handlers
|
||||||
|
TestImpl2 impl = new TestImpl2();
|
||||||
|
server = new RPC.Builder(conf).setProtocol(TestProtocol2.class)
|
||||||
|
.setInstance(impl).setBindAddress(ADDRESS).setPort(0).setNumHandlers(2)
|
||||||
|
.setVerbose(false).build();
|
||||||
|
server.addProtocol(RPC.RpcKind.RPC_WRITABLE, TestProtocol0.class, impl);
|
||||||
|
server.start();
|
||||||
|
addr = NetUtils.getConnectAddress(server);
|
||||||
|
|
||||||
|
Version2Client client = new Version2Client();
|
||||||
|
|
||||||
|
client.ping();
|
||||||
|
assertEquals("hello", client.echo("hello"));
|
||||||
|
|
||||||
|
// now that echo(int) is supported by the server, echo(int) should return -3
|
||||||
|
assertEquals(-3, client.echo(3));
|
||||||
|
}
|
||||||
|
|
||||||
public interface TestProtocol3 {
|
public interface TestProtocol3 {
|
||||||
int echo(String value);
|
int echo(String value);
|
||||||
int echo(int value);
|
int echo(int value);
|
||||||
|
@ -194,4 +297,97 @@ public class TestRPCCompatibility {
|
||||||
@Override
|
@Override
|
||||||
int echo(int value) throws IOException;
|
int echo(int value) throws IOException;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testVersionMismatch() throws IOException {
|
||||||
|
server = new RPC.Builder(conf).setProtocol(TestProtocol2.class)
|
||||||
|
.setInstance(new TestImpl2()).setBindAddress(ADDRESS).setPort(0)
|
||||||
|
.setNumHandlers(2).setVerbose(false).build();
|
||||||
|
server.start();
|
||||||
|
addr = NetUtils.getConnectAddress(server);
|
||||||
|
|
||||||
|
TestProtocol4 proxy = RPC.getProxy(TestProtocol4.class,
|
||||||
|
TestProtocol4.versionID, addr, conf);
|
||||||
|
try {
|
||||||
|
proxy.echo(21);
|
||||||
|
fail("The call must throw VersionMismatch exception");
|
||||||
|
} catch (RemoteException ex) {
|
||||||
|
Assert.assertEquals(RPC.VersionMismatch.class.getName(),
|
||||||
|
ex.getClassName());
|
||||||
|
Assert.assertTrue(ex.getErrorCode().equals(
|
||||||
|
RpcErrorCodeProto.ERROR_RPC_VERSION_MISMATCH));
|
||||||
|
} catch (IOException ex) {
|
||||||
|
fail("Expected version mismatch but got " + ex);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testIsMethodSupported() throws IOException {
|
||||||
|
server = new RPC.Builder(conf).setProtocol(TestProtocol2.class)
|
||||||
|
.setInstance(new TestImpl2()).setBindAddress(ADDRESS).setPort(0)
|
||||||
|
.setNumHandlers(2).setVerbose(false).build();
|
||||||
|
server.start();
|
||||||
|
addr = NetUtils.getConnectAddress(server);
|
||||||
|
|
||||||
|
TestProtocol2 proxy = RPC.getProxy(TestProtocol2.class,
|
||||||
|
TestProtocol2.versionID, addr, conf);
|
||||||
|
boolean supported = RpcClientUtil.isMethodSupported(proxy,
|
||||||
|
TestProtocol2.class, RPC.RpcKind.RPC_WRITABLE,
|
||||||
|
RPC.getProtocolVersion(TestProtocol2.class), "echo");
|
||||||
|
Assert.assertTrue(supported);
|
||||||
|
supported = RpcClientUtil.isMethodSupported(proxy,
|
||||||
|
TestProtocol2.class, RPC.RpcKind.RPC_PROTOCOL_BUFFER,
|
||||||
|
RPC.getProtocolVersion(TestProtocol2.class), "echo");
|
||||||
|
Assert.assertFalse(supported);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Verify that ProtocolMetaInfoServerSideTranslatorPB correctly looks up
|
||||||
|
* the server registry to extract protocol signatures and versions.
|
||||||
|
*/
|
||||||
|
@Test
|
||||||
|
public void testProtocolMetaInfoSSTranslatorPB() throws Exception {
|
||||||
|
TestImpl1 impl = new TestImpl1();
|
||||||
|
server = new RPC.Builder(conf).setProtocol(TestProtocol1.class)
|
||||||
|
.setInstance(impl).setBindAddress(ADDRESS).setPort(0).setNumHandlers(2)
|
||||||
|
.setVerbose(false).build();
|
||||||
|
server.addProtocol(RPC.RpcKind.RPC_WRITABLE, TestProtocol0.class, impl);
|
||||||
|
server.start();
|
||||||
|
|
||||||
|
ProtocolMetaInfoServerSideTranslatorPB xlator =
|
||||||
|
new ProtocolMetaInfoServerSideTranslatorPB(server);
|
||||||
|
|
||||||
|
GetProtocolSignatureResponseProto resp = xlator.getProtocolSignature(
|
||||||
|
null,
|
||||||
|
createGetProtocolSigRequestProto(TestProtocol1.class,
|
||||||
|
RPC.RpcKind.RPC_PROTOCOL_BUFFER));
|
||||||
|
//No signatures should be found
|
||||||
|
Assert.assertEquals(0, resp.getProtocolSignatureCount());
|
||||||
|
resp = xlator.getProtocolSignature(
|
||||||
|
null,
|
||||||
|
createGetProtocolSigRequestProto(TestProtocol1.class,
|
||||||
|
RPC.RpcKind.RPC_WRITABLE));
|
||||||
|
Assert.assertEquals(1, resp.getProtocolSignatureCount());
|
||||||
|
ProtocolSignatureProto sig = resp.getProtocolSignatureList().get(0);
|
||||||
|
Assert.assertEquals(TestProtocol1.versionID, sig.getVersion());
|
||||||
|
boolean found = false;
|
||||||
|
int expected = ProtocolSignature.getFingerprint(TestProtocol1.class
|
||||||
|
.getMethod("echo", String.class));
|
||||||
|
for (int m : sig.getMethodsList()) {
|
||||||
|
if (expected == m) {
|
||||||
|
found = true;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Assert.assertTrue(found);
|
||||||
|
}
|
||||||
|
|
||||||
|
private GetProtocolSignatureRequestProto createGetProtocolSigRequestProto(
|
||||||
|
Class<?> protocol, RPC.RpcKind rpcKind) {
|
||||||
|
GetProtocolSignatureRequestProto.Builder builder =
|
||||||
|
GetProtocolSignatureRequestProto.newBuilder();
|
||||||
|
builder.setProtocol(protocol.getName());
|
||||||
|
builder.setRpcKind(rpcKind.toString());
|
||||||
|
return builder.build();
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
|
TestRPCWaitForProxy.java
@@ -18,6 +18,8 @@
 package org.apache.hadoop.ipc;

 import org.apache.hadoop.conf.Configuration;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.*;
+import org.apache.hadoop.ipc.TestRPC.TestProtocol;
 import org.junit.Assert;
 import org.junit.Test;
 import org.slf4j.Logger;
@@ -28,13 +30,11 @@ import java.net.ConnectException;
 import java.net.InetSocketAddress;
 import java.nio.channels.ClosedByInterruptException;

-import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY;
-import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY;
-
 /**
  * tests that the proxy can be interrupted
  */
-public class TestRPCWaitForProxy extends TestRpcBase {
+public class TestRPCWaitForProxy extends Assert {
+  private static final String ADDRESS = "0.0.0.0";
   private static final Logger
       LOG = LoggerFactory.getLogger(TestRPCWaitForProxy.class);

@@ -46,15 +46,14 @@ public class TestRPCWaitForProxy extends TestRpcBase {
    *
    * @throws Throwable any exception other than that which was expected
    */
-  @Test(timeout = 50000)
+  @Test(timeout = 10000)
   public void testWaitForProxy() throws Throwable {
     RpcThread worker = new RpcThread(0);
     worker.start();
     worker.join();
     Throwable caught = worker.getCaught();
-    Throwable cause = caught.getCause();
-    Assert.assertNotNull("No exception was raised", cause);
-    if (!(cause instanceof ConnectException)) {
+    assertNotNull("No exception was raised", caught);
+    if (!(caught instanceof ConnectException)) {
       throw caught;
     }
   }
@@ -70,11 +69,11 @@ public class TestRPCWaitForProxy extends TestRpcBase {
     RpcThread worker = new RpcThread(100);
     worker.start();
     Thread.sleep(1000);
-    Assert.assertTrue("worker hasn't started", worker.waitStarted);
+    assertTrue("worker hasn't started", worker.waitStarted);
     worker.interrupt();
     worker.join();
     Throwable caught = worker.getCaught();
-    Assert.assertNotNull("No exception was raised", caught);
+    assertNotNull("No exception was raised", caught);
     // looking for the root cause here, which can be wrapped
     // as part of the NetUtils work. Having this test look
     // a the type of exception there would be brittle to improvements
@@ -83,8 +82,6 @@ public class TestRPCWaitForProxy extends TestRpcBase {
       if (cause == null) {
         // no inner cause, use outer exception as root cause.
         cause = caught;
-      } else if (cause.getCause() != null) {
-        cause = cause.getCause();
       }
       if (!(cause instanceof InterruptedIOException)
           && !(cause instanceof ClosedByInterruptException)) {
@@ -115,16 +112,12 @@ public class TestRPCWaitForProxy extends TestRpcBase {
             IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
             connectRetries);
         waitStarted = true;
-        short invalidPort = 20;
-        InetSocketAddress invalidAddress = new InetSocketAddress(ADDRESS,
-            invalidPort);
-        TestRpcBase.TestRpcService proxy = RPC.getProxy(
-            TestRpcBase.TestRpcService.class,
-            1L, invalidAddress, conf);
-        // Test echo method
-        proxy.echo(null, newEchoRequest("hello"));
+        TestProtocol proxy = RPC.waitForProxy(TestProtocol.class,
+            TestProtocol.versionID,
+            new InetSocketAddress(ADDRESS, 20),
+            config,
+            15000L);
+        proxy.echo("");

       } catch (Throwable throwable) {
         caught = throwable;
       }
TestRpcBase.java
@@ -112,8 +112,7 @@ public class TestRpcBase {
     return setupTestServer(builder);
   }

-  protected static RPC.Server setupTestServer(
-      RPC.Builder builder) throws IOException {
+  protected static RPC.Server setupTestServer(RPC.Builder builder) throws IOException {
     RPC.Server server = builder.build();

     server.start();
@@ -176,21 +175,17 @@ public class TestRpcBase {
     public TestTokenIdentifier() {
       this(new Text(), new Text());
     }
-
     public TestTokenIdentifier(Text tokenid) {
       this(tokenid, new Text());
     }
-
     public TestTokenIdentifier(Text tokenid, Text realUser) {
       this.tokenid = tokenid == null ? new Text() : tokenid;
       this.realUser = realUser == null ? new Text() : realUser;
     }
-
     @Override
     public Text getKind() {
       return KIND_NAME;
     }
-
     @Override
     public UserGroupInformation getUser() {
       if (realUser.toString().isEmpty()) {
@@ -208,7 +203,6 @@ public class TestRpcBase {
       tokenid.readFields(in);
       realUser.readFields(in);
     }
-
     @Override
     public void write(DataOutput out) throws IOException {
       tokenid.write(out);
@@ -240,7 +234,7 @@ public class TestRpcBase {
     @SuppressWarnings("unchecked")
     @Override
     public Token<TestTokenIdentifier> selectToken(Text service,
        Collection<Token<? extends TokenIdentifier>> tokens) {
      if (service == null) {
        return null;
      }
@@ -394,17 +388,19 @@ public class TestRpcBase {
     }

     @Override
-    public TestProtos.UserResponseProto getAuthUser(
+    public TestProtos.AuthUserResponseProto getAuthUser(
         RpcController controller, TestProtos.EmptyRequestProto request)
         throws ServiceException {
-      UserGroupInformation authUser;
+      UserGroupInformation authUser = null;
       try {
        authUser = UserGroupInformation.getCurrentUser();
       } catch (IOException e) {
         throw new ServiceException(e);
       }

-      return newUserResponse(authUser.getUserName());
+      return TestProtos.AuthUserResponseProto.newBuilder()
+          .setAuthUser(authUser.getUserName())
+          .build();
     }

     @Override
@@ -436,34 +432,6 @@ public class TestRpcBase {

       return TestProtos.EmptyResponseProto.newBuilder().build();
     }
-
-    @Override
-    public TestProtos.UserResponseProto getCurrentUser(
-        RpcController controller,
-        TestProtos.EmptyRequestProto request) throws ServiceException {
-      String user;
-      try {
-        user = UserGroupInformation.getCurrentUser().toString();
-      } catch (IOException e) {
-        throw new ServiceException("Failed to get current user", e);
-      }
-
-      return newUserResponse(user);
-    }
-
-    @Override
-    public TestProtos.UserResponseProto getServerRemoteUser(
-        RpcController controller,
-        TestProtos.EmptyRequestProto request) throws ServiceException {
-      String serverRemoteUser = Server.getRemoteUser().toString();
-      return newUserResponse(serverRemoteUser);
-    }
-
-    private TestProtos.UserResponseProto newUserResponse(String user) {
-      return TestProtos.UserResponseProto.newBuilder()
-          .setUser(user)
-          .build();
-    }
   }

   protected static TestProtos.EmptyRequestProto newEmptyRequest() {
@@ -510,4 +478,8 @@ public class TestRpcBase {
     }
     return null;
   }

+  protected static String convert(TestProtos.AuthUserResponseProto response) {
+    return response.getAuthUser();
+  }
 }
@@ -45,55 +45,30 @@ import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 import org.junit.runners.Parameterized.Parameters;

-import javax.security.auth.callback.Callback;
+import javax.security.auth.callback.*;
-import javax.security.auth.callback.CallbackHandler;
+import javax.security.sasl.*;
-import javax.security.auth.callback.NameCallback;
-import javax.security.auth.callback.PasswordCallback;
-import javax.security.auth.callback.UnsupportedCallbackException;
-import javax.security.sasl.AuthorizeCallback;
-import javax.security.sasl.Sasl;
-import javax.security.sasl.SaslClient;
-import javax.security.sasl.SaslException;
-import javax.security.sasl.SaslServer;
 import java.io.IOException;
 import java.lang.annotation.Annotation;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.security.PrivilegedExceptionAction;
 import java.security.Security;
-import java.util.ArrayList;
+import java.util.*;
-import java.util.Collection;
+import java.util.concurrent.*;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.regex.Pattern;

 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_RPC_PROTECTION;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION;
-import static org.apache.hadoop.security.SaslRpcServer.AuthMethod.KERBEROS;
+import static org.apache.hadoop.security.SaslRpcServer.AuthMethod.*;
-import static org.apache.hadoop.security.SaslRpcServer.AuthMethod.SIMPLE;
+import static org.junit.Assert.*;
-import static org.apache.hadoop.security.SaslRpcServer.AuthMethod.TOKEN;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNotSame;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;

 /** Unit tests for using Sasl over RPC. */
 @RunWith(Parameterized.class)
 public class TestSaslRPC extends TestRpcBase {
 @Parameters
 public static Collection<Object[]> data() {
-Collection<Object[]> params = new ArrayList<>();
+Collection<Object[]> params = new ArrayList<Object[]>();
 for (QualityOfProtection qop : QualityOfProtection.values()) {
 params.add(new Object[]{ new QualityOfProtection[]{qop},qop, null });
 }
@@ -139,7 +114,7 @@ public class TestSaslRPC extends TestRpcBase {
 NONE(),
 VALID(),
 INVALID(),
-OTHER()
+OTHER();
 }

 @BeforeClass
@@ -255,7 +230,7 @@ public class TestSaslRPC extends TestRpcBase {
 final Server server = setupTestServer(conf, 5, sm);
 doDigestRpc(server, sm);
 } finally {
-SecurityUtil.setSecurityInfoProviders();
+SecurityUtil.setSecurityInfoProviders(new SecurityInfo[0]);
 }
 }

@@ -284,7 +259,7 @@ public class TestSaslRPC extends TestRpcBase {
 addr = NetUtils.getConnectAddress(server);
 TestTokenIdentifier tokenId = new TestTokenIdentifier(new Text(current
 .getUserName()));
-Token<TestTokenIdentifier> token = new Token<>(tokenId, sm);
+Token<TestTokenIdentifier> token = new Token<TestTokenIdentifier>(tokenId, sm);
 SecurityUtil.setTokenService(token, addr);
 current.addToken(token);

@@ -321,8 +296,8 @@ public class TestSaslRPC extends TestRpcBase {

 // set doPing to true
 newConf.setBoolean(CommonConfigurationKeys.IPC_CLIENT_PING_KEY, true);
-ConnectionId remoteId = ConnectionId.getConnectionId(
-new InetSocketAddress(0), TestRpcService.class, null, 0, null, newConf);
+ConnectionId remoteId = ConnectionId.getConnectionId(new InetSocketAddress(0),
+TestRpcService.class, null, 0, null, newConf);
 assertEquals(CommonConfigurationKeys.IPC_PING_INTERVAL_DEFAULT,
 remoteId.getPingInterval());
 // set doPing to false
@@ -831,13 +806,13 @@ public class TestSaslRPC extends TestRpcBase {
 final TestTokenSecretManager sm = new TestTokenSecretManager();
 boolean useSecretManager = (serverAuth != SIMPLE);
 if (enableSecretManager != null) {
-useSecretManager &= enableSecretManager;
+useSecretManager &= enableSecretManager.booleanValue();
 }
 if (forceSecretManager != null) {
-useSecretManager |= forceSecretManager;
+useSecretManager |= forceSecretManager.booleanValue();
 }
 final SecretManager<?> serverSm = useSecretManager ? sm : null;

 Server server = serverUgi.doAs(new PrivilegedExceptionAction<Server>() {
 @Override
 public Server run() throws IOException {
@@ -892,13 +867,13 @@ public class TestSaslRPC extends TestRpcBase {
 proxy.ping(null, newEmptyRequest());
 // make sure the other side thinks we are who we said we are!!!
 assertEquals(clientUgi.getUserName(),
-proxy.getAuthUser(null, newEmptyRequest()).getUser());
+convert(proxy.getAuthUser(null, newEmptyRequest())));
 AuthMethod authMethod =
 convert(proxy.getAuthMethod(null, newEmptyRequest()));
 // verify sasl completed with correct QOP
 assertEquals((authMethod != SIMPLE) ? expectedQop.saslQop : null,
 RPC.getConnectionIdForProxy(proxy).getSaslQop());
-return authMethod != null ? authMethod.toString() : null;
+return authMethod.toString();
 } catch (ServiceException se) {
 if (se.getCause() instanceof RemoteException) {
 throw (RemoteException) se.getCause();
@@ -923,18 +898,21 @@ public class TestSaslRPC extends TestRpcBase {
 String actual) {
 assertEquals(expect.toString(), actual);
 }

-private static void assertAuthEquals(Pattern expect, String actual) {
+private static void assertAuthEquals(Pattern expect,
+String actual) {
 // this allows us to see the regexp and the value it didn't match
 if (!expect.matcher(actual).matches()) {
-fail(); // it failed
+assertEquals(expect, actual); // it failed
+} else {
+assertTrue(true); // it matched
 }
 }

 /*
 * Class used to test overriding QOP values using SaslPropertiesResolver
 */
-static class AuthSaslPropertiesResolver extends SaslPropertiesResolver {
+static class AuthSaslPropertiesResolver extends SaslPropertiesResolver{

 @Override
 public Map<String, String> getServerProperties(InetAddress address) {
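An aside on the assertAuthEquals(Pattern, ...) change above: replacing the bare fail() with assertEquals(expect, actual) makes a mismatch report both the regexp and the string that failed to match. A minimal standalone sketch of the same trick (hypothetical helper, JUnit 4 assumed, not part of the patch):

    import static org.junit.Assert.assertEquals;
    import java.util.regex.Pattern;

    final class PatternAsserts {
      // Fails with a message that shows the expected regexp and the actual value,
      // instead of an uninformative bare fail().
      static void assertMatches(Pattern expected, String actual) {
        if (!expected.matcher(actual).matches()) {
          assertEquals(expected, actual); // always fails, but prints both sides
        }
      }
    }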
@@ -943,7 +921,7 @@ public class TestSaslRPC extends TestRpcBase {
 return newPropertes;
 }
 }

 public static void main(String[] args) throws Exception {
 System.out.println("Testing Kerberos authentication over RPC");
 if (args.length != 2) {
@@ -17,35 +17,40 @@
 */
 package org.apache.hadoop.security;

-import com.google.protobuf.ServiceException;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.ipc.ProtobufRpcEngine;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.ipc.Server;
-import org.apache.hadoop.ipc.TestRpcBase;
-import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
-import org.apache.hadoop.security.authorize.DefaultImpersonationProvider;
-import org.apache.hadoop.security.authorize.ProxyUsers;
-import org.apache.hadoop.security.token.Token;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;

 import java.io.IOException;
 import java.net.InetAddress;
+import java.net.InetSocketAddress;
 import java.net.NetworkInterface;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.Enumeration;

+import org.junit.Assert;

+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.ipc.ProtocolSignature;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ipc.Server;
+import org.apache.hadoop.ipc.VersionedProtocol;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
+import org.apache.hadoop.security.authorize.DefaultImpersonationProvider;
+import org.apache.hadoop.security.authorize.ProxyUsers;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenInfo;
+import org.junit.Before;
+import org.junit.Test;
+import org.apache.hadoop.ipc.TestRpcBase.TestTokenSecretManager;
+import org.apache.hadoop.ipc.TestRpcBase.TestTokenIdentifier;
+import org.apache.hadoop.ipc.TestRpcBase.TestTokenSelector;
+import org.apache.commons.logging.*;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;

 /**
-* Test do as effective user.
+*
 */
-public class TestDoAsEffectiveUser extends TestRpcBase {
+public class TestDoAsEffectiveUser {
 final private static String REAL_USER_NAME = "realUser1@HADOOP.APACHE.ORG";
 final private static String REAL_USER_SHORT_NAME = "realUser1";
 final private static String PROXY_USER_NAME = "proxyUser";
@@ -53,8 +58,8 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
 final private static String GROUP2_NAME = "group2";
 final private static String[] GROUP_NAMES = new String[] { GROUP1_NAME,
 GROUP2_NAME };
+private static final String ADDRESS = "0.0.0.0";
-private TestRpcService client;
+private TestProtocol proxy;
 private static final Configuration masterConf = new Configuration();


@@ -77,7 +82,7 @@ public class TestDoAsEffectiveUser extends TestRpcBase {

 private void configureSuperUserIPAddresses(Configuration conf,
 String superUserShortName) throws IOException {
-ArrayList<String> ipList = new ArrayList<>();
+ArrayList<String> ipList = new ArrayList<String>();
 Enumeration<NetworkInterface> netInterfaceList = NetworkInterface
 .getNetworkInterfaces();
 while (netInterfaceList.hasMoreElements()) {
@@ -125,19 +130,50 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
 curUGI.toString());
 }

-private void checkRemoteUgi(final UserGroupInformation ugi,
+@TokenInfo(TestTokenSelector.class)
-final Configuration conf) throws Exception {
+public interface TestProtocol extends VersionedProtocol {
+public static final long versionID = 1L;

+String aMethod() throws IOException;
+String getServerRemoteUser() throws IOException;
+}

+public class TestImpl implements TestProtocol {

+@Override
+public String aMethod() throws IOException {
+return UserGroupInformation.getCurrentUser().toString();
+}

+@Override
+public String getServerRemoteUser() throws IOException {
+return Server.getRemoteUser().toString();
+}

+@Override
+public long getProtocolVersion(String protocol, long clientVersion)
+throws IOException {
+return TestProtocol.versionID;
+}

+@Override
+public ProtocolSignature getProtocolSignature(String protocol,
+long clientVersion, int clientMethodsHash) throws IOException {
+return new ProtocolSignature(TestProtocol.versionID, null);
+}
+}

+private void checkRemoteUgi(final Server server,
+final UserGroupInformation ugi, final Configuration conf)
+throws Exception {
 ugi.doAs(new PrivilegedExceptionAction<Void>() {
 @Override
-public Void run() throws ServiceException {
+public Void run() throws IOException {
-client = getClient(addr, conf);
+proxy = RPC.getProxy(
-String currentUser = client.getCurrentUser(null,
+TestProtocol.class, TestProtocol.versionID,
-newEmptyRequest()).getUser();
+NetUtils.getConnectAddress(server), conf);
-String serverRemoteUser = client.getServerRemoteUser(null,
+Assert.assertEquals(ugi.toString(), proxy.aMethod());
-newEmptyRequest()).getUser();
+Assert.assertEquals(ugi.toString(), proxy.getServerRemoteUser());

-Assert.assertEquals(ugi.toString(), currentUser);
-Assert.assertEquals(ugi.toString(), serverRemoteUser);
 return null;
 }
 });
@@ -149,27 +185,29 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
 conf.setStrings(DefaultImpersonationProvider.getTestProvider().
 getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME), "group1");
 configureSuperUserIPAddresses(conf, REAL_USER_SHORT_NAME);
-// Set RPC engine to protobuf RPC engine
+Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
-RPC.setProtocolEngine(conf, TestRpcService.class,
+.setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
-ProtobufRpcEngine.class);
+.setNumHandlers(5).setVerbose(true).build();
-UserGroupInformation.setConfiguration(conf);
-final Server server = setupTestServer(conf, 5);

 refreshConf(conf);
 try {
+server.start();

 UserGroupInformation realUserUgi = UserGroupInformation
 .createRemoteUser(REAL_USER_NAME);
-checkRemoteUgi(realUserUgi, conf);
+checkRemoteUgi(server, realUserUgi, conf);

-UserGroupInformation proxyUserUgi =
+UserGroupInformation proxyUserUgi = UserGroupInformation.createProxyUserForTesting(
-UserGroupInformation.createProxyUserForTesting(
 PROXY_USER_NAME, realUserUgi, GROUP_NAMES);
-checkRemoteUgi(proxyUserUgi, conf);
+checkRemoteUgi(server, proxyUserUgi, conf);
 } catch (Exception e) {
 e.printStackTrace();
 Assert.fail();
 } finally {
-stop(server, client);
+server.stop();
+if (proxy != null) {
+RPC.stopProxy(proxy);
+}
 }
 }

@@ -180,25 +218,29 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
 conf.setStrings(DefaultImpersonationProvider.getTestProvider().
 getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME),
 "group1");
-RPC.setProtocolEngine(conf, TestRpcService.class,
+Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
-ProtobufRpcEngine.class);
+.setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
-UserGroupInformation.setConfiguration(conf);
+.setNumHandlers(2).setVerbose(false).build();
-final Server server = setupTestServer(conf, 5);

 refreshConf(conf);
 try {
+server.start();

 UserGroupInformation realUserUgi = UserGroupInformation
 .createRemoteUser(REAL_USER_NAME);
-checkRemoteUgi(realUserUgi, conf);
+checkRemoteUgi(server, realUserUgi, conf);

 UserGroupInformation proxyUserUgi = UserGroupInformation
 .createProxyUserForTesting(PROXY_USER_NAME, realUserUgi, GROUP_NAMES);
-checkRemoteUgi(proxyUserUgi, conf);
+checkRemoteUgi(server, proxyUserUgi, conf);
 } catch (Exception e) {
 e.printStackTrace();
 Assert.fail();
 } finally {
-stop(server, client);
+server.stop();
+if (proxy != null) {
+RPC.stopProxy(proxy);
+}
 }
 }

@@ -214,14 +256,17 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
 conf.setStrings(DefaultImpersonationProvider.getTestProvider().
 getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME),
 "group1");
-RPC.setProtocolEngine(conf, TestRpcService.class,
+Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
-ProtobufRpcEngine.class);
+.setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
-UserGroupInformation.setConfiguration(conf);
+.setNumHandlers(2).setVerbose(false).build();
-final Server server = setupTestServer(conf, 5);

 refreshConf(conf);

 try {
+server.start();

+final InetSocketAddress addr = NetUtils.getConnectAddress(server);

 UserGroupInformation realUserUgi = UserGroupInformation
 .createRemoteUser(REAL_USER_NAME);

@@ -230,10 +275,11 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
 String retVal = proxyUserUgi
 .doAs(new PrivilegedExceptionAction<String>() {
 @Override
-public String run() throws ServiceException {
+public String run() throws IOException {
-client = getClient(addr, conf);
+proxy = RPC.getProxy(TestProtocol.class,
-return client.getCurrentUser(null,
+TestProtocol.versionID, addr, conf);
-newEmptyRequest()).getUser();
+String ret = proxy.aMethod();
+return ret;
 }
 });

@@ -241,7 +287,10 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
 } catch (Exception e) {
 e.printStackTrace();
 } finally {
-stop(server, client);
+server.stop();
+if (proxy != null) {
+RPC.stopProxy(proxy);
+}
 }
 }

@@ -250,14 +299,17 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
 final Configuration conf = new Configuration();
 conf.setStrings(DefaultImpersonationProvider.getTestProvider().
 getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME), "group1");
-RPC.setProtocolEngine(conf, TestRpcService.class,
+Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
-ProtobufRpcEngine.class);
+.setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
-UserGroupInformation.setConfiguration(conf);
+.setNumHandlers(2).setVerbose(false).build();
-final Server server = setupTestServer(conf, 2);

 refreshConf(conf);

 try {
+server.start();

+final InetSocketAddress addr = NetUtils.getConnectAddress(server);

 UserGroupInformation realUserUgi = UserGroupInformation
 .createRemoteUser(REAL_USER_NAME);

@@ -266,10 +318,11 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
 String retVal = proxyUserUgi
 .doAs(new PrivilegedExceptionAction<String>() {
 @Override
-public String run() throws ServiceException {
+public String run() throws IOException {
-client = getClient(addr, conf);
+proxy = (TestProtocol) RPC.getProxy(TestProtocol.class,
-return client.getCurrentUser(null,
+TestProtocol.versionID, addr, conf);
-newEmptyRequest()).getUser();
+String ret = proxy.aMethod();
+return ret;
 }
 });

@@ -277,7 +330,10 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
 } catch (Exception e) {
 e.printStackTrace();
 } finally {
-stop(server, client);
+server.stop();
+if (proxy != null) {
+RPC.stopProxy(proxy);
+}
 }
 }

@@ -285,12 +341,15 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
 public void testRealUserGroupNotSpecified() throws IOException {
 final Configuration conf = new Configuration();
 configureSuperUserIPAddresses(conf, REAL_USER_SHORT_NAME);
-RPC.setProtocolEngine(conf, TestRpcService.class,
+Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
-ProtobufRpcEngine.class);
+.setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
-UserGroupInformation.setConfiguration(conf);
+.setNumHandlers(2).setVerbose(false).build();
-final Server server = setupTestServer(conf, 2);

 try {
+server.start();

+final InetSocketAddress addr = NetUtils.getConnectAddress(server);

 UserGroupInformation realUserUgi = UserGroupInformation
 .createRemoteUser(REAL_USER_NAME);

@@ -299,10 +358,11 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
 String retVal = proxyUserUgi
 .doAs(new PrivilegedExceptionAction<String>() {
 @Override
-public String run() throws ServiceException {
+public String run() throws IOException {
-client = getClient(addr, conf);
+proxy = (TestProtocol) RPC.getProxy(TestProtocol.class,
-return client.getCurrentUser(null,
+TestProtocol.versionID, addr, conf);
-newEmptyRequest()).getUser();
+String ret = proxy.aMethod();
+return ret;
 }
 });

@@ -310,7 +370,10 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
 } catch (Exception e) {
 e.printStackTrace();
 } finally {
-stop(server, client);
+server.stop();
+if (proxy != null) {
+RPC.stopProxy(proxy);
+}
 }
 }

@@ -321,14 +384,17 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
 conf.setStrings(DefaultImpersonationProvider.getTestProvider().
 getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME),
 "group3");
-RPC.setProtocolEngine(conf, TestRpcService.class,
+Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
-ProtobufRpcEngine.class);
+.setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
-UserGroupInformation.setConfiguration(conf);
+.setNumHandlers(2).setVerbose(false).build();
-final Server server = setupTestServer(conf, 2);

 refreshConf(conf);

 try {
+server.start();

+final InetSocketAddress addr = NetUtils.getConnectAddress(server);

 UserGroupInformation realUserUgi = UserGroupInformation
 .createRemoteUser(REAL_USER_NAME);

@@ -337,10 +403,11 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
 String retVal = proxyUserUgi
 .doAs(new PrivilegedExceptionAction<String>() {
 @Override
-public String run() throws ServiceException {
+public String run() throws IOException {
-client = getClient(addr, conf);
+proxy = RPC.getProxy(TestProtocol.class,
-return client.getCurrentUser(null,
+TestProtocol.versionID, addr, conf);
-newEmptyRequest()).getUser();
+String ret = proxy.aMethod();
+return ret;
 }
 });

@@ -348,7 +415,10 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
 } catch (Exception e) {
 e.printStackTrace();
 } finally {
-stop(server, client);
+server.stop();
+if (proxy != null) {
+RPC.stopProxy(proxy);
+}
 }
 }

@@ -362,17 +432,20 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
 final Configuration conf = new Configuration(masterConf);
 TestTokenSecretManager sm = new TestTokenSecretManager();
 SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, conf);
-RPC.setProtocolEngine(conf, TestRpcService.class,
-ProtobufRpcEngine.class);
 UserGroupInformation.setConfiguration(conf);
-final Server server = setupTestServer(conf, 5, sm);
+final Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
+.setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
+.setNumHandlers(5).setVerbose(true).setSecretManager(sm).build();

+server.start();

 final UserGroupInformation current = UserGroupInformation
 .createRemoteUser(REAL_USER_NAME);

+final InetSocketAddress addr = NetUtils.getConnectAddress(server);
 TestTokenIdentifier tokenId = new TestTokenIdentifier(new Text(current
 .getUserName()), new Text("SomeSuperUser"));
-Token<TestTokenIdentifier> token = new Token<>(tokenId,
+Token<TestTokenIdentifier> token = new Token<TestTokenIdentifier>(tokenId,
 sm);
 SecurityUtil.setTokenService(token, addr);
 UserGroupInformation proxyUserUgi = UserGroupInformation
@@ -380,19 +453,23 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
 proxyUserUgi.addToken(token);

 refreshConf(conf);

 String retVal = proxyUserUgi.doAs(new PrivilegedExceptionAction<String>() {
 @Override
 public String run() throws Exception {
 try {
-client = getClient(addr, conf);
+proxy = RPC.getProxy(TestProtocol.class,
-return client.getCurrentUser(null,
+TestProtocol.versionID, addr, conf);
-newEmptyRequest()).getUser();
+String ret = proxy.aMethod();
+return ret;
 } catch (Exception e) {
 e.printStackTrace();
 throw e;
 } finally {
-stop(server, client);
+server.stop();
+if (proxy != null) {
+RPC.stopProxy(proxy);
+}
 }
 }
 });
@@ -409,34 +486,42 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
 TestTokenSecretManager sm = new TestTokenSecretManager();
 final Configuration newConf = new Configuration(masterConf);
 SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, newConf);
-// Set RPC engine to protobuf RPC engine
-RPC.setProtocolEngine(newConf, TestRpcService.class,
-ProtobufRpcEngine.class);
 UserGroupInformation.setConfiguration(newConf);
-final Server server = setupTestServer(newConf, 5, sm);
+final Server server = new RPC.Builder(newConf)
+.setProtocol(TestProtocol.class).setInstance(new TestImpl())
+.setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true)
+.setSecretManager(sm).build();

+server.start();

 final UserGroupInformation current = UserGroupInformation
 .createUserForTesting(REAL_USER_NAME, GROUP_NAMES);

 refreshConf(newConf);

+final InetSocketAddress addr = NetUtils.getConnectAddress(server);
 TestTokenIdentifier tokenId = new TestTokenIdentifier(new Text(current
 .getUserName()), new Text("SomeSuperUser"));
-Token<TestTokenIdentifier> token = new Token<>(tokenId, sm);
+Token<TestTokenIdentifier> token = new Token<TestTokenIdentifier>(tokenId,
+sm);
 SecurityUtil.setTokenService(token, addr);
 current.addToken(token);
 String retVal = current.doAs(new PrivilegedExceptionAction<String>() {
 @Override
 public String run() throws Exception {
 try {
-client = getClient(addr, newConf);
+proxy = RPC.getProxy(TestProtocol.class,
-return client.getCurrentUser(null,
+TestProtocol.versionID, addr, newConf);
-newEmptyRequest()).getUser();
+String ret = proxy.aMethod();
+return ret;
 } catch (Exception e) {
 e.printStackTrace();
 throw e;
 } finally {
-stop(server, client);
+server.stop();
+if (proxy != null) {
+RPC.stopProxy(proxy);
+}
 }
 }
 });
@@ -20,7 +20,6 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.ipc.TestRpcBase.TestTokenIdentifier;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
 import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
@@ -29,11 +28,7 @@ import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.StringUtils;
-import org.junit.After;
+import org.junit.*;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;

 import javax.security.auth.Subject;
 import javax.security.auth.kerberos.KerberosPrincipal;
@@ -55,22 +50,9 @@ import java.util.Set;

 import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_USER_GROUP_METRICS_PERCENTILES_INTERVALS;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTH_TO_LOCAL;
-import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
+import static org.apache.hadoop.ipc.TestSaslRPC.*;
-import static org.apache.hadoop.test.MetricsAsserts.assertCounterGt;
+import static org.apache.hadoop.test.MetricsAsserts.*;
-import static org.apache.hadoop.test.MetricsAsserts.assertGaugeGt;
+import static org.junit.Assert.*;
-import static org.apache.hadoop.test.MetricsAsserts.assertQuantileGauges;
-import static org.apache.hadoop.test.MetricsAsserts.getDoubleGauge;
-import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNotSame;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertSame;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;

@@ -127,7 +109,7 @@ public class TestUserGroupInformation {
 UserGroupInformation.setLoginUser(null);
 }

-@Test(timeout = 30000)
+@Test (timeout = 30000)
 public void testSimpleLogin() throws IOException {
 tryLoginAuthenticationMethod(AuthenticationMethod.SIMPLE, true);
 }
@@ -91,7 +91,6 @@ public class TestNodeHealthScriptRunner {
 public void testNodeHealthScript() throws Exception {
 String errorScript = "echo ERROR\n echo \"Tracker not healthy\"";
 String normalScript = "echo \"I am all fine\"";
-String failWithExitCodeScript = "echo \"Not healthy\"; exit -1";
 String timeOutScript =
 Shell.WINDOWS ? "@echo off\nping -n 4 127.0.0.1 >nul\necho \"I am fine\""
 : "sleep 4\necho \"I am fine\"";
@@ -125,12 +124,6 @@ public class TestNodeHealthScriptRunner {
 nodeHealthScriptRunner.isHealthy());
 Assert.assertEquals("", nodeHealthScriptRunner.getHealthReport());

-// Script which fails with exit code.
-writeNodeHealthScriptFile(failWithExitCodeScript, true);
-timerTask.run();
-Assert.assertFalse("Node health status reported healthy",
-nodeHealthScriptRunner.isHealthy());
-
 // Timeout script.
 writeNodeHealthScriptFile(timeOutScript, true);
 timerTask.run();
@@ -88,6 +88,6 @@ message AuthMethodResponseProto {
 required string mechanismName = 2;
 }

-message UserResponseProto {
+message AuthUserResponseProto {
-required string user = 1;
+required string authUser = 1;
 }
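For context on the message rename above: the protoc-generated Java accessors follow the field name, so authUser maps to setAuthUser/getAuthUser. A fragment showing the intended usage (assumes the TestProtos classes generated from test.proto are on the classpath; not part of the patch itself):

    // Build and read the renamed response message.
    TestProtos.AuthUserResponseProto resp =
        TestProtos.AuthUserResponseProto.newBuilder()
            .setAuthUser("someUser")    // required string authUser = 1;
            .build();
    String name = resp.getAuthUser();   // "someUser"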
@@ -40,11 +40,9 @@ service TestProtobufRpcProto {
 rpc exchange(ExchangeRequestProto) returns (ExchangeResponseProto);
 rpc sleep(SleepRequestProto) returns (EmptyResponseProto);
 rpc getAuthMethod(EmptyRequestProto) returns (AuthMethodResponseProto);
-rpc getAuthUser(EmptyRequestProto) returns (UserResponseProto);
+rpc getAuthUser(EmptyRequestProto) returns (AuthUserResponseProto);
 rpc echoPostponed(EchoRequestProto) returns (EchoResponseProto);
 rpc sendPostponed(EmptyRequestProto) returns (EmptyResponseProto);
-rpc getCurrentUser(EmptyRequestProto) returns (UserResponseProto);
-rpc getServerRemoteUser(EmptyRequestProto) returns (UserResponseProto);
 }

 service TestProtobufRpc2Proto {
@@ -533,7 +533,8 @@ public class DFSInputStream extends FSInputStream
 * Open a DataInputStream to a DataNode so that it can be read from.
 * We get block ID and the IDs of the destinations at startup, from the namenode.
 */
-private synchronized DatanodeInfo blockSeekTo(long target) throws IOException {
+private synchronized DatanodeInfo blockSeekTo(long target)
+throws IOException {
 if (target >= getFileLength()) {
 throw new IOException("Attempted to read past end of file");
 }
@@ -962,14 +963,14 @@ public class DFSInputStream extends FSInputStream
 }

 protected void fetchBlockByteRange(LocatedBlock block, long start, long end,
-byte[] buf, int offset, CorruptedBlocks corruptedBlocks)
+ByteBuffer buf, CorruptedBlocks corruptedBlocks)
 throws IOException {
 block = refreshLocatedBlock(block);
 while (true) {
 DNAddrPair addressPair = chooseDataNode(block, null);
 try {
 actualGetFromOneDataNode(addressPair, block, start, end,
-buf, offset, corruptedBlocks);
+buf, corruptedBlocks);
 return;
 } catch (IOException e) {
 checkInterrupted(e); // check if the read has been interrupted
@@ -988,12 +989,10 @@ public class DFSInputStream extends FSInputStream
 return new Callable<ByteBuffer>() {
 @Override
 public ByteBuffer call() throws Exception {
-byte[] buf = bb.array();
-int offset = bb.position();
 try (TraceScope ignored = dfsClient.getTracer().
 newScope("hedgedRead" + hedgedReadId, parentSpanId)) {
-actualGetFromOneDataNode(datanode, block, start, end, buf,
+actualGetFromOneDataNode(datanode, block, start, end, bb,
-offset, corruptedBlocks);
+corruptedBlocks);
 return bb;
 }
 }
@@ -1007,13 +1006,12 @@ public class DFSInputStream extends FSInputStream
 * @param block the located block containing the requested data
 * @param startInBlk the startInBlk offset of the block
 * @param endInBlk the endInBlk offset of the block
-* @param buf the given byte array into which the data is read
+* @param buf the given byte buffer into which the data is read
-* @param offset the offset in buf
 * @param corruptedBlocks map recording list of datanodes with corrupted
 * block replica
 */
 void actualGetFromOneDataNode(final DNAddrPair datanode, LocatedBlock block,
-final long startInBlk, final long endInBlk, byte[] buf, int offset,
+final long startInBlk, final long endInBlk, ByteBuffer buf,
 CorruptedBlocks corruptedBlocks)
 throws IOException {
 DFSClientFaultInjector.get().startFetchFromDatanode();
@@ -1031,7 +1029,22 @@ public class DFSInputStream extends FSInputStream
 DFSClientFaultInjector.get().fetchFromDatanodeException();
 reader = getBlockReader(block, startInBlk, len, datanode.addr,
 datanode.storageType, datanode.info);
-int nread = reader.readAll(buf, offset, len);
+//Behave exactly as the readAll() call
+ByteBuffer tmp = buf.duplicate();
+tmp.limit(tmp.position() + len);
+tmp = tmp.slice();
+int nread = 0;
+int ret;
+while (true) {
+ret = reader.read(tmp);
+if (ret <= 0) {
+break;
+}
+nread += ret;
+}
+buf.position(buf.position() + nread);

 IOUtilsClient.updateReadStatistics(readStatistics, nread, reader);
 dfsClient.updateFileSystemReadStats(
 reader.getNetworkDistance(), nread);
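The read loop added above emulates readAll() against a ByteBuffer: it carves a length-limited slice, reads until the reader makes no more progress, then advances the caller's buffer. A self-contained sketch of the same pattern using a plain ReadableByteChannel instead of the HDFS BlockReader (names here are illustrative, not from the patch):

    import java.io.IOException;
    import java.nio.ByteBuffer;
    import java.nio.channels.ReadableByteChannel;

    final class ReadFully {
      static int readFully(ReadableByteChannel ch, ByteBuffer dst, int len)
          throws IOException {
        ByteBuffer tmp = dst.duplicate();        // independent position/limit
        tmp.limit(tmp.position() + len);         // cap the read at len bytes
        tmp = tmp.slice();
        int nread = 0;
        while (tmp.hasRemaining()) {
          int ret = ch.read(tmp);
          if (ret <= 0) {
            break;                               // EOF or no progress
          }
          nread += ret;
        }
        dst.position(dst.position() + nread);    // reflect consumed bytes in dst
        return nread;
      }
    }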
@@ -1098,7 +1111,7 @@ public class DFSInputStream extends FSInputStream
 * time. We then wait on which ever read returns first.
 */
 private void hedgedFetchBlockByteRange(LocatedBlock block, long start,
-long end, byte[] buf, int offset, CorruptedBlocks corruptedBlocks)
+long end, ByteBuffer buf, CorruptedBlocks corruptedBlocks)
 throws IOException {
 final DfsClientConf conf = dfsClient.getConf();
 ArrayList<Future<ByteBuffer>> futures = new ArrayList<>();
@@ -1130,8 +1143,8 @@ public class DFSInputStream extends FSInputStream
 conf.getHedgedReadThresholdMillis(), TimeUnit.MILLISECONDS);
 if (future != null) {
 ByteBuffer result = future.get();
-System.arraycopy(result.array(), result.position(), buf, offset,
+result.flip();
-len);
+buf.put(result);
 return;
 }
 DFSClient.LOG.debug("Waited {}ms to read from {}; spawning hedged "
@@ -1173,8 +1186,8 @@ public class DFSInputStream extends FSInputStream
 // cancel the rest.
 cancelAll(futures);
 dfsClient.getHedgedReadMetrics().incHedgedReadWins();
-System.arraycopy(result.array(), result.position(), buf, offset,
+result.flip();
-len);
+buf.put(result);
 return;
 } catch (InterruptedException ie) {
 // Ignore and retry
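Both hedged-read paths above now hand the winning buffer back with flip() followed by put(), which copies exactly the bytes the hedged read wrote and also works for buffers that are not array-backed, unlike System.arraycopy on result.array(). A tiny standalone illustration (assumed sizes, not from the patch):

    import java.nio.ByteBuffer;

    final class BufferCopyDemo {
      public static void main(String[] args) {
        ByteBuffer result = ByteBuffer.allocate(16);
        result.put(new byte[] {1, 2, 3, 4});  // producer wrote 4 bytes
        ByteBuffer dest = ByteBuffer.allocate(16);
        result.flip();                        // position=0, limit=4
        dest.put(result);                     // copies only the 4 written bytes
        System.out.println("copied " + dest.position() + " bytes");
      }
    }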
@@ -1244,7 +1257,8 @@ public class DFSInputStream extends FSInputStream
 * access key from its memory since it's considered expired based on
 * the estimated expiration date.
 */
-if (ex instanceof InvalidBlockTokenException || ex instanceof InvalidToken) {
+if (ex instanceof InvalidBlockTokenException ||
+ex instanceof InvalidToken) {
 DFSClient.LOG.info("Access token was invalid when connecting to "
 + targetAddr + " : " + ex);
 return true;
@@ -1272,7 +1286,8 @@ public class DFSInputStream extends FSInputStream
 try (TraceScope scope = dfsClient.
 newReaderTraceScope("DFSInputStream#byteArrayPread",
 src, position, length)) {
-int retLen = pread(position, buffer, offset, length);
+ByteBuffer bb = ByteBuffer.wrap(buffer, offset, length);
+int retLen = pread(position, bb);
 if (retLen < length) {
 dfsClient.addRetLenToReaderScope(scope, retLen);
 }
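The byteArrayPread change above bridges the old byte[]/offset/length API to the ByteBuffer-based pread by wrapping the array; the wrapped buffer's remaining() equals the requested length, which is what pread now recovers on its own. A short standalone check (illustrative values only):

    import java.nio.ByteBuffer;

    final class WrapDemo {
      public static void main(String[] args) {
        byte[] buffer = new byte[1024];
        int offset = 100;
        int length = 256;
        ByteBuffer bb = ByteBuffer.wrap(buffer, offset, length);
        // position == offset, limit == offset + length
        System.out.println(bb.remaining());  // prints 256
      }
    }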
@@ -1280,7 +1295,7 @@ public class DFSInputStream extends FSInputStream
 }
 }

-private int pread(long position, byte[] buffer, int offset, int length)
+private int pread(long position, ByteBuffer buffer)
 throws IOException {
 // sanity checks
 dfsClient.checkOpen();
@@ -1292,6 +1307,7 @@ public class DFSInputStream extends FSInputStream
 if ((position < 0) || (position >= filelen)) {
 return -1;
 }
+int length = buffer.remaining();
 int realLen = length;
 if ((position + length) > filelen) {
 realLen = (int)(filelen - position);
@@ -1304,14 +1320,16 @@ public class DFSInputStream extends FSInputStream
 CorruptedBlocks corruptedBlocks = new CorruptedBlocks();
 for (LocatedBlock blk : blockRange) {
 long targetStart = position - blk.getStartOffset();
-long bytesToRead = Math.min(remaining, blk.getBlockSize() - targetStart);
+int bytesToRead = (int) Math.min(remaining,
+blk.getBlockSize() - targetStart);
+long targetEnd = targetStart + bytesToRead - 1;
 try {
 if (dfsClient.isHedgedReadsEnabled() && !blk.isStriped()) {
 hedgedFetchBlockByteRange(blk, targetStart,
-targetStart + bytesToRead - 1, buffer, offset, corruptedBlocks);
+targetEnd, buffer, corruptedBlocks);
 } else {
-fetchBlockByteRange(blk, targetStart, targetStart + bytesToRead - 1,
+fetchBlockByteRange(blk, targetStart, targetEnd,
-buffer, offset, corruptedBlocks);
+buffer, corruptedBlocks);
 }
 } finally {
 // Check and report if any block replicas are corrupted.
@@ -1323,7 +1341,6 @@ public class DFSInputStream extends FSInputStream

 remaining -= bytesToRead;
 position += bytesToRead;
-offset += bytesToRead;
 }
 assert remaining == 0 : "Wrong number of bytes read.";
 return realLen;
@@ -1457,7 +1474,8 @@ public class DFSInputStream extends FSInputStream
 * If another node could not be found, then returns false.
 */
 @Override
-public synchronized boolean seekToNewSource(long targetPos) throws IOException {
+public synchronized boolean seekToNewSource(long targetPos)
+throws IOException {
 if (currentNode == null) {
 return seekToBlockSource(targetPos);
 }
@@ -307,8 +307,8 @@ public class DFSStripedInputStream extends DFSInputStream {
         stripeLimit - stripeBufOffset);

     LocatedStripedBlock blockGroup = (LocatedStripedBlock) currentLocatedBlock;
-    AlignedStripe[] stripes = StripedBlockUtil.divideOneStripe(ecPolicy, cellSize,
-        blockGroup, offsetInBlockGroup,
+    AlignedStripe[] stripes = StripedBlockUtil.divideOneStripe(ecPolicy,
+        cellSize, blockGroup, offsetInBlockGroup,
         offsetInBlockGroup + stripeRange.length - 1, curStripeBuf);
     final LocatedBlock[] blks = StripedBlockUtil.parseStripedBlockGroup(
         blockGroup, cellSize, dataBlkNum, parityBlkNum);

@@ -523,13 +523,13 @@ public class DFSStripedInputStream extends DFSInputStream {
    */
   @Override
   protected void fetchBlockByteRange(LocatedBlock block, long start,
-      long end, byte[] buf, int offset, CorruptedBlocks corruptedBlocks)
+      long end, ByteBuffer buf, CorruptedBlocks corruptedBlocks)
       throws IOException {
     // Refresh the striped block group
     LocatedStripedBlock blockGroup = getBlockGroupAt(block.getStartOffset());

     AlignedStripe[] stripes = StripedBlockUtil.divideByteRangeIntoStripes(
-        ecPolicy, cellSize, blockGroup, start, end, buf, offset);
+        ecPolicy, cellSize, blockGroup, start, end, buf);
     CompletionService<Void> readService = new ExecutorCompletionService<>(
         dfsClient.getStripedReadsThreadPool());
     final LocatedBlock[] blks = StripedBlockUtil.parseStripedBlockGroup(

@@ -542,6 +542,7 @@ public class DFSStripedInputStream extends DFSInputStream {
             blks, preaderInfos, corruptedBlocks);
         preader.readStripe();
       }
+      buf.position(buf.position() + (int)(end - start + 1));
     } finally {
       for (BlockReaderInfo preaderInfo : preaderInfos) {
         closeReader(preaderInfo);

@@ -698,16 +699,15 @@ public class DFSStripedInputStream extends DFSInputStream {
   }

   private ByteBufferStrategy[] getReadStrategies(StripingChunk chunk) {
-    if (chunk.byteBuffer != null) {
-      ByteBufferStrategy strategy =
-          new ByteBufferStrategy(chunk.byteBuffer, readStatistics, dfsClient);
+    if (chunk.useByteBuffer()) {
+      ByteBufferStrategy strategy = new ByteBufferStrategy(
+          chunk.getByteBuffer(), readStatistics, dfsClient);
       return new ByteBufferStrategy[]{strategy};
     } else {
       ByteBufferStrategy[] strategies =
-          new ByteBufferStrategy[chunk.byteArray.getOffsets().length];
+          new ByteBufferStrategy[chunk.getChunkBuffer().getSlices().size()];
       for (int i = 0; i < strategies.length; i++) {
-        ByteBuffer buffer = ByteBuffer.wrap(chunk.byteArray.buf(),
-            chunk.byteArray.getOffsets()[i], chunk.byteArray.getLengths()[i]);
+        ByteBuffer buffer = chunk.getChunkBuffer().getSlice(i);
         strategies[i] =
             new ByteBufferStrategy(buffer, readStatistics, dfsClient);
       }

@@ -814,7 +814,7 @@ public class DFSStripedInputStream extends DFSInputStream {
   }

   class PositionStripeReader extends StripeReader {
-    private byte[][] decodeInputs = null;
+    private ByteBuffer[] decodeInputs = null;

     PositionStripeReader(CompletionService<Void> service,
         AlignedStripe alignedStripe, LocatedBlock[] targetBlocks,

@@ -836,8 +836,6 @@ public class DFSStripedInputStream extends DFSInputStream {
       Preconditions.checkState(index >= dataBlkNum &&
           alignedStripe.chunks[index] == null);
       alignedStripe.chunks[index] = new StripingChunk(decodeInputs[index]);
-      alignedStripe.chunks[index].addByteArraySlice(0,
-          (int) alignedStripe.getSpanInBlock());
       return true;
     }
@@ -73,7 +73,8 @@ import java.util.concurrent.TimeUnit;
 @InterfaceAudience.Private
 public class StripedBlockUtil {

-  public static final Logger LOG = LoggerFactory.getLogger(StripedBlockUtil.class);
+  public static final Logger LOG =
+      LoggerFactory.getLogger(StripedBlockUtil.class);

   /**
    * Parses a striped block group into individual blocks.

@@ -312,16 +313,17 @@ public class StripedBlockUtil {
    * schedule a new fetch request with the decoding input buffer as transfer
    * destination.
    */
-  public static byte[][] initDecodeInputs(AlignedStripe alignedStripe,
+  public static ByteBuffer[] initDecodeInputs(AlignedStripe alignedStripe,
       int dataBlkNum, int parityBlkNum) {
-    byte[][] decodeInputs =
-        new byte[dataBlkNum + parityBlkNum][(int) alignedStripe.getSpanInBlock()];
+    ByteBuffer[] decodeInputs = new ByteBuffer[dataBlkNum + parityBlkNum];
+    for (int i = 0; i < decodeInputs.length; i++) {
+      decodeInputs[i] = ByteBuffer.allocate(
+          (int) alignedStripe.getSpanInBlock());
+    }
     // read the full data aligned stripe
     for (int i = 0; i < dataBlkNum; i++) {
       if (alignedStripe.chunks[i] == null) {
         alignedStripe.chunks[i] = new StripingChunk(decodeInputs[i]);
-        alignedStripe.chunks[i].addByteArraySlice(0,
-            (int) alignedStripe.getSpanInBlock());
       }
     }
     return decodeInputs;

@@ -334,14 +336,21 @@ public class StripedBlockUtil {
    * When all pending requests have returned, this method should be called to
    * finalize decode input buffers.
    */
-  public static void finalizeDecodeInputs(final byte[][] decodeInputs,
+  public static void finalizeDecodeInputs(final ByteBuffer[] decodeInputs,
       AlignedStripe alignedStripe) {
     for (int i = 0; i < alignedStripe.chunks.length; i++) {
       final StripingChunk chunk = alignedStripe.chunks[i];
       if (chunk != null && chunk.state == StripingChunk.FETCHED) {
-        chunk.copyTo(decodeInputs[i]);
+        if (chunk.useChunkBuffer()) {
+          chunk.getChunkBuffer().copyTo(decodeInputs[i]);
+        } else {
+          chunk.getByteBuffer().flip();
+        }
       } else if (chunk != null && chunk.state == StripingChunk.ALLZERO) {
-        Arrays.fill(decodeInputs[i], (byte) 0);
+        //ZERO it. Will be better handled in other following issue.
+        byte[] emptyBytes = new byte[decodeInputs[i].limit()];
+        decodeInputs[i].put(emptyBytes);
+        decodeInputs[i].flip();
       } else {
         decodeInputs[i] = null;
       }

@@ -351,7 +360,7 @@ public class StripedBlockUtil {
   /**
    * Decode based on the given input buffers and erasure coding policy.
    */
-  public static void decodeAndFillBuffer(final byte[][] decodeInputs,
+  public static void decodeAndFillBuffer(final ByteBuffer[] decodeInputs,
       AlignedStripe alignedStripe, int dataBlkNum, int parityBlkNum,
       RawErasureDecoder decoder) {
     // Step 1: prepare indices and output buffers for missing data units

@@ -364,8 +373,11 @@ public class StripedBlockUtil {
       }
     }
     decodeIndices = Arrays.copyOf(decodeIndices, pos);
-    byte[][] decodeOutputs =
-        new byte[decodeIndices.length][(int) alignedStripe.getSpanInBlock()];
+    ByteBuffer[] decodeOutputs = new ByteBuffer[decodeIndices.length];
+    for (int i = 0; i < decodeOutputs.length; i++) {
+      decodeOutputs[i] = ByteBuffer.allocate(
+          (int) alignedStripe.getSpanInBlock());
+    }

     // Step 2: decode into prepared output buffers
     decoder.decode(decodeInputs, decodeIndices, decodeOutputs);

@@ -374,8 +386,8 @@ public class StripedBlockUtil {
     for (int i = 0; i < decodeIndices.length; i++) {
       int missingBlkIdx = decodeIndices[i];
       StripingChunk chunk = alignedStripe.chunks[missingBlkIdx];
-      if (chunk.state == StripingChunk.MISSING) {
-        chunk.copyFrom(decodeOutputs[i]);
+      if (chunk.state == StripingChunk.MISSING && chunk.useChunkBuffer()) {
+        chunk.getChunkBuffer().copyFrom(decodeOutputs[i]);
       }
     }
   }

@@ -402,7 +414,8 @@ public class StripedBlockUtil {

     // Step 4: calculate each chunk's position in destination buffer. Since the
     // whole read range is within a single stripe, the logic is simpler here.
-    int bufOffset = (int) (rangeStartInBlockGroup % ((long) cellSize * dataBlkNum));
+    int bufOffset =
+        (int) (rangeStartInBlockGroup % ((long) cellSize * dataBlkNum));
     for (StripingCell cell : cells) {
       long cellStart = cell.idxInInternalBlk * cellSize + cell.offset;
       long cellEnd = cellStart + cell.size - 1;

@@ -437,15 +450,14 @@ public class StripedBlockUtil {
    * @param rangeStartInBlockGroup The byte range's start offset in block group
    * @param rangeEndInBlockGroup The byte range's end offset in block group
    * @param buf Destination buffer of the read operation for the byte range
-   * @param offsetInBuf Start offset into the destination buffer
    *
    * At most 5 stripes will be generated from each logical range, as
    * demonstrated in the header of {@link AlignedStripe}.
    */
-  public static AlignedStripe[] divideByteRangeIntoStripes(ErasureCodingPolicy ecPolicy,
+  public static AlignedStripe[] divideByteRangeIntoStripes(
+      ErasureCodingPolicy ecPolicy,
       int cellSize, LocatedStripedBlock blockGroup,
-      long rangeStartInBlockGroup, long rangeEndInBlockGroup, byte[] buf,
-      int offsetInBuf) {
+      long rangeStartInBlockGroup, long rangeEndInBlockGroup, ByteBuffer buf) {

     // Step 0: analyze range and calculate basic parameters
     final int dataBlkNum = ecPolicy.getNumDataUnits();

@@ -462,7 +474,7 @@ public class StripedBlockUtil {
     AlignedStripe[] stripes = mergeRangesForInternalBlocks(ecPolicy, ranges);

     // Step 4: calculate each chunk's position in destination buffer
-    calcualteChunkPositionsInBuf(cellSize, stripes, cells, buf, offsetInBuf);
+    calcualteChunkPositionsInBuf(cellSize, stripes, cells, buf);

     // Step 5: prepare ALLZERO blocks
     prepareAllZeroChunks(blockGroup, stripes, cellSize, dataBlkNum);

@@ -476,7 +488,8 @@ public class StripedBlockUtil {
    * used by {@link DFSStripedOutputStream} in encoding
    */
   @VisibleForTesting
-  private static StripingCell[] getStripingCellsOfByteRange(ErasureCodingPolicy ecPolicy,
+  private static StripingCell[] getStripingCellsOfByteRange(
+      ErasureCodingPolicy ecPolicy,
       int cellSize, LocatedStripedBlock blockGroup,
       long rangeStartInBlockGroup, long rangeEndInBlockGroup) {
     Preconditions.checkArgument(

@@ -511,7 +524,8 @@ public class StripedBlockUtil {
    * the physical byte range (inclusive) on each stored internal block.
    */
   @VisibleForTesting
-  private static VerticalRange[] getRangesForInternalBlocks(ErasureCodingPolicy ecPolicy,
+  private static VerticalRange[] getRangesForInternalBlocks(
+      ErasureCodingPolicy ecPolicy,
       int cellSize, StripingCell[] cells) {
     int dataBlkNum = ecPolicy.getNumDataUnits();
     int parityBlkNum = ecPolicy.getNumParityUnits();

@@ -575,8 +589,7 @@ public class StripedBlockUtil {
   }

   private static void calcualteChunkPositionsInBuf(int cellSize,
-      AlignedStripe[] stripes, StripingCell[] cells, byte[] buf,
-      int offsetInBuf) {
+      AlignedStripe[] stripes, StripingCell[] cells, ByteBuffer buf) {
     /**
      * | <--------------- AlignedStripe --------------->|
      *

@@ -598,6 +611,7 @@ public class StripedBlockUtil {
     for (StripingCell cell : cells) {
       long cellStart = cell.idxInInternalBlk * cellSize + cell.offset;
       long cellEnd = cellStart + cell.size - 1;
+      StripingChunk chunk;
       for (AlignedStripe s : stripes) {
         long stripeEnd = s.getOffsetInBlock() + s.getSpanInBlock() - 1;
         long overlapStart = Math.max(cellStart, s.getOffsetInBlock());

@@ -606,11 +620,13 @@ public class StripedBlockUtil {
         if (overLapLen <= 0) {
           continue;
         }
-        if (s.chunks[cell.idxInStripe] == null) {
-          s.chunks[cell.idxInStripe] = new StripingChunk(buf);
+        chunk = s.chunks[cell.idxInStripe];
+        if (chunk == null) {
+          chunk = new StripingChunk();
+          s.chunks[cell.idxInStripe] = chunk;
         }
-        s.chunks[cell.idxInStripe].addByteArraySlice(
-            (int)(offsetInBuf + done + overlapStart - cellStart), overLapLen);
+        chunk.getChunkBuffer().addSlice(buf,
+            (int) (done + overlapStart - cellStart), overLapLen);
       }
       done += cell.size;
     }

@@ -833,88 +849,89 @@ public class StripedBlockUtil {
      */
     public int state = REQUESTED;

-    public final ChunkByteArray byteArray;
-    public final ByteBuffer byteBuffer;
+    private final ChunkByteBuffer chunkBuffer;
+    private final ByteBuffer byteBuffer;

-    public StripingChunk(byte[] buf) {
-      this.byteArray = new ChunkByteArray(buf);
+    public StripingChunk() {
+      this.chunkBuffer = new ChunkByteBuffer();
       byteBuffer = null;
     }

     public StripingChunk(ByteBuffer buf) {
-      this.byteArray = null;
+      this.chunkBuffer = null;
       this.byteBuffer = buf;
     }

     public StripingChunk(int state) {
-      this.byteArray = null;
+      this.chunkBuffer = null;
       this.byteBuffer = null;
       this.state = state;
     }

-    public void addByteArraySlice(int offset, int length) {
-      assert byteArray != null;
-      byteArray.offsetsInBuf.add(offset);
-      byteArray.lengthsInBuf.add(length);
+    public boolean useByteBuffer(){
+      return byteBuffer != null;
     }

-    void copyTo(byte[] target) {
-      assert byteArray != null;
-      byteArray.copyTo(target);
+    public boolean useChunkBuffer() {
+      return chunkBuffer != null;
     }

-    void copyFrom(byte[] src) {
-      assert byteArray != null;
-      byteArray.copyFrom(src);
+    public ByteBuffer getByteBuffer() {
+      assert byteBuffer != null;
+      return byteBuffer;
+    }
+
+    public ChunkByteBuffer getChunkBuffer() {
+      assert chunkBuffer != null;
+      return chunkBuffer;
     }
   }

-  public static class ChunkByteArray {
-    private final byte[] buf;
-    private final List<Integer> offsetsInBuf;
-    private final List<Integer> lengthsInBuf;
+  /**
+   * A utility to manage ByteBuffer slices for a reader.
+   */
+  public static class ChunkByteBuffer {
+    private final List<ByteBuffer> slices;

-    ChunkByteArray(byte[] buf) {
-      this.buf = buf;
-      this.offsetsInBuf = new ArrayList<>();
-      this.lengthsInBuf = new ArrayList<>();
+    ChunkByteBuffer() {
+      this.slices = new ArrayList<>();
     }

-    public int[] getOffsets() {
-      int[] offsets = new int[offsetsInBuf.size()];
-      for (int i = 0; i < offsets.length; i++) {
-        offsets[i] = offsetsInBuf.get(i);
-      }
-      return offsets;
-    }
-
-    public int[] getLengths() {
-      int[] lens = new int[this.lengthsInBuf.size()];
-      for (int i = 0; i < lens.length; i++) {
-        lens[i] = this.lengthsInBuf.get(i);
-      }
-      return lens;
-    }
-
-    public byte[] buf() {
-      return buf;
-    }
-
-    void copyTo(byte[] target) {
-      int posInBuf = 0;
-      for (int i = 0; i < offsetsInBuf.size(); i++) {
-        System.arraycopy(buf, offsetsInBuf.get(i),
-            target, posInBuf, lengthsInBuf.get(i));
-        posInBuf += lengthsInBuf.get(i);
-      }
-    }
-
-    void copyFrom(byte[] src) {
-      int srcPos = 0;
-      for (int j = 0; j < offsetsInBuf.size(); j++) {
-        System.arraycopy(src, srcPos, buf, offsetsInBuf.get(j),
-            lengthsInBuf.get(j));
-        srcPos += lengthsInBuf.get(j);
+    public void addSlice(ByteBuffer buffer, int offset, int len) {
+      ByteBuffer tmp = buffer.duplicate();
+      tmp.position(buffer.position() + offset);
+      tmp.limit(buffer.position() + offset + len);
+      slices.add(tmp.slice());
+    }
+
+    public ByteBuffer getSlice(int i) {
+      return slices.get(i);
+    }
+
+    public List<ByteBuffer> getSlices() {
+      return slices;
+    }
+
+    /**
+     * Note: target will be ready-to-read state after the call.
+     */
+    void copyTo(ByteBuffer target) {
+      for (ByteBuffer slice : slices) {
+        slice.flip();
+        target.put(slice);
+      }
+      target.flip();
+    }
+
+    void copyFrom(ByteBuffer src) {
+      ByteBuffer tmp;
+      int len;
+      for (ByteBuffer slice : slices) {
+        len = slice.remaining();
+        tmp = src.duplicate();
+        tmp.limit(tmp.position() + len);
+        slice.put(tmp);
+        src.position(src.position() + len);
       }
     }
   }
 }
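The ChunkByteBuffer rewrite above depends on ByteBuffer.duplicate() and slice() sharing the same backing storage, so a write into a registered slice lands directly in the corresponding region of the caller's destination buffer. A standalone sketch of that mechanism (illustrative only, not part of this patch):

    import java.nio.ByteBuffer;

    public class SliceDemo {
      public static void main(String[] args) {
        ByteBuffer dest = ByteBuffer.allocate(16);
        // Carve out bytes [4, 8) of 'dest' as an independent window, the same
        // way addSlice() records one chunk's region of the read destination.
        ByteBuffer tmp = dest.duplicate();
        tmp.position(dest.position() + 4);
        tmp.limit(dest.position() + 8);
        ByteBuffer slice = tmp.slice();

        slice.put(new byte[]{1, 2, 3, 4}); // writes land in dest[4..7]
        System.out.println(dest.get(4));   // prints 1
      }
    }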
@@ -183,6 +183,7 @@ public class URLConnectionFactory {
       return openConnection(url, false);
     } catch (AuthenticationException e) {
       // Unreachable
+      LOG.error("Open connection {} failed", url, e);
       return null;
     }
   }
@@ -114,7 +114,7 @@ int hdfsFileGetReadStatistics(hdfsFile file,
     jthr = invokeMethod(env, &jVal, INSTANCE, file->file,
                   "org/apache/hadoop/hdfs/client/HdfsDataInputStream",
                   "getReadStatistics",
-                  "()Lorg/apache/hadoop/hdfs/DFSInputStream$ReadStatistics;");
+                  "()Lorg/apache/hadoop/hdfs/ReadStatistics;");
     if (jthr) {
         ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
             "hdfsFileGetReadStatistics: getReadStatistics failed");

@@ -127,7 +127,7 @@ int hdfsFileGetReadStatistics(hdfsFile file,
         goto done;
     }
     jthr = invokeMethod(env, &jVal, INSTANCE, readStats,
-                  "org/apache/hadoop/hdfs/DFSInputStream$ReadStatistics",
+                  "org/apache/hadoop/hdfs/ReadStatistics",
                   "getTotalBytesRead", "()J");
     if (jthr) {
         ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,

@@ -137,7 +137,7 @@ int hdfsFileGetReadStatistics(hdfsFile file,
     s->totalBytesRead = jVal.j;

    jthr = invokeMethod(env, &jVal, INSTANCE, readStats,
-                  "org/apache/hadoop/hdfs/DFSInputStream$ReadStatistics",
+                  "org/apache/hadoop/hdfs/ReadStatistics",
                   "getTotalLocalBytesRead", "()J");
     if (jthr) {
         ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,

@@ -147,7 +147,7 @@ int hdfsFileGetReadStatistics(hdfsFile file,
     s->totalLocalBytesRead = jVal.j;

     jthr = invokeMethod(env, &jVal, INSTANCE, readStats,
-                  "org/apache/hadoop/hdfs/DFSInputStream$ReadStatistics",
+                  "org/apache/hadoop/hdfs/ReadStatistics",
                   "getTotalShortCircuitBytesRead", "()J");
     if (jthr) {
         ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,

@@ -156,7 +156,7 @@ int hdfsFileGetReadStatistics(hdfsFile file,
     }
     s->totalShortCircuitBytesRead = jVal.j;
     jthr = invokeMethod(env, &jVal, INSTANCE, readStats,
-                  "org/apache/hadoop/hdfs/DFSInputStream$ReadStatistics",
+                  "org/apache/hadoop/hdfs/ReadStatistics",
                   "getTotalZeroCopyBytesRead", "()J");
     if (jthr) {
         ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -129,7 +129,7 @@ function hdfscmd_case
       HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
     ;;
     diskbalancer)
-      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.DiskBalancer
+      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.DiskBalancerCLI
       hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
       HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
     ;;
@@ -419,6 +419,11 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_KEY =
       "dfs.namenode.read-lock-reporting-threshold-ms";
   public static final long DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_DEFAULT = 5000L;
+  // Threshold for how long the lock warnings must be suppressed
+  public static final String DFS_LOCK_SUPPRESS_WARNING_INTERVAL_KEY =
+      "dfs.lock.suppress.warning.interval";
+  public static final long DFS_LOCK_SUPPRESS_WARNING_INTERVAL_DEFAULT =
+      10000; //ms

   public static final String DFS_UPGRADE_DOMAIN_FACTOR = "dfs.namenode.upgrade.domain.factor";
   public static final int DFS_UPGRADE_DOMAIN_FACTOR_DEFAULT = DFS_REPLICATION_DEFAULT;
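The new key added above throttles how often long-lock-hold warnings are emitted. A minimal sketch of how the value would be read, mirroring what the FsDatasetImpl hunk later in this commit does (the wrapper class name here is hypothetical):

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;

    final class LockWarningIntervalSketch {
      static long warningIntervalMs(Configuration conf) {
        // Returns 10000 ms unless dfs.lock.suppress.warning.interval is set,
        // e.g. to "5s" or "30000", in the cluster configuration.
        return conf.getTimeDuration(
            DFSConfigKeys.DFS_LOCK_SUPPRESS_WARNING_INTERVAL_KEY,
            DFSConfigKeys.DFS_LOCK_SUPPRESS_WARNING_INTERVAL_DEFAULT,
            TimeUnit.MILLISECONDS);
      }
    }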
@@ -0,0 +1,185 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.locks.Condition;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.commons.logging.Log;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Timer;
+
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ * This is a debugging class that can be used by callers to track
+ * whether a specifc lock is being held for too long and periodically
+ * log a warning and stack trace, if so.
+ *
+ * The logged warnings are throttled so that logs are not spammed.
+ *
+ * A new instance of InstrumentedLock can be created for each object
+ * that needs to be instrumented.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class InstrumentedLock implements Lock {
+
+  private final Lock lock;
+  private final Log logger;
+  private final String name;
+  private final Timer clock;
+
+  /** Minimum gap between two lock warnings. */
+  private final long minLoggingGap;
+  /** Threshold for detecting long lock held time. */
+  private final long lockWarningThreshold;
+
+  // Tracking counters for lock statistics.
+  private volatile long lockAcquireTimestamp;
+  private final AtomicLong lastLogTimestamp;
+  private final AtomicLong warningsSuppressed = new AtomicLong(0);
+
+  /**
+   * Create a instrumented lock instance which logs a warning message
+   * when lock held time is above given threshold.
+   *
+   * @param name the identifier of the lock object
+   * @param logger this class does not have its own logger, will log to the
+   *               given logger instead
+   * @param minLoggingGapMs the minimum time gap between two log messages,
+   *                        this is to avoid spamming to many logs
+   * @param lockWarningThresholdMs the time threshold to view lock held
+   *                               time as being "too long"
+   */
+  public InstrumentedLock(String name, Log logger, long minLoggingGapMs,
+      long lockWarningThresholdMs) {
+    this(name, logger, new ReentrantLock(),
+        minLoggingGapMs, lockWarningThresholdMs);
+  }
+
+  public InstrumentedLock(String name, Log logger, Lock lock,
+      long minLoggingGapMs, long lockWarningThresholdMs) {
+    this(name, logger, lock,
+        minLoggingGapMs, lockWarningThresholdMs, new Timer());
+  }
+
+  @VisibleForTesting
+  InstrumentedLock(String name, Log logger, Lock lock,
+      long minLoggingGapMs, long lockWarningThresholdMs, Timer clock) {
+    this.name = name;
+    this.lock = lock;
+    this.clock = clock;
+    this.logger = logger;
+    minLoggingGap = minLoggingGapMs;
+    lockWarningThreshold = lockWarningThresholdMs;
+    lastLogTimestamp = new AtomicLong(
+        clock.monotonicNow() - Math.max(minLoggingGap, lockWarningThreshold));
+  }
+
+  @Override
+  public void lock() {
+    lock.lock();
+    lockAcquireTimestamp = clock.monotonicNow();
+  }
+
+  @Override
+  public void lockInterruptibly() throws InterruptedException {
+    lock.lockInterruptibly();
+    lockAcquireTimestamp = clock.monotonicNow();
+  }
+
+  @Override
+  public boolean tryLock() {
+    if (lock.tryLock()) {
+      lockAcquireTimestamp = clock.monotonicNow();
+      return true;
+    }
+    return false;
+  }
+
+  @Override
+  public boolean tryLock(long time, TimeUnit unit) throws InterruptedException {
+    if (lock.tryLock(time, unit)) {
+      lockAcquireTimestamp = clock.monotonicNow();
+      return true;
+    }
+    return false;
+  }
+
+  @Override
+  public void unlock() {
+    long localLockReleaseTime = clock.monotonicNow();
+    long localLockAcquireTime = lockAcquireTimestamp;
+    lock.unlock();
+    check(localLockAcquireTime, localLockReleaseTime);
+  }
+
+  @Override
+  public Condition newCondition() {
+    return lock.newCondition();
+  }
+
+  @VisibleForTesting
+  void logWarning(long lockHeldTime, long suppressed) {
+    logger.warn(String.format("Lock held time above threshold: " +
+        "lock identifier: %s " +
+        "lockHeldTimeMs=%d ms. Suppressed %d lock warnings. " +
+        "The stack trace is: %s" ,
+        name, lockHeldTime, suppressed,
+        StringUtils.getStackTrace(Thread.currentThread())));
+  }
+
+  /**
+   * Log a warning if the lock was held for too long.
+   *
+   * Should be invoked by the caller immediately AFTER releasing the lock.
+   *
+   * @param acquireTime - timestamp just after acquiring the lock.
+   * @param releaseTime - timestamp just before releasing the lock.
+   */
+  private void check(long acquireTime, long releaseTime) {
+    if (!logger.isWarnEnabled()) {
+      return;
+    }
+
+    final long lockHeldTime = releaseTime - acquireTime;
+    if (lockWarningThreshold - lockHeldTime < 0) {
+      long now;
+      long localLastLogTs;
+      do {
+        now = clock.monotonicNow();
+        localLastLogTs = lastLogTimestamp.get();
+        long deltaSinceLastLog = now - localLastLogTs;
+        // check should print log or not
+        if (deltaSinceLastLog - minLoggingGap < 0) {
+          warningsSuppressed.incrementAndGet();
+          return;
+        }
+      } while (!lastLogTimestamp.compareAndSet(localLastLogTs, now));
+      long suppressed = warningsSuppressed.getAndSet(0);
+      logWarning(lockHeldTime, suppressed);
+    }
+  }
+
+}
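For context on the new class above: any caller can wrap a critical section with an InstrumentedLock and get throttled warnings when the lock is held past the threshold. A minimal usage sketch, not part of this commit, using the constructor signature shown above (the surrounding class and values are hypothetical):

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;
    import org.apache.hadoop.hdfs.InstrumentedLock;

    final class InstrumentedLockUsageSketch {
      private static final Log LOG =
          LogFactory.getLog(InstrumentedLockUsageSketch.class);

      // Warn (at most once every 2000 ms) whenever the lock is held > 300 ms.
      private final InstrumentedLock lock =
          new InstrumentedLock("datasetLock", LOG, 2000, 300);

      void doWork() {
        lock.lock();
        try {
          // ... critical section; a slow pass here triggers the throttled warning
        } finally {
          lock.unlock(); // unlock() measures held time and may log
        }
      }
    }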
@@ -40,6 +40,7 @@ import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.Executor;
+import java.util.concurrent.TimeUnit;

 import javax.management.NotCompliantMBeanException;
 import javax.management.ObjectName;

@@ -60,6 +61,7 @@ import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.ExtendedBlockId;
+import org.apache.hadoop.hdfs.InstrumentedLock;
 import org.apache.hadoop.util.AutoCloseableLock;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;

@@ -278,7 +280,13 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
     this.dataStorage = storage;
     this.conf = conf;
     this.smallBufferSize = DFSUtilClient.getSmallBufferSize(conf);
-    this.datasetLock = new AutoCloseableLock();
+    this.datasetLock = new AutoCloseableLock(
+        new InstrumentedLock(getClass().getName(), LOG,
+          conf.getTimeDuration(
+            DFSConfigKeys.DFS_LOCK_SUPPRESS_WARNING_INTERVAL_KEY,
+            DFSConfigKeys.DFS_LOCK_SUPPRESS_WARNING_INTERVAL_DEFAULT,
+            TimeUnit.MILLISECONDS),
+          300));
     // The number of volumes required for operation is the total number
     // of volumes minus the number of failed volumes we can tolerate.
     volFailuresTolerated = datanode.getDnConf().getVolFailuresTolerated();
@@ -29,7 +29,7 @@ import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException;
 import org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan;
-import org.apache.hadoop.hdfs.tools.DiskBalancer;
+import org.apache.hadoop.hdfs.tools.DiskBalancerCLI;

 import java.io.IOException;

@@ -44,9 +44,10 @@ public class CancelCommand extends Command {
    */
   public CancelCommand(Configuration conf) {
     super(conf);
-    addValidCommandParameters(DiskBalancer.CANCEL, "Cancels a running plan.");
-    addValidCommandParameters(DiskBalancer.NODE, "Node to run the command " +
-        "against in node:port format.");
+    addValidCommandParameters(DiskBalancerCLI.CANCEL,
+        "Cancels a running plan.");
+    addValidCommandParameters(DiskBalancerCLI.NODE,
+        "Node to run the command against in node:port format.");
   }

   /**

@@ -57,20 +58,20 @@ public class CancelCommand extends Command {
   @Override
   public void execute(CommandLine cmd) throws Exception {
     LOG.info("Executing \"Cancel plan\" command.");
-    Preconditions.checkState(cmd.hasOption(DiskBalancer.CANCEL));
-    verifyCommandOptions(DiskBalancer.CANCEL, cmd);
+    Preconditions.checkState(cmd.hasOption(DiskBalancerCLI.CANCEL));
+    verifyCommandOptions(DiskBalancerCLI.CANCEL, cmd);

     // We can cancel a plan using datanode address and plan ID
     // that you can read from a datanode using queryStatus
-    if(cmd.hasOption(DiskBalancer.NODE)) {
-      String nodeAddress = cmd.getOptionValue(DiskBalancer.NODE);
-      String planHash = cmd.getOptionValue(DiskBalancer.CANCEL);
+    if(cmd.hasOption(DiskBalancerCLI.NODE)) {
+      String nodeAddress = cmd.getOptionValue(DiskBalancerCLI.NODE);
+      String planHash = cmd.getOptionValue(DiskBalancerCLI.CANCEL);
       cancelPlanUsingHash(nodeAddress, planHash);
     } else {
       // Or you can cancel a plan using the plan file. If the user
       // points us to the plan file, we can compute the hash as well as read
       // the address of the datanode from the plan file.
-      String planFile = cmd.getOptionValue(DiskBalancer.CANCEL);
+      String planFile = cmd.getOptionValue(DiskBalancerCLI.CANCEL);
       Preconditions.checkArgument(planFile != null && !planFile.isEmpty(),
           "Invalid plan file specified.");
       String planData = null;

@@ -142,6 +143,6 @@ public class CancelCommand extends Command {
     HelpFormatter helpFormatter = new HelpFormatter();
     helpFormatter.printHelp("hdfs diskbalancer -cancel <planFile> | -cancel " +
         "<planID> -node <hostname>",
-        header, DiskBalancer.getCancelOptions(), footer);
+        header, DiskBalancerCLI.getCancelOptions(), footer);
   }
 }
@@ -43,7 +43,7 @@ import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet;
-import org.apache.hadoop.hdfs.tools.DiskBalancer;
+import org.apache.hadoop.hdfs.tools.DiskBalancerCLI;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.codehaus.jackson.map.ObjectMapper;

@@ -418,7 +418,7 @@ public abstract class Command extends Configured {
    * @return default top number of nodes.
    */
   protected int getDefaultTop() {
-    return DiskBalancer.DEFAULT_TOP;
+    return DiskBalancerCLI.DEFAULT_TOP;
   }

   /**

@@ -437,7 +437,7 @@ public abstract class Command extends Configured {
   protected int parseTopNodes(final CommandLine cmd, final StrBuilder result) {
     String outputLine = "";
     int nodes = 0;
-    final String topVal = cmd.getOptionValue(DiskBalancer.TOP);
+    final String topVal = cmd.getOptionValue(DiskBalancerCLI.TOP);
     if (StringUtils.isBlank(topVal)) {
       outputLine = String.format(
           "No top limit specified, using default top value %d.",
@@ -29,7 +29,7 @@ import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException;
 import org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan;
-import org.apache.hadoop.hdfs.tools.DiskBalancer;
+import org.apache.hadoop.hdfs.tools.DiskBalancerCLI;

 import java.io.IOException;

@@ -46,7 +46,8 @@ public class ExecuteCommand extends Command {
    */
   public ExecuteCommand(Configuration conf) {
     super(conf);
-    addValidCommandParameters(DiskBalancer.EXECUTE, "Executes a given plan.");
+    addValidCommandParameters(DiskBalancerCLI.EXECUTE,
+        "Executes a given plan.");
   }

   /**

@@ -57,10 +58,10 @@ public class ExecuteCommand extends Command {
   @Override
   public void execute(CommandLine cmd) throws Exception {
     LOG.info("Executing \"execute plan\" command");
-    Preconditions.checkState(cmd.hasOption(DiskBalancer.EXECUTE));
-    verifyCommandOptions(DiskBalancer.EXECUTE, cmd);
+    Preconditions.checkState(cmd.hasOption(DiskBalancerCLI.EXECUTE));
+    verifyCommandOptions(DiskBalancerCLI.EXECUTE, cmd);

-    String planFile = cmd.getOptionValue(DiskBalancer.EXECUTE);
+    String planFile = cmd.getOptionValue(DiskBalancerCLI.EXECUTE);
     Preconditions.checkArgument(planFile != null && !planFile.isEmpty(),
         "Invalid plan file specified.");

@@ -88,7 +89,7 @@ public class ExecuteCommand extends Command {
     String planHash = DigestUtils.shaHex(planData);
     try {
       // TODO : Support skipping date check.
-      dataNode.submitDiskBalancerPlan(planHash, DiskBalancer.PLAN_VERSION,
+      dataNode.submitDiskBalancerPlan(planHash, DiskBalancerCLI.PLAN_VERSION,
           planFile, planData, false);
     } catch (DiskBalancerException ex) {
       LOG.error("Submitting plan on {} failed. Result: {}, Message: {}",

@@ -111,6 +112,6 @@ public class ExecuteCommand extends Command {

     HelpFormatter helpFormatter = new HelpFormatter();
     helpFormatter.printHelp("hdfs diskbalancer -execute <planfile>",
-        header, DiskBalancer.getExecuteOptions(), footer);
+        header, DiskBalancerCLI.getExecuteOptions(), footer);
   }
 }
@@ -23,7 +23,7 @@ import com.google.common.base.Preconditions;
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.HelpFormatter;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.tools.DiskBalancer;
+import org.apache.hadoop.hdfs.tools.DiskBalancerCLI;

 /**
  * Help Command prints out detailed help about each command.

@@ -37,7 +37,7 @@ public class HelpCommand extends Command {
    */
   public HelpCommand(Configuration conf) {
     super(conf);
-    addValidCommandParameters(DiskBalancer.HELP, "Help Command");
+    addValidCommandParameters(DiskBalancerCLI.HELP, "Help Command");
   }

   /**

@@ -53,9 +53,9 @@ public class HelpCommand extends Command {
       return;
     }

-    Preconditions.checkState(cmd.hasOption(DiskBalancer.HELP));
-    verifyCommandOptions(DiskBalancer.HELP, cmd);
-    String helpCommand = cmd.getOptionValue(DiskBalancer.HELP);
+    Preconditions.checkState(cmd.hasOption(DiskBalancerCLI.HELP));
+    verifyCommandOptions(DiskBalancerCLI.HELP, cmd);
+    String helpCommand = cmd.getOptionValue(DiskBalancerCLI.HELP);
     if (helpCommand == null || helpCommand.isEmpty()) {
       this.printHelp();
       return;

@@ -65,19 +65,19 @@ public class HelpCommand extends Command {
     helpCommand = helpCommand.toLowerCase();
     Command command = null;
     switch (helpCommand) {
-    case DiskBalancer.PLAN:
+    case DiskBalancerCLI.PLAN:
       command = new PlanCommand(getConf());
       break;
-    case DiskBalancer.EXECUTE:
+    case DiskBalancerCLI.EXECUTE:
       command = new ExecuteCommand(getConf());
       break;
-    case DiskBalancer.QUERY:
+    case DiskBalancerCLI.QUERY:
      command = new QueryCommand(getConf());
      break;
-    case DiskBalancer.CANCEL:
+    case DiskBalancerCLI.CANCEL:
      command = new CancelCommand(getConf());
      break;
-    case DiskBalancer.REPORT:
+    case DiskBalancerCLI.REPORT:
       command = new ReportCommand(getConf(), null);
       break;
     default:

@@ -102,7 +102,7 @@ public class HelpCommand extends Command {

     HelpFormatter helpFormatter = new HelpFormatter();
     helpFormatter.printHelp("hdfs diskbalancer [command] [options]",
-        header, DiskBalancer.getHelpOptions(), "");
+        header, DiskBalancerCLI.getHelpOptions(), "");
   }

@ -28,7 +28,7 @@ import org.apache.hadoop.hdfs.server.diskbalancer.datamodel
|
||||||
.DiskBalancerDataNode;
|
.DiskBalancerDataNode;
|
||||||
import org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan;
|
import org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan;
|
||||||
import org.apache.hadoop.hdfs.server.diskbalancer.planner.Step;
|
import org.apache.hadoop.hdfs.server.diskbalancer.planner.Step;
|
||||||
import org.apache.hadoop.hdfs.tools.DiskBalancer;
|
import org.apache.hadoop.hdfs.tools.DiskBalancerCLI;
|
||||||
import java.nio.charset.StandardCharsets;
|
import java.nio.charset.StandardCharsets;
|
||||||
import java.util.List;
|
import java.util.List;
|
||||||
|
|
||||||
|
@ -53,18 +53,18 @@ public class PlanCommand extends Command {
|
||||||
this.thresholdPercentage = 1;
|
this.thresholdPercentage = 1;
|
||||||
this.bandwidth = 0;
|
this.bandwidth = 0;
|
||||||
this.maxError = 0;
|
this.maxError = 0;
|
||||||
addValidCommandParameters(DiskBalancer.OUTFILE, "Output directory in " +
|
addValidCommandParameters(DiskBalancerCLI.OUTFILE, "Output directory in " +
|
||||||
"HDFS. The generated plan will be written to a file in this " +
|
"HDFS. The generated plan will be written to a file in this " +
|
||||||
"directory.");
|
"directory.");
|
||||||
addValidCommandParameters(DiskBalancer.BANDWIDTH, "Maximum Bandwidth to " +
|
addValidCommandParameters(DiskBalancerCLI.BANDWIDTH,
|
||||||
"be used while copying.");
|
"Maximum Bandwidth to be used while copying.");
|
||||||
addValidCommandParameters(DiskBalancer.THRESHOLD, "Percentage skew that " +
|
addValidCommandParameters(DiskBalancerCLI.THRESHOLD,
|
||||||
"we tolerate before diskbalancer starts working.");
|
"Percentage skew that we tolerate before diskbalancer starts working.");
|
||||||
addValidCommandParameters(DiskBalancer.MAXERROR, "Max errors to tolerate " +
|
addValidCommandParameters(DiskBalancerCLI.MAXERROR,
|
||||||
"between 2 disks");
|
"Max errors to tolerate between 2 disks");
|
||||||
addValidCommandParameters(DiskBalancer.VERBOSE, "Run plan command in " +
|
addValidCommandParameters(DiskBalancerCLI.VERBOSE, "Run plan command in " +
|
||||||
"verbose mode.");
|
"verbose mode.");
|
||||||
addValidCommandParameters(DiskBalancer.PLAN, "Plan Command");
|
addValidCommandParameters(DiskBalancerCLI.PLAN, "Plan Command");
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -77,36 +77,37 @@ public class PlanCommand extends Command {
|
||||||
@Override
|
@Override
|
||||||
public void execute(CommandLine cmd) throws Exception {
|
public void execute(CommandLine cmd) throws Exception {
|
||||||
LOG.debug("Processing Plan Command.");
|
LOG.debug("Processing Plan Command.");
|
||||||
Preconditions.checkState(cmd.hasOption(DiskBalancer.PLAN));
|
Preconditions.checkState(cmd.hasOption(DiskBalancerCLI.PLAN));
|
||||||
verifyCommandOptions(DiskBalancer.PLAN, cmd);
|
verifyCommandOptions(DiskBalancerCLI.PLAN, cmd);
|
||||||
|
|
||||||
if (cmd.getOptionValue(DiskBalancer.PLAN) == null) {
|
if (cmd.getOptionValue(DiskBalancerCLI.PLAN) == null) {
|
||||||
throw new IllegalArgumentException("A node name is required to create a" +
|
throw new IllegalArgumentException("A node name is required to create a" +
|
||||||
" plan.");
|
" plan.");
|
||||||
}
|
}
|
||||||
|
|
||||||
if (cmd.hasOption(DiskBalancer.BANDWIDTH)) {
|
if (cmd.hasOption(DiskBalancerCLI.BANDWIDTH)) {
|
||||||
this.bandwidth = Integer.parseInt(cmd.getOptionValue(DiskBalancer
|
this.bandwidth = Integer.parseInt(cmd.getOptionValue(DiskBalancerCLI
|
||||||
.BANDWIDTH));
|
.BANDWIDTH));
|
||||||
}
|
}
|
||||||
|
|
||||||
if (cmd.hasOption(DiskBalancer.MAXERROR)) {
|
if (cmd.hasOption(DiskBalancerCLI.MAXERROR)) {
|
||||||
this.maxError = Integer.parseInt(cmd.getOptionValue(DiskBalancer
|
this.maxError = Integer.parseInt(cmd.getOptionValue(DiskBalancerCLI
|
||||||
.MAXERROR));
|
.MAXERROR));
|
||||||
}
|
}
|
||||||
|
|
||||||
readClusterInfo(cmd);
|
readClusterInfo(cmd);
|
||||||
String output = null;
|
String output = null;
|
||||||
if (cmd.hasOption(DiskBalancer.OUTFILE)) {
|
if (cmd.hasOption(DiskBalancerCLI.OUTFILE)) {
|
||||||
output = cmd.getOptionValue(DiskBalancer.OUTFILE);
|
output = cmd.getOptionValue(DiskBalancerCLI.OUTFILE);
|
||||||
}
|
}
|
||||||
setOutputPath(output);
|
setOutputPath(output);
|
||||||
|
|
||||||
// -plan nodename is the command line argument.
|
// -plan nodename is the command line argument.
|
||||||
DiskBalancerDataNode node = getNode(cmd.getOptionValue(DiskBalancer.PLAN));
|
DiskBalancerDataNode node =
|
||||||
|
getNode(cmd.getOptionValue(DiskBalancerCLI.PLAN));
|
||||||
if (node == null) {
|
if (node == null) {
|
||||||
throw new IllegalArgumentException("Unable to find the specified node. " +
|
throw new IllegalArgumentException("Unable to find the specified node. " +
|
||||||
cmd.getOptionValue(DiskBalancer.PLAN));
|
cmd.getOptionValue(DiskBalancerCLI.PLAN));
|
||||||
}
|
}
|
||||||
this.thresholdPercentage = getThresholdPercentage(cmd);
|
this.thresholdPercentage = getThresholdPercentage(cmd);
|
||||||
|
|
||||||
|
@ -124,8 +125,8 @@ public class PlanCommand extends Command {
|
||||||
|
|
||||||
|
|
||||||
try (FSDataOutputStream beforeStream = create(String.format(
|
try (FSDataOutputStream beforeStream = create(String.format(
|
||||||
DiskBalancer.BEFORE_TEMPLATE,
|
DiskBalancerCLI.BEFORE_TEMPLATE,
|
||||||
cmd.getOptionValue(DiskBalancer.PLAN)))) {
|
cmd.getOptionValue(DiskBalancerCLI.PLAN)))) {
|
||||||
beforeStream.write(getCluster().toJson()
|
beforeStream.write(getCluster().toJson()
|
||||||
.getBytes(StandardCharsets.UTF_8));
|
.getBytes(StandardCharsets.UTF_8));
|
||||||
}
|
}
|
||||||
|
@ -133,17 +134,17 @@ public class PlanCommand extends Command {
|
||||||
if (plan != null && plan.getVolumeSetPlans().size() > 0) {
|
if (plan != null && plan.getVolumeSetPlans().size() > 0) {
|
||||||
LOG.info("Writing plan to : {}", getOutputPath());
|
LOG.info("Writing plan to : {}", getOutputPath());
|
||||||
try (FSDataOutputStream planStream = create(String.format(
|
try (FSDataOutputStream planStream = create(String.format(
|
||||||
DiskBalancer.PLAN_TEMPLATE,
|
DiskBalancerCLI.PLAN_TEMPLATE,
|
||||||
cmd.getOptionValue(DiskBalancer.PLAN)))) {
|
cmd.getOptionValue(DiskBalancerCLI.PLAN)))) {
|
||||||
planStream.write(plan.toJson().getBytes(StandardCharsets.UTF_8));
|
planStream.write(plan.toJson().getBytes(StandardCharsets.UTF_8));
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
LOG.info("No plan generated. DiskBalancing not needed for node: {} " +
|
LOG.info("No plan generated. DiskBalancing not needed for node: {} " +
|
||||||
"threshold used: {}", cmd.getOptionValue(DiskBalancer.PLAN),
|
"threshold used: {}", cmd.getOptionValue(DiskBalancerCLI.PLAN),
|
||||||
this.thresholdPercentage);
|
this.thresholdPercentage);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (cmd.hasOption(DiskBalancer.VERBOSE) && plans.size() > 0) {
|
if (cmd.hasOption(DiskBalancerCLI.VERBOSE) && plans.size() > 0) {
|
||||||
printToScreen(plans);
|
printToScreen(plans);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
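The two try-with-resources blocks above derive their file names from the node name passed to -plan, via String.format over the DiskBalancerCLI templates (the before-snapshot uses BEFORE_TEMPLATE, the plan itself uses PLAN_TEMPLATE, which appears later in this diff as "%s.plan.json"). A minimal sketch of that naming step in isolation; the node name below is hypothetical and not taken from the commit:

// Sketch only: how the plan file name is built from the -plan argument.
public class PlanFileNameSketch {
  public static void main(String[] args) {
    final String planTemplate = "%s.plan.json";   // DiskBalancerCLI.PLAN_TEMPLATE, per this diff
    String node = "datanode1.example.com";        // hypothetical value of cmd.getOptionValue(DiskBalancerCLI.PLAN)
    System.out.println(String.format(planTemplate, node));  // datanode1.example.com.plan.json
  }
}

So an invocation along the lines of hdfs diskbalancer -plan datanode1.example.com (hostname hypothetical, matching the "-plan <hostname> [options]" help string below) leaves the generated plan in datanode1.example.com.plan.json under the output path set earlier in execute().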
|
@ -162,8 +163,8 @@ public class PlanCommand extends Command {
|
||||||
" will balance the data.";
|
" will balance the data.";
|
||||||
|
|
||||||
HelpFormatter helpFormatter = new HelpFormatter();
|
HelpFormatter helpFormatter = new HelpFormatter();
|
||||||
helpFormatter.printHelp("hdfs diskbalancer -plan " +
|
helpFormatter.printHelp("hdfs diskbalancer -plan <hostname> [options]",
|
||||||
"<hostname> [options]", header, DiskBalancer.getPlanOptions(), footer);
|
header, DiskBalancerCLI.getPlanOptions(), footer);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -174,8 +175,8 @@ public class PlanCommand extends Command {
|
||||||
*/
|
*/
|
||||||
private double getThresholdPercentage(CommandLine cmd) {
|
private double getThresholdPercentage(CommandLine cmd) {
|
||||||
Double value = 0.0;
|
Double value = 0.0;
|
||||||
if (cmd.hasOption(DiskBalancer.THRESHOLD)) {
|
if (cmd.hasOption(DiskBalancerCLI.THRESHOLD)) {
|
||||||
value = Double.parseDouble(cmd.getOptionValue(DiskBalancer.THRESHOLD));
|
value = Double.parseDouble(cmd.getOptionValue(DiskBalancerCLI.THRESHOLD));
|
||||||
}
|
}
|
||||||
|
|
||||||
if ((value <= 0.0) || (value > 100.0)) {
|
if ((value <= 0.0) || (value > 100.0)) {
|
||||||
|
|
|
@ -27,7 +27,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
|
||||||
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
|
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
|
||||||
import org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus;
|
import org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus;
|
||||||
import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException;
|
import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException;
|
||||||
import org.apache.hadoop.hdfs.tools.DiskBalancer;
|
import org.apache.hadoop.hdfs.tools.DiskBalancerCLI;
|
||||||
import org.apache.hadoop.net.NetUtils;
|
import org.apache.hadoop.net.NetUtils;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -42,9 +42,10 @@ public class QueryCommand extends Command {
|
||||||
*/
|
*/
|
||||||
public QueryCommand(Configuration conf) {
|
public QueryCommand(Configuration conf) {
|
||||||
super(conf);
|
super(conf);
|
||||||
addValidCommandParameters(DiskBalancer.QUERY, "Queries the status of disk" +
|
addValidCommandParameters(DiskBalancerCLI.QUERY,
|
||||||
" plan running on a given datanode.");
|
"Queries the status of disk plan running on a given datanode.");
|
||||||
addValidCommandParameters(DiskBalancer.VERBOSE, "Prints verbose results.");
|
addValidCommandParameters(DiskBalancerCLI.VERBOSE,
|
||||||
|
"Prints verbose results.");
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -55,9 +56,9 @@ public class QueryCommand extends Command {
|
||||||
@Override
|
@Override
|
||||||
public void execute(CommandLine cmd) throws Exception {
|
public void execute(CommandLine cmd) throws Exception {
|
||||||
LOG.info("Executing \"query plan\" command.");
|
LOG.info("Executing \"query plan\" command.");
|
||||||
Preconditions.checkState(cmd.hasOption(DiskBalancer.QUERY));
|
Preconditions.checkState(cmd.hasOption(DiskBalancerCLI.QUERY));
|
||||||
verifyCommandOptions(DiskBalancer.QUERY, cmd);
|
verifyCommandOptions(DiskBalancerCLI.QUERY, cmd);
|
||||||
String nodeName = cmd.getOptionValue(DiskBalancer.QUERY);
|
String nodeName = cmd.getOptionValue(DiskBalancerCLI.QUERY);
|
||||||
Preconditions.checkNotNull(nodeName);
|
Preconditions.checkNotNull(nodeName);
|
||||||
nodeName = nodeName.trim();
|
nodeName = nodeName.trim();
|
||||||
String nodeAddress = nodeName;
|
String nodeAddress = nodeName;
|
||||||
|
@ -79,7 +80,7 @@ public class QueryCommand extends Command {
|
||||||
workStatus.getPlanID(),
|
workStatus.getPlanID(),
|
||||||
workStatus.getResult().toString());
|
workStatus.getResult().toString());
|
||||||
|
|
||||||
if (cmd.hasOption(DiskBalancer.VERBOSE)) {
|
if (cmd.hasOption(DiskBalancerCLI.VERBOSE)) {
|
||||||
System.out.printf("%s", workStatus.currentStateString());
|
System.out.printf("%s", workStatus.currentStateString());
|
||||||
}
|
}
|
||||||
} catch (DiskBalancerException ex) {
|
} catch (DiskBalancerException ex) {
|
||||||
|
@ -101,6 +102,6 @@ public class QueryCommand extends Command {
|
||||||
|
|
||||||
HelpFormatter helpFormatter = new HelpFormatter();
|
HelpFormatter helpFormatter = new HelpFormatter();
|
||||||
helpFormatter.printHelp("hdfs diskbalancer -query <hostname> [options]",
|
helpFormatter.printHelp("hdfs diskbalancer -query <hostname> [options]",
|
||||||
header, DiskBalancer.getQueryOptions(), footer);
|
header, DiskBalancerCLI.getQueryOptions(), footer);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -30,7 +30,7 @@ import org.apache.hadoop.conf.Configuration;
|
||||||
import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode;
|
import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode;
|
||||||
import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume;
|
import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume;
|
||||||
import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet;
|
import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet;
|
||||||
import org.apache.hadoop.hdfs.tools.DiskBalancer;
|
import org.apache.hadoop.hdfs.tools.DiskBalancerCLI;
|
||||||
|
|
||||||
import com.google.common.base.Preconditions;
|
import com.google.common.base.Preconditions;
|
||||||
import com.google.common.collect.Lists;
|
import com.google.common.collect.Lists;
|
||||||
|
@ -52,15 +52,15 @@ public class ReportCommand extends Command {
|
||||||
super(conf);
|
super(conf);
|
||||||
this.out = out;
|
this.out = out;
|
||||||
|
|
||||||
addValidCommandParameters(DiskBalancer.REPORT,
|
addValidCommandParameters(DiskBalancerCLI.REPORT,
|
||||||
"Report volume information of nodes.");
|
"Report volume information of nodes.");
|
||||||
|
|
||||||
String desc = String.format(
|
String desc = String.format(
|
||||||
"Top number of nodes to be processed. Default: %d", getDefaultTop());
|
"Top number of nodes to be processed. Default: %d", getDefaultTop());
|
||||||
addValidCommandParameters(DiskBalancer.TOP, desc);
|
addValidCommandParameters(DiskBalancerCLI.TOP, desc);
|
||||||
|
|
||||||
desc = String.format("Print out volume information for a DataNode.");
|
desc = String.format("Print out volume information for a DataNode.");
|
||||||
addValidCommandParameters(DiskBalancer.NODE, desc);
|
addValidCommandParameters(DiskBalancerCLI.NODE, desc);
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
|
@ -69,8 +69,8 @@ public class ReportCommand extends Command {
|
||||||
String outputLine = "Processing report command";
|
String outputLine = "Processing report command";
|
||||||
recordOutput(result, outputLine);
|
recordOutput(result, outputLine);
|
||||||
|
|
||||||
Preconditions.checkState(cmd.hasOption(DiskBalancer.REPORT));
|
Preconditions.checkState(cmd.hasOption(DiskBalancerCLI.REPORT));
|
||||||
verifyCommandOptions(DiskBalancer.REPORT, cmd);
|
verifyCommandOptions(DiskBalancerCLI.REPORT, cmd);
|
||||||
readClusterInfo(cmd);
|
readClusterInfo(cmd);
|
||||||
|
|
||||||
final String nodeFormat =
|
final String nodeFormat =
|
||||||
|
@ -81,7 +81,7 @@ public class ReportCommand extends Command {
|
||||||
"[%s: volume-%s] - %.2f used: %d/%d, %.2f free: %d/%d, "
|
"[%s: volume-%s] - %.2f used: %d/%d, %.2f free: %d/%d, "
|
||||||
+ "isFailed: %s, isReadOnly: %s, isSkip: %s, isTransient: %s.";
|
+ "isFailed: %s, isReadOnly: %s, isSkip: %s, isTransient: %s.";
|
||||||
|
|
||||||
if (cmd.hasOption(DiskBalancer.NODE)) {
|
if (cmd.hasOption(DiskBalancerCLI.NODE)) {
|
||||||
/*
|
/*
|
||||||
* Reporting volume information for a specific DataNode
|
* Reporting volume information for a specific DataNode
|
||||||
*/
|
*/
|
||||||
|
@ -136,7 +136,7 @@ public class ReportCommand extends Command {
|
||||||
* get value that identifies a DataNode from command line, it could be UUID,
|
* get value that identifies a DataNode from command line, it could be UUID,
|
||||||
* IP address or host name.
|
* IP address or host name.
|
||||||
*/
|
*/
|
||||||
final String nodeVal = cmd.getOptionValue(DiskBalancer.NODE);
|
final String nodeVal = cmd.getOptionValue(DiskBalancerCLI.NODE);
|
||||||
|
|
||||||
if (StringUtils.isBlank(nodeVal)) {
|
if (StringUtils.isBlank(nodeVal)) {
|
||||||
outputLine = "The value for '-node' is neither specified or empty.";
|
outputLine = "The value for '-node' is neither specified or empty.";
|
||||||
|
@ -211,6 +211,6 @@ public class ReportCommand extends Command {
|
||||||
HelpFormatter helpFormatter = new HelpFormatter();
|
HelpFormatter helpFormatter = new HelpFormatter();
|
||||||
helpFormatter.printHelp("hdfs diskbalancer -fs http://namenode.uri " +
|
helpFormatter.printHelp("hdfs diskbalancer -fs http://namenode.uri " +
|
||||||
"-report [options]",
|
"-report [options]",
|
||||||
header, DiskBalancer.getReportOptions(), footer);
|
header, DiskBalancerCLI.getReportOptions(), footer);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -168,6 +168,7 @@ import org.apache.hadoop.ipc.RetryCache.CacheEntry;
|
||||||
import org.apache.hadoop.ipc.RetryCache.CacheEntryWithPayload;
|
import org.apache.hadoop.ipc.RetryCache.CacheEntryWithPayload;
|
||||||
import org.apache.hadoop.ipc.Server;
|
import org.apache.hadoop.ipc.Server;
|
||||||
import org.apache.hadoop.ipc.StandbyException;
|
import org.apache.hadoop.ipc.StandbyException;
|
||||||
|
import org.apache.hadoop.ipc.WritableRpcEngine;
|
||||||
import org.apache.hadoop.ipc.RefreshRegistry;
|
import org.apache.hadoop.ipc.RefreshRegistry;
|
||||||
import org.apache.hadoop.ipc.RefreshResponse;
|
import org.apache.hadoop.ipc.RefreshResponse;
|
||||||
import org.apache.hadoop.net.Node;
|
import org.apache.hadoop.net.Node;
|
||||||
|
@ -316,6 +317,8 @@ public class NameNodeRpcServer implements NamenodeProtocols {
|
||||||
new TraceAdminProtocolServerSideTranslatorPB(this);
|
new TraceAdminProtocolServerSideTranslatorPB(this);
|
||||||
BlockingService traceAdminService = TraceAdminService
|
BlockingService traceAdminService = TraceAdminService
|
||||||
.newReflectiveBlockingService(traceAdminXlator);
|
.newReflectiveBlockingService(traceAdminXlator);
|
||||||
|
|
||||||
|
WritableRpcEngine.ensureInitialized();
|
||||||
|
|
||||||
InetSocketAddress serviceRpcAddr = nn.getServiceRpcServerAddress(conf);
|
InetSocketAddress serviceRpcAddr = nn.getServiceRpcServerAddress(conf);
|
||||||
if (serviceRpcAddr != null) {
|
if (serviceRpcAddr != null) {
|
||||||
|
|
|
@ -50,7 +50,7 @@ import java.io.PrintStream;
|
||||||
* At very high level diskbalancer computes a set of moves that will make disk
|
* At very high level diskbalancer computes a set of moves that will make disk
|
||||||
* utilization equal and then those moves are executed by the datanode.
|
* utilization equal and then those moves are executed by the datanode.
|
||||||
*/
|
*/
|
||||||
public class DiskBalancer extends Configured implements Tool {
|
public class DiskBalancerCLI extends Configured implements Tool {
|
||||||
/**
|
/**
|
||||||
* Computes a plan for a given set of nodes.
|
* Computes a plan for a given set of nodes.
|
||||||
*/
|
*/
|
||||||
|
@ -126,7 +126,7 @@ public class DiskBalancer extends Configured implements Tool {
|
||||||
*/
|
*/
|
||||||
public static final String PLAN_TEMPLATE = "%s.plan.json";
|
public static final String PLAN_TEMPLATE = "%s.plan.json";
|
||||||
private static final Logger LOG =
|
private static final Logger LOG =
|
||||||
LoggerFactory.getLogger(DiskBalancer.class);
|
LoggerFactory.getLogger(DiskBalancerCLI.class);
|
||||||
|
|
||||||
private static final Options PLAN_OPTIONS = new Options();
|
private static final Options PLAN_OPTIONS = new Options();
|
||||||
private static final Options EXECUTE_OPTIONS = new Options();
|
private static final Options EXECUTE_OPTIONS = new Options();
|
||||||
|
@ -140,7 +140,7 @@ public class DiskBalancer extends Configured implements Tool {
|
||||||
*
|
*
|
||||||
* @param conf
|
* @param conf
|
||||||
*/
|
*/
|
||||||
public DiskBalancer(Configuration conf) {
|
public DiskBalancerCLI(Configuration conf) {
|
||||||
super(conf);
|
super(conf);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -151,7 +151,7 @@ public class DiskBalancer extends Configured implements Tool {
|
||||||
* @throws Exception
|
* @throws Exception
|
||||||
*/
|
*/
|
||||||
public static void main(String[] argv) throws Exception {
|
public static void main(String[] argv) throws Exception {
|
||||||
DiskBalancer shell = new DiskBalancer(new HdfsConfiguration());
|
DiskBalancerCLI shell = new DiskBalancerCLI(new HdfsConfiguration());
|
||||||
int res = 0;
|
int res = 0;
|
||||||
try {
|
try {
|
||||||
res = ToolRunner.run(shell, argv);
|
res = ToolRunner.run(shell, argv);
|
||||||
|
@ -446,27 +446,27 @@ public class DiskBalancer extends Configured implements Tool {
|
||||||
private int dispatch(CommandLine cmd, Options opts, final PrintStream out)
|
private int dispatch(CommandLine cmd, Options opts, final PrintStream out)
|
||||||
throws Exception {
|
throws Exception {
|
||||||
Command currentCommand = null;
|
Command currentCommand = null;
|
||||||
if (cmd.hasOption(DiskBalancer.PLAN)) {
|
if (cmd.hasOption(DiskBalancerCLI.PLAN)) {
|
||||||
currentCommand = new PlanCommand(getConf());
|
currentCommand = new PlanCommand(getConf());
|
||||||
}
|
}
|
||||||
|
|
||||||
if (cmd.hasOption(DiskBalancer.EXECUTE)) {
|
if (cmd.hasOption(DiskBalancerCLI.EXECUTE)) {
|
||||||
currentCommand = new ExecuteCommand(getConf());
|
currentCommand = new ExecuteCommand(getConf());
|
||||||
}
|
}
|
||||||
|
|
||||||
if (cmd.hasOption(DiskBalancer.QUERY)) {
|
if (cmd.hasOption(DiskBalancerCLI.QUERY)) {
|
||||||
currentCommand = new QueryCommand(getConf());
|
currentCommand = new QueryCommand(getConf());
|
||||||
}
|
}
|
||||||
|
|
||||||
if (cmd.hasOption(DiskBalancer.CANCEL)) {
|
if (cmd.hasOption(DiskBalancerCLI.CANCEL)) {
|
||||||
currentCommand = new CancelCommand(getConf());
|
currentCommand = new CancelCommand(getConf());
|
||||||
}
|
}
|
||||||
|
|
||||||
if (cmd.hasOption(DiskBalancer.REPORT)) {
|
if (cmd.hasOption(DiskBalancerCLI.REPORT)) {
|
||||||
currentCommand = new ReportCommand(getConf(), out);
|
currentCommand = new ReportCommand(getConf(), out);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (cmd.hasOption(DiskBalancer.HELP)) {
|
if (cmd.hasOption(DiskBalancerCLI.HELP)) {
|
||||||
currentCommand = new HelpCommand(getConf());
|
currentCommand = new HelpCommand(getConf());
|
||||||
}
|
}
|
||||||
|
|
|
@ -31,6 +31,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSImageUtil;
|
||||||
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary;
|
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary;
|
||||||
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection;
|
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection;
|
||||||
import org.apache.hadoop.util.LimitInputStream;
|
import org.apache.hadoop.util.LimitInputStream;
|
||||||
|
import org.apache.hadoop.util.StringUtils;
|
||||||
|
|
||||||
import com.google.common.base.Preconditions;
|
import com.google.common.base.Preconditions;
|
||||||
|
|
||||||
|
@ -75,11 +76,14 @@ final class FileDistributionCalculator {
|
||||||
private long totalSpace;
|
private long totalSpace;
|
||||||
private long maxFileSize;
|
private long maxFileSize;
|
||||||
|
|
||||||
|
private boolean formatOutput = false;
|
||||||
|
|
||||||
FileDistributionCalculator(Configuration conf, long maxSize, int steps,
|
FileDistributionCalculator(Configuration conf, long maxSize, int steps,
|
||||||
PrintStream out) {
|
boolean formatOutput, PrintStream out) {
|
||||||
this.conf = conf;
|
this.conf = conf;
|
||||||
this.maxSize = maxSize == 0 ? MAX_SIZE_DEFAULT : maxSize;
|
this.maxSize = maxSize == 0 ? MAX_SIZE_DEFAULT : maxSize;
|
||||||
this.steps = steps == 0 ? INTERVAL_DEFAULT : steps;
|
this.steps = steps == 0 ? INTERVAL_DEFAULT : steps;
|
||||||
|
this.formatOutput = formatOutput;
|
||||||
this.out = out;
|
this.out = out;
|
||||||
long numIntervals = this.maxSize / this.steps;
|
long numIntervals = this.maxSize / this.steps;
|
||||||
// avoid OutOfMemoryError when allocating an array
|
// avoid OutOfMemoryError when allocating an array
|
||||||
|
@ -148,10 +152,20 @@ final class FileDistributionCalculator {
|
||||||
|
|
||||||
private void output() {
|
private void output() {
|
||||||
// write the distribution into the output file
|
// write the distribution into the output file
|
||||||
out.print("Size\tNumFiles\n");
|
out.print((formatOutput ? "Size Range" : "Size") + "\tNumFiles\n");
|
||||||
for (int i = 0; i < distribution.length; i++) {
|
for (int i = 0; i < distribution.length; i++) {
|
||||||
if (distribution[i] != 0) {
|
if (distribution[i] != 0) {
|
||||||
out.print(((long) i * steps) + "\t" + distribution[i]);
|
if (formatOutput) {
|
||||||
|
out.print((i == 0 ? "[" : "(")
|
||||||
|
+ StringUtils.byteDesc(((long) (i == 0 ? 0 : i - 1) * steps))
|
||||||
|
+ ", "
|
||||||
|
+ StringUtils.byteDesc((long)
|
||||||
|
(i == distribution.length - 1 ? maxFileSize : i * steps))
|
||||||
|
+ "]\t" + distribution[i]);
|
||||||
|
} else {
|
||||||
|
out.print(((long) i * steps) + "\t" + distribution[i]);
|
||||||
|
}
|
||||||
|
|
||||||
out.print('\n');
|
out.print('\n');
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
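The formatOutput branch added above builds each row label from StringUtils.byteDesc, opening the first bucket with "[" and every later bucket with "(", and closing the last bucket at maxFileSize. A standalone sketch of just that label logic; the step, maxFileSize and bucket count here are hypothetical illustration values, not taken from a real fsimage:

// Sketch of the range-label construction used in the formatOutput branch above.
import org.apache.hadoop.util.StringUtils;

public class RangeLabelSketch {
  public static void main(String[] args) {
    long step = 2L * 1024 * 1024;         // 2MB, the documented default granularity
    long maxFileSize = 7L * 1024 * 1024;  // hypothetical largest file size seen
    int buckets = 4;                      // hypothetical distribution.length
    for (int i = 0; i < buckets; i++) {
      String label = (i == 0 ? "[" : "(")
          + StringUtils.byteDesc((long) (i == 0 ? 0 : i - 1) * step)
          + ", "
          + StringUtils.byteDesc(i == buckets - 1 ? maxFileSize : (long) i * step)
          + "]";
      System.out.println(label + "\t<count>");  // labels like "(0 B, 2 MB]"
    }
  }
}

In other words, a bucket that the default mode prints as a raw byte offset such as 2097152<TAB>count is reported as (0 B, 2 MB]<TAB>count once -format is passed (the count shown here is hypothetical).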
|
|
|
@ -20,6 +20,8 @@ package org.apache.hadoop.hdfs.tools.offlineImageViewer;
|
||||||
import java.io.IOException;
|
import java.io.IOException;
|
||||||
import java.util.LinkedList;
|
import java.util.LinkedList;
|
||||||
|
|
||||||
|
import org.apache.hadoop.util.StringUtils;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* File size distribution visitor.
|
* File size distribution visitor.
|
||||||
*
|
*
|
||||||
|
@ -67,6 +69,7 @@ class FileDistributionVisitor extends TextWriterImageVisitor {
|
||||||
private FileContext current;
|
private FileContext current;
|
||||||
|
|
||||||
private boolean inInode = false;
|
private boolean inInode = false;
|
||||||
|
private boolean formatOutput = false;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* File or directory information.
|
* File or directory information.
|
||||||
|
@ -78,12 +81,12 @@ class FileDistributionVisitor extends TextWriterImageVisitor {
|
||||||
int replication;
|
int replication;
|
||||||
}
|
}
|
||||||
|
|
||||||
public FileDistributionVisitor(String filename,
|
public FileDistributionVisitor(String filename, long maxSize, int step,
|
||||||
long maxSize,
|
boolean formatOutput) throws IOException {
|
||||||
int step) throws IOException {
|
|
||||||
super(filename, false);
|
super(filename, false);
|
||||||
this.maxSize = (maxSize == 0 ? MAX_SIZE_DEFAULT : maxSize);
|
this.maxSize = (maxSize == 0 ? MAX_SIZE_DEFAULT : maxSize);
|
||||||
this.step = (step == 0 ? INTERVAL_DEFAULT : step);
|
this.step = (step == 0 ? INTERVAL_DEFAULT : step);
|
||||||
|
this.formatOutput = formatOutput;
|
||||||
long numIntervals = this.maxSize / this.step;
|
long numIntervals = this.maxSize / this.step;
|
||||||
if(numIntervals >= Integer.MAX_VALUE)
|
if(numIntervals >= Integer.MAX_VALUE)
|
||||||
throw new IOException("Too many distribution intervals " + numIntervals);
|
throw new IOException("Too many distribution intervals " + numIntervals);
|
||||||
|
@ -113,9 +116,22 @@ class FileDistributionVisitor extends TextWriterImageVisitor {
|
||||||
|
|
||||||
private void output() throws IOException {
|
private void output() throws IOException {
|
||||||
// write the distribution into the output file
|
// write the distribution into the output file
|
||||||
write("Size\tNumFiles\n");
|
write((formatOutput ? "Size Range" : "Size") + "\tNumFiles\n");
|
||||||
for(int i = 0; i < distribution.length; i++)
|
for (int i = 0; i < distribution.length; i++) {
|
||||||
write(((long)i * step) + "\t" + distribution[i] + "\n");
|
if (distribution[i] > 0) {
|
||||||
|
if (formatOutput) {
|
||||||
|
write((i == 0 ? "[" : "(")
|
||||||
|
+ StringUtils.byteDesc(((long) (i == 0 ? 0 : i - 1) * step))
|
||||||
|
+ ", "
|
||||||
|
+ StringUtils.byteDesc((long)
|
||||||
|
(i == distribution.length - 1 ? maxFileSize : i * step))
|
||||||
|
+ "]\t"
|
||||||
|
+ distribution[i] + "\n");
|
||||||
|
} else {
|
||||||
|
write(((long) i * step) + "\t" + distribution[i] + "\n");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
System.out.println("totalFiles = " + totalFiles);
|
System.out.println("totalFiles = " + totalFiles);
|
||||||
System.out.println("totalDirectories = " + totalDirectories);
|
System.out.println("totalDirectories = " + totalDirectories);
|
||||||
System.out.println("totalBlocks = " + totalBlocks);
|
System.out.println("totalBlocks = " + totalBlocks);
|
||||||
|
|
|
@ -46,61 +46,63 @@ import org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.PositionTrackingIn
|
||||||
public class OfflineImageViewer {
|
public class OfflineImageViewer {
|
||||||
public static final Log LOG = LogFactory.getLog(OfflineImageViewer.class);
|
public static final Log LOG = LogFactory.getLog(OfflineImageViewer.class);
|
||||||
|
|
||||||
private final static String usage =
|
private final static String usage =
|
||||||
"Usage: bin/hdfs oiv_legacy [OPTIONS] -i INPUTFILE -o OUTPUTFILE\n" +
|
"Usage: bin/hdfs oiv_legacy [OPTIONS] -i INPUTFILE -o OUTPUTFILE\n"
|
||||||
"Offline Image Viewer\n" +
|
+ "Offline Image Viewer\n"
|
||||||
"View a Hadoop fsimage INPUTFILE using the specified PROCESSOR,\n" +
|
+ "View a Hadoop fsimage INPUTFILE using the specified PROCESSOR,\n"
|
||||||
"saving the results in OUTPUTFILE.\n" +
|
+ "saving the results in OUTPUTFILE.\n"
|
||||||
"\n" +
|
+ "\n"
|
||||||
"The oiv utility will attempt to parse correctly formed image files\n" +
|
+ "The oiv utility will attempt to parse correctly formed image files\n"
|
||||||
"and will abort fail with mal-formed image files.\n" +
|
+ "and will abort fail with mal-formed image files.\n"
|
||||||
"\n" +
|
+ "\n"
|
||||||
"The tool works offline and does not require a running cluster in\n" +
|
+ "The tool works offline and does not require a running cluster in\n"
|
||||||
"order to process an image file.\n" +
|
+ "order to process an image file.\n"
|
||||||
"\n" +
|
+ "\n"
|
||||||
"The following image processors are available:\n" +
|
+ "The following image processors are available:\n"
|
||||||
" * Ls: The default image processor generates an lsr-style listing\n" +
|
+ " * Ls: The default image processor generates an lsr-style listing\n"
|
||||||
" of the files in the namespace, with the same fields in the same\n" +
|
+ " of the files in the namespace, with the same fields in the same\n"
|
||||||
" order. Note that in order to correctly determine file sizes,\n" +
|
+ " order. Note that in order to correctly determine file sizes,\n"
|
||||||
" this formatter cannot skip blocks and will override the\n" +
|
+ " this formatter cannot skip blocks and will override the\n"
|
||||||
" -skipBlocks option.\n" +
|
+ " -skipBlocks option.\n"
|
||||||
" * Indented: This processor enumerates over all of the elements in\n" +
|
+ " * Indented: This processor enumerates over all of the elements in\n"
|
||||||
" the fsimage file, using levels of indentation to delineate\n" +
|
+ " the fsimage file, using levels of indentation to delineate\n"
|
||||||
" sections within the file.\n" +
|
+ " sections within the file.\n"
|
||||||
" * Delimited: Generate a text file with all of the elements common\n" +
|
+ " * Delimited: Generate a text file with all of the elements common\n"
|
||||||
" to both inodes and inodes-under-construction, separated by a\n" +
|
+ " to both inodes and inodes-under-construction, separated by a\n"
|
||||||
" delimiter. The default delimiter is \u0001, though this may be\n" +
|
+ " delimiter. The default delimiter is \u0001, though this may be\n"
|
||||||
" changed via the -delimiter argument. This processor also overrides\n" +
|
+ " changed via the -delimiter argument. This processor also overrides\n"
|
||||||
" the -skipBlocks option for the same reason as the Ls processor\n" +
|
+ " the -skipBlocks option for the same reason as the Ls processor\n"
|
||||||
" * XML: This processor creates an XML document with all elements of\n" +
|
+ " * XML: This processor creates an XML document with all elements of\n"
|
||||||
" the fsimage enumerated, suitable for further analysis by XML\n" +
|
+ " the fsimage enumerated, suitable for further analysis by XML\n"
|
||||||
" tools.\n" +
|
+ " tools.\n"
|
||||||
" * FileDistribution: This processor analyzes the file size\n" +
|
+ " * FileDistribution: This processor analyzes the file size\n"
|
||||||
" distribution in the image.\n" +
|
+ " distribution in the image.\n"
|
||||||
" -maxSize specifies the range [0, maxSize] of file sizes to be\n" +
|
+ " -maxSize specifies the range [0, maxSize] of file sizes to be\n"
|
||||||
" analyzed (128GB by default).\n" +
|
+ " analyzed (128GB by default).\n"
|
||||||
" -step defines the granularity of the distribution. (2MB by default)\n" +
|
+ " -step defines the granularity of the distribution. (2MB by default)\n"
|
||||||
" * NameDistribution: This processor analyzes the file names\n" +
|
+ " -format formats the output result in a human-readable fashion\n"
|
||||||
" in the image and prints total number of file names and how frequently\n" +
|
+ " rather than a number of bytes. (false by default)\n"
|
||||||
" file names are reused.\n" +
|
+ " * NameDistribution: This processor analyzes the file names\n"
|
||||||
"\n" +
|
+ " in the image and prints total number of file names and how frequently\n"
|
||||||
"Required command line arguments:\n" +
|
+ " file names are reused.\n"
|
||||||
"-i,--inputFile <arg> FSImage file to process.\n" +
|
+ "\n"
|
||||||
"-o,--outputFile <arg> Name of output file. If the specified\n" +
|
+ "Required command line arguments:\n"
|
||||||
" file exists, it will be overwritten.\n" +
|
+ "-i,--inputFile <arg> FSImage file to process.\n"
|
||||||
"\n" +
|
+ "-o,--outputFile <arg> Name of output file. If the specified\n"
|
||||||
"Optional command line arguments:\n" +
|
+ " file exists, it will be overwritten.\n"
|
||||||
"-p,--processor <arg> Select which type of processor to apply\n" +
|
+ "\n"
|
||||||
" against image file." +
|
+ "Optional command line arguments:\n"
|
||||||
" (Ls|XML|Delimited|Indented|FileDistribution).\n" +
|
+ "-p,--processor <arg> Select which type of processor to apply\n"
|
||||||
"-h,--help Display usage information and exit\n" +
|
+ " against image file."
|
||||||
"-printToScreen For processors that write to a file, also\n" +
|
+ " (Ls|XML|Delimited|Indented|FileDistribution).\n"
|
||||||
" output to screen. On large image files this\n" +
|
+ "-h,--help Display usage information and exit\n"
|
||||||
" will dramatically increase processing time.\n" +
|
+ "-printToScreen For processors that write to a file, also\n"
|
||||||
"-skipBlocks Skip inodes' blocks information. May\n" +
|
+ " output to screen. On large image files this\n"
|
||||||
" significantly decrease output.\n" +
|
+ " will dramatically increase processing time.\n"
|
||||||
" (default = false).\n" +
|
+ "-skipBlocks Skip inodes' blocks information. May\n"
|
||||||
"-delimiter <arg> Delimiting string to use with Delimited processor\n";
|
+ " significantly decrease output.\n"
|
||||||
|
+ " (default = false).\n"
|
||||||
|
+ "-delimiter <arg> Delimiting string to use with Delimited processor\n";
|
||||||
|
|
||||||
private final boolean skipBlocks;
|
private final boolean skipBlocks;
|
||||||
private final String inputFile;
|
private final String inputFile;
|
||||||
|
@ -188,6 +190,7 @@ public class OfflineImageViewer {
|
||||||
options.addOption("h", "help", false, "");
|
options.addOption("h", "help", false, "");
|
||||||
options.addOption("maxSize", true, "");
|
options.addOption("maxSize", true, "");
|
||||||
options.addOption("step", true, "");
|
options.addOption("step", true, "");
|
||||||
|
options.addOption("format", false, "");
|
||||||
options.addOption("skipBlocks", false, "");
|
options.addOption("skipBlocks", false, "");
|
||||||
options.addOption("printToScreen", false, "");
|
options.addOption("printToScreen", false, "");
|
||||||
options.addOption("delimiter", true, "");
|
options.addOption("delimiter", true, "");
|
||||||
|
@ -253,7 +256,8 @@ public class OfflineImageViewer {
|
||||||
} else if (processor.equals("FileDistribution")) {
|
} else if (processor.equals("FileDistribution")) {
|
||||||
long maxSize = Long.parseLong(cmd.getOptionValue("maxSize", "0"));
|
long maxSize = Long.parseLong(cmd.getOptionValue("maxSize", "0"));
|
||||||
int step = Integer.parseInt(cmd.getOptionValue("step", "0"));
|
int step = Integer.parseInt(cmd.getOptionValue("step", "0"));
|
||||||
v = new FileDistributionVisitor(outputFile, maxSize, step);
|
boolean formatOutput = cmd.hasOption("format");
|
||||||
|
v = new FileDistributionVisitor(outputFile, maxSize, step, formatOutput);
|
||||||
} else if (processor.equals("NameDistribution")) {
|
} else if (processor.equals("NameDistribution")) {
|
||||||
v = new NameDistributionVisitor(outputFile, printToScreen);
|
v = new NameDistributionVisitor(outputFile, printToScreen);
|
||||||
} else {
|
} else {
|
||||||
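Both the legacy viewer above and the protobuf-based OfflineImageViewerPB in the hunks that follow register -format as a no-argument option and turn it into the formatOutput boolean with cmd.hasOption("format"). A minimal, self-contained sketch of that flag handling with Apache Commons CLI; the parser class and the argument array below are assumptions for illustration, not taken from this diff:

// Sketch only: mirrors the options.addOption("format", false, "") /
// cmd.hasOption("format") wiring above, with a hypothetical argument array.
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.PosixParser;

public class FormatFlagSketch {
  public static void main(String[] args) throws Exception {
    Options options = new Options();
    options.addOption("format", false, "");  // boolean flag, takes no value
    options.addOption("step", true, "");     // takes a value, like the existing option
    CommandLine cmd = new PosixParser().parse(
        options, new String[] {"-format", "-step", "1048576"});
    boolean formatOutput = cmd.hasOption("format");               // true here
    int step = Integer.parseInt(cmd.getOptionValue("step", "0")); // 1048576
    System.out.println("formatOutput=" + formatOutput + ", step=" + step);
  }
}

With the flag wired through, an invocation along the lines of hdfs oiv_legacy -i <fsimage> -o <output> -p FileDistribution -format (paths hypothetical) produces the range-labelled table instead of raw byte offsets, and the PB-based hdfs oiv gains the same switch below.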
|
|
|
@ -67,6 +67,8 @@ public class OfflineImageViewerPB {
|
||||||
+ " -maxSize specifies the range [0, maxSize] of file sizes to be\n"
|
+ " -maxSize specifies the range [0, maxSize] of file sizes to be\n"
|
||||||
+ " analyzed (128GB by default).\n"
|
+ " analyzed (128GB by default).\n"
|
||||||
+ " -step defines the granularity of the distribution. (2MB by default)\n"
|
+ " -step defines the granularity of the distribution. (2MB by default)\n"
|
||||||
|
+ " -format formats the output result in a human-readable fashion\n"
|
||||||
|
+ " rather than a number of bytes. (false by default)\n"
|
||||||
+ " * Web: Run a viewer to expose read-only WebHDFS API.\n"
|
+ " * Web: Run a viewer to expose read-only WebHDFS API.\n"
|
||||||
+ " -addr specifies the address to listen. (localhost:5978 by default)\n"
|
+ " -addr specifies the address to listen. (localhost:5978 by default)\n"
|
||||||
+ " * Delimited (experimental): Generate a text file with all of the elements common\n"
|
+ " * Delimited (experimental): Generate a text file with all of the elements common\n"
|
||||||
|
@ -111,6 +113,7 @@ public class OfflineImageViewerPB {
|
||||||
options.addOption("h", "help", false, "");
|
options.addOption("h", "help", false, "");
|
||||||
options.addOption("maxSize", true, "");
|
options.addOption("maxSize", true, "");
|
||||||
options.addOption("step", true, "");
|
options.addOption("step", true, "");
|
||||||
|
options.addOption("format", false, "");
|
||||||
options.addOption("addr", true, "");
|
options.addOption("addr", true, "");
|
||||||
options.addOption("delimiter", true, "");
|
options.addOption("delimiter", true, "");
|
||||||
options.addOption("t", "temp", true, "");
|
options.addOption("t", "temp", true, "");
|
||||||
|
@ -172,43 +175,44 @@ public class OfflineImageViewerPB {
|
||||||
try (PrintStream out = outputFile.equals("-") ?
|
try (PrintStream out = outputFile.equals("-") ?
|
||||||
System.out : new PrintStream(outputFile, "UTF-8")) {
|
System.out : new PrintStream(outputFile, "UTF-8")) {
|
||||||
switch (processor) {
|
switch (processor) {
|
||||||
case "FileDistribution":
|
case "FileDistribution":
|
||||||
long maxSize = Long.parseLong(cmd.getOptionValue("maxSize", "0"));
|
long maxSize = Long.parseLong(cmd.getOptionValue("maxSize", "0"));
|
||||||
int step = Integer.parseInt(cmd.getOptionValue("step", "0"));
|
int step = Integer.parseInt(cmd.getOptionValue("step", "0"));
|
||||||
new FileDistributionCalculator(conf, maxSize, step, out).visit(
|
boolean formatOutput = cmd.hasOption("format");
|
||||||
new RandomAccessFile(inputFile, "r"));
|
new FileDistributionCalculator(conf, maxSize, step, formatOutput, out)
|
||||||
break;
|
.visit(new RandomAccessFile(inputFile, "r"));
|
||||||
case "XML":
|
break;
|
||||||
new PBImageXmlWriter(conf, out).visit(
|
case "XML":
|
||||||
new RandomAccessFile(inputFile, "r"));
|
new PBImageXmlWriter(conf, out).visit(new RandomAccessFile(inputFile,
|
||||||
break;
|
"r"));
|
||||||
case "ReverseXML":
|
break;
|
||||||
try {
|
case "ReverseXML":
|
||||||
OfflineImageReconstructor.run(inputFile, outputFile);
|
try {
|
||||||
} catch (Exception e) {
|
OfflineImageReconstructor.run(inputFile, outputFile);
|
||||||
System.err.println("OfflineImageReconstructor failed: " +
|
} catch (Exception e) {
|
||||||
e.getMessage());
|
System.err.println("OfflineImageReconstructor failed: "
|
||||||
e.printStackTrace(System.err);
|
+ e.getMessage());
|
||||||
System.exit(1);
|
e.printStackTrace(System.err);
|
||||||
}
|
System.exit(1);
|
||||||
break;
|
}
|
||||||
case "Web":
|
break;
|
||||||
String addr = cmd.getOptionValue("addr", "localhost:5978");
|
case "Web":
|
||||||
try (WebImageViewer viewer = new WebImageViewer(
|
String addr = cmd.getOptionValue("addr", "localhost:5978");
|
||||||
NetUtils.createSocketAddr(addr))) {
|
try (WebImageViewer viewer =
|
||||||
viewer.start(inputFile);
|
new WebImageViewer(NetUtils.createSocketAddr(addr))) {
|
||||||
}
|
viewer.start(inputFile);
|
||||||
break;
|
}
|
||||||
case "Delimited":
|
break;
|
||||||
try (PBImageDelimitedTextWriter writer =
|
case "Delimited":
|
||||||
new PBImageDelimitedTextWriter(out, delimiter, tempPath)) {
|
try (PBImageDelimitedTextWriter writer =
|
||||||
writer.visit(new RandomAccessFile(inputFile, "r"));
|
new PBImageDelimitedTextWriter(out, delimiter, tempPath)) {
|
||||||
}
|
writer.visit(new RandomAccessFile(inputFile, "r"));
|
||||||
break;
|
}
|
||||||
default:
|
break;
|
||||||
System.err.println("Invalid processor specified : " + processor);
|
default:
|
||||||
printUsage();
|
System.err.println("Invalid processor specified : " + processor);
|
||||||
return -1;
|
printUsage();
|
||||||
|
return -1;
|
||||||
}
|
}
|
||||||
return 0;
|
return 0;
|
||||||
} catch (EOFException e) {
|
} catch (EOFException e) {
|
||||||
|
|
|
@ -650,7 +650,7 @@
|
||||||
|
|
||||||
<property>
|
<property>
|
||||||
<name>dfs.blockreport.initialDelay</name>
|
<name>dfs.blockreport.initialDelay</name>
|
||||||
<value>0</value>
|
<value>0s</value>
|
||||||
<description>
|
<description>
|
||||||
Delay for first block report in seconds. Support multiple time unit
|
Delay for first block report in seconds. Support multiple time unit
|
||||||
suffix(case insensitive), as described in dfs.heartbeat.interval.
|
suffix(case insensitive), as described in dfs.heartbeat.interval.
|
||||||
|
@ -694,7 +694,7 @@
|
||||||
|
|
||||||
<property>
|
<property>
|
||||||
<name>dfs.datanode.directoryscan.interval</name>
|
<name>dfs.datanode.directoryscan.interval</name>
|
||||||
<value>21600</value>
|
<value>21600s</value>
|
||||||
<description>Interval in seconds for Datanode to scan data directories and
|
<description>Interval in seconds for Datanode to scan data directories and
|
||||||
reconcile the difference between blocks in memory and on the disk.
|
reconcile the difference between blocks in memory and on the disk.
|
||||||
Support multiple time unit suffix(case insensitive), as described
|
Support multiple time unit suffix(case insensitive), as described
|
||||||
|
@ -732,7 +732,7 @@
|
||||||
|
|
||||||
<property>
|
<property>
|
||||||
<name>dfs.heartbeat.interval</name>
|
<name>dfs.heartbeat.interval</name>
|
||||||
<value>3</value>
|
<value>3s</value>
|
||||||
<description>
|
<description>
|
||||||
Determines datanode heartbeat interval in seconds.
|
Determines datanode heartbeat interval in seconds.
|
||||||
Can use the following suffix (case insensitive):
|
Can use the following suffix (case insensitive):
|
||||||
|
@ -942,7 +942,7 @@
|
||||||
|
|
||||||
<property>
|
<property>
|
||||||
<name>dfs.namenode.decommission.interval</name>
|
<name>dfs.namenode.decommission.interval</name>
|
||||||
<value>30</value>
|
<value>30s</value>
|
||||||
<description>Namenode periodicity in seconds to check if decommission is
|
<description>Namenode periodicity in seconds to check if decommission is
|
||||||
complete. Support multiple time unit suffix(case insensitive), as described
|
complete. Support multiple time unit suffix(case insensitive), as described
|
||||||
in dfs.heartbeat.interval.
|
in dfs.heartbeat.interval.
|
||||||
|
@ -973,7 +973,7 @@
|
||||||
|
|
||||||
<property>
|
<property>
|
||||||
<name>dfs.namenode.replication.interval</name>
|
<name>dfs.namenode.replication.interval</name>
|
||||||
<value>3</value>
|
<value>3s</value>
|
||||||
<description>The periodicity in seconds with which the namenode computes
|
<description>The periodicity in seconds with which the namenode computes
|
||||||
replication work for datanodes. Support multiple time unit suffix(case insensitive),
|
replication work for datanodes. Support multiple time unit suffix(case insensitive),
|
||||||
as described in dfs.heartbeat.interval.
|
as described in dfs.heartbeat.interval.
|
||||||
|
@ -1071,7 +1071,7 @@
|
||||||
|
|
||||||
<property>
|
<property>
|
||||||
<name>dfs.namenode.checkpoint.period</name>
|
<name>dfs.namenode.checkpoint.period</name>
|
||||||
<value>3600</value>
|
<value>3600s</value>
|
||||||
<description>
|
<description>
|
||||||
The number of seconds between two periodic checkpoints.
|
The number of seconds between two periodic checkpoints.
|
||||||
Support multiple time unit suffix(case insensitive), as described
|
Support multiple time unit suffix(case insensitive), as described
|
||||||
|
@ -1090,7 +1090,7 @@
|
||||||
|
|
||||||
<property>
|
<property>
|
||||||
<name>dfs.namenode.checkpoint.check.period</name>
|
<name>dfs.namenode.checkpoint.check.period</name>
|
||||||
<value>60</value>
|
<value>60s</value>
|
||||||
<description>The SecondaryNameNode and CheckpointNode will poll the NameNode
|
<description>The SecondaryNameNode and CheckpointNode will poll the NameNode
|
||||||
every 'dfs.namenode.checkpoint.check.period' seconds to query the number
|
every 'dfs.namenode.checkpoint.check.period' seconds to query the number
|
||||||
of uncheckpointed transactions. Support multiple time unit suffix(case insensitive),
|
of uncheckpointed transactions. Support multiple time unit suffix(case insensitive),
|
||||||
|
@ -1433,7 +1433,7 @@
|
||||||
|
|
||||||
<property>
|
<property>
|
||||||
<name>dfs.client.datanode-restart.timeout</name>
|
<name>dfs.client.datanode-restart.timeout</name>
|
||||||
<value>30</value>
|
<value>30s</value>
|
||||||
<description>
|
<description>
|
||||||
Expert only. The time to wait, in seconds, from reception of an
|
Expert only. The time to wait, in seconds, from reception of an
|
||||||
datanode shutdown notification for quick restart, until declaring
|
datanode shutdown notification for quick restart, until declaring
|
||||||
|
@ -1502,7 +1502,7 @@
|
||||||
|
|
||||||
<property>
|
<property>
|
||||||
<name>dfs.ha.log-roll.period</name>
|
<name>dfs.ha.log-roll.period</name>
|
||||||
<value>120</value>
|
<value>120s</value>
|
||||||
<description>
|
<description>
|
||||||
How often, in seconds, the StandbyNode should ask the active to
|
How often, in seconds, the StandbyNode should ask the active to
|
||||||
roll edit logs. Since the StandbyNode only reads from finalized
|
roll edit logs. Since the StandbyNode only reads from finalized
|
||||||
|
@ -1516,7 +1516,7 @@
|
||||||
|
|
||||||
<property>
|
<property>
|
||||||
<name>dfs.ha.tail-edits.period</name>
|
<name>dfs.ha.tail-edits.period</name>
|
||||||
<value>60</value>
|
<value>60s</value>
|
||||||
<description>
|
<description>
|
||||||
How often, in seconds, the StandbyNode should check for new
|
How often, in seconds, the StandbyNode should check for new
|
||||||
finalized log segments in the shared edits log.
|
finalized log segments in the shared edits log.
|
||||||
|
@ -2950,7 +2950,7 @@
|
||||||
|
|
||||||
<property>
|
<property>
|
||||||
<name>dfs.datanode.bp-ready.timeout</name>
|
<name>dfs.datanode.bp-ready.timeout</name>
|
||||||
<value>20</value>
|
<value>20s</value>
|
||||||
<description>
|
<description>
|
||||||
The maximum wait time for datanode to be ready before failing the
|
The maximum wait time for datanode to be ready before failing the
|
||||||
received request. Setting this to 0 fails requests right away if the
|
received request. Setting this to 0 fails requests right away if the
|
||||||
|
@ -4273,4 +4273,12 @@
|
||||||
a plan.
|
a plan.
|
||||||
</description>
|
</description>
|
||||||
</property>
|
</property>
|
||||||
|
|
||||||
|
<property>
|
||||||
|
<name>dfs.lock.suppress.warning.interval</name>
|
||||||
|
<value>10s</value>
|
||||||
|
<description>Instrumentation reporting long critical sections will suppress
|
||||||
|
consecutive warnings within this interval.</description>
|
||||||
|
</property>
|
||||||
|
|
||||||
</configuration>
|
</configuration>
|
||||||
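The value changes above (3 to 3s, 3600 to 3600s, and so on) lean on the time-unit-suffix support the descriptions reference. A minimal sketch of reading such values, under the assumption (not part of this diff) that these keys are resolved through Configuration#getTimeDuration, which understands the suffixes and interprets bare numbers in the supplied default unit:

// Sketch under the stated assumption; keys and values come from the entries above.
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;

public class TimeSuffixSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    conf.set("dfs.heartbeat.interval", "3s");            // new suffixed default
    conf.set("dfs.namenode.checkpoint.period", "3600");  // old bare-number style still parses
    long heartbeatSec = conf.getTimeDuration(
        "dfs.heartbeat.interval", 3, TimeUnit.SECONDS);
    long checkpointSec = conf.getTimeDuration(
        "dfs.namenode.checkpoint.period", 3600, TimeUnit.SECONDS);
    System.out.println(heartbeatSec + " " + checkpointSec);  // 3 3600, both in seconds
  }
}

The same mechanism covers the new dfs.lock.suppress.warning.interval property added at the end of the file, whose default is given directly as 10s.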
|
|
|
@ -239,6 +239,7 @@ Usage: `hdfs oiv [OPTIONS] -i INPUT_FILE`
|
||||||
| `-addr` *address* | Specify the address(host:port) to listen. (localhost:5978 by default). This option is used with Web processor. |
|
| `-addr` *address* | Specify the address(host:port) to listen. (localhost:5978 by default). This option is used with Web processor. |
|
||||||
| `-maxSize` *size* | Specify the range [0, maxSize] of file sizes to be analyzed in bytes (128GB by default). This option is used with FileDistribution processor. |
|
| `-maxSize` *size* | Specify the range [0, maxSize] of file sizes to be analyzed in bytes (128GB by default). This option is used with FileDistribution processor. |
|
||||||
| `-step` *size* | Specify the granularity of the distribution in bytes (2MB by default). This option is used with FileDistribution processor. |
|
| `-step` *size* | Specify the granularity of the distribution in bytes (2MB by default). This option is used with FileDistribution processor. |
|
||||||
|
| `-format` | Format the output result in a human-readable fashion rather than a number of bytes. (false by default). This option is used with FileDistribution processor. |
|
||||||
| `-delimiter` *arg* | Delimiting string to use with Delimited processor. |
|
| `-delimiter` *arg* | Delimiting string to use with Delimited processor. |
|
||||||
| `-t`,`--temp` *temporary dir* | Use temporary dir to cache intermediate result to generate Delimited outputs. If not set, Delimited processor constructs the namespace in memory before outputting text. |
|
| `-t`,`--temp` *temporary dir* | Use temporary dir to cache intermediate result to generate Delimited outputs. If not set, Delimited processor constructs the namespace in memory before outputting text. |
|
||||||
| `-h`,`--help` | Display the tool usage and help information and exit. |
|
| `-h`,`--help` | Display the tool usage and help information and exit. |
|
||||||
|
@ -260,6 +261,9 @@ Usage: `hdfs oiv_legacy [OPTIONS] -i INPUT_FILE -o OUTPUT_FILE`
|
||||||
| COMMAND\_OPTION | Description |
|
| COMMAND\_OPTION | Description |
|
||||||
|:---- |:---- |
|
|:---- |:---- |
|
||||||
| `-p`\|`--processor` *processor* | Specify the image processor to apply against the image file. Valid options are Ls (default), XML, Delimited, Indented, and FileDistribution. |
|
| `-p`\|`--processor` *processor* | Specify the image processor to apply against the image file. Valid options are Ls (default), XML, Delimited, Indented, and FileDistribution. |
|
||||||
|
| `-maxSize` *size* | Specify the range [0, maxSize] of file sizes to be analyzed in bytes (128GB by default). This option is used with FileDistribution processor. |
|
||||||
|
| `-step` *size* | Specify the granularity of the distribution in bytes (2MB by default). This option is used with FileDistribution processor. |
|
||||||
|
| `-format` | Format the output result in a human-readable fashion rather than a number of bytes. (false by default). This option is used with FileDistribution processor. |
|
||||||
| `-skipBlocks` | Do not enumerate individual blocks within files. This may save processing time and outfile file space on namespaces with very large files. The Ls processor reads the blocks to correctly determine file sizes and ignores this option. |
|
| `-skipBlocks` | Do not enumerate individual blocks within files. This may save processing time and outfile file space on namespaces with very large files. The Ls processor reads the blocks to correctly determine file sizes and ignores this option. |
|
||||||
| `-printToScreen` | Pipe output of processor to console as well as specified file. On extremely large namespaces, this may increase processing time by an order of magnitude. |
|
| `-printToScreen` | Pipe output of processor to console as well as specified file. On extremely large namespaces, this may increase processing time by an order of magnitude. |
|
||||||
| `-delimiter` *arg* | When used in conjunction with the Delimited processor, replaces the default tab delimiter with the string specified by *arg*. |
|
| `-delimiter` *arg* | When used in conjunction with the Delimited processor, replaces the default tab delimiter with the string specified by *arg*. |
|
||||||
|
|
|
@ -50,10 +50,13 @@ The Offline Image Viewer provides several output processors:
|
||||||
..., s[n-1], maxSize], and the processor calculates how many files
|
..., s[n-1], maxSize], and the processor calculates how many files
|
||||||
in the system fall into each segment [s[i-1], s[i]). Note that
|
in the system fall into each segment [s[i-1], s[i]). Note that
|
||||||
files larger than maxSize always fall into the very last segment.
|
files larger than maxSize always fall into the very last segment.
|
||||||
The output file is formatted as a tab separated two column table:
|
By default, the output file is formatted as a tab separated two column
|
||||||
Size and NumFiles. Where Size represents the start of the segment,
|
table: Size and NumFiles. Where Size represents the start of the segment,
|
||||||
and numFiles is the number of files from the image whose size falls
|
and numFiles is the number of files from the image whose size falls
|
||||||
in this segment.
|
in this segment. By specifying the option -format, the output file will be
|
||||||
|
formatted in a human-readable fashion rather than the number of bytes
|
||||||
|
shown in the Size column. In addition, the Size column will be changed to the
|
||||||
|
Size Range column.
|
||||||
|
|
||||||
4. Delimited (experimental): Generate a text file with all of the elements
|
4. Delimited (experimental): Generate a text file with all of the elements
|
||||||
common to both inodes and inodes-under-construction, separated by a
|
common to both inodes and inodes-under-construction, separated by a
|
||||||
|
@ -150,6 +153,7 @@ Options
|
||||||
| `-addr` *address* | Specify the address(host:port) to listen. (localhost:5978 by default). This option is used with Web processor. |
|
| `-addr` *address* | Specify the address(host:port) to listen. (localhost:5978 by default). This option is used with Web processor. |
|
||||||
| `-maxSize` *size* | Specify the range [0, maxSize] of file sizes to be analyzed in bytes (128GB by default). This option is used with FileDistribution processor. |
|
| `-maxSize` *size* | Specify the range [0, maxSize] of file sizes to be analyzed in bytes (128GB by default). This option is used with FileDistribution processor. |
|
||||||
| `-step` *size* | Specify the granularity of the distribution in bytes (2MB by default). This option is used with FileDistribution processor. |
|
| `-step` *size* | Specify the granularity of the distribution in bytes (2MB by default). This option is used with FileDistribution processor. |
|
||||||
|
| `-format` | Format the output result in a human-readable fashion rather than a number of bytes. (false by default). This option is used with FileDistribution processor. |
|
||||||
| `-delimiter` *arg* | Delimiting string to use with Delimited processor. |
|
| `-delimiter` *arg* | Delimiting string to use with Delimited processor. |
|
||||||
| `-t`\|`--temp` *temporary dir* | Use temporary dir to cache intermediate result to generate Delimited outputs. If not set, Delimited processor constructs the namespace in memory before outputting text. |
|
| `-t`\|`--temp` *temporary dir* | Use temporary dir to cache intermediate result to generate Delimited outputs. If not set, Delimited processor constructs the namespace in memory before outputting text. |
|
||||||
| `-h`\|`--help` | Display the tool usage and help information and exit. |
|
| `-h`\|`--help` | Display the tool usage and help information and exit. |
|
||||||
|
@ -181,6 +185,9 @@ Due to the internal layout changes introduced by the ProtocolBuffer-based fsimag
|
||||||
| `-i`\|`--inputFile` *input file* | Specify the input fsimage file to process. Required. |
|
| `-i`\|`--inputFile` *input file* | Specify the input fsimage file to process. Required. |
|
||||||
| `-o`\|`--outputFile` *output file* | Specify the output filename, if the specified output processor generates one. If the specified file already exists, it is silently overwritten. Required. |
|
| `-o`\|`--outputFile` *output file* | Specify the output filename, if the specified output processor generates one. If the specified file already exists, it is silently overwritten. Required. |
|
||||||
| `-p`\|`--processor` *processor* | Specify the image processor to apply against the image file. Valid options are Ls (default), XML, Delimited, Indented, and FileDistribution. |
|
| `-p`\|`--processor` *processor* | Specify the image processor to apply against the image file. Valid options are Ls (default), XML, Delimited, Indented, and FileDistribution. |
|
||||||
|
| `-maxSize` *size* | Specify the range [0, maxSize] of file sizes to be analyzed in bytes (128GB by default). This option is used with FileDistribution processor. |
|
||||||
|
| `-step` *size* | Specify the granularity of the distribution in bytes (2MB by default). This option is used with FileDistribution processor. |
|
||||||
|
| `-format` | Format the output result in a human-readable fashion rather than a number of bytes. (false by default). This option is used with FileDistribution processor. |
|
||||||
| `-skipBlocks` | Do not enumerate individual blocks within files. This may save processing time and outfile file space on namespaces with very large files. The Ls processor reads the blocks to correctly determine file sizes and ignores this option. |
|
| `-skipBlocks` | Do not enumerate individual blocks within files. This may save processing time and outfile file space on namespaces with very large files. The Ls processor reads the blocks to correctly determine file sizes and ignores this option. |
|
||||||
| `-printToScreen` | Pipe output of processor to console as well as specified file. On extremely large namespaces, this may increase processing time by an order of magnitude. |
|
| `-printToScreen` | Pipe output of processor to console as well as specified file. On extremely large namespaces, this may increase processing time by an order of magnitude. |
|
||||||
| `-delimiter` *arg* | When used in conjunction with the Delimited processor, replaces the default tab delimiter with the string specified by *arg*. |
|
| `-delimiter` *arg* | When used in conjunction with the Delimited processor, replaces the default tab delimiter with the string specified by *arg*. |
|
||||||
|
|
|
@ -57,7 +57,8 @@ import static org.junit.Assert.assertTrue;
|
||||||
|
|
||||||
public class TestDFSStripedInputStream {
|
public class TestDFSStripedInputStream {
|
||||||
|
|
||||||
public static final Log LOG = LogFactory.getLog(TestDFSStripedInputStream.class);
|
public static final Log LOG =
|
||||||
|
LogFactory.getLog(TestDFSStripedInputStream.class);
|
||||||
|
|
||||||
private MiniDFSCluster cluster;
|
private MiniDFSCluster cluster;
|
||||||
private Configuration conf = new Configuration();
|
 private Configuration conf = new Configuration();
@@ -272,12 +273,16 @@ public class TestDFSStripedInputStream
     // |10     |
     done += in.read(0, readBuffer, 0, delta);
     assertEquals(delta, done);
+    assertArrayEquals(Arrays.copyOf(expected, done),
+        Arrays.copyOf(readBuffer, done));
     // both head and trail cells are partial
     // |c_0      |c_1    |c_2 |c_3 |c_4      |c_5         |
     // |256K - 10|missing|256K|256K|256K - 10|not in range|
     done += in.read(delta, readBuffer, delta,
         CELLSIZE * (DATA_BLK_NUM - 1) - 2 * delta);
     assertEquals(CELLSIZE * (DATA_BLK_NUM - 1) - delta, done);
+    assertArrayEquals(Arrays.copyOf(expected, done),
+        Arrays.copyOf(readBuffer, done));
     // read the rest
     done += in.read(done, readBuffer, done, readSize - done);
     assertEquals(readSize, done);
@@ -291,8 +296,8 @@ public class TestDFSStripedInputStream
     testStatefulRead(true, true);
   }
 
-  private void testStatefulRead(boolean useByteBuffer, boolean cellMisalignPacket)
-      throws Exception {
+  private void testStatefulRead(boolean useByteBuffer,
+      boolean cellMisalignPacket) throws Exception {
     final int numBlocks = 2;
     final int fileSize = numBlocks * BLOCK_GROUP_SIZE;
     if (cellMisalignPacket) {
@@ -302,7 +307,8 @@ public class TestDFSStripedInputStream
     }
     DFSTestUtil.createStripedFile(cluster, filePath, null, numBlocks,
         NUM_STRIPE_PER_BLOCK, false);
-    LocatedBlocks lbs = fs.getClient().namenode.getBlockLocations(filePath.toString(), 0, fileSize);
+    LocatedBlocks lbs = fs.getClient().namenode.
+        getBlockLocations(filePath.toString(), 0, fileSize);
 
     assert lbs.getLocatedBlocks().size() == numBlocks;
     for (LocatedBlock lb : lbs.getLocatedBlocks()) {
@@ -360,4 +366,111 @@ public class TestDFSStripedInputStream
     }
     fs.delete(filePath, true);
   }
+
+  @Test
+  public void testStatefulReadWithDNFailure() throws Exception {
+    final int numBlocks = 4;
+    final int failedDNIdx = DATA_BLK_NUM - 1;
+    DFSTestUtil.createStripedFile(cluster, filePath, null, numBlocks,
+        NUM_STRIPE_PER_BLOCK, false);
+    LocatedBlocks lbs = fs.getClient().namenode.getBlockLocations(
+        filePath.toString(), 0, BLOCK_GROUP_SIZE);
+
+    assert lbs.get(0) instanceof LocatedStripedBlock;
+    LocatedStripedBlock bg = (LocatedStripedBlock) (lbs.get(0));
+    for (int i = 0; i < DATA_BLK_NUM + PARITY_BLK_NUM; i++) {
+      Block blk = new Block(bg.getBlock().getBlockId() + i,
+          NUM_STRIPE_PER_BLOCK * CELLSIZE,
+          bg.getBlock().getGenerationStamp());
+      blk.setGenerationStamp(bg.getBlock().getGenerationStamp());
+      cluster.injectBlocks(i, Arrays.asList(blk),
+          bg.getBlock().getBlockPoolId());
+    }
+    DFSStripedInputStream in =
+        new DFSStripedInputStream(fs.getClient(), filePath.toString(), false,
+            ecPolicy, null);
+    int readSize = BLOCK_GROUP_SIZE;
+    byte[] readBuffer = new byte[readSize];
+    byte[] expected = new byte[readSize];
+    /** A variation of {@link DFSTestUtil#fillExpectedBuf} for striped blocks */
+    for (int i = 0; i < NUM_STRIPE_PER_BLOCK; i++) {
+      for (int j = 0; j < DATA_BLK_NUM; j++) {
+        for (int k = 0; k < CELLSIZE; k++) {
+          int posInBlk = i * CELLSIZE + k;
+          int posInFile = i * CELLSIZE * DATA_BLK_NUM + j * CELLSIZE + k;
+          expected[posInFile] = SimulatedFSDataset.simulatedByte(
+              new Block(bg.getBlock().getBlockId() + j), posInBlk);
+        }
+      }
+    }
+
+    ErasureCoderOptions coderOptions = new ErasureCoderOptions(
+        DATA_BLK_NUM, PARITY_BLK_NUM);
+    RawErasureDecoder rawDecoder = CodecUtil.createRawDecoder(conf,
+        ecPolicy.getCodecName(), coderOptions);
+
+    // Update the expected content for decoded data
+    int[] missingBlkIdx = new int[PARITY_BLK_NUM];
+    for (int i = 0; i < missingBlkIdx.length; i++) {
+      if (i == 0) {
+        missingBlkIdx[i] = failedDNIdx;
+      } else {
+        missingBlkIdx[i] = DATA_BLK_NUM + i;
+      }
+    }
+    cluster.stopDataNode(failedDNIdx);
+    for (int i = 0; i < NUM_STRIPE_PER_BLOCK; i++) {
+      byte[][] decodeInputs = new byte[DATA_BLK_NUM + PARITY_BLK_NUM][CELLSIZE];
+      byte[][] decodeOutputs = new byte[missingBlkIdx.length][CELLSIZE];
+      for (int j = 0; j < DATA_BLK_NUM; j++) {
+        int posInBuf = i * CELLSIZE * DATA_BLK_NUM + j * CELLSIZE;
+        if (j != failedDNIdx) {
+          System.arraycopy(expected, posInBuf, decodeInputs[j], 0, CELLSIZE);
+        }
+      }
+      for (int j = DATA_BLK_NUM; j < DATA_BLK_NUM + PARITY_BLK_NUM; j++) {
+        for (int k = 0; k < CELLSIZE; k++) {
+          int posInBlk = i * CELLSIZE + k;
+          decodeInputs[j][k] = SimulatedFSDataset.simulatedByte(
+              new Block(bg.getBlock().getBlockId() + j), posInBlk);
+        }
+      }
+      for (int m : missingBlkIdx) {
+        decodeInputs[m] = null;
+      }
+      rawDecoder.decode(decodeInputs, missingBlkIdx, decodeOutputs);
+      int posInBuf = i * CELLSIZE * DATA_BLK_NUM + failedDNIdx * CELLSIZE;
+      System.arraycopy(decodeOutputs[0], 0, expected, posInBuf, CELLSIZE);
+    }
+
+    int delta = 10;
+    int done = 0;
+    // read a small delta, shouldn't trigger decode
+    // |cell_0 |
+    // |10     |
+    done += in.read(readBuffer, 0, delta);
+    assertEquals(delta, done);
+    // both head and trail cells are partial
+    // |c_0      |c_1    |c_2 |c_3 |c_4      |c_5         |
+    // |256K - 10|missing|256K|256K|256K - 10|not in range|
+    while (done < (CELLSIZE * (DATA_BLK_NUM - 1) - 2 * delta)) {
+      int ret = in.read(readBuffer, delta,
+          CELLSIZE * (DATA_BLK_NUM - 1) - 2 * delta);
+      assertTrue(ret > 0);
+      done += ret;
+    }
+    assertEquals(CELLSIZE * (DATA_BLK_NUM - 1) - delta, done);
+    // read the rest
+
+    int restSize;
+    restSize = readSize - done;
+    while (done < restSize) {
+      int ret = in.read(readBuffer, done, restSize);
+      assertTrue(ret > 0);
+      done += ret;
+    }
+
+    assertEquals(readSize, done);
+    assertArrayEquals(expected, readBuffer);
+  }
 }
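Editor's note (not part of the patch): the new testStatefulReadWithDNFailure above rebuilds its expected bytes with the raw erasure coder API. The standalone sketch below shows that encode/decode round trip in isolation; the package imports, the "rs" codec name, and the 6+3 cell layout are illustrative assumptions and may need adjusting for the Hadoop version in use.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.erasurecode.CodecUtil;
import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder;
import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureEncoder;

public class RawCoderRoundTripSketch {
  public static void main(String[] args) throws Exception {
    final int dataUnits = 6;
    final int parityUnits = 3;
    final int cellSize = 1024; // illustrative; the test uses its CELLSIZE constant

    ErasureCoderOptions options = new ErasureCoderOptions(dataUnits, parityUnits);
    Configuration conf = new Configuration();
    RawErasureEncoder encoder = CodecUtil.createRawEncoder(conf, "rs", options);
    RawErasureDecoder decoder = CodecUtil.createRawDecoder(conf, "rs", options);

    // One stripe: dataUnits data cells plus parityUnits parity cells.
    byte[][] data = new byte[dataUnits][cellSize];
    for (int i = 0; i < dataUnits; i++) {
      for (int k = 0; k < cellSize; k++) {
        data[i][k] = (byte) (i + k); // arbitrary payload
      }
    }
    byte[][] parity = new byte[parityUnits][cellSize];
    encoder.encode(data, parity);

    // Present all units to the decoder, then erase one data unit,
    // the way the test nulls out the cell of the stopped DataNode.
    byte[][] inputs = new byte[dataUnits + parityUnits][];
    System.arraycopy(data, 0, inputs, 0, dataUnits);
    System.arraycopy(parity, 0, inputs, dataUnits, parityUnits);
    int erased = dataUnits - 1;
    byte[] original = inputs[erased];
    inputs[erased] = null; // erased units are passed to the decoder as null

    byte[][] recovered = new byte[1][cellSize];
    decoder.decode(inputs, new int[] {erased}, recovered);

    System.out.println("recovered matches original: "
        + java.util.Arrays.equals(original, recovered[0]));
  }
}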
@@ -0,0 +1,166 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.concurrent.locks.Lock;
+
+import org.apache.hadoop.util.AutoCloseableLock;
+import org.apache.hadoop.util.Timer;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TestName;
+import static org.mockito.Mockito.*;
+import static org.junit.Assert.*;
+
+/**
+ * A test class for InstrumentedLock.
+ */
+public class TestInstrumentedLock {
+
+  static final Log LOG = LogFactory.getLog(TestInstrumentedLock.class);
+
+  @Rule public TestName name = new TestName();
+
+  /**
+   * Test exclusive access of the lock.
+   * @throws Exception
+   */
+  @Test(timeout=10000)
+  public void testMultipleThread() throws Exception {
+    String testname = name.getMethodName();
+    InstrumentedLock lock = new InstrumentedLock(testname, LOG, 0, 300);
+    lock.lock();
+    try {
+      Thread competingThread = new Thread() {
+        @Override
+        public void run() {
+          assertFalse(lock.tryLock());
+        }
+      };
+      competingThread.start();
+      competingThread.join();
+    } finally {
+      lock.unlock();
+    }
+  }
+
+  /**
+   * Test the correctness with try-with-resource syntax.
+   * @throws Exception
+   */
+  @Test(timeout=10000)
+  public void testTryWithResourceSyntax() throws Exception {
+    String testname = name.getMethodName();
+    final AtomicReference<Thread> lockThread = new AtomicReference<>(null);
+    Lock lock = new InstrumentedLock(testname, LOG, 0, 300) {
+      @Override
+      public void lock() {
+        super.lock();
+        lockThread.set(Thread.currentThread());
+      }
+      @Override
+      public void unlock() {
+        super.unlock();
+        lockThread.set(null);
+      }
+    };
+    AutoCloseableLock acl = new AutoCloseableLock(lock);
+    try (AutoCloseable localLock = acl.acquire()) {
+      assertEquals(acl, localLock);
+      Thread competingThread = new Thread() {
+        @Override
+        public void run() {
+          assertNotEquals(Thread.currentThread(), lockThread.get());
+          assertFalse(lock.tryLock());
+        }
+      };
+      competingThread.start();
+      competingThread.join();
+      assertEquals(Thread.currentThread(), lockThread.get());
+    }
+    assertNull(lockThread.get());
+  }
+
+  /**
+   * Test the lock logs warning when lock held time is greater than threshold
+   * and not log warning otherwise.
+   * @throws Exception
+   */
+  @Test(timeout=10000)
+  public void testLockLongHoldingReport() throws Exception {
+    String testname = name.getMethodName();
+    final AtomicLong time = new AtomicLong(0);
+    Timer mclock = new Timer() {
+      @Override
+      public long monotonicNow() {
+        return time.get();
+      }
+    };
+    Lock mlock = mock(Lock.class);
+
+    final AtomicLong wlogged = new AtomicLong(0);
+    final AtomicLong wsuppresed = new AtomicLong(0);
+    InstrumentedLock lock = new InstrumentedLock(
+        testname, LOG, mlock, 2000, 300, mclock) {
+      @Override
+      void logWarning(long lockHeldTime, long suppressed) {
+        wlogged.incrementAndGet();
+        wsuppresed.set(suppressed);
+      }
+    };
+
+    // do not log warning when the lock held time is short
+    lock.lock();   // t = 0
+    time.set(200);
+    lock.unlock(); // t = 200
+    assertEquals(0, wlogged.get());
+    assertEquals(0, wsuppresed.get());
+
+    lock.lock();   // t = 200
+    time.set(700);
+    lock.unlock(); // t = 700
+    assertEquals(1, wlogged.get());
+    assertEquals(0, wsuppresed.get());
+
+    // despite the lock held time is greater than threshold
+    // suppress the log warning due to the logging gap
+    // (not recorded in wsuppressed until next log message)
+    lock.lock();   // t = 700
+    time.set(1100);
+    lock.unlock(); // t = 1100
+    assertEquals(1, wlogged.get());
+    assertEquals(0, wsuppresed.get());
+
+    // log a warning message when the lock held time is greater the threshold
+    // and the logging time gap is satisfied. Also should display suppressed
+    // previous warnings.
+    time.set(2400);
+    lock.lock();   // t = 2400
+    time.set(2800);
+    lock.unlock(); // t = 2800
+    assertEquals(2, wlogged.get());
+    assertEquals(1, wsuppresed.get());
+  }
+
+}
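Editor's note (not part of the patch): testLockLongHoldingReport pins down the warning policy the instrumented lock is expected to follow — warn only when the hold time exceeds the threshold, at most once per logging gap, and report how many warnings were suppressed in between. A minimal self-contained sketch of that policy, with illustrative names (the real InstrumentedLock internals may differ):

// Sketch of the hold-time warning policy exercised above; names are illustrative.
class HoldTimeWarningPolicy {
  private final long warningThresholdMs;   // 300 in the test above
  private final long minLoggingGapMs;      // 2000 in the test above
  private long lastLogTimestamp = Long.MIN_VALUE;
  private long suppressedWarnings = 0;

  HoldTimeWarningPolicy(long minLoggingGapMs, long warningThresholdMs) {
    this.minLoggingGapMs = minLoggingGapMs;
    this.warningThresholdMs = warningThresholdMs;
  }

  /** Called on unlock with the measured hold time and the current monotonic time. */
  void onUnlock(long lockHeldTimeMs, long nowMs) {
    if (lockHeldTimeMs < warningThresholdMs) {
      return;                               // short holds are never reported
    }
    if (nowMs - lastLogTimestamp < minLoggingGapMs) {
      suppressedWarnings++;                 // too soon after the last warning
      return;
    }
    logWarning(lockHeldTimeMs, suppressedWarnings);
    suppressedWarnings = 0;
    lastLogTimestamp = nowMs;
  }

  void logWarning(long lockHeldTimeMs, long suppressed) {
    System.out.printf("Lock held for %d ms (%d earlier warnings suppressed)%n",
        lockHeldTimeMs, suppressed);
  }
}

Walking the test's timeline through this sketch reproduces its assertions: the 200 ms hold stays quiet, the 500 ms hold warns, the 400 ms hold at t=1100 is suppressed because only 400 ms have passed since the last warning, and the hold ending at t=2800 warns again while reporting one suppressed warning.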
@@ -0,0 +1,119 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.security;
+
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION;
+import static org.mockito.Mockito.mock;
+
+import java.net.InetSocketAddress;
+import java.security.PrivilegedExceptionAction;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
+import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.ipc.Client;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ipc.Server;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.SaslInputStream;
+import org.apache.hadoop.security.SaslRpcClient;
+import org.apache.hadoop.security.SaslRpcServer;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.log4j.Level;
+import org.junit.Test;
+
+/** Unit tests for using Delegation Token over RPC. */
+public class TestClientProtocolWithDelegationToken {
+  private static final String ADDRESS = "0.0.0.0";
+
+  public static final Log LOG = LogFactory
+      .getLog(TestClientProtocolWithDelegationToken.class);
+
+  private static final Configuration conf;
+  static {
+    conf = new Configuration();
+    conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
+    UserGroupInformation.setConfiguration(conf);
+  }
+
+  static {
+    GenericTestUtils.setLogLevel(Client.LOG, Level.ALL);
+    GenericTestUtils.setLogLevel(Server.LOG, Level.ALL);
+    GenericTestUtils.setLogLevel(SaslRpcClient.LOG, Level.ALL);
+    GenericTestUtils.setLogLevel(SaslRpcServer.LOG, Level.ALL);
+    GenericTestUtils.setLogLevel(SaslInputStream.LOG, Level.ALL);
+  }
+
+  @Test
+  public void testDelegationTokenRpc() throws Exception {
+    ClientProtocol mockNN = mock(ClientProtocol.class);
+    FSNamesystem mockNameSys = mock(FSNamesystem.class);
+
+    DelegationTokenSecretManager sm = new DelegationTokenSecretManager(
+        DFSConfigKeys.DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_DEFAULT,
+        DFSConfigKeys.DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_DEFAULT,
+        DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_DEFAULT,
+        3600000, mockNameSys);
+    sm.startThreads();
+    final Server server = new RPC.Builder(conf)
+        .setProtocol(ClientProtocol.class).setInstance(mockNN)
+        .setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true)
+        .setSecretManager(sm).build();
+
+    server.start();
+
+    final UserGroupInformation current = UserGroupInformation.getCurrentUser();
+    final InetSocketAddress addr = NetUtils.getConnectAddress(server);
+    String user = current.getUserName();
+    Text owner = new Text(user);
+    DelegationTokenIdentifier dtId = new DelegationTokenIdentifier(owner, owner, null);
+    Token<DelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>(
+        dtId, sm);
+    SecurityUtil.setTokenService(token, addr);
+    LOG.info("Service for token is " + token.getService());
+    current.addToken(token);
+    current.doAs(new PrivilegedExceptionAction<Object>() {
+      @Override
+      public Object run() throws Exception {
+        ClientProtocol proxy = null;
+        try {
+          proxy = RPC.getProxy(ClientProtocol.class,
+              ClientProtocol.versionID, addr, conf);
+          proxy.getServerDefaults();
+        } finally {
+          server.stop();
+          if (proxy != null) {
+            RPC.stopProxy(proxy);
+          }
+        }
+        return null;
+      }
+    });
+  }
+
+}
@@ -41,18 +41,19 @@ import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector;
 import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ConnectorFactory;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode;
+import org.apache.hadoop.hdfs.tools.DiskBalancerCLI;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 
 import com.google.common.collect.Lists;
 
-import static org.apache.hadoop.hdfs.tools.DiskBalancer.CANCEL;
-import static org.apache.hadoop.hdfs.tools.DiskBalancer.HELP;
-import static org.apache.hadoop.hdfs.tools.DiskBalancer.NODE;
-import static org.apache.hadoop.hdfs.tools.DiskBalancer.PLAN;
-import static org.apache.hadoop.hdfs.tools.DiskBalancer.QUERY;
-import static org.apache.hadoop.hdfs.tools.DiskBalancer.REPORT;
+import static org.apache.hadoop.hdfs.tools.DiskBalancerCLI.CANCEL;
+import static org.apache.hadoop.hdfs.tools.DiskBalancerCLI.HELP;
+import static org.apache.hadoop.hdfs.tools.DiskBalancerCLI.NODE;
+import static org.apache.hadoop.hdfs.tools.DiskBalancerCLI.PLAN;
+import static org.apache.hadoop.hdfs.tools.DiskBalancerCLI.QUERY;
+import static org.apache.hadoop.hdfs.tools.DiskBalancerCLI.REPORT;
 
 import org.junit.Rule;
 import org.junit.rules.ExpectedException;
@@ -387,8 +388,7 @@ public class TestDiskBalancerCommand {
   private List<String> runCommandInternal(final String cmdLine) throws
       Exception {
     String[] cmds = StringUtils.split(cmdLine, ' ');
-    org.apache.hadoop.hdfs.tools.DiskBalancer db =
-        new org.apache.hadoop.hdfs.tools.DiskBalancer(conf);
+    DiskBalancerCLI db = new DiskBalancerCLI(conf);
 
     ByteArrayOutputStream bufOut = new ByteArrayOutputStream();
     PrintStream out = new PrintStream(bufOut);
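Editor's note (not part of the patch): with the command class renamed to DiskBalancerCLI, the usual way to drive it outside this test harness is through ToolRunner, assuming the CLI implements the Tool interface as Hadoop command classes typically do. A minimal sketch; the "-help" argument is an illustrative assumption.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.tools.DiskBalancerCLI;
import org.apache.hadoop.util.ToolRunner;

public class DiskBalancerCliSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Delegate option parsing and execution to the CLI, the way the
    // `hdfs diskbalancer` command entry point does.
    int exitCode = ToolRunner.run(conf, new DiskBalancerCLI(conf),
        new String[] {"-help"});
    System.exit(exitCode);
  }
}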
@@ -237,7 +237,7 @@ public class TestOfflineImageViewer {
     File truncatedFile = new File(tempDir, "truncatedFsImage");
     PrintStream output = new PrintStream(NullOutputStream.NULL_OUTPUT_STREAM);
     copyPartOfFile(originalFsimage, truncatedFile);
-    new FileDistributionCalculator(new Configuration(), 0, 0, output)
+    new FileDistributionCalculator(new Configuration(), 0, 0, false, output)
         .visit(new RandomAccessFile(truncatedFile, "r"));
   }
 
@@ -259,7 +259,7 @@ public class TestOfflineImageViewer {
   public void testFileDistributionCalculator() throws IOException {
     ByteArrayOutputStream output = new ByteArrayOutputStream();
     PrintStream o = new PrintStream(output);
-    new FileDistributionCalculator(new Configuration(), 0, 0, o)
+    new FileDistributionCalculator(new Configuration(), 0, 0, false, o)
         .visit(new RandomAccessFile(originalFsimage, "r"));
     o.close();
 
@@ -620,4 +620,24 @@ public class TestOfflineImageViewer {
       IOUtils.closeStream(out);
     }
   }
+
+  @Test
+  public void testOfflineImageViewerWithFormatOption() throws Exception {
+    final ByteArrayOutputStream bytes = new ByteArrayOutputStream();
+    final PrintStream out = new PrintStream(bytes);
+    final PrintStream oldOut = System.out;
+    try {
+      System.setOut(out);
+      int status =
+          OfflineImageViewerPB.run(new String[] {"-i",
+              originalFsimage.getAbsolutePath(), "-o", "-", "-p",
+              "FileDistribution", "-maxSize", "512", "-step", "8",
+              "-format"});
+      assertEquals(0, status);
+      Assert.assertTrue(bytes.toString().contains("(0 B, 8 B]"));
+    } finally {
+      System.setOut(oldOut);
+      IOUtils.closeStream(out);
+    }
+  }
 }
@@ -36,6 +36,7 @@ import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.Timeout;
 
+import java.nio.ByteBuffer;
 import java.util.Random;
 
 import static org.junit.Assert.assertEquals;
@@ -242,7 +243,8 @@ public class TestStripedBlockUtil {
   */
  @Test
  public void testDivideByteRangeIntoStripes() {
-    byte[] assembled = new byte[BLK_GROUP_STRIPE_NUM * FULL_STRIPE_SIZE];
+    ByteBuffer assembled =
+        ByteBuffer.allocate(BLK_GROUP_STRIPE_NUM * FULL_STRIPE_SIZE);
     for (int bgSize : blockGroupSizes) {
       LocatedStripedBlock blockGroup = createDummyLocatedBlock(bgSize);
       byte[][] internalBlkBufs = createInternalBlkBuffers(bgSize);
@@ -252,7 +254,7 @@ public class TestStripedBlockUtil {
        continue;
      }
      AlignedStripe[] stripes = divideByteRangeIntoStripes(EC_POLICY,
-          CELLSIZE, blockGroup, brStart, brStart + brSize - 1, assembled, 0);
+          CELLSIZE, blockGroup, brStart, brStart + brSize - 1, assembled);
 
      for (AlignedStripe stripe : stripes) {
        for (int i = 0; i < DATA_BLK_NUM; i++) {
@@ -261,21 +263,21 @@ public class TestStripedBlockUtil {
            continue;
          }
          int done = 0;
-          for (int j = 0; j < chunk.byteArray.getLengths().length; j++) {
-            System.arraycopy(internalBlkBufs[i],
-                (int) stripe.getOffsetInBlock() + done, assembled,
-                chunk.byteArray.getOffsets()[j],
-                chunk.byteArray.getLengths()[j]);
-            done += chunk.byteArray.getLengths()[j];
+          int len;
+          for (ByteBuffer slice : chunk.getChunkBuffer().getSlices()) {
+            len = slice.remaining();
+            slice.put(internalBlkBufs[i],
+                (int) stripe.getOffsetInBlock() + done, len);
+            done += len;
          }
        }
      }
      for (int i = 0; i < brSize; i++) {
-        if (hashIntToByte(brStart + i) != assembled[i]) {
+        if (hashIntToByte(brStart + i) != assembled.get(i)) {
          System.out.println("Oops");
        }
        assertEquals("Byte at " + (brStart + i) + " should be the same",
-            hashIntToByte(brStart + i), assembled[i]);
+            hashIntToByte(brStart + i), assembled.get(i));
      }
    }
  }
@@ -29,6 +29,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ipc.WritableRpcEngine;
 import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.Groups;
@@ -97,6 +98,8 @@ public class HSAdminServer extends AbstractService implements HSAdminProtocol {
     BlockingService refreshHSAdminProtocolService = HSAdminRefreshProtocolService
         .newReflectiveBlockingService(refreshHSAdminProtocolXlator);
 
+    WritableRpcEngine.ensureInitialized();
+
     clientRpcAddress = conf.getSocketAddr(
         JHAdminConfig.MR_HISTORY_BIND_HOST,
         JHAdminConfig.JHS_ADMIN_ADDRESS,