Merge branch 'trunk' into HDFS-7240

commit 0f73cd7ff6
Author: Anu Engineer
Date: 2016-09-08 19:34:51 -07:00
53 changed files with 1899 additions and 677 deletions

View File: org.apache.hadoop.fs.Path

@ -19,6 +19,9 @@
package org.apache.hadoop.fs;
import java.io.IOException;
import java.io.InvalidObjectException;
import java.io.ObjectInputValidation;
import java.io.Serializable;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.regex.Pattern;
@ -37,7 +40,7 @@ import org.apache.hadoop.conf.Configuration;
@Stringable
@InterfaceAudience.Public
@InterfaceStability.Stable
public class Path implements Comparable {
public class Path implements Comparable, Serializable, ObjectInputValidation {
/**
* The directory separator, a slash.
@ -66,6 +69,8 @@ public class Path implements Comparable {
private static final Pattern HAS_DRIVE_LETTER_SPECIFIER =
Pattern.compile("^/?[a-zA-Z]:");
private static final long serialVersionUID = 0xad00f;
private URI uri; // a hierarchical uri
/**
@ -565,4 +570,17 @@ public class Path implements Comparable {
}
return new Path(newUri);
}
/**
* Validate the contents of a deserialized Path, so as
* to defend against malicious object streams.
* @throws InvalidObjectException if there's no URI
*/
@Override
public void validateObject() throws InvalidObjectException {
if (uri == null) {
throw new InvalidObjectException("No URI in deserialized Path");
}
}
}
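A note on how the check above fires: with standard Java serialization, ObjectInputValidation callbacks run only for objects that register themselves while the stream is being read. A hypothetical readObject hook (not part of this change) that would wire validateObject() up could look like the sketch below.

// Hypothetical sketch, not in this diff: register the callback so ObjectInputStream
// invokes validateObject() once the whole object graph has been read.
private void readObject(java.io.ObjectInputStream in)
    throws java.io.IOException, ClassNotFoundException {
  in.registerValidation(this, 0); // priority 0; only legal while deserialization is active
  in.defaultReadObject();         // restores the uri field declared above
}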

View File: org.apache.hadoop.ipc.ProtobufRpcEngine

@ -60,7 +60,7 @@ public class ProtobufRpcEngine implements RpcEngine {
private static final ThreadLocal<AsyncGet<Message, Exception>>
ASYNC_RETURN_MESSAGE = new ThreadLocal<>();
static { // Register the rpcRequest deserializer for ProtobufRpcEngine
static { // Register the rpcRequest deserializer for WritableRpcEngine
org.apache.hadoop.ipc.Server.registerProtocolEngine(
RPC.RpcKind.RPC_PROTOCOL_BUFFER, RpcProtobufRequest.class,
new Server.ProtoBufRpcInvoker());
@ -194,8 +194,7 @@ public class ProtobufRpcEngine implements RpcEngine {
}
if (args.length != 2) { // RpcController + Message
throw new ServiceException(
"Too many or few parameters for request. Method: ["
throw new ServiceException("Too many parameters for request. Method: ["
+ method.getName() + "]" + ", Expected: 2, Actual: "
+ args.length);
}
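For context, the two arguments the invoker insists on are the RpcController and the request Message of a generated protobuf blocking stub. A hedged fragment reusing the TestRpcService and Echo types exercised later in this commit (addr and conf are assumed to be set up elsewhere):

// Sketch only: the (RpcController, Message) call shape that the length check above enforces.
// The blocking stub throws ServiceException, which the surrounding method must declare or catch.
TestRpcService proxy = RPC.getProxy(TestRpcService.class, 1L, addr, conf);
EchoResponseProto resp = proxy.echo(null,                        // RpcController may be null
    EchoRequestProto.newBuilder().setMessage("hello").build());  // single request message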

View File: org.apache.hadoop.ipc.RPC

@ -18,8 +18,6 @@
package org.apache.hadoop.ipc;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.lang.reflect.Field;
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Proxy;
@ -28,6 +26,7 @@ import java.net.ConnectException;
import java.net.InetSocketAddress;
import java.net.NoRouteToHostException;
import java.net.SocketTimeoutException;
import java.io.*;
import java.io.Closeable;
import java.util.ArrayList;
import java.util.Arrays;
@ -38,12 +37,11 @@ import java.util.concurrent.atomic.AtomicBoolean;
import javax.net.SocketFactory;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.*;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.*;
import org.apache.hadoop.io.retry.RetryPolicy;
import org.apache.hadoop.ipc.Client.ConnectionId;
import org.apache.hadoop.ipc.protobuf.ProtocolInfoProtos.ProtocolInfoService;
@ -56,6 +54,7 @@ import org.apache.hadoop.security.token.SecretManager;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.*;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.Time;
@ -88,7 +87,7 @@ public class RPC {
RPC_WRITABLE ((short) 2), // Use WritableRpcEngine
RPC_PROTOCOL_BUFFER ((short) 3); // Use ProtobufRpcEngine
final static short MAX_INDEX = RPC_PROTOCOL_BUFFER.value; // used for array size
private final short value;
public final short value; //TODO make it private
RpcKind(short val) {
this.value = val;
@ -208,7 +207,7 @@ public class RPC {
RpcEngine engine = PROTOCOL_ENGINES.get(protocol);
if (engine == null) {
Class<?> impl = conf.getClass(ENGINE_PROP+"."+protocol.getName(),
ProtobufRpcEngine.class);
WritableRpcEngine.class);
engine = (RpcEngine)ReflectionUtils.newInstance(impl, conf);
PROTOCOL_ENGINES.put(protocol, engine);
}
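The lookup above falls back to a hard-coded engine only when nothing is registered under ENGINE_PROP plus the protocol name, which is exactly the default this merge flips between ProtobufRpcEngine and WritableRpcEngine. A minimal sketch of how callers usually sidestep that default, mirroring the test changes later in this commit:

// Minimal sketch: pin the RPC engine for one protocol up front so the
// getProtocolEngine() fallback above never has to choose a default.
Configuration conf = new Configuration();
RPC.setProtocolEngine(conf, TestRpcService.class, ProtobufRpcEngine.class);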

View File: org.apache.hadoop.ipc.Server

@ -237,14 +237,14 @@ public abstract class Server {
static class RpcKindMapValue {
final Class<? extends Writable> rpcRequestWrapperClass;
final RpcInvoker rpcInvoker;
RpcKindMapValue (Class<? extends Writable> rpcRequestWrapperClass,
RpcInvoker rpcInvoker) {
this.rpcInvoker = rpcInvoker;
this.rpcRequestWrapperClass = rpcRequestWrapperClass;
}
}
static Map<RPC.RpcKind, RpcKindMapValue> rpcKindMap = new HashMap<>(4);
static Map<RPC.RpcKind, RpcKindMapValue> rpcKindMap = new
HashMap<RPC.RpcKind, RpcKindMapValue>(4);

View File: org.apache.hadoop.security.UserGroupInformation

@ -730,7 +730,7 @@ public class UserGroupInformation {
*
* @param user The principal name to load from the ticket
* cache
* @param ticketCache the path to the ticket cache file
* @param ticketCachePath the path to the ticket cache file
*
* @throws IOException if the kerberos login fails
*/
@ -790,7 +790,7 @@ public class UserGroupInformation {
/**
* Create a UserGroupInformation from a Subject with Kerberos principal.
*
* @param subject The KerberosPrincipal to use in UGI
* @param user The KerberosPrincipal to use in UGI
*
* @throws IOException if the kerberos login fails
*/

View File: org.apache.hadoop.util.AutoCloseableLock

@ -17,22 +17,33 @@
*/
package org.apache.hadoop.util;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import com.google.common.annotations.VisibleForTesting;
/**
* This is a wrap class of a ReentrantLock. Extending AutoCloseable
* interface such that the users can use a try-with-resource syntax.
*/
public class AutoCloseableLock implements AutoCloseable {
private final ReentrantLock lock;
private final Lock lock;
/**
* Creates an instance of {@code AutoCloseableLock}, initializes
* the underlying {@code ReentrantLock} object.
* the underlying lock instance with a new {@code ReentrantLock}.
*/
public AutoCloseableLock() {
this.lock = new ReentrantLock();
this(new ReentrantLock());
}
/**
* Wrap provided Lock instance.
* @param lock Lock instance to wrap in AutoCloseable API.
*/
public AutoCloseableLock(Lock lock) {
this.lock = lock;
}
/**
@ -86,7 +97,7 @@ public class AutoCloseableLock implements AutoCloseable {
/**
* A wrapper method that makes a call to {@code tryLock()} of
* the underlying {@code ReentrantLock} object.
* the underlying {@code Lock} object.
*
* If the lock is not held by another thread, acquires the lock, set the
* hold count to one and returns {@code true}.
@ -116,7 +127,12 @@ public class AutoCloseableLock implements AutoCloseable {
* @return {@code true} if any thread holds this lock and
* {@code false} otherwise
*/
public boolean isLocked() {
return lock.isLocked();
@VisibleForTesting
boolean isLocked() {
if (lock instanceof ReentrantLock) {
return ((ReentrantLock)lock).isLocked();
}
throw new UnsupportedOperationException();
}
}
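The reason for the AutoCloseable wrapper is try-with-resources. A usage sketch, assuming the class also exposes an acquire() method that takes the lock and returns the wrapper (that method sits outside the hunks shown here):

// Usage sketch under the stated assumption about acquire(); close() releases the lock
// even if the critical section throws.
AutoCloseableLock guard = new AutoCloseableLock(new ReentrantLock());
try (AutoCloseableLock l = guard.acquire()) {
  // critical section: runs with the lock held
}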

View File: org.apache.hadoop.util.DataChecksum

@ -304,7 +304,7 @@ public class DataChecksum implements Checksum {
bytesPerChecksum, checksums.array(), crcsOffset, fileName, basePos);
return;
}
if (NativeCrc32.isAvailable()) {
if (NativeCrc32.isAvailable() && data.isDirect()) {
NativeCrc32.verifyChunkedSums(bytesPerChecksum, type.id, checksums, data,
fileName, basePos);
} else {
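One side of this hunk additionally gates the native verification on data.isDirect(). An illustrative-only reminder of what that predicate distinguishes:

// Illustrative only: the guard keys on whether the buffer is backed by native memory.
ByteBuffer heap   = ByteBuffer.allocate(4096);       // isDirect() == false -> Java CRC fallback
ByteBuffer direct = ByteBuffer.allocateDirect(4096); // isDirect() == true  -> eligible for NativeCrc32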

View File: org.apache.hadoop.util.NodeHealthScriptRunner

@ -106,7 +106,6 @@ public class NodeHealthScriptRunner extends AbstractService {
shexec.execute();
} catch (ExitCodeException e) {
// ignore the exit code of the script
exceptionStackTrace = StringUtils.stringifyException(e);
status = HealthCheckerExitStatus.FAILED_WITH_EXIT_CODE;
// On Windows, we will not hit the Stream closed IOException
// thrown by stdout buffered reader for timeout event.
@ -163,7 +162,7 @@ public class NodeHealthScriptRunner extends AbstractService {
setHealthStatus(false, exceptionStackTrace);
break;
case FAILED_WITH_EXIT_CODE:
setHealthStatus(false, exceptionStackTrace);
setHealthStatus(true, "", now);
break;
case FAILED:
setHealthStatus(false, shexec.getOutput());

View File: org.apache.hadoop.fs.TestPath

@ -17,9 +17,14 @@
*/
package org.apache.hadoop.fs;
import org.junit.Assert;
import org.junit.Test;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.Arrays;
@ -506,4 +511,19 @@ public class TestPath {
assertFalse(Path.isWindowsAbsolutePath("C:test", false));
assertFalse(Path.isWindowsAbsolutePath("/C:test", true));
}
@Test(timeout = 30000)
public void testSerDeser() throws Throwable {
Path source = new Path("hdfs://localhost:4040/scratch");
ByteArrayOutputStream baos = new ByteArrayOutputStream(256);
try(ObjectOutputStream oos = new ObjectOutputStream(baos)) {
oos.writeObject(source);
}
ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray());
try (ObjectInputStream ois = new ObjectInputStream(bais)) {
Path deser = (Path) ois.readObject();
Assert.assertEquals(source, deser);
}
}
}

View File: org.apache.hadoop.ipc.RPCCallBenchmark

@ -17,8 +17,13 @@
*/
package org.apache.hadoop.ipc;
import com.google.common.base.Joiner;
import com.google.protobuf.BlockingService;
import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.lang.management.ThreadMXBean;
import java.net.InetSocketAddress;
import java.security.PrivilegedExceptionAction;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.GnuParser;
@ -29,6 +34,7 @@ import org.apache.commons.cli.ParseException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.ipc.RPC.Server;
import org.apache.hadoop.ipc.TestRPC.TestProtocol;
import org.apache.hadoop.ipc.protobuf.TestProtos.EchoRequestProto;
import org.apache.hadoop.ipc.protobuf.TestProtos.EchoResponseProto;
import org.apache.hadoop.ipc.protobuf.TestRpcServiceProtos.TestProtobufRpcProto;
@ -39,12 +45,8 @@ import org.apache.hadoop.test.MultithreadedTestUtil.TestContext;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.lang.management.ThreadMXBean;
import java.net.InetSocketAddress;
import java.security.PrivilegedExceptionAction;
import java.util.concurrent.atomic.AtomicLong;
import com.google.common.base.Joiner;
import com.google.protobuf.BlockingService;
/**
* Benchmark for protobuf RPC.
@ -66,7 +68,7 @@ public class RPCCallBenchmark extends TestRpcBase implements Tool {
public int secondsToRun = 15;
private int msgSize = 1024;
public Class<? extends RpcEngine> rpcEngine =
ProtobufRpcEngine.class;
WritableRpcEngine.class;
private MyOptions(String args[]) {
try {
@ -133,7 +135,7 @@ public class RPCCallBenchmark extends TestRpcBase implements Tool {
opts.addOption(
OptionBuilder.withLongOpt("engine").hasArg(true)
.withArgName("protobuf")
.withArgName("writable|protobuf")
.withDescription("engine to use")
.create('e'));
@ -182,6 +184,8 @@ public class RPCCallBenchmark extends TestRpcBase implements Tool {
String eng = line.getOptionValue('e');
if ("protobuf".equals(eng)) {
rpcEngine = ProtobufRpcEngine.class;
} else if ("writable".equals(eng)) {
rpcEngine = WritableRpcEngine.class;
} else {
throw new ParseException("invalid engine: " + eng);
}
@ -233,6 +237,11 @@ public class RPCCallBenchmark extends TestRpcBase implements Tool {
server = new RPC.Builder(conf).setProtocol(TestRpcService.class)
.setInstance(service).setBindAddress(opts.host).setPort(opts.getPort())
.setNumHandlers(opts.serverThreads).setVerbose(false).build();
} else if (opts.rpcEngine == WritableRpcEngine.class) {
server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
.setInstance(new TestRPC.TestImpl()).setBindAddress(opts.host)
.setPort(opts.getPort()).setNumHandlers(opts.serverThreads)
.setVerbose(false).build();
} else {
throw new RuntimeException("Bad engine: " + opts.rpcEngine);
}
@ -390,6 +399,15 @@ public class RPCCallBenchmark extends TestRpcBase implements Tool {
return responseProto.getMessage();
}
};
} else if (opts.rpcEngine == WritableRpcEngine.class) {
final TestProtocol proxy = RPC.getProxy(
TestProtocol.class, TestProtocol.versionID, addr, conf);
return new RpcServiceWrapper() {
@Override
public String doEcho(String msg) throws Exception {
return proxy.echo(msg);
}
};
} else {
throw new RuntimeException("unsupported engine: " + opts.rpcEngine);
}
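Tying the option handling above together, the benchmark is normally driven through ToolRunner. A sketch using only the flags that the TestRPCCallBenchmark change later in this commit passes:

// Sketch of invoking the benchmark programmatically; the flags mirror the test below.
int rc = ToolRunner.run(new RPCCallBenchmark(), new String[] {
    "--clientThreads", "30", "--serverThreads", "30",
    "--time", "5", "--messageSize", "1024",
    "--engine", "protobuf"});   // "writable" is also accepted on the branch side of this merge
// rc == 0 indicates the run completed successfully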

View File: org.apache.hadoop.ipc.TestMultipleProtocolServer

@ -17,28 +17,252 @@
*/
package org.apache.hadoop.ipc;
import java.io.IOException;
import java.net.InetSocketAddress;
import org.junit.Assert;
import org.apache.hadoop.conf.Configuration;
import org.junit.After;
import org.apache.hadoop.ipc.protobuf.TestRpcServiceProtos.TestProtobufRpcProto;
import org.apache.hadoop.net.NetUtils;
import org.junit.Before;
import org.junit.After;
import org.junit.Test;
import com.google.protobuf.BlockingService;
public class TestMultipleProtocolServer extends TestRpcBase {
private static InetSocketAddress addr;
private static RPC.Server server;
@Before
public void setUp() throws Exception {
super.setupConf();
server = setupTestServer(conf, 2);
private static Configuration conf = new Configuration();
@ProtocolInfo(protocolName="Foo")
interface Foo0 extends VersionedProtocol {
public static final long versionID = 0L;
String ping() throws IOException;
}
@ProtocolInfo(protocolName="Foo")
interface Foo1 extends VersionedProtocol {
public static final long versionID = 1L;
String ping() throws IOException;
String ping2() throws IOException;
}
@ProtocolInfo(protocolName="Foo")
interface FooUnimplemented extends VersionedProtocol {
public static final long versionID = 2L;
String ping() throws IOException;
}
interface Mixin extends VersionedProtocol{
public static final long versionID = 0L;
void hello() throws IOException;
}
interface Bar extends Mixin {
public static final long versionID = 0L;
int echo(int i) throws IOException;
}
class Foo0Impl implements Foo0 {
@Override
public long getProtocolVersion(String protocol, long clientVersion)
throws IOException {
return Foo0.versionID;
}
@SuppressWarnings("unchecked")
@Override
public ProtocolSignature getProtocolSignature(String protocol,
long clientVersion, int clientMethodsHash) throws IOException {
Class<? extends VersionedProtocol> inter;
try {
inter = (Class<? extends VersionedProtocol>)getClass().
getGenericInterfaces()[0];
} catch (Exception e) {
throw new IOException(e);
}
return ProtocolSignature.getProtocolSignature(clientMethodsHash,
getProtocolVersion(protocol, clientVersion), inter);
}
@Override
public String ping() {
return "Foo0";
}
}
class Foo1Impl implements Foo1 {
@Override
public long getProtocolVersion(String protocol, long clientVersion)
throws IOException {
return Foo1.versionID;
}
@SuppressWarnings("unchecked")
@Override
public ProtocolSignature getProtocolSignature(String protocol,
long clientVersion, int clientMethodsHash) throws IOException {
Class<? extends VersionedProtocol> inter;
try {
inter = (Class<? extends VersionedProtocol>)getClass().
getGenericInterfaces()[0];
} catch (Exception e) {
throw new IOException(e);
}
return ProtocolSignature.getProtocolSignature(clientMethodsHash,
getProtocolVersion(protocol, clientVersion), inter);
}
@Override
public String ping() {
return "Foo1";
}
@Override
public String ping2() {
return "Foo1";
}
}
class BarImpl implements Bar {
@Override
public long getProtocolVersion(String protocol, long clientVersion)
throws IOException {
return Bar.versionID;
}
@SuppressWarnings("unchecked")
@Override
public ProtocolSignature getProtocolSignature(String protocol,
long clientVersion, int clientMethodsHash) throws IOException {
Class<? extends VersionedProtocol> inter;
try {
inter = (Class<? extends VersionedProtocol>)getClass().
getGenericInterfaces()[0];
} catch (Exception e) {
throw new IOException(e);
}
return ProtocolSignature.getProtocolSignature(clientMethodsHash,
getProtocolVersion(protocol, clientVersion), inter);
}
@Override
public int echo(int i) {
return i;
}
@Override
public void hello() {
}
}
@Before
public void setUp() throws Exception {
// create a server with two handlers
server = new RPC.Builder(conf).setProtocol(Foo0.class)
.setInstance(new Foo0Impl()).setBindAddress(ADDRESS).setPort(0)
.setNumHandlers(2).setVerbose(false).build();
server.addProtocol(RPC.RpcKind.RPC_WRITABLE, Foo1.class, new Foo1Impl());
server.addProtocol(RPC.RpcKind.RPC_WRITABLE, Bar.class, new BarImpl());
server.addProtocol(RPC.RpcKind.RPC_WRITABLE, Mixin.class, new BarImpl());
// Add Protobuf server
// Create server side implementation
PBServerImpl pbServerImpl = new PBServerImpl();
BlockingService service = TestProtobufRpcProto
.newReflectiveBlockingService(pbServerImpl);
server.addProtocol(RPC.RpcKind.RPC_PROTOCOL_BUFFER, TestRpcService.class,
service);
server.start();
addr = NetUtils.getConnectAddress(server);
}
@After
public void tearDown() throws Exception {
server.stop();
}
@Test
public void test1() throws IOException {
ProtocolProxy<?> proxy;
proxy = RPC.getProtocolProxy(Foo0.class, Foo0.versionID, addr, conf);
Foo0 foo0 = (Foo0)proxy.getProxy();
Assert.assertEquals("Foo0", foo0.ping());
proxy = RPC.getProtocolProxy(Foo1.class, Foo1.versionID, addr, conf);
Foo1 foo1 = (Foo1)proxy.getProxy();
Assert.assertEquals("Foo1", foo1.ping());
Assert.assertEquals("Foo1", foo1.ping());
proxy = RPC.getProtocolProxy(Bar.class, Foo1.versionID, addr, conf);
Bar bar = (Bar)proxy.getProxy();
Assert.assertEquals(99, bar.echo(99));
// Now test Mixin class method
Mixin mixin = bar;
mixin.hello();
}
// Server does not implement the FooUnimplemented version of protocol Foo.
// See that calls to it fail.
@Test(expected=IOException.class)
public void testNonExistingProtocol() throws IOException {
ProtocolProxy<?> proxy;
proxy = RPC.getProtocolProxy(FooUnimplemented.class,
FooUnimplemented.versionID, addr, conf);
FooUnimplemented foo = (FooUnimplemented)proxy.getProxy();
foo.ping();
}
/**
* getProtocolVersion of an unimplemented version should return highest version
* Similarly getProtocolSignature should work.
* @throws IOException
*/
@Test
public void testNonExistingProtocol2() throws IOException {
ProtocolProxy<?> proxy;
proxy = RPC.getProtocolProxy(FooUnimplemented.class,
FooUnimplemented.versionID, addr, conf);
FooUnimplemented foo = (FooUnimplemented)proxy.getProxy();
Assert.assertEquals(Foo1.versionID,
foo.getProtocolVersion(RPC.getProtocolName(FooUnimplemented.class),
FooUnimplemented.versionID));
foo.getProtocolSignature(RPC.getProtocolName(FooUnimplemented.class),
FooUnimplemented.versionID, 0);
}
@Test(expected=IOException.class)
public void testIncorrectServerCreation() throws IOException {
new RPC.Builder(conf).setProtocol(Foo1.class).setInstance(new Foo0Impl())
.setBindAddress(ADDRESS).setPort(0).setNumHandlers(2).setVerbose(false)
.build();
}
// Now test a PB service - a server hosts both PB and Writable Rpcs.
@Test
public void testPBService() throws Exception {

View File: org.apache.hadoop.ipc.TestRPCCallBenchmark

@ -25,6 +25,19 @@ import org.junit.Test;
public class TestRPCCallBenchmark {
@Test(timeout=20000)
public void testBenchmarkWithWritable() throws Exception {
int rc = ToolRunner.run(new RPCCallBenchmark(),
new String[] {
"--clientThreads", "30",
"--serverThreads", "30",
"--time", "5",
"--serverReaderThreads", "4",
"--messageSize", "1024",
"--engine", "writable"});
assertEquals(0, rc);
}
@Test(timeout=20000)
public void testBenchmarkWithProto() throws Exception {
int rc = ToolRunner.run(new RPCCallBenchmark(),

View File: org.apache.hadoop.ipc.TestRPCCompatibility

@ -18,19 +18,27 @@
package org.apache.hadoop.ipc;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.fail;
import java.io.IOException;
import java.lang.reflect.Method;
import java.net.InetSocketAddress;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import org.junit.Assert;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.protobuf.ProtocolInfoProtos.GetProtocolSignatureRequestProto;
import org.apache.hadoop.ipc.protobuf.ProtocolInfoProtos.GetProtocolSignatureResponseProto;
import org.apache.hadoop.ipc.protobuf.ProtocolInfoProtos.ProtocolSignatureProto;
import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto.RpcErrorCodeProto;
import org.apache.hadoop.net.NetUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
/** Unit test for supporting method-name based compatible RPCs. */
public class TestRPCCompatibility {
@ -41,7 +49,7 @@ public class TestRPCCompatibility {
public static final Log LOG =
LogFactory.getLog(TestRPCCompatibility.class);
private static Configuration conf = new Configuration();
public interface TestProtocol0 extends VersionedProtocol {
@ -112,21 +120,6 @@ public class TestRPCCompatibility {
@Before
public void setUp() {
ProtocolSignature.resetCache();
RPC.setProtocolEngine(conf,
TestProtocol0.class, ProtobufRpcEngine.class);
RPC.setProtocolEngine(conf,
TestProtocol1.class, ProtobufRpcEngine.class);
RPC.setProtocolEngine(conf,
TestProtocol2.class, ProtobufRpcEngine.class);
RPC.setProtocolEngine(conf,
TestProtocol3.class, ProtobufRpcEngine.class);
RPC.setProtocolEngine(conf,
TestProtocol4.class, ProtobufRpcEngine.class);
}
@After
@ -140,7 +133,117 @@ public class TestRPCCompatibility {
server = null;
}
}
@Test // old client vs new server
public void testVersion0ClientVersion1Server() throws Exception {
// create a server with two handlers
TestImpl1 impl = new TestImpl1();
server = new RPC.Builder(conf).setProtocol(TestProtocol1.class)
.setInstance(impl).setBindAddress(ADDRESS).setPort(0).setNumHandlers(2)
.setVerbose(false).build();
server.addProtocol(RPC.RpcKind.RPC_WRITABLE, TestProtocol0.class, impl);
server.start();
addr = NetUtils.getConnectAddress(server);
proxy = RPC.getProtocolProxy(
TestProtocol0.class, TestProtocol0.versionID, addr, conf);
TestProtocol0 proxy0 = (TestProtocol0)proxy.getProxy();
proxy0.ping();
}
@Test // old client vs new server
public void testVersion1ClientVersion0Server() throws Exception {
// create a server with two handlers
server = new RPC.Builder(conf).setProtocol(TestProtocol0.class)
.setInstance(new TestImpl0()).setBindAddress(ADDRESS).setPort(0)
.setNumHandlers(2).setVerbose(false).build();
server.start();
addr = NetUtils.getConnectAddress(server);
proxy = RPC.getProtocolProxy(
TestProtocol1.class, TestProtocol1.versionID, addr, conf);
TestProtocol1 proxy1 = (TestProtocol1)proxy.getProxy();
proxy1.ping();
try {
proxy1.echo("hello");
fail("Echo should fail");
} catch(IOException e) {
}
}
private class Version2Client {
private TestProtocol2 proxy2;
private ProtocolProxy<TestProtocol2> serverInfo;
private Version2Client() throws IOException {
serverInfo = RPC.getProtocolProxy(
TestProtocol2.class, TestProtocol2.versionID, addr, conf);
proxy2 = serverInfo.getProxy();
}
public int echo(int value) throws IOException, NumberFormatException {
if (serverInfo.isMethodSupported("echo", int.class)) {
System.out.println("echo int is supported");
return -value; // use version 3 echo long
} else { // server is version 2
System.out.println("echo int is NOT supported");
return Integer.parseInt(proxy2.echo(String.valueOf(value)));
}
}
public String echo(String value) throws IOException {
return proxy2.echo(value);
}
public void ping() throws IOException {
proxy2.ping();
}
}
@Test // Compatible new client & old server
public void testVersion2ClientVersion1Server() throws Exception {
// create a server with two handlers
TestImpl1 impl = new TestImpl1();
server = new RPC.Builder(conf).setProtocol(TestProtocol1.class)
.setInstance(impl).setBindAddress(ADDRESS).setPort(0).setNumHandlers(2)
.setVerbose(false).build();
server.addProtocol(RPC.RpcKind.RPC_WRITABLE, TestProtocol0.class, impl);
server.start();
addr = NetUtils.getConnectAddress(server);
Version2Client client = new Version2Client();
client.ping();
assertEquals("hello", client.echo("hello"));
// echo(int) is not supported by server, so returning 3
// This verifies that echo(int) and echo(String)'s hash codes are different
assertEquals(3, client.echo(3));
}
@Test // equal version client and server
public void testVersion2ClientVersion2Server() throws Exception {
// create a server with two handlers
TestImpl2 impl = new TestImpl2();
server = new RPC.Builder(conf).setProtocol(TestProtocol2.class)
.setInstance(impl).setBindAddress(ADDRESS).setPort(0).setNumHandlers(2)
.setVerbose(false).build();
server.addProtocol(RPC.RpcKind.RPC_WRITABLE, TestProtocol0.class, impl);
server.start();
addr = NetUtils.getConnectAddress(server);
Version2Client client = new Version2Client();
client.ping();
assertEquals("hello", client.echo("hello"));
// now that echo(int) is supported by the server, echo(int) should return -3
assertEquals(-3, client.echo(3));
}
public interface TestProtocol3 {
int echo(String value);
int echo(int value);
@ -194,4 +297,97 @@ public class TestRPCCompatibility {
@Override
int echo(int value) throws IOException;
}
@Test
public void testVersionMismatch() throws IOException {
server = new RPC.Builder(conf).setProtocol(TestProtocol2.class)
.setInstance(new TestImpl2()).setBindAddress(ADDRESS).setPort(0)
.setNumHandlers(2).setVerbose(false).build();
server.start();
addr = NetUtils.getConnectAddress(server);
TestProtocol4 proxy = RPC.getProxy(TestProtocol4.class,
TestProtocol4.versionID, addr, conf);
try {
proxy.echo(21);
fail("The call must throw VersionMismatch exception");
} catch (RemoteException ex) {
Assert.assertEquals(RPC.VersionMismatch.class.getName(),
ex.getClassName());
Assert.assertTrue(ex.getErrorCode().equals(
RpcErrorCodeProto.ERROR_RPC_VERSION_MISMATCH));
} catch (IOException ex) {
fail("Expected version mismatch but got " + ex);
}
}
@Test
public void testIsMethodSupported() throws IOException {
server = new RPC.Builder(conf).setProtocol(TestProtocol2.class)
.setInstance(new TestImpl2()).setBindAddress(ADDRESS).setPort(0)
.setNumHandlers(2).setVerbose(false).build();
server.start();
addr = NetUtils.getConnectAddress(server);
TestProtocol2 proxy = RPC.getProxy(TestProtocol2.class,
TestProtocol2.versionID, addr, conf);
boolean supported = RpcClientUtil.isMethodSupported(proxy,
TestProtocol2.class, RPC.RpcKind.RPC_WRITABLE,
RPC.getProtocolVersion(TestProtocol2.class), "echo");
Assert.assertTrue(supported);
supported = RpcClientUtil.isMethodSupported(proxy,
TestProtocol2.class, RPC.RpcKind.RPC_PROTOCOL_BUFFER,
RPC.getProtocolVersion(TestProtocol2.class), "echo");
Assert.assertFalse(supported);
}
/**
* Verify that ProtocolMetaInfoServerSideTranslatorPB correctly looks up
* the server registry to extract protocol signatures and versions.
*/
@Test
public void testProtocolMetaInfoSSTranslatorPB() throws Exception {
TestImpl1 impl = new TestImpl1();
server = new RPC.Builder(conf).setProtocol(TestProtocol1.class)
.setInstance(impl).setBindAddress(ADDRESS).setPort(0).setNumHandlers(2)
.setVerbose(false).build();
server.addProtocol(RPC.RpcKind.RPC_WRITABLE, TestProtocol0.class, impl);
server.start();
ProtocolMetaInfoServerSideTranslatorPB xlator =
new ProtocolMetaInfoServerSideTranslatorPB(server);
GetProtocolSignatureResponseProto resp = xlator.getProtocolSignature(
null,
createGetProtocolSigRequestProto(TestProtocol1.class,
RPC.RpcKind.RPC_PROTOCOL_BUFFER));
//No signatures should be found
Assert.assertEquals(0, resp.getProtocolSignatureCount());
resp = xlator.getProtocolSignature(
null,
createGetProtocolSigRequestProto(TestProtocol1.class,
RPC.RpcKind.RPC_WRITABLE));
Assert.assertEquals(1, resp.getProtocolSignatureCount());
ProtocolSignatureProto sig = resp.getProtocolSignatureList().get(0);
Assert.assertEquals(TestProtocol1.versionID, sig.getVersion());
boolean found = false;
int expected = ProtocolSignature.getFingerprint(TestProtocol1.class
.getMethod("echo", String.class));
for (int m : sig.getMethodsList()) {
if (expected == m) {
found = true;
break;
}
}
Assert.assertTrue(found);
}
private GetProtocolSignatureRequestProto createGetProtocolSigRequestProto(
Class<?> protocol, RPC.RpcKind rpcKind) {
GetProtocolSignatureRequestProto.Builder builder =
GetProtocolSignatureRequestProto.newBuilder();
builder.setProtocol(protocol.getName());
builder.setRpcKind(rpcKind.toString());
return builder.build();
}
}

View File: org.apache.hadoop.ipc.TestRPCWaitForProxy

@ -18,6 +18,8 @@
package org.apache.hadoop.ipc;
import org.apache.hadoop.conf.Configuration;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.*;
import org.apache.hadoop.ipc.TestRPC.TestProtocol;
import org.junit.Assert;
import org.junit.Test;
import org.slf4j.Logger;
@ -28,13 +30,11 @@ import java.net.ConnectException;
import java.net.InetSocketAddress;
import java.nio.channels.ClosedByInterruptException;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY;
/**
* tests that the proxy can be interrupted
*/
public class TestRPCWaitForProxy extends TestRpcBase {
public class TestRPCWaitForProxy extends Assert {
private static final String ADDRESS = "0.0.0.0";
private static final Logger
LOG = LoggerFactory.getLogger(TestRPCWaitForProxy.class);
@ -46,15 +46,14 @@ public class TestRPCWaitForProxy extends TestRpcBase {
*
* @throws Throwable any exception other than that which was expected
*/
@Test(timeout = 50000)
@Test(timeout = 10000)
public void testWaitForProxy() throws Throwable {
RpcThread worker = new RpcThread(0);
worker.start();
worker.join();
Throwable caught = worker.getCaught();
Throwable cause = caught.getCause();
Assert.assertNotNull("No exception was raised", cause);
if (!(cause instanceof ConnectException)) {
assertNotNull("No exception was raised", caught);
if (!(caught instanceof ConnectException)) {
throw caught;
}
}
@ -70,11 +69,11 @@ public class TestRPCWaitForProxy extends TestRpcBase {
RpcThread worker = new RpcThread(100);
worker.start();
Thread.sleep(1000);
Assert.assertTrue("worker hasn't started", worker.waitStarted);
assertTrue("worker hasn't started", worker.waitStarted);
worker.interrupt();
worker.join();
Throwable caught = worker.getCaught();
Assert.assertNotNull("No exception was raised", caught);
assertNotNull("No exception was raised", caught);
// looking for the root cause here, which can be wrapped
// as part of the NetUtils work. Having this test look
// a the type of exception there would be brittle to improvements
@ -83,8 +82,6 @@ public class TestRPCWaitForProxy extends TestRpcBase {
if (cause == null) {
// no inner cause, use outer exception as root cause.
cause = caught;
} else if (cause.getCause() != null) {
cause = cause.getCause();
}
if (!(cause instanceof InterruptedIOException)
&& !(cause instanceof ClosedByInterruptException)) {
@ -115,16 +112,12 @@ public class TestRPCWaitForProxy extends TestRpcBase {
IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
connectRetries);
waitStarted = true;
short invalidPort = 20;
InetSocketAddress invalidAddress = new InetSocketAddress(ADDRESS,
invalidPort);
TestRpcBase.TestRpcService proxy = RPC.getProxy(
TestRpcBase.TestRpcService.class,
1L, invalidAddress, conf);
// Test echo method
proxy.echo(null, newEchoRequest("hello"));
TestProtocol proxy = RPC.waitForProxy(TestProtocol.class,
TestProtocol.versionID,
new InetSocketAddress(ADDRESS, 20),
config,
15000L);
proxy.echo("");
} catch (Throwable throwable) {
caught = throwable;
}

View File: org.apache.hadoop.ipc.TestRpcBase

@ -112,8 +112,7 @@ public class TestRpcBase {
return setupTestServer(builder);
}
protected static RPC.Server setupTestServer(
RPC.Builder builder) throws IOException {
protected static RPC.Server setupTestServer(RPC.Builder builder) throws IOException {
RPC.Server server = builder.build();
server.start();
@ -176,21 +175,17 @@ public class TestRpcBase {
public TestTokenIdentifier() {
this(new Text(), new Text());
}
public TestTokenIdentifier(Text tokenid) {
this(tokenid, new Text());
}
public TestTokenIdentifier(Text tokenid, Text realUser) {
this.tokenid = tokenid == null ? new Text() : tokenid;
this.realUser = realUser == null ? new Text() : realUser;
}
@Override
public Text getKind() {
return KIND_NAME;
}
@Override
public UserGroupInformation getUser() {
if (realUser.toString().isEmpty()) {
@ -208,7 +203,6 @@ public class TestRpcBase {
tokenid.readFields(in);
realUser.readFields(in);
}
@Override
public void write(DataOutput out) throws IOException {
tokenid.write(out);
@ -240,7 +234,7 @@ public class TestRpcBase {
@SuppressWarnings("unchecked")
@Override
public Token<TestTokenIdentifier> selectToken(Text service,
Collection<Token<? extends TokenIdentifier>> tokens) {
Collection<Token<? extends TokenIdentifier>> tokens) {
if (service == null) {
return null;
}
@ -394,17 +388,19 @@ public class TestRpcBase {
}
@Override
public TestProtos.UserResponseProto getAuthUser(
public TestProtos.AuthUserResponseProto getAuthUser(
RpcController controller, TestProtos.EmptyRequestProto request)
throws ServiceException {
UserGroupInformation authUser;
UserGroupInformation authUser = null;
try {
authUser = UserGroupInformation.getCurrentUser();
} catch (IOException e) {
throw new ServiceException(e);
}
return newUserResponse(authUser.getUserName());
return TestProtos.AuthUserResponseProto.newBuilder()
.setAuthUser(authUser.getUserName())
.build();
}
@Override
@ -436,34 +432,6 @@ public class TestRpcBase {
return TestProtos.EmptyResponseProto.newBuilder().build();
}
@Override
public TestProtos.UserResponseProto getCurrentUser(
RpcController controller,
TestProtos.EmptyRequestProto request) throws ServiceException {
String user;
try {
user = UserGroupInformation.getCurrentUser().toString();
} catch (IOException e) {
throw new ServiceException("Failed to get current user", e);
}
return newUserResponse(user);
}
@Override
public TestProtos.UserResponseProto getServerRemoteUser(
RpcController controller,
TestProtos.EmptyRequestProto request) throws ServiceException {
String serverRemoteUser = Server.getRemoteUser().toString();
return newUserResponse(serverRemoteUser);
}
private TestProtos.UserResponseProto newUserResponse(String user) {
return TestProtos.UserResponseProto.newBuilder()
.setUser(user)
.build();
}
}
protected static TestProtos.EmptyRequestProto newEmptyRequest() {
@ -510,4 +478,8 @@ public class TestRpcBase {
}
return null;
}
protected static String convert(TestProtos.AuthUserResponseProto response) {
return response.getAuthUser();
}
}

View File: org.apache.hadoop.ipc.TestSaslRPC

@ -45,55 +45,30 @@ import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
import javax.security.auth.callback.Callback;
import javax.security.auth.callback.CallbackHandler;
import javax.security.auth.callback.NameCallback;
import javax.security.auth.callback.PasswordCallback;
import javax.security.auth.callback.UnsupportedCallbackException;
import javax.security.sasl.AuthorizeCallback;
import javax.security.sasl.Sasl;
import javax.security.sasl.SaslClient;
import javax.security.sasl.SaslException;
import javax.security.sasl.SaslServer;
import javax.security.auth.callback.*;
import javax.security.sasl.*;
import java.io.IOException;
import java.lang.annotation.Annotation;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.security.PrivilegedExceptionAction;
import java.security.Security;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.*;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.regex.Pattern;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_RPC_PROTECTION;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION;
import static org.apache.hadoop.security.SaslRpcServer.AuthMethod.KERBEROS;
import static org.apache.hadoop.security.SaslRpcServer.AuthMethod.SIMPLE;
import static org.apache.hadoop.security.SaslRpcServer.AuthMethod.TOKEN;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNotSame;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.apache.hadoop.security.SaslRpcServer.AuthMethod.*;
import static org.junit.Assert.*;
/** Unit tests for using Sasl over RPC. */
@RunWith(Parameterized.class)
public class TestSaslRPC extends TestRpcBase {
@Parameters
public static Collection<Object[]> data() {
Collection<Object[]> params = new ArrayList<>();
Collection<Object[]> params = new ArrayList<Object[]>();
for (QualityOfProtection qop : QualityOfProtection.values()) {
params.add(new Object[]{ new QualityOfProtection[]{qop},qop, null });
}
@ -139,7 +114,7 @@ public class TestSaslRPC extends TestRpcBase {
NONE(),
VALID(),
INVALID(),
OTHER()
OTHER();
}
@BeforeClass
@ -255,7 +230,7 @@ public class TestSaslRPC extends TestRpcBase {
final Server server = setupTestServer(conf, 5, sm);
doDigestRpc(server, sm);
} finally {
SecurityUtil.setSecurityInfoProviders();
SecurityUtil.setSecurityInfoProviders(new SecurityInfo[0]);
}
}
@ -284,7 +259,7 @@ public class TestSaslRPC extends TestRpcBase {
addr = NetUtils.getConnectAddress(server);
TestTokenIdentifier tokenId = new TestTokenIdentifier(new Text(current
.getUserName()));
Token<TestTokenIdentifier> token = new Token<>(tokenId, sm);
Token<TestTokenIdentifier> token = new Token<TestTokenIdentifier>(tokenId, sm);
SecurityUtil.setTokenService(token, addr);
current.addToken(token);
@ -321,8 +296,8 @@ public class TestSaslRPC extends TestRpcBase {
// set doPing to true
newConf.setBoolean(CommonConfigurationKeys.IPC_CLIENT_PING_KEY, true);
ConnectionId remoteId = ConnectionId.getConnectionId(
new InetSocketAddress(0), TestRpcService.class, null, 0, null, newConf);
ConnectionId remoteId = ConnectionId.getConnectionId(new InetSocketAddress(0),
TestRpcService.class, null, 0, null, newConf);
assertEquals(CommonConfigurationKeys.IPC_PING_INTERVAL_DEFAULT,
remoteId.getPingInterval());
// set doPing to false
@ -831,13 +806,13 @@ public class TestSaslRPC extends TestRpcBase {
final TestTokenSecretManager sm = new TestTokenSecretManager();
boolean useSecretManager = (serverAuth != SIMPLE);
if (enableSecretManager != null) {
useSecretManager &= enableSecretManager;
useSecretManager &= enableSecretManager.booleanValue();
}
if (forceSecretManager != null) {
useSecretManager |= forceSecretManager;
useSecretManager |= forceSecretManager.booleanValue();
}
final SecretManager<?> serverSm = useSecretManager ? sm : null;
Server server = serverUgi.doAs(new PrivilegedExceptionAction<Server>() {
@Override
public Server run() throws IOException {
@ -892,13 +867,13 @@ public class TestSaslRPC extends TestRpcBase {
proxy.ping(null, newEmptyRequest());
// make sure the other side thinks we are who we said we are!!!
assertEquals(clientUgi.getUserName(),
proxy.getAuthUser(null, newEmptyRequest()).getUser());
convert(proxy.getAuthUser(null, newEmptyRequest())));
AuthMethod authMethod =
convert(proxy.getAuthMethod(null, newEmptyRequest()));
// verify sasl completed with correct QOP
assertEquals((authMethod != SIMPLE) ? expectedQop.saslQop : null,
RPC.getConnectionIdForProxy(proxy).getSaslQop());
return authMethod != null ? authMethod.toString() : null;
RPC.getConnectionIdForProxy(proxy).getSaslQop());
return authMethod.toString();
} catch (ServiceException se) {
if (se.getCause() instanceof RemoteException) {
throw (RemoteException) se.getCause();
@ -923,18 +898,21 @@ public class TestSaslRPC extends TestRpcBase {
String actual) {
assertEquals(expect.toString(), actual);
}
private static void assertAuthEquals(Pattern expect, String actual) {
private static void assertAuthEquals(Pattern expect,
String actual) {
// this allows us to see the regexp and the value it didn't match
if (!expect.matcher(actual).matches()) {
fail(); // it failed
assertEquals(expect, actual); // it failed
} else {
assertTrue(true); // it matched
}
}
/*
* Class used to test overriding QOP values using SaslPropertiesResolver
*/
static class AuthSaslPropertiesResolver extends SaslPropertiesResolver {
static class AuthSaslPropertiesResolver extends SaslPropertiesResolver{
@Override
public Map<String, String> getServerProperties(InetAddress address) {
@ -943,7 +921,7 @@ public class TestSaslRPC extends TestRpcBase {
return newPropertes;
}
}
public static void main(String[] args) throws Exception {
System.out.println("Testing Kerberos authentication over RPC");
if (args.length != 2) {

View File: org.apache.hadoop.security.TestDoAsEffectiveUser

@ -17,35 +17,40 @@
*/
package org.apache.hadoop.security;
import com.google.protobuf.ServiceException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.ipc.TestRpcBase;
import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
import org.apache.hadoop.security.authorize.DefaultImpersonationProvider;
import org.apache.hadoop.security.authorize.ProxyUsers;
import org.apache.hadoop.security.token.Token;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import java.io.IOException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.NetworkInterface;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.Enumeration;
import org.junit.Assert;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.ProtocolSignature;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.ipc.VersionedProtocol;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
import org.apache.hadoop.security.authorize.DefaultImpersonationProvider;
import org.apache.hadoop.security.authorize.ProxyUsers;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenInfo;
import org.junit.Before;
import org.junit.Test;
import org.apache.hadoop.ipc.TestRpcBase.TestTokenSecretManager;
import org.apache.hadoop.ipc.TestRpcBase.TestTokenIdentifier;
import org.apache.hadoop.ipc.TestRpcBase.TestTokenSelector;
import org.apache.commons.logging.*;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
/**
* Test do as effective user.
*
*/
public class TestDoAsEffectiveUser extends TestRpcBase {
public class TestDoAsEffectiveUser {
final private static String REAL_USER_NAME = "realUser1@HADOOP.APACHE.ORG";
final private static String REAL_USER_SHORT_NAME = "realUser1";
final private static String PROXY_USER_NAME = "proxyUser";
@ -53,8 +58,8 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
final private static String GROUP2_NAME = "group2";
final private static String[] GROUP_NAMES = new String[] { GROUP1_NAME,
GROUP2_NAME };
private TestRpcService client;
private static final String ADDRESS = "0.0.0.0";
private TestProtocol proxy;
private static final Configuration masterConf = new Configuration();
@ -77,7 +82,7 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
private void configureSuperUserIPAddresses(Configuration conf,
String superUserShortName) throws IOException {
ArrayList<String> ipList = new ArrayList<>();
ArrayList<String> ipList = new ArrayList<String>();
Enumeration<NetworkInterface> netInterfaceList = NetworkInterface
.getNetworkInterfaces();
while (netInterfaceList.hasMoreElements()) {
@ -125,19 +130,50 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
curUGI.toString());
}
private void checkRemoteUgi(final UserGroupInformation ugi,
final Configuration conf) throws Exception {
@TokenInfo(TestTokenSelector.class)
public interface TestProtocol extends VersionedProtocol {
public static final long versionID = 1L;
String aMethod() throws IOException;
String getServerRemoteUser() throws IOException;
}
public class TestImpl implements TestProtocol {
@Override
public String aMethod() throws IOException {
return UserGroupInformation.getCurrentUser().toString();
}
@Override
public String getServerRemoteUser() throws IOException {
return Server.getRemoteUser().toString();
}
@Override
public long getProtocolVersion(String protocol, long clientVersion)
throws IOException {
return TestProtocol.versionID;
}
@Override
public ProtocolSignature getProtocolSignature(String protocol,
long clientVersion, int clientMethodsHash) throws IOException {
return new ProtocolSignature(TestProtocol.versionID, null);
}
}
private void checkRemoteUgi(final Server server,
final UserGroupInformation ugi, final Configuration conf)
throws Exception {
ugi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws ServiceException {
client = getClient(addr, conf);
String currentUser = client.getCurrentUser(null,
newEmptyRequest()).getUser();
String serverRemoteUser = client.getServerRemoteUser(null,
newEmptyRequest()).getUser();
Assert.assertEquals(ugi.toString(), currentUser);
Assert.assertEquals(ugi.toString(), serverRemoteUser);
public Void run() throws IOException {
proxy = RPC.getProxy(
TestProtocol.class, TestProtocol.versionID,
NetUtils.getConnectAddress(server), conf);
Assert.assertEquals(ugi.toString(), proxy.aMethod());
Assert.assertEquals(ugi.toString(), proxy.getServerRemoteUser());
return null;
}
});
@ -149,27 +185,29 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
conf.setStrings(DefaultImpersonationProvider.getTestProvider().
getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME), "group1");
configureSuperUserIPAddresses(conf, REAL_USER_SHORT_NAME);
// Set RPC engine to protobuf RPC engine
RPC.setProtocolEngine(conf, TestRpcService.class,
ProtobufRpcEngine.class);
UserGroupInformation.setConfiguration(conf);
final Server server = setupTestServer(conf, 5);
Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
.setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
.setNumHandlers(5).setVerbose(true).build();
refreshConf(conf);
try {
server.start();
UserGroupInformation realUserUgi = UserGroupInformation
.createRemoteUser(REAL_USER_NAME);
checkRemoteUgi(realUserUgi, conf);
checkRemoteUgi(server, realUserUgi, conf);
UserGroupInformation proxyUserUgi =
UserGroupInformation.createProxyUserForTesting(
UserGroupInformation proxyUserUgi = UserGroupInformation.createProxyUserForTesting(
PROXY_USER_NAME, realUserUgi, GROUP_NAMES);
checkRemoteUgi(proxyUserUgi, conf);
checkRemoteUgi(server, proxyUserUgi, conf);
} catch (Exception e) {
e.printStackTrace();
Assert.fail();
} finally {
stop(server, client);
server.stop();
if (proxy != null) {
RPC.stopProxy(proxy);
}
}
}
@ -180,25 +218,29 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
conf.setStrings(DefaultImpersonationProvider.getTestProvider().
getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME),
"group1");
RPC.setProtocolEngine(conf, TestRpcService.class,
ProtobufRpcEngine.class);
UserGroupInformation.setConfiguration(conf);
final Server server = setupTestServer(conf, 5);
Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
.setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
.setNumHandlers(2).setVerbose(false).build();
refreshConf(conf);
try {
server.start();
UserGroupInformation realUserUgi = UserGroupInformation
.createRemoteUser(REAL_USER_NAME);
checkRemoteUgi(realUserUgi, conf);
checkRemoteUgi(server, realUserUgi, conf);
UserGroupInformation proxyUserUgi = UserGroupInformation
.createProxyUserForTesting(PROXY_USER_NAME, realUserUgi, GROUP_NAMES);
checkRemoteUgi(proxyUserUgi, conf);
checkRemoteUgi(server, proxyUserUgi, conf);
} catch (Exception e) {
e.printStackTrace();
Assert.fail();
} finally {
stop(server, client);
server.stop();
if (proxy != null) {
RPC.stopProxy(proxy);
}
}
}
@ -214,14 +256,17 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
conf.setStrings(DefaultImpersonationProvider.getTestProvider().
getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME),
"group1");
RPC.setProtocolEngine(conf, TestRpcService.class,
ProtobufRpcEngine.class);
UserGroupInformation.setConfiguration(conf);
final Server server = setupTestServer(conf, 5);
Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
.setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
.setNumHandlers(2).setVerbose(false).build();
refreshConf(conf);
try {
server.start();
final InetSocketAddress addr = NetUtils.getConnectAddress(server);
UserGroupInformation realUserUgi = UserGroupInformation
.createRemoteUser(REAL_USER_NAME);
@ -230,10 +275,11 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
String retVal = proxyUserUgi
.doAs(new PrivilegedExceptionAction<String>() {
@Override
public String run() throws ServiceException {
client = getClient(addr, conf);
return client.getCurrentUser(null,
newEmptyRequest()).getUser();
public String run() throws IOException {
proxy = RPC.getProxy(TestProtocol.class,
TestProtocol.versionID, addr, conf);
String ret = proxy.aMethod();
return ret;
}
});
@ -241,7 +287,10 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
} catch (Exception e) {
e.printStackTrace();
} finally {
stop(server, client);
server.stop();
if (proxy != null) {
RPC.stopProxy(proxy);
}
}
}
@ -250,14 +299,17 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
final Configuration conf = new Configuration();
conf.setStrings(DefaultImpersonationProvider.getTestProvider().
getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME), "group1");
RPC.setProtocolEngine(conf, TestRpcService.class,
ProtobufRpcEngine.class);
UserGroupInformation.setConfiguration(conf);
final Server server = setupTestServer(conf, 2);
Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
.setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
.setNumHandlers(2).setVerbose(false).build();
refreshConf(conf);
try {
server.start();
final InetSocketAddress addr = NetUtils.getConnectAddress(server);
UserGroupInformation realUserUgi = UserGroupInformation
.createRemoteUser(REAL_USER_NAME);
@ -266,10 +318,11 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
String retVal = proxyUserUgi
.doAs(new PrivilegedExceptionAction<String>() {
@Override
public String run() throws ServiceException {
client = getClient(addr, conf);
return client.getCurrentUser(null,
newEmptyRequest()).getUser();
public String run() throws IOException {
proxy = RPC.getProxy(TestProtocol.class,
TestProtocol.versionID, addr, conf);
String ret = proxy.aMethod();
return ret;
}
});
@ -277,7 +330,10 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
} catch (Exception e) {
e.printStackTrace();
} finally {
stop(server, client);
server.stop();
if (proxy != null) {
RPC.stopProxy(proxy);
}
}
}
@ -285,12 +341,15 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
public void testRealUserGroupNotSpecified() throws IOException {
final Configuration conf = new Configuration();
configureSuperUserIPAddresses(conf, REAL_USER_SHORT_NAME);
RPC.setProtocolEngine(conf, TestRpcService.class,
ProtobufRpcEngine.class);
UserGroupInformation.setConfiguration(conf);
final Server server = setupTestServer(conf, 2);
Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
.setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
.setNumHandlers(2).setVerbose(false).build();
try {
server.start();
final InetSocketAddress addr = NetUtils.getConnectAddress(server);
UserGroupInformation realUserUgi = UserGroupInformation
.createRemoteUser(REAL_USER_NAME);
@ -299,10 +358,11 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
String retVal = proxyUserUgi
.doAs(new PrivilegedExceptionAction<String>() {
@Override
public String run() throws ServiceException {
client = getClient(addr, conf);
return client.getCurrentUser(null,
newEmptyRequest()).getUser();
public String run() throws IOException {
proxy = (TestProtocol) RPC.getProxy(TestProtocol.class,
TestProtocol.versionID, addr, conf);
String ret = proxy.aMethod();
return ret;
}
});
@ -310,7 +370,10 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
} catch (Exception e) {
e.printStackTrace();
} finally {
stop(server, client);
server.stop();
if (proxy != null) {
RPC.stopProxy(proxy);
}
}
}
@ -321,14 +384,17 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
conf.setStrings(DefaultImpersonationProvider.getTestProvider().
getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME),
"group3");
RPC.setProtocolEngine(conf, TestRpcService.class,
ProtobufRpcEngine.class);
UserGroupInformation.setConfiguration(conf);
final Server server = setupTestServer(conf, 2);
Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
.setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
.setNumHandlers(2).setVerbose(false).build();
refreshConf(conf);
try {
server.start();
final InetSocketAddress addr = NetUtils.getConnectAddress(server);
UserGroupInformation realUserUgi = UserGroupInformation
.createRemoteUser(REAL_USER_NAME);
@ -337,10 +403,11 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
String retVal = proxyUserUgi
.doAs(new PrivilegedExceptionAction<String>() {
@Override
public String run() throws ServiceException {
client = getClient(addr, conf);
return client.getCurrentUser(null,
newEmptyRequest()).getUser();
public String run() throws IOException {
proxy = RPC.getProxy(TestProtocol.class,
TestProtocol.versionID, addr, conf);
String ret = proxy.aMethod();
return ret;
}
});
@ -348,7 +415,10 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
} catch (Exception e) {
e.printStackTrace();
} finally {
stop(server, client);
server.stop();
if (proxy != null) {
RPC.stopProxy(proxy);
}
}
}
@ -362,17 +432,20 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
final Configuration conf = new Configuration(masterConf);
TestTokenSecretManager sm = new TestTokenSecretManager();
SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, conf);
RPC.setProtocolEngine(conf, TestRpcService.class,
ProtobufRpcEngine.class);
UserGroupInformation.setConfiguration(conf);
final Server server = setupTestServer(conf, 5, sm);
final Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
.setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
.setNumHandlers(5).setVerbose(true).setSecretManager(sm).build();
server.start();
final UserGroupInformation current = UserGroupInformation
.createRemoteUser(REAL_USER_NAME);
final InetSocketAddress addr = NetUtils.getConnectAddress(server);
TestTokenIdentifier tokenId = new TestTokenIdentifier(new Text(current
.getUserName()), new Text("SomeSuperUser"));
Token<TestTokenIdentifier> token = new Token<>(tokenId,
Token<TestTokenIdentifier> token = new Token<TestTokenIdentifier>(tokenId,
sm);
SecurityUtil.setTokenService(token, addr);
UserGroupInformation proxyUserUgi = UserGroupInformation
@ -380,19 +453,23 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
proxyUserUgi.addToken(token);
refreshConf(conf);
String retVal = proxyUserUgi.doAs(new PrivilegedExceptionAction<String>() {
@Override
public String run() throws Exception {
try {
client = getClient(addr, conf);
return client.getCurrentUser(null,
newEmptyRequest()).getUser();
proxy = RPC.getProxy(TestProtocol.class,
TestProtocol.versionID, addr, conf);
String ret = proxy.aMethod();
return ret;
} catch (Exception e) {
e.printStackTrace();
throw e;
} finally {
stop(server, client);
server.stop();
if (proxy != null) {
RPC.stopProxy(proxy);
}
}
}
});
@ -409,34 +486,42 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
TestTokenSecretManager sm = new TestTokenSecretManager();
final Configuration newConf = new Configuration(masterConf);
SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, newConf);
// Set RPC engine to protobuf RPC engine
RPC.setProtocolEngine(newConf, TestRpcService.class,
ProtobufRpcEngine.class);
UserGroupInformation.setConfiguration(newConf);
final Server server = setupTestServer(newConf, 5, sm);
final Server server = new RPC.Builder(newConf)
.setProtocol(TestProtocol.class).setInstance(new TestImpl())
.setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true)
.setSecretManager(sm).build();
server.start();
final UserGroupInformation current = UserGroupInformation
.createUserForTesting(REAL_USER_NAME, GROUP_NAMES);
refreshConf(newConf);
final InetSocketAddress addr = NetUtils.getConnectAddress(server);
TestTokenIdentifier tokenId = new TestTokenIdentifier(new Text(current
.getUserName()), new Text("SomeSuperUser"));
Token<TestTokenIdentifier> token = new Token<>(tokenId, sm);
Token<TestTokenIdentifier> token = new Token<TestTokenIdentifier>(tokenId,
sm);
SecurityUtil.setTokenService(token, addr);
current.addToken(token);
String retVal = current.doAs(new PrivilegedExceptionAction<String>() {
@Override
public String run() throws Exception {
try {
client = getClient(addr, newConf);
return client.getCurrentUser(null,
newEmptyRequest()).getUser();
proxy = RPC.getProxy(TestProtocol.class,
TestProtocol.versionID, addr, newConf);
String ret = proxy.aMethod();
return ret;
} catch (Exception e) {
e.printStackTrace();
throw e;
} finally {
stop(server, client);
server.stop();
if (proxy != null) {
RPC.stopProxy(proxy);
}
}
}
});

View File

@ -20,7 +20,6 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.TestRpcBase.TestTokenIdentifier;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
@ -29,11 +28,7 @@ import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.util.StringUtils;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.*;
import javax.security.auth.Subject;
import javax.security.auth.kerberos.KerberosPrincipal;
@ -55,22 +50,9 @@ import java.util.Set;
import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_USER_GROUP_METRICS_PERCENTILES_INTERVALS;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTH_TO_LOCAL;
import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
import static org.apache.hadoop.test.MetricsAsserts.assertCounterGt;
import static org.apache.hadoop.test.MetricsAsserts.assertGaugeGt;
import static org.apache.hadoop.test.MetricsAsserts.assertQuantileGauges;
import static org.apache.hadoop.test.MetricsAsserts.getDoubleGauge;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNotSame;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.apache.hadoop.ipc.TestSaslRPC.*;
import static org.apache.hadoop.test.MetricsAsserts.*;
import static org.junit.Assert.*;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@ -127,7 +109,7 @@ public class TestUserGroupInformation {
UserGroupInformation.setLoginUser(null);
}
@Test(timeout = 30000)
@Test (timeout = 30000)
public void testSimpleLogin() throws IOException {
tryLoginAuthenticationMethod(AuthenticationMethod.SIMPLE, true);
}

View File

@ -91,7 +91,6 @@ public class TestNodeHealthScriptRunner {
public void testNodeHealthScript() throws Exception {
String errorScript = "echo ERROR\n echo \"Tracker not healthy\"";
String normalScript = "echo \"I am all fine\"";
String failWithExitCodeScript = "echo \"Not healthy\"; exit -1";
String timeOutScript =
Shell.WINDOWS ? "@echo off\nping -n 4 127.0.0.1 >nul\necho \"I am fine\""
: "sleep 4\necho \"I am fine\"";
@ -125,12 +124,6 @@ public class TestNodeHealthScriptRunner {
nodeHealthScriptRunner.isHealthy());
Assert.assertEquals("", nodeHealthScriptRunner.getHealthReport());
// Script which fails with exit code.
writeNodeHealthScriptFile(failWithExitCodeScript, true);
timerTask.run();
Assert.assertFalse("Node health status reported healthy",
nodeHealthScriptRunner.isHealthy());
// Timeout script.
writeNodeHealthScriptFile(timeOutScript, true);
timerTask.run();

View File

@ -88,6 +88,6 @@ message AuthMethodResponseProto {
required string mechanismName = 2;
}
message UserResponseProto {
required string user = 1;
message AuthUserResponseProto {
required string authUser = 1;
}

View File

@ -40,11 +40,9 @@ service TestProtobufRpcProto {
rpc exchange(ExchangeRequestProto) returns (ExchangeResponseProto);
rpc sleep(SleepRequestProto) returns (EmptyResponseProto);
rpc getAuthMethod(EmptyRequestProto) returns (AuthMethodResponseProto);
rpc getAuthUser(EmptyRequestProto) returns (UserResponseProto);
rpc getAuthUser(EmptyRequestProto) returns (AuthUserResponseProto);
rpc echoPostponed(EchoRequestProto) returns (EchoResponseProto);
rpc sendPostponed(EmptyRequestProto) returns (EmptyResponseProto);
rpc getCurrentUser(EmptyRequestProto) returns (UserResponseProto);
rpc getServerRemoteUser(EmptyRequestProto) returns (UserResponseProto);
}
service TestProtobufRpc2Proto {

View File

@ -533,7 +533,8 @@ public class DFSInputStream extends FSInputStream
* Open a DataInputStream to a DataNode so that it can be read from.
* We get block ID and the IDs of the destinations at startup, from the namenode.
*/
private synchronized DatanodeInfo blockSeekTo(long target) throws IOException {
private synchronized DatanodeInfo blockSeekTo(long target)
throws IOException {
if (target >= getFileLength()) {
throw new IOException("Attempted to read past end of file");
}
@ -962,14 +963,14 @@ public class DFSInputStream extends FSInputStream
}
protected void fetchBlockByteRange(LocatedBlock block, long start, long end,
byte[] buf, int offset, CorruptedBlocks corruptedBlocks)
ByteBuffer buf, CorruptedBlocks corruptedBlocks)
throws IOException {
block = refreshLocatedBlock(block);
while (true) {
DNAddrPair addressPair = chooseDataNode(block, null);
try {
actualGetFromOneDataNode(addressPair, block, start, end,
buf, offset, corruptedBlocks);
buf, corruptedBlocks);
return;
} catch (IOException e) {
checkInterrupted(e); // check if the read has been interrupted
@ -988,12 +989,10 @@ public class DFSInputStream extends FSInputStream
return new Callable<ByteBuffer>() {
@Override
public ByteBuffer call() throws Exception {
byte[] buf = bb.array();
int offset = bb.position();
try (TraceScope ignored = dfsClient.getTracer().
newScope("hedgedRead" + hedgedReadId, parentSpanId)) {
actualGetFromOneDataNode(datanode, block, start, end, buf,
offset, corruptedBlocks);
actualGetFromOneDataNode(datanode, block, start, end, bb,
corruptedBlocks);
return bb;
}
}
@ -1007,13 +1006,12 @@ public class DFSInputStream extends FSInputStream
* @param block the located block containing the requested data
* @param startInBlk the startInBlk offset of the block
* @param endInBlk the endInBlk offset of the block
* @param buf the given byte array into which the data is read
* @param offset the offset in buf
* @param buf the given byte buffer into which the data is read
* @param corruptedBlocks map recording list of datanodes with corrupted
* block replica
*/
void actualGetFromOneDataNode(final DNAddrPair datanode, LocatedBlock block,
final long startInBlk, final long endInBlk, byte[] buf, int offset,
final long startInBlk, final long endInBlk, ByteBuffer buf,
CorruptedBlocks corruptedBlocks)
throws IOException {
DFSClientFaultInjector.get().startFetchFromDatanode();
@ -1031,7 +1029,22 @@ public class DFSInputStream extends FSInputStream
DFSClientFaultInjector.get().fetchFromDatanodeException();
reader = getBlockReader(block, startInBlk, len, datanode.addr,
datanode.storageType, datanode.info);
int nread = reader.readAll(buf, offset, len);
// Behave exactly as the readAll() call does
ByteBuffer tmp = buf.duplicate();
tmp.limit(tmp.position() + len);
tmp = tmp.slice();
int nread = 0;
int ret;
while (true) {
ret = reader.read(tmp);
if (ret <= 0) {
break;
}
nread += ret;
}
buf.position(buf.position() + nread);
IOUtilsClient.updateReadStatistics(readStatistics, nread, reader);
dfsClient.updateFileSystemReadStats(
reader.getNetworkDistance(), nread);
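
The loop above bounds the destination by slicing a duplicate of the caller's ByteBuffer instead of tracking a byte[] offset. Below is a minimal standalone sketch of that duplicate/limit/slice pattern; the ReadableByteChannel source and the buffer sizes are illustrative assumptions, not the HDFS BlockReader.

```java
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.Channels;
import java.nio.channels.ReadableByteChannel;

public class BoundedSliceReadSketch {
  public static void main(String[] args) throws IOException {
    byte[] blockData = new byte[64];           // stand-in for a block replica
    for (int i = 0; i < blockData.length; i++) {
      blockData[i] = (byte) i;
    }
    ReadableByteChannel reader =
        Channels.newChannel(new ByteArrayInputStream(blockData));

    ByteBuffer buf = ByteBuffer.allocate(128);  // caller-supplied destination
    int len = 64;                               // bytes this fetch may fill

    // Same idea as the patch: slice a bounded window so the reader
    // cannot write past the requested range of the shared buffer.
    ByteBuffer tmp = buf.duplicate();
    tmp.limit(tmp.position() + len);
    tmp = tmp.slice();

    int nread = 0;
    while (true) {
      int ret = reader.read(tmp);
      if (ret <= 0) {
        break;
      }
      nread += ret;
    }
    // Advance the shared buffer by what was actually read.
    buf.position(buf.position() + nread);
    System.out.println("read " + nread + " bytes, buf.position=" + buf.position());
  }
}
```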
@ -1098,7 +1111,7 @@ public class DFSInputStream extends FSInputStream
* time. We then wait on which ever read returns first.
*/
private void hedgedFetchBlockByteRange(LocatedBlock block, long start,
long end, byte[] buf, int offset, CorruptedBlocks corruptedBlocks)
long end, ByteBuffer buf, CorruptedBlocks corruptedBlocks)
throws IOException {
final DfsClientConf conf = dfsClient.getConf();
ArrayList<Future<ByteBuffer>> futures = new ArrayList<>();
@ -1130,8 +1143,8 @@ public class DFSInputStream extends FSInputStream
conf.getHedgedReadThresholdMillis(), TimeUnit.MILLISECONDS);
if (future != null) {
ByteBuffer result = future.get();
System.arraycopy(result.array(), result.position(), buf, offset,
len);
result.flip();
buf.put(result);
return;
}
DFSClient.LOG.debug("Waited {}ms to read from {}; spawning hedged "
@ -1173,8 +1186,8 @@ public class DFSInputStream extends FSInputStream
// cancel the rest.
cancelAll(futures);
dfsClient.getHedgedReadMetrics().incHedgedReadWins();
System.arraycopy(result.array(), result.position(), buf, offset,
len);
result.flip();
buf.put(result);
return;
} catch (InterruptedException ie) {
// Ignore and retry
@ -1244,7 +1257,8 @@ public class DFSInputStream extends FSInputStream
* access key from its memory since it's considered expired based on
* the estimated expiration date.
*/
if (ex instanceof InvalidBlockTokenException || ex instanceof InvalidToken) {
if (ex instanceof InvalidBlockTokenException ||
ex instanceof InvalidToken) {
DFSClient.LOG.info("Access token was invalid when connecting to "
+ targetAddr + " : " + ex);
return true;
@ -1272,7 +1286,8 @@ public class DFSInputStream extends FSInputStream
try (TraceScope scope = dfsClient.
newReaderTraceScope("DFSInputStream#byteArrayPread",
src, position, length)) {
int retLen = pread(position, buffer, offset, length);
ByteBuffer bb = ByteBuffer.wrap(buffer, offset, length);
int retLen = pread(position, bb);
if (retLen < length) {
dfsClient.addRetLenToReaderScope(scope, retLen);
}
@ -1280,7 +1295,7 @@ public class DFSInputStream extends FSInputStream
}
}
private int pread(long position, byte[] buffer, int offset, int length)
private int pread(long position, ByteBuffer buffer)
throws IOException {
// sanity checks
dfsClient.checkOpen();
@ -1292,6 +1307,7 @@ public class DFSInputStream extends FSInputStream
if ((position < 0) || (position >= filelen)) {
return -1;
}
int length = buffer.remaining();
int realLen = length;
if ((position + length) > filelen) {
realLen = (int)(filelen - position);
@ -1304,14 +1320,16 @@ public class DFSInputStream extends FSInputStream
CorruptedBlocks corruptedBlocks = new CorruptedBlocks();
for (LocatedBlock blk : blockRange) {
long targetStart = position - blk.getStartOffset();
long bytesToRead = Math.min(remaining, blk.getBlockSize() - targetStart);
int bytesToRead = (int) Math.min(remaining,
blk.getBlockSize() - targetStart);
long targetEnd = targetStart + bytesToRead - 1;
try {
if (dfsClient.isHedgedReadsEnabled() && !blk.isStriped()) {
hedgedFetchBlockByteRange(blk, targetStart,
targetStart + bytesToRead - 1, buffer, offset, corruptedBlocks);
targetEnd, buffer, corruptedBlocks);
} else {
fetchBlockByteRange(blk, targetStart, targetStart + bytesToRead - 1,
buffer, offset, corruptedBlocks);
fetchBlockByteRange(blk, targetStart, targetEnd,
buffer, corruptedBlocks);
}
} finally {
// Check and report if any block replicas are corrupted.
@ -1323,7 +1341,6 @@ public class DFSInputStream extends FSInputStream
remaining -= bytesToRead;
position += bytesToRead;
offset += bytesToRead;
}
assert remaining == 0 : "Wrong number of bytes read.";
return realLen;
@ -1457,7 +1474,8 @@ public class DFSInputStream extends FSInputStream
* If another node could not be found, then returns false.
*/
@Override
public synchronized boolean seekToNewSource(long targetPos) throws IOException {
public synchronized boolean seekToNewSource(long targetPos)
throws IOException {
if (currentNode == null) {
return seekToBlockSource(targetPos);
}

View File

@ -307,8 +307,8 @@ public class DFSStripedInputStream extends DFSInputStream {
stripeLimit - stripeBufOffset);
LocatedStripedBlock blockGroup = (LocatedStripedBlock) currentLocatedBlock;
AlignedStripe[] stripes = StripedBlockUtil.divideOneStripe(ecPolicy, cellSize,
blockGroup, offsetInBlockGroup,
AlignedStripe[] stripes = StripedBlockUtil.divideOneStripe(ecPolicy,
cellSize, blockGroup, offsetInBlockGroup,
offsetInBlockGroup + stripeRange.length - 1, curStripeBuf);
final LocatedBlock[] blks = StripedBlockUtil.parseStripedBlockGroup(
blockGroup, cellSize, dataBlkNum, parityBlkNum);
@ -523,13 +523,13 @@ public class DFSStripedInputStream extends DFSInputStream {
*/
@Override
protected void fetchBlockByteRange(LocatedBlock block, long start,
long end, byte[] buf, int offset, CorruptedBlocks corruptedBlocks)
long end, ByteBuffer buf, CorruptedBlocks corruptedBlocks)
throws IOException {
// Refresh the striped block group
LocatedStripedBlock blockGroup = getBlockGroupAt(block.getStartOffset());
AlignedStripe[] stripes = StripedBlockUtil.divideByteRangeIntoStripes(
ecPolicy, cellSize, blockGroup, start, end, buf, offset);
ecPolicy, cellSize, blockGroup, start, end, buf);
CompletionService<Void> readService = new ExecutorCompletionService<>(
dfsClient.getStripedReadsThreadPool());
final LocatedBlock[] blks = StripedBlockUtil.parseStripedBlockGroup(
@ -542,6 +542,7 @@ public class DFSStripedInputStream extends DFSInputStream {
blks, preaderInfos, corruptedBlocks);
preader.readStripe();
}
buf.position(buf.position() + (int)(end - start + 1));
} finally {
for (BlockReaderInfo preaderInfo : preaderInfos) {
closeReader(preaderInfo);
@ -698,16 +699,15 @@ public class DFSStripedInputStream extends DFSInputStream {
}
private ByteBufferStrategy[] getReadStrategies(StripingChunk chunk) {
if (chunk.byteBuffer != null) {
ByteBufferStrategy strategy =
new ByteBufferStrategy(chunk.byteBuffer, readStatistics, dfsClient);
if (chunk.useByteBuffer()) {
ByteBufferStrategy strategy = new ByteBufferStrategy(
chunk.getByteBuffer(), readStatistics, dfsClient);
return new ByteBufferStrategy[]{strategy};
} else {
ByteBufferStrategy[] strategies =
new ByteBufferStrategy[chunk.byteArray.getOffsets().length];
new ByteBufferStrategy[chunk.getChunkBuffer().getSlices().size()];
for (int i = 0; i < strategies.length; i++) {
ByteBuffer buffer = ByteBuffer.wrap(chunk.byteArray.buf(),
chunk.byteArray.getOffsets()[i], chunk.byteArray.getLengths()[i]);
ByteBuffer buffer = chunk.getChunkBuffer().getSlice(i);
strategies[i] =
new ByteBufferStrategy(buffer, readStatistics, dfsClient);
}
@ -814,7 +814,7 @@ public class DFSStripedInputStream extends DFSInputStream {
}
class PositionStripeReader extends StripeReader {
private byte[][] decodeInputs = null;
private ByteBuffer[] decodeInputs = null;
PositionStripeReader(CompletionService<Void> service,
AlignedStripe alignedStripe, LocatedBlock[] targetBlocks,
@ -836,8 +836,6 @@ public class DFSStripedInputStream extends DFSInputStream {
Preconditions.checkState(index >= dataBlkNum &&
alignedStripe.chunks[index] == null);
alignedStripe.chunks[index] = new StripingChunk(decodeInputs[index]);
alignedStripe.chunks[index].addByteArraySlice(0,
(int) alignedStripe.getSpanInBlock());
return true;
}

View File

@ -73,7 +73,8 @@ import java.util.concurrent.TimeUnit;
@InterfaceAudience.Private
public class StripedBlockUtil {
public static final Logger LOG = LoggerFactory.getLogger(StripedBlockUtil.class);
public static final Logger LOG =
LoggerFactory.getLogger(StripedBlockUtil.class);
/**
* Parses a striped block group into individual blocks.
@ -312,16 +313,17 @@ public class StripedBlockUtil {
* schedule a new fetch request with the decoding input buffer as transfer
* destination.
*/
public static byte[][] initDecodeInputs(AlignedStripe alignedStripe,
public static ByteBuffer[] initDecodeInputs(AlignedStripe alignedStripe,
int dataBlkNum, int parityBlkNum) {
byte[][] decodeInputs =
new byte[dataBlkNum + parityBlkNum][(int) alignedStripe.getSpanInBlock()];
ByteBuffer[] decodeInputs = new ByteBuffer[dataBlkNum + parityBlkNum];
for (int i = 0; i < decodeInputs.length; i++) {
decodeInputs[i] = ByteBuffer.allocate(
(int) alignedStripe.getSpanInBlock());
}
// read the full data aligned stripe
for (int i = 0; i < dataBlkNum; i++) {
if (alignedStripe.chunks[i] == null) {
alignedStripe.chunks[i] = new StripingChunk(decodeInputs[i]);
alignedStripe.chunks[i].addByteArraySlice(0,
(int) alignedStripe.getSpanInBlock());
}
}
return decodeInputs;
@ -334,14 +336,21 @@ public class StripedBlockUtil {
* When all pending requests have returned, this method should be called to
* finalize decode input buffers.
*/
public static void finalizeDecodeInputs(final byte[][] decodeInputs,
public static void finalizeDecodeInputs(final ByteBuffer[] decodeInputs,
AlignedStripe alignedStripe) {
for (int i = 0; i < alignedStripe.chunks.length; i++) {
final StripingChunk chunk = alignedStripe.chunks[i];
if (chunk != null && chunk.state == StripingChunk.FETCHED) {
chunk.copyTo(decodeInputs[i]);
if (chunk.useChunkBuffer()) {
chunk.getChunkBuffer().copyTo(decodeInputs[i]);
} else {
chunk.getByteBuffer().flip();
}
} else if (chunk != null && chunk.state == StripingChunk.ALLZERO) {
Arrays.fill(decodeInputs[i], (byte) 0);
// Zero it out. This will be handled better in a follow-up issue.
byte[] emptyBytes = new byte[decodeInputs[i].limit()];
decodeInputs[i].put(emptyBytes);
decodeInputs[i].flip();
} else {
decodeInputs[i] = null;
}
@ -351,7 +360,7 @@ public class StripedBlockUtil {
/**
* Decode based on the given input buffers and erasure coding policy.
*/
public static void decodeAndFillBuffer(final byte[][] decodeInputs,
public static void decodeAndFillBuffer(final ByteBuffer[] decodeInputs,
AlignedStripe alignedStripe, int dataBlkNum, int parityBlkNum,
RawErasureDecoder decoder) {
// Step 1: prepare indices and output buffers for missing data units
@ -364,8 +373,11 @@ public class StripedBlockUtil {
}
}
decodeIndices = Arrays.copyOf(decodeIndices, pos);
byte[][] decodeOutputs =
new byte[decodeIndices.length][(int) alignedStripe.getSpanInBlock()];
ByteBuffer[] decodeOutputs = new ByteBuffer[decodeIndices.length];
for (int i = 0; i < decodeOutputs.length; i++) {
decodeOutputs[i] = ByteBuffer.allocate(
(int) alignedStripe.getSpanInBlock());
}
// Step 2: decode into prepared output buffers
decoder.decode(decodeInputs, decodeIndices, decodeOutputs);
@ -374,8 +386,8 @@ public class StripedBlockUtil {
for (int i = 0; i < decodeIndices.length; i++) {
int missingBlkIdx = decodeIndices[i];
StripingChunk chunk = alignedStripe.chunks[missingBlkIdx];
if (chunk.state == StripingChunk.MISSING) {
chunk.copyFrom(decodeOutputs[i]);
if (chunk.state == StripingChunk.MISSING && chunk.useChunkBuffer()) {
chunk.getChunkBuffer().copyFrom(decodeOutputs[i]);
}
}
}
@ -402,7 +414,8 @@ public class StripedBlockUtil {
// Step 4: calculate each chunk's position in destination buffer. Since the
// whole read range is within a single stripe, the logic is simpler here.
int bufOffset = (int) (rangeStartInBlockGroup % ((long) cellSize * dataBlkNum));
int bufOffset =
(int) (rangeStartInBlockGroup % ((long) cellSize * dataBlkNum));
for (StripingCell cell : cells) {
long cellStart = cell.idxInInternalBlk * cellSize + cell.offset;
long cellEnd = cellStart + cell.size - 1;
@ -437,15 +450,14 @@ public class StripedBlockUtil {
* @param rangeStartInBlockGroup The byte range's start offset in block group
* @param rangeEndInBlockGroup The byte range's end offset in block group
* @param buf Destination buffer of the read operation for the byte range
* @param offsetInBuf Start offset into the destination buffer
*
* At most 5 stripes will be generated from each logical range, as
* demonstrated in the header of {@link AlignedStripe}.
*/
public static AlignedStripe[] divideByteRangeIntoStripes(ErasureCodingPolicy ecPolicy,
public static AlignedStripe[] divideByteRangeIntoStripes(
ErasureCodingPolicy ecPolicy,
int cellSize, LocatedStripedBlock blockGroup,
long rangeStartInBlockGroup, long rangeEndInBlockGroup, byte[] buf,
int offsetInBuf) {
long rangeStartInBlockGroup, long rangeEndInBlockGroup, ByteBuffer buf) {
// Step 0: analyze range and calculate basic parameters
final int dataBlkNum = ecPolicy.getNumDataUnits();
@ -462,7 +474,7 @@ public class StripedBlockUtil {
AlignedStripe[] stripes = mergeRangesForInternalBlocks(ecPolicy, ranges);
// Step 4: calculate each chunk's position in destination buffer
calcualteChunkPositionsInBuf(cellSize, stripes, cells, buf, offsetInBuf);
calcualteChunkPositionsInBuf(cellSize, stripes, cells, buf);
// Step 5: prepare ALLZERO blocks
prepareAllZeroChunks(blockGroup, stripes, cellSize, dataBlkNum);
@ -476,7 +488,8 @@ public class StripedBlockUtil {
* used by {@link DFSStripedOutputStream} in encoding
*/
@VisibleForTesting
private static StripingCell[] getStripingCellsOfByteRange(ErasureCodingPolicy ecPolicy,
private static StripingCell[] getStripingCellsOfByteRange(
ErasureCodingPolicy ecPolicy,
int cellSize, LocatedStripedBlock blockGroup,
long rangeStartInBlockGroup, long rangeEndInBlockGroup) {
Preconditions.checkArgument(
@ -511,7 +524,8 @@ public class StripedBlockUtil {
* the physical byte range (inclusive) on each stored internal block.
*/
@VisibleForTesting
private static VerticalRange[] getRangesForInternalBlocks(ErasureCodingPolicy ecPolicy,
private static VerticalRange[] getRangesForInternalBlocks(
ErasureCodingPolicy ecPolicy,
int cellSize, StripingCell[] cells) {
int dataBlkNum = ecPolicy.getNumDataUnits();
int parityBlkNum = ecPolicy.getNumParityUnits();
@ -575,8 +589,7 @@ public class StripedBlockUtil {
}
private static void calcualteChunkPositionsInBuf(int cellSize,
AlignedStripe[] stripes, StripingCell[] cells, byte[] buf,
int offsetInBuf) {
AlignedStripe[] stripes, StripingCell[] cells, ByteBuffer buf) {
/**
* | <--------------- AlignedStripe --------------->|
*
@ -598,6 +611,7 @@ public class StripedBlockUtil {
for (StripingCell cell : cells) {
long cellStart = cell.idxInInternalBlk * cellSize + cell.offset;
long cellEnd = cellStart + cell.size - 1;
StripingChunk chunk;
for (AlignedStripe s : stripes) {
long stripeEnd = s.getOffsetInBlock() + s.getSpanInBlock() - 1;
long overlapStart = Math.max(cellStart, s.getOffsetInBlock());
@ -606,11 +620,13 @@ public class StripedBlockUtil {
if (overLapLen <= 0) {
continue;
}
if (s.chunks[cell.idxInStripe] == null) {
s.chunks[cell.idxInStripe] = new StripingChunk(buf);
chunk = s.chunks[cell.idxInStripe];
if (chunk == null) {
chunk = new StripingChunk();
s.chunks[cell.idxInStripe] = chunk;
}
s.chunks[cell.idxInStripe].addByteArraySlice(
(int)(offsetInBuf + done + overlapStart - cellStart), overLapLen);
chunk.getChunkBuffer().addSlice(buf,
(int) (done + overlapStart - cellStart), overLapLen);
}
done += cell.size;
}
@ -833,88 +849,89 @@ public class StripedBlockUtil {
*/
public int state = REQUESTED;
public final ChunkByteArray byteArray;
public final ByteBuffer byteBuffer;
private final ChunkByteBuffer chunkBuffer;
private final ByteBuffer byteBuffer;
public StripingChunk(byte[] buf) {
this.byteArray = new ChunkByteArray(buf);
public StripingChunk() {
this.chunkBuffer = new ChunkByteBuffer();
byteBuffer = null;
}
public StripingChunk(ByteBuffer buf) {
this.byteArray = null;
this.chunkBuffer = null;
this.byteBuffer = buf;
}
public StripingChunk(int state) {
this.byteArray = null;
this.chunkBuffer = null;
this.byteBuffer = null;
this.state = state;
}
public void addByteArraySlice(int offset, int length) {
assert byteArray != null;
byteArray.offsetsInBuf.add(offset);
byteArray.lengthsInBuf.add(length);
public boolean useByteBuffer(){
return byteBuffer != null;
}
void copyTo(byte[] target) {
assert byteArray != null;
byteArray.copyTo(target);
public boolean useChunkBuffer() {
return chunkBuffer != null;
}
void copyFrom(byte[] src) {
assert byteArray != null;
byteArray.copyFrom(src);
public ByteBuffer getByteBuffer() {
assert byteBuffer != null;
return byteBuffer;
}
public ChunkByteBuffer getChunkBuffer() {
assert chunkBuffer != null;
return chunkBuffer;
}
}
public static class ChunkByteArray {
private final byte[] buf;
private final List<Integer> offsetsInBuf;
private final List<Integer> lengthsInBuf;
/**
* A utility to manage ByteBuffer slices for a reader.
*/
public static class ChunkByteBuffer {
private final List<ByteBuffer> slices;
ChunkByteArray(byte[] buf) {
this.buf = buf;
this.offsetsInBuf = new ArrayList<>();
this.lengthsInBuf = new ArrayList<>();
ChunkByteBuffer() {
this.slices = new ArrayList<>();
}
public int[] getOffsets() {
int[] offsets = new int[offsetsInBuf.size()];
for (int i = 0; i < offsets.length; i++) {
offsets[i] = offsetsInBuf.get(i);
public void addSlice(ByteBuffer buffer, int offset, int len) {
ByteBuffer tmp = buffer.duplicate();
tmp.position(buffer.position() + offset);
tmp.limit(buffer.position() + offset + len);
slices.add(tmp.slice());
}
public ByteBuffer getSlice(int i) {
return slices.get(i);
}
public List<ByteBuffer> getSlices() {
return slices;
}
/**
* Note: target will be in a ready-to-read state after the call.
*/
void copyTo(ByteBuffer target) {
for (ByteBuffer slice : slices) {
slice.flip();
target.put(slice);
}
return offsets;
target.flip();
}
public int[] getLengths() {
int[] lens = new int[this.lengthsInBuf.size()];
for (int i = 0; i < lens.length; i++) {
lens[i] = this.lengthsInBuf.get(i);
}
return lens;
}
public byte[] buf() {
return buf;
}
void copyTo(byte[] target) {
int posInBuf = 0;
for (int i = 0; i < offsetsInBuf.size(); i++) {
System.arraycopy(buf, offsetsInBuf.get(i),
target, posInBuf, lengthsInBuf.get(i));
posInBuf += lengthsInBuf.get(i);
}
}
void copyFrom(byte[] src) {
int srcPos = 0;
for (int j = 0; j < offsetsInBuf.size(); j++) {
System.arraycopy(src, srcPos, buf, offsetsInBuf.get(j),
lengthsInBuf.get(j));
srcPos += lengthsInBuf.get(j);
void copyFrom(ByteBuffer src) {
ByteBuffer tmp;
int len;
for (ByteBuffer slice : slices) {
len = slice.remaining();
tmp = src.duplicate();
tmp.limit(tmp.position() + len);
slice.put(tmp);
src.position(src.position() + len);
}
}
}
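
ChunkByteBuffer above keeps per-cell windows (slices) into a single destination ByteBuffer and then gathers or scatters data through them with flip()/put(). The sketch below mirrors the addSlice/copyTo logic in isolation; it is a simplified stand-in, not the class from the patch.

```java
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;

public class SliceGatherSketch {
  public static void main(String[] args) {
    ByteBuffer dest = ByteBuffer.allocate(8);   // the shared read destination
    List<ByteBuffer> slices = new ArrayList<>();

    // addSlice(dest, offset, len): a window that writes into dest directly.
    slices.add(window(dest, 0, 4));
    slices.add(window(dest, 4, 4));

    // A reader would fill each slice independently.
    slices.get(0).put(new byte[]{1, 2, 3, 4});
    slices.get(1).put(new byte[]{5, 6, 7, 8});

    // copyTo(target): flip each slice and gather it into another buffer.
    ByteBuffer target = ByteBuffer.allocate(8);
    for (ByteBuffer slice : slices) {
      slice.flip();
      target.put(slice);
    }
    target.flip();                              // target is now ready to read
    System.out.println("first byte = " + target.get());
  }

  // Equivalent of ChunkByteBuffer#addSlice: duplicate, bound, slice.
  private static ByteBuffer window(ByteBuffer buffer, int offset, int len) {
    ByteBuffer tmp = buffer.duplicate();
    tmp.position(buffer.position() + offset);
    tmp.limit(buffer.position() + offset + len);
    return tmp.slice();
  }
}
```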

View File

@ -183,6 +183,7 @@ public class URLConnectionFactory {
return openConnection(url, false);
} catch (AuthenticationException e) {
// Unreachable
LOG.error("Open connection {} failed", url, e);
return null;
}
}

View File

@ -114,7 +114,7 @@ int hdfsFileGetReadStatistics(hdfsFile file,
jthr = invokeMethod(env, &jVal, INSTANCE, file->file,
"org/apache/hadoop/hdfs/client/HdfsDataInputStream",
"getReadStatistics",
"()Lorg/apache/hadoop/hdfs/DFSInputStream$ReadStatistics;");
"()Lorg/apache/hadoop/hdfs/ReadStatistics;");
if (jthr) {
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
"hdfsFileGetReadStatistics: getReadStatistics failed");
@ -127,7 +127,7 @@ int hdfsFileGetReadStatistics(hdfsFile file,
goto done;
}
jthr = invokeMethod(env, &jVal, INSTANCE, readStats,
"org/apache/hadoop/hdfs/DFSInputStream$ReadStatistics",
"org/apache/hadoop/hdfs/ReadStatistics",
"getTotalBytesRead", "()J");
if (jthr) {
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@ -137,7 +137,7 @@ int hdfsFileGetReadStatistics(hdfsFile file,
s->totalBytesRead = jVal.j;
jthr = invokeMethod(env, &jVal, INSTANCE, readStats,
"org/apache/hadoop/hdfs/DFSInputStream$ReadStatistics",
"org/apache/hadoop/hdfs/ReadStatistics",
"getTotalLocalBytesRead", "()J");
if (jthr) {
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@ -147,7 +147,7 @@ int hdfsFileGetReadStatistics(hdfsFile file,
s->totalLocalBytesRead = jVal.j;
jthr = invokeMethod(env, &jVal, INSTANCE, readStats,
"org/apache/hadoop/hdfs/DFSInputStream$ReadStatistics",
"org/apache/hadoop/hdfs/ReadStatistics",
"getTotalShortCircuitBytesRead", "()J");
if (jthr) {
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@ -156,7 +156,7 @@ int hdfsFileGetReadStatistics(hdfsFile file,
}
s->totalShortCircuitBytesRead = jVal.j;
jthr = invokeMethod(env, &jVal, INSTANCE, readStats,
"org/apache/hadoop/hdfs/DFSInputStream$ReadStatistics",
"org/apache/hadoop/hdfs/ReadStatistics",
"getTotalZeroCopyBytesRead", "()J");
if (jthr) {
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
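
The JNI descriptors above now resolve ReadStatistics at its new location, org.apache.hadoop.hdfs.ReadStatistics. For reference, a hedged Java-side sketch of reading the same counters through HdfsDataInputStream is below; the path is an assumption, and the cast assumes fs.defaultFS points at an HDFS cluster.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.ReadStatistics;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;

public class ReadStatisticsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    try (FileSystem fs = FileSystem.get(conf);
         FSDataInputStream in = fs.open(new Path("/tmp/sample.txt"))) { // assumed path
      in.read(new byte[4096]);
      // HdfsDataInputStream exposes the per-stream counters that the
      // native hdfsFileGetReadStatistics() call mirrors.
      ReadStatistics stats = ((HdfsDataInputStream) in).getReadStatistics();
      System.out.println("total bytes read:    " + stats.getTotalBytesRead());
      System.out.println("local bytes read:    " + stats.getTotalLocalBytesRead());
      System.out.println("short-circuit bytes: " + stats.getTotalShortCircuitBytesRead());
      System.out.println("zero-copy bytes:     " + stats.getTotalZeroCopyBytesRead());
    }
  }
}
```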

View File

@ -129,7 +129,7 @@ function hdfscmd_case
HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
;;
diskbalancer)
HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.DiskBalancer
HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.DiskBalancerCLI
hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
;;

View File

@ -419,6 +419,11 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final String DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_KEY =
"dfs.namenode.read-lock-reporting-threshold-ms";
public static final long DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_DEFAULT = 5000L;
// Minimum interval between two consecutive lock warnings; warnings within it are suppressed
public static final String DFS_LOCK_SUPPRESS_WARNING_INTERVAL_KEY =
"dfs.lock.suppress.warning.interval";
public static final long DFS_LOCK_SUPPRESS_WARNING_INTERVAL_DEFAULT =
10000; //ms
public static final String DFS_UPGRADE_DOMAIN_FACTOR = "dfs.namenode.upgrade.domain.factor";
public static final int DFS_UPGRADE_DOMAIN_FACTOR_DEFAULT = DFS_REPLICATION_DEFAULT;

View File

@ -0,0 +1,185 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.commons.logging.Log;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Timer;
import com.google.common.annotations.VisibleForTesting;
/**
* This is a debugging class that can be used by callers to track
* whether a specific lock is being held for too long and periodically
* log a warning and stack trace, if so.
*
* The logged warnings are throttled so that logs are not spammed.
*
* A new instance of InstrumentedLock can be created for each object
* that needs to be instrumented.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class InstrumentedLock implements Lock {
private final Lock lock;
private final Log logger;
private final String name;
private final Timer clock;
/** Minimum gap between two lock warnings. */
private final long minLoggingGap;
/** Threshold for detecting long lock held time. */
private final long lockWarningThreshold;
// Tracking counters for lock statistics.
private volatile long lockAcquireTimestamp;
private final AtomicLong lastLogTimestamp;
private final AtomicLong warningsSuppressed = new AtomicLong(0);
/**
* Create an instrumented lock instance which logs a warning message
* when lock held time is above given threshold.
*
* @param name the identifier of the lock object
* @param logger this class does not have its own logger; it will log to the
* given logger instead
* @param minLoggingGapMs the minimum time gap between two log messages,
* this is to avoid spamming too many logs
* @param lockWarningThresholdMs the time threshold to view lock held
* time as being "too long"
*/
public InstrumentedLock(String name, Log logger, long minLoggingGapMs,
long lockWarningThresholdMs) {
this(name, logger, new ReentrantLock(),
minLoggingGapMs, lockWarningThresholdMs);
}
public InstrumentedLock(String name, Log logger, Lock lock,
long minLoggingGapMs, long lockWarningThresholdMs) {
this(name, logger, lock,
minLoggingGapMs, lockWarningThresholdMs, new Timer());
}
@VisibleForTesting
InstrumentedLock(String name, Log logger, Lock lock,
long minLoggingGapMs, long lockWarningThresholdMs, Timer clock) {
this.name = name;
this.lock = lock;
this.clock = clock;
this.logger = logger;
minLoggingGap = minLoggingGapMs;
lockWarningThreshold = lockWarningThresholdMs;
lastLogTimestamp = new AtomicLong(
clock.monotonicNow() - Math.max(minLoggingGap, lockWarningThreshold));
}
@Override
public void lock() {
lock.lock();
lockAcquireTimestamp = clock.monotonicNow();
}
@Override
public void lockInterruptibly() throws InterruptedException {
lock.lockInterruptibly();
lockAcquireTimestamp = clock.monotonicNow();
}
@Override
public boolean tryLock() {
if (lock.tryLock()) {
lockAcquireTimestamp = clock.monotonicNow();
return true;
}
return false;
}
@Override
public boolean tryLock(long time, TimeUnit unit) throws InterruptedException {
if (lock.tryLock(time, unit)) {
lockAcquireTimestamp = clock.monotonicNow();
return true;
}
return false;
}
@Override
public void unlock() {
long localLockReleaseTime = clock.monotonicNow();
long localLockAcquireTime = lockAcquireTimestamp;
lock.unlock();
check(localLockAcquireTime, localLockReleaseTime);
}
@Override
public Condition newCondition() {
return lock.newCondition();
}
@VisibleForTesting
void logWarning(long lockHeldTime, long suppressed) {
logger.warn(String.format("Lock held time above threshold: " +
"lock identifier: %s " +
"lockHeldTimeMs=%d ms. Suppressed %d lock warnings. " +
"The stack trace is: %s" ,
name, lockHeldTime, suppressed,
StringUtils.getStackTrace(Thread.currentThread())));
}
/**
* Log a warning if the lock was held for too long.
*
* Should be invoked by the caller immediately AFTER releasing the lock.
*
* @param acquireTime - timestamp just after acquiring the lock.
* @param releaseTime - timestamp just before releasing the lock.
*/
private void check(long acquireTime, long releaseTime) {
if (!logger.isWarnEnabled()) {
return;
}
final long lockHeldTime = releaseTime - acquireTime;
if (lockWarningThreshold - lockHeldTime < 0) {
long now;
long localLastLogTs;
do {
now = clock.monotonicNow();
localLastLogTs = lastLogTimestamp.get();
long deltaSinceLastLog = now - localLastLogTs;
// check whether we should print a log or not
if (deltaSinceLastLog - minLoggingGap < 0) {
warningsSuppressed.incrementAndGet();
return;
}
} while (!lastLogTimestamp.compareAndSet(localLastLogTs, now));
long suppressed = warningsSuppressed.getAndSet(0);
logWarning(lockHeldTime, suppressed);
}
}
}
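
A hedged usage sketch for the new InstrumentedLock follows: it wraps a plain ReentrantLock, logs through a commons-logging Log, and warns when the hold time exceeds the threshold. The names and thresholds here are illustrative; only the constructor and Lock methods shown in the class above are relied on.

```java
import java.util.concurrent.locks.Lock;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hdfs.InstrumentedLock;

public class InstrumentedLockSketch {
  private static final Log LOG = LogFactory.getLog(InstrumentedLockSketch.class);

  public static void main(String[] args) throws InterruptedException {
    // Warn when the lock is held longer than 300 ms, and emit at most
    // one warning every 10 seconds (extra warnings are counted and suppressed).
    Lock lock = new InstrumentedLock("sketch-lock", LOG, 10000, 300);

    lock.lock();
    try {
      Thread.sleep(500);   // simulate a slow critical section
    } finally {
      lock.unlock();       // check() runs here and may log the warning
    }
  }
}
```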

View File

@ -40,6 +40,7 @@ import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executor;
import java.util.concurrent.TimeUnit;
import javax.management.NotCompliantMBeanException;
import javax.management.ObjectName;
@ -60,6 +61,7 @@ import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.hdfs.ExtendedBlockId;
import org.apache.hadoop.hdfs.InstrumentedLock;
import org.apache.hadoop.util.AutoCloseableLock;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
@ -278,7 +280,13 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
this.dataStorage = storage;
this.conf = conf;
this.smallBufferSize = DFSUtilClient.getSmallBufferSize(conf);
this.datasetLock = new AutoCloseableLock();
this.datasetLock = new AutoCloseableLock(
new InstrumentedLock(getClass().getName(), LOG,
conf.getTimeDuration(
DFSConfigKeys.DFS_LOCK_SUPPRESS_WARNING_INTERVAL_KEY,
DFSConfigKeys.DFS_LOCK_SUPPRESS_WARNING_INTERVAL_DEFAULT,
TimeUnit.MILLISECONDS),
300));
// The number of volumes required for operation is the total number
// of volumes minus the number of failed volumes we can tolerate.
volFailuresTolerated = datanode.getDnConf().getVolFailuresTolerated();
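
With this change the dataset lock is an AutoCloseableLock wrapping an InstrumentedLock, so long hold times inside the dataset get logged. A hedged sketch of the acquire pattern follows, assuming AutoCloseableLock#acquire() locks and returns the lock for try-with-resources use.

```java
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hdfs.InstrumentedLock;
import org.apache.hadoop.util.AutoCloseableLock;

public class DatasetLockSketch {
  private static final Log LOG = LogFactory.getLog(DatasetLockSketch.class);

  // Mirrors the FsDatasetImpl construction above: an instrumented lock inside
  // an AutoCloseableLock, 10 s suppression window, 300 ms warning threshold.
  private final AutoCloseableLock datasetLock = new AutoCloseableLock(
      new InstrumentedLock(DatasetLockSketch.class.getName(), LOG, 10000, 300));

  public void doDatasetWork() {
    // Assumption: acquire() locks and returns this AutoCloseableLock,
    // so close() at the end of the block releases it.
    try (AutoCloseableLock lock = datasetLock.acquire()) {
      // ... mutate dataset state while holding the lock ...
    }
  }
}
```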

View File

@ -29,7 +29,7 @@ import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException;
import org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan;
import org.apache.hadoop.hdfs.tools.DiskBalancer;
import org.apache.hadoop.hdfs.tools.DiskBalancerCLI;
import java.io.IOException;
@ -44,9 +44,10 @@ public class CancelCommand extends Command {
*/
public CancelCommand(Configuration conf) {
super(conf);
addValidCommandParameters(DiskBalancer.CANCEL, "Cancels a running plan.");
addValidCommandParameters(DiskBalancer.NODE, "Node to run the command " +
"against in node:port format.");
addValidCommandParameters(DiskBalancerCLI.CANCEL,
"Cancels a running plan.");
addValidCommandParameters(DiskBalancerCLI.NODE,
"Node to run the command against in node:port format.");
}
/**
@ -57,20 +58,20 @@ public class CancelCommand extends Command {
@Override
public void execute(CommandLine cmd) throws Exception {
LOG.info("Executing \"Cancel plan\" command.");
Preconditions.checkState(cmd.hasOption(DiskBalancer.CANCEL));
verifyCommandOptions(DiskBalancer.CANCEL, cmd);
Preconditions.checkState(cmd.hasOption(DiskBalancerCLI.CANCEL));
verifyCommandOptions(DiskBalancerCLI.CANCEL, cmd);
// We can cancel a plan using datanode address and plan ID
// that you can read from a datanode using queryStatus
if(cmd.hasOption(DiskBalancer.NODE)) {
String nodeAddress = cmd.getOptionValue(DiskBalancer.NODE);
String planHash = cmd.getOptionValue(DiskBalancer.CANCEL);
if(cmd.hasOption(DiskBalancerCLI.NODE)) {
String nodeAddress = cmd.getOptionValue(DiskBalancerCLI.NODE);
String planHash = cmd.getOptionValue(DiskBalancerCLI.CANCEL);
cancelPlanUsingHash(nodeAddress, planHash);
} else {
// Or you can cancel a plan using the plan file. If the user
// points us to the plan file, we can compute the hash as well as read
// the address of the datanode from the plan file.
String planFile = cmd.getOptionValue(DiskBalancer.CANCEL);
String planFile = cmd.getOptionValue(DiskBalancerCLI.CANCEL);
Preconditions.checkArgument(planFile != null && !planFile.isEmpty(),
"Invalid plan file specified.");
String planData = null;
@ -142,6 +143,6 @@ public class CancelCommand extends Command {
HelpFormatter helpFormatter = new HelpFormatter();
helpFormatter.printHelp("hdfs diskbalancer -cancel <planFile> | -cancel " +
"<planID> -node <hostname>",
header, DiskBalancer.getCancelOptions(), footer);
header, DiskBalancerCLI.getCancelOptions(), footer);
}
}

View File

@ -43,7 +43,7 @@ import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster;
import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode;
import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume;
import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet;
import org.apache.hadoop.hdfs.tools.DiskBalancer;
import org.apache.hadoop.hdfs.tools.DiskBalancerCLI;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.codehaus.jackson.map.ObjectMapper;
@ -418,7 +418,7 @@ public abstract class Command extends Configured {
* @return default top number of nodes.
*/
protected int getDefaultTop() {
return DiskBalancer.DEFAULT_TOP;
return DiskBalancerCLI.DEFAULT_TOP;
}
/**
@ -437,7 +437,7 @@ public abstract class Command extends Configured {
protected int parseTopNodes(final CommandLine cmd, final StrBuilder result) {
String outputLine = "";
int nodes = 0;
final String topVal = cmd.getOptionValue(DiskBalancer.TOP);
final String topVal = cmd.getOptionValue(DiskBalancerCLI.TOP);
if (StringUtils.isBlank(topVal)) {
outputLine = String.format(
"No top limit specified, using default top value %d.",

View File

@ -29,7 +29,7 @@ import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException;
import org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan;
import org.apache.hadoop.hdfs.tools.DiskBalancer;
import org.apache.hadoop.hdfs.tools.DiskBalancerCLI;
import java.io.IOException;
@ -46,7 +46,8 @@ public class ExecuteCommand extends Command {
*/
public ExecuteCommand(Configuration conf) {
super(conf);
addValidCommandParameters(DiskBalancer.EXECUTE, "Executes a given plan.");
addValidCommandParameters(DiskBalancerCLI.EXECUTE,
"Executes a given plan.");
}
/**
@ -57,10 +58,10 @@ public class ExecuteCommand extends Command {
@Override
public void execute(CommandLine cmd) throws Exception {
LOG.info("Executing \"execute plan\" command");
Preconditions.checkState(cmd.hasOption(DiskBalancer.EXECUTE));
verifyCommandOptions(DiskBalancer.EXECUTE, cmd);
Preconditions.checkState(cmd.hasOption(DiskBalancerCLI.EXECUTE));
verifyCommandOptions(DiskBalancerCLI.EXECUTE, cmd);
String planFile = cmd.getOptionValue(DiskBalancer.EXECUTE);
String planFile = cmd.getOptionValue(DiskBalancerCLI.EXECUTE);
Preconditions.checkArgument(planFile != null && !planFile.isEmpty(),
"Invalid plan file specified.");
@ -88,7 +89,7 @@ public class ExecuteCommand extends Command {
String planHash = DigestUtils.shaHex(planData);
try {
// TODO : Support skipping date check.
dataNode.submitDiskBalancerPlan(planHash, DiskBalancer.PLAN_VERSION,
dataNode.submitDiskBalancerPlan(planHash, DiskBalancerCLI.PLAN_VERSION,
planFile, planData, false);
} catch (DiskBalancerException ex) {
LOG.error("Submitting plan on {} failed. Result: {}, Message: {}",
@ -111,6 +112,6 @@ public class ExecuteCommand extends Command {
HelpFormatter helpFormatter = new HelpFormatter();
helpFormatter.printHelp("hdfs diskbalancer -execute <planfile>",
header, DiskBalancer.getExecuteOptions(), footer);
header, DiskBalancerCLI.getExecuteOptions(), footer);
}
}

View File

@ -23,7 +23,7 @@ import com.google.common.base.Preconditions;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.HelpFormatter;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.tools.DiskBalancer;
import org.apache.hadoop.hdfs.tools.DiskBalancerCLI;
/**
* Help Command prints out detailed help about each command.
@ -37,7 +37,7 @@ public class HelpCommand extends Command {
*/
public HelpCommand(Configuration conf) {
super(conf);
addValidCommandParameters(DiskBalancer.HELP, "Help Command");
addValidCommandParameters(DiskBalancerCLI.HELP, "Help Command");
}
/**
@ -53,9 +53,9 @@ public class HelpCommand extends Command {
return;
}
Preconditions.checkState(cmd.hasOption(DiskBalancer.HELP));
verifyCommandOptions(DiskBalancer.HELP, cmd);
String helpCommand = cmd.getOptionValue(DiskBalancer.HELP);
Preconditions.checkState(cmd.hasOption(DiskBalancerCLI.HELP));
verifyCommandOptions(DiskBalancerCLI.HELP, cmd);
String helpCommand = cmd.getOptionValue(DiskBalancerCLI.HELP);
if (helpCommand == null || helpCommand.isEmpty()) {
this.printHelp();
return;
@ -65,19 +65,19 @@ public class HelpCommand extends Command {
helpCommand = helpCommand.toLowerCase();
Command command = null;
switch (helpCommand) {
case DiskBalancer.PLAN:
case DiskBalancerCLI.PLAN:
command = new PlanCommand(getConf());
break;
case DiskBalancer.EXECUTE:
case DiskBalancerCLI.EXECUTE:
command = new ExecuteCommand(getConf());
break;
case DiskBalancer.QUERY:
case DiskBalancerCLI.QUERY:
command = new QueryCommand(getConf());
break;
case DiskBalancer.CANCEL:
case DiskBalancerCLI.CANCEL:
command = new CancelCommand(getConf());
break;
case DiskBalancer.REPORT:
case DiskBalancerCLI.REPORT:
command = new ReportCommand(getConf(), null);
break;
default:
@ -102,7 +102,7 @@ public class HelpCommand extends Command {
HelpFormatter helpFormatter = new HelpFormatter();
helpFormatter.printHelp("hdfs diskbalancer [command] [options]",
header, DiskBalancer.getHelpOptions(), "");
header, DiskBalancerCLI.getHelpOptions(), "");
}

View File

@ -28,7 +28,7 @@ import org.apache.hadoop.hdfs.server.diskbalancer.datamodel
.DiskBalancerDataNode;
import org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan;
import org.apache.hadoop.hdfs.server.diskbalancer.planner.Step;
import org.apache.hadoop.hdfs.tools.DiskBalancer;
import org.apache.hadoop.hdfs.tools.DiskBalancerCLI;
import java.nio.charset.StandardCharsets;
import java.util.List;
@ -53,18 +53,18 @@ public class PlanCommand extends Command {
this.thresholdPercentage = 1;
this.bandwidth = 0;
this.maxError = 0;
addValidCommandParameters(DiskBalancer.OUTFILE, "Output directory in " +
addValidCommandParameters(DiskBalancerCLI.OUTFILE, "Output directory in " +
"HDFS. The generated plan will be written to a file in this " +
"directory.");
addValidCommandParameters(DiskBalancer.BANDWIDTH, "Maximum Bandwidth to " +
"be used while copying.");
addValidCommandParameters(DiskBalancer.THRESHOLD, "Percentage skew that " +
"we tolerate before diskbalancer starts working.");
addValidCommandParameters(DiskBalancer.MAXERROR, "Max errors to tolerate " +
"between 2 disks");
addValidCommandParameters(DiskBalancer.VERBOSE, "Run plan command in " +
addValidCommandParameters(DiskBalancerCLI.BANDWIDTH,
"Maximum Bandwidth to be used while copying.");
addValidCommandParameters(DiskBalancerCLI.THRESHOLD,
"Percentage skew that we tolerate before diskbalancer starts working.");
addValidCommandParameters(DiskBalancerCLI.MAXERROR,
"Max errors to tolerate between 2 disks");
addValidCommandParameters(DiskBalancerCLI.VERBOSE, "Run plan command in " +
"verbose mode.");
addValidCommandParameters(DiskBalancer.PLAN, "Plan Command");
addValidCommandParameters(DiskBalancerCLI.PLAN, "Plan Command");
}
/**
@ -77,36 +77,37 @@ public class PlanCommand extends Command {
@Override
public void execute(CommandLine cmd) throws Exception {
LOG.debug("Processing Plan Command.");
Preconditions.checkState(cmd.hasOption(DiskBalancer.PLAN));
verifyCommandOptions(DiskBalancer.PLAN, cmd);
Preconditions.checkState(cmd.hasOption(DiskBalancerCLI.PLAN));
verifyCommandOptions(DiskBalancerCLI.PLAN, cmd);
if (cmd.getOptionValue(DiskBalancer.PLAN) == null) {
if (cmd.getOptionValue(DiskBalancerCLI.PLAN) == null) {
throw new IllegalArgumentException("A node name is required to create a" +
" plan.");
}
if (cmd.hasOption(DiskBalancer.BANDWIDTH)) {
this.bandwidth = Integer.parseInt(cmd.getOptionValue(DiskBalancer
if (cmd.hasOption(DiskBalancerCLI.BANDWIDTH)) {
this.bandwidth = Integer.parseInt(cmd.getOptionValue(DiskBalancerCLI
.BANDWIDTH));
}
if (cmd.hasOption(DiskBalancer.MAXERROR)) {
this.maxError = Integer.parseInt(cmd.getOptionValue(DiskBalancer
if (cmd.hasOption(DiskBalancerCLI.MAXERROR)) {
this.maxError = Integer.parseInt(cmd.getOptionValue(DiskBalancerCLI
.MAXERROR));
}
readClusterInfo(cmd);
String output = null;
if (cmd.hasOption(DiskBalancer.OUTFILE)) {
output = cmd.getOptionValue(DiskBalancer.OUTFILE);
if (cmd.hasOption(DiskBalancerCLI.OUTFILE)) {
output = cmd.getOptionValue(DiskBalancerCLI.OUTFILE);
}
setOutputPath(output);
// -plan nodename is the command line argument.
DiskBalancerDataNode node = getNode(cmd.getOptionValue(DiskBalancer.PLAN));
DiskBalancerDataNode node =
getNode(cmd.getOptionValue(DiskBalancerCLI.PLAN));
if (node == null) {
throw new IllegalArgumentException("Unable to find the specified node. " +
cmd.getOptionValue(DiskBalancer.PLAN));
cmd.getOptionValue(DiskBalancerCLI.PLAN));
}
this.thresholdPercentage = getThresholdPercentage(cmd);
@ -124,8 +125,8 @@ public class PlanCommand extends Command {
try (FSDataOutputStream beforeStream = create(String.format(
DiskBalancer.BEFORE_TEMPLATE,
cmd.getOptionValue(DiskBalancer.PLAN)))) {
DiskBalancerCLI.BEFORE_TEMPLATE,
cmd.getOptionValue(DiskBalancerCLI.PLAN)))) {
beforeStream.write(getCluster().toJson()
.getBytes(StandardCharsets.UTF_8));
}
@ -133,17 +134,17 @@ public class PlanCommand extends Command {
if (plan != null && plan.getVolumeSetPlans().size() > 0) {
LOG.info("Writing plan to : {}", getOutputPath());
try (FSDataOutputStream planStream = create(String.format(
DiskBalancer.PLAN_TEMPLATE,
cmd.getOptionValue(DiskBalancer.PLAN)))) {
DiskBalancerCLI.PLAN_TEMPLATE,
cmd.getOptionValue(DiskBalancerCLI.PLAN)))) {
planStream.write(plan.toJson().getBytes(StandardCharsets.UTF_8));
}
} else {
LOG.info("No plan generated. DiskBalancing not needed for node: {} " +
"threshold used: {}", cmd.getOptionValue(DiskBalancer.PLAN),
"threshold used: {}", cmd.getOptionValue(DiskBalancerCLI.PLAN),
this.thresholdPercentage);
}
if (cmd.hasOption(DiskBalancer.VERBOSE) && plans.size() > 0) {
if (cmd.hasOption(DiskBalancerCLI.VERBOSE) && plans.size() > 0) {
printToScreen(plans);
}
}
@ -162,8 +163,8 @@ public class PlanCommand extends Command {
" will balance the data.";
HelpFormatter helpFormatter = new HelpFormatter();
helpFormatter.printHelp("hdfs diskbalancer -plan " +
"<hostname> [options]", header, DiskBalancer.getPlanOptions(), footer);
helpFormatter.printHelp("hdfs diskbalancer -plan <hostname> [options]",
header, DiskBalancerCLI.getPlanOptions(), footer);
}
/**
@ -174,8 +175,8 @@ public class PlanCommand extends Command {
*/
private double getThresholdPercentage(CommandLine cmd) {
Double value = 0.0;
if (cmd.hasOption(DiskBalancer.THRESHOLD)) {
value = Double.parseDouble(cmd.getOptionValue(DiskBalancer.THRESHOLD));
if (cmd.hasOption(DiskBalancerCLI.THRESHOLD)) {
value = Double.parseDouble(cmd.getOptionValue(DiskBalancerCLI.THRESHOLD));
}
if ((value <= 0.0) || (value > 100.0)) {

View File

@ -27,7 +27,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus;
import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException;
import org.apache.hadoop.hdfs.tools.DiskBalancer;
import org.apache.hadoop.hdfs.tools.DiskBalancerCLI;
import org.apache.hadoop.net.NetUtils;
/**
@ -42,9 +42,10 @@ public class QueryCommand extends Command {
*/
public QueryCommand(Configuration conf) {
super(conf);
addValidCommandParameters(DiskBalancer.QUERY, "Queries the status of disk" +
" plan running on a given datanode.");
addValidCommandParameters(DiskBalancer.VERBOSE, "Prints verbose results.");
addValidCommandParameters(DiskBalancerCLI.QUERY,
"Queries the status of disk plan running on a given datanode.");
addValidCommandParameters(DiskBalancerCLI.VERBOSE,
"Prints verbose results.");
}
/**
@ -55,9 +56,9 @@ public class QueryCommand extends Command {
@Override
public void execute(CommandLine cmd) throws Exception {
LOG.info("Executing \"query plan\" command.");
Preconditions.checkState(cmd.hasOption(DiskBalancer.QUERY));
verifyCommandOptions(DiskBalancer.QUERY, cmd);
String nodeName = cmd.getOptionValue(DiskBalancer.QUERY);
Preconditions.checkState(cmd.hasOption(DiskBalancerCLI.QUERY));
verifyCommandOptions(DiskBalancerCLI.QUERY, cmd);
String nodeName = cmd.getOptionValue(DiskBalancerCLI.QUERY);
Preconditions.checkNotNull(nodeName);
nodeName = nodeName.trim();
String nodeAddress = nodeName;
@ -79,7 +80,7 @@ public class QueryCommand extends Command {
workStatus.getPlanID(),
workStatus.getResult().toString());
if (cmd.hasOption(DiskBalancer.VERBOSE)) {
if (cmd.hasOption(DiskBalancerCLI.VERBOSE)) {
System.out.printf("%s", workStatus.currentStateString());
}
} catch (DiskBalancerException ex) {
@ -101,6 +102,6 @@ public class QueryCommand extends Command {
HelpFormatter helpFormatter = new HelpFormatter();
helpFormatter.printHelp("hdfs diskbalancer -query <hostname> [options]",
header, DiskBalancer.getQueryOptions(), footer);
header, DiskBalancerCLI.getQueryOptions(), footer);
}
}

View File

@ -30,7 +30,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode;
import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume;
import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet;
import org.apache.hadoop.hdfs.tools.DiskBalancer;
import org.apache.hadoop.hdfs.tools.DiskBalancerCLI;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
@ -52,15 +52,15 @@ public class ReportCommand extends Command {
super(conf);
this.out = out;
addValidCommandParameters(DiskBalancer.REPORT,
addValidCommandParameters(DiskBalancerCLI.REPORT,
"Report volume information of nodes.");
String desc = String.format(
"Top number of nodes to be processed. Default: %d", getDefaultTop());
addValidCommandParameters(DiskBalancer.TOP, desc);
addValidCommandParameters(DiskBalancerCLI.TOP, desc);
desc = String.format("Print out volume information for a DataNode.");
addValidCommandParameters(DiskBalancer.NODE, desc);
addValidCommandParameters(DiskBalancerCLI.NODE, desc);
}
@Override
@ -69,8 +69,8 @@ public class ReportCommand extends Command {
String outputLine = "Processing report command";
recordOutput(result, outputLine);
Preconditions.checkState(cmd.hasOption(DiskBalancer.REPORT));
verifyCommandOptions(DiskBalancer.REPORT, cmd);
Preconditions.checkState(cmd.hasOption(DiskBalancerCLI.REPORT));
verifyCommandOptions(DiskBalancerCLI.REPORT, cmd);
readClusterInfo(cmd);
final String nodeFormat =
@ -81,7 +81,7 @@ public class ReportCommand extends Command {
"[%s: volume-%s] - %.2f used: %d/%d, %.2f free: %d/%d, "
+ "isFailed: %s, isReadOnly: %s, isSkip: %s, isTransient: %s.";
if (cmd.hasOption(DiskBalancer.NODE)) {
if (cmd.hasOption(DiskBalancerCLI.NODE)) {
/*
* Reporting volume information for a specific DataNode
*/
@ -136,7 +136,7 @@ public class ReportCommand extends Command {
* Get the value that identifies a DataNode from the command line; it could be a
* UUID, IP address, or host name.
*/
final String nodeVal = cmd.getOptionValue(DiskBalancer.NODE);
final String nodeVal = cmd.getOptionValue(DiskBalancerCLI.NODE);
if (StringUtils.isBlank(nodeVal)) {
outputLine = "The value for '-node' is neither specified or empty.";
@ -211,6 +211,6 @@ public class ReportCommand extends Command {
HelpFormatter helpFormatter = new HelpFormatter();
helpFormatter.printHelp("hdfs diskbalancer -fs http://namenode.uri " +
"-report [options]",
header, DiskBalancer.getReportOptions(), footer);
header, DiskBalancerCLI.getReportOptions(), footer);
}
}

View File

@ -168,6 +168,7 @@ import org.apache.hadoop.ipc.RetryCache.CacheEntry;
import org.apache.hadoop.ipc.RetryCache.CacheEntryWithPayload;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.ipc.StandbyException;
import org.apache.hadoop.ipc.WritableRpcEngine;
import org.apache.hadoop.ipc.RefreshRegistry;
import org.apache.hadoop.ipc.RefreshResponse;
import org.apache.hadoop.net.Node;
@ -316,6 +317,8 @@ public class NameNodeRpcServer implements NamenodeProtocols {
new TraceAdminProtocolServerSideTranslatorPB(this);
BlockingService traceAdminService = TraceAdminService
.newReflectiveBlockingService(traceAdminXlator);
WritableRpcEngine.ensureInitialized();
InetSocketAddress serviceRpcAddr = nn.getServiceRpcServerAddress(conf);
if (serviceRpcAddr != null) {

View File

@ -50,7 +50,7 @@ import java.io.PrintStream;
* At a very high level, the disk balancer computes a set of moves that will make
* disk utilization equal, and then those moves are executed by the datanode.
*/
public class DiskBalancer extends Configured implements Tool {
public class DiskBalancerCLI extends Configured implements Tool {
/**
* Computes a plan for a given set of nodes.
*/
@ -126,7 +126,7 @@ public class DiskBalancer extends Configured implements Tool {
*/
public static final String PLAN_TEMPLATE = "%s.plan.json";
private static final Logger LOG =
LoggerFactory.getLogger(DiskBalancer.class);
LoggerFactory.getLogger(DiskBalancerCLI.class);
private static final Options PLAN_OPTIONS = new Options();
private static final Options EXECUTE_OPTIONS = new Options();
@ -140,7 +140,7 @@ public class DiskBalancer extends Configured implements Tool {
*
* @param conf
*/
public DiskBalancer(Configuration conf) {
public DiskBalancerCLI(Configuration conf) {
super(conf);
}
@ -151,7 +151,7 @@ public class DiskBalancer extends Configured implements Tool {
* @throws Exception
*/
public static void main(String[] argv) throws Exception {
DiskBalancer shell = new DiskBalancer(new HdfsConfiguration());
DiskBalancerCLI shell = new DiskBalancerCLI(new HdfsConfiguration());
int res = 0;
try {
res = ToolRunner.run(shell, argv);
@ -446,27 +446,27 @@ public class DiskBalancer extends Configured implements Tool {
private int dispatch(CommandLine cmd, Options opts, final PrintStream out)
throws Exception {
Command currentCommand = null;
if (cmd.hasOption(DiskBalancer.PLAN)) {
if (cmd.hasOption(DiskBalancerCLI.PLAN)) {
currentCommand = new PlanCommand(getConf());
}
if (cmd.hasOption(DiskBalancer.EXECUTE)) {
if (cmd.hasOption(DiskBalancerCLI.EXECUTE)) {
currentCommand = new ExecuteCommand(getConf());
}
if (cmd.hasOption(DiskBalancer.QUERY)) {
if (cmd.hasOption(DiskBalancerCLI.QUERY)) {
currentCommand = new QueryCommand(getConf());
}
if (cmd.hasOption(DiskBalancer.CANCEL)) {
if (cmd.hasOption(DiskBalancerCLI.CANCEL)) {
currentCommand = new CancelCommand(getConf());
}
if (cmd.hasOption(DiskBalancer.REPORT)) {
if (cmd.hasOption(DiskBalancerCLI.REPORT)) {
currentCommand = new ReportCommand(getConf(), out);
}
if (cmd.hasOption(DiskBalancer.HELP)) {
if (cmd.hasOption(DiskBalancerCLI.HELP)) {
currentCommand = new HelpCommand(getConf());
}

View File

@ -31,6 +31,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSImageUtil;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection;
import org.apache.hadoop.util.LimitInputStream;
import org.apache.hadoop.util.StringUtils;
import com.google.common.base.Preconditions;
@ -75,11 +76,14 @@ final class FileDistributionCalculator {
private long totalSpace;
private long maxFileSize;
private boolean formatOutput = false;
FileDistributionCalculator(Configuration conf, long maxSize, int steps,
PrintStream out) {
boolean formatOutput, PrintStream out) {
this.conf = conf;
this.maxSize = maxSize == 0 ? MAX_SIZE_DEFAULT : maxSize;
this.steps = steps == 0 ? INTERVAL_DEFAULT : steps;
this.formatOutput = formatOutput;
this.out = out;
long numIntervals = this.maxSize / this.steps;
// avoid OutOfMemoryError when allocating an array
@ -148,10 +152,20 @@ final class FileDistributionCalculator {
private void output() {
// write the distribution into the output file
out.print("Size\tNumFiles\n");
out.print((formatOutput ? "Size Range" : "Size") + "\tNumFiles\n");
for (int i = 0; i < distribution.length; i++) {
if (distribution[i] != 0) {
out.print(((long) i * steps) + "\t" + distribution[i]);
if (formatOutput) {
out.print((i == 0 ? "[" : "(")
+ StringUtils.byteDesc(((long) (i == 0 ? 0 : i - 1) * steps))
+ ", "
+ StringUtils.byteDesc((long)
(i == distribution.length - 1 ? maxFileSize : i * steps))
+ "]\t" + distribution[i]);
} else {
out.print(((long) i * steps) + "\t" + distribution[i]);
}
out.print('\n');
}
}
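As a side note on the hunk above: with formatOutput enabled, each non-empty bucket is labelled with a byte range instead of its raw start offset. A minimal standalone sketch of that labelling rule (the class name and the steps/maxFileSize/distribution values are illustrative; StringUtils.byteDesc is the helper imported above):

import org.apache.hadoop.util.StringUtils;

/** Illustrative sketch of the human-readable bucket labels. */
public class BucketLabelSketch {
  public static void main(String[] args) {
    int steps = 8;                      // distribution granularity (-step)
    long maxFileSize = 512;             // largest file size seen in the image
    int[] distribution = {3, 12, 0, 5}; // per-bucket file counts

    System.out.println("Size Range\tNumFiles");
    for (int i = 0; i < distribution.length; i++) {
      if (distribution[i] != 0) {
        // Bucket 0 is closed on the left, later buckets are open on the left;
        // the last bucket is capped at the largest observed file size.
        String label = (i == 0 ? "[" : "(")
            + StringUtils.byteDesc((long) (i == 0 ? 0 : i - 1) * steps)
            + ", "
            + StringUtils.byteDesc(
                i == distribution.length - 1 ? maxFileSize : (long) i * steps)
            + "]";
        System.out.println(label + "\t" + distribution[i]);
      }
    }
  }
}

With these values the printed ranges would be "[0 B, 0 B]", "(0 B, 8 B]" and "(16 B, 512 B]", the same style the new TestOfflineImageViewer case asserts on further down.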

View File

@ -20,6 +20,8 @@ package org.apache.hadoop.hdfs.tools.offlineImageViewer;
import java.io.IOException;
import java.util.LinkedList;
import org.apache.hadoop.util.StringUtils;
/**
* File size distribution visitor.
*
@ -67,6 +69,7 @@ class FileDistributionVisitor extends TextWriterImageVisitor {
private FileContext current;
private boolean inInode = false;
private boolean formatOutput = false;
/**
* File or directory information.
@ -78,12 +81,12 @@ class FileDistributionVisitor extends TextWriterImageVisitor {
int replication;
}
public FileDistributionVisitor(String filename,
long maxSize,
int step) throws IOException {
public FileDistributionVisitor(String filename, long maxSize, int step,
boolean formatOutput) throws IOException {
super(filename, false);
this.maxSize = (maxSize == 0 ? MAX_SIZE_DEFAULT : maxSize);
this.step = (step == 0 ? INTERVAL_DEFAULT : step);
this.formatOutput = formatOutput;
long numIntervals = this.maxSize / this.step;
if(numIntervals >= Integer.MAX_VALUE)
throw new IOException("Too many distribution intervals " + numIntervals);
@ -113,9 +116,22 @@ class FileDistributionVisitor extends TextWriterImageVisitor {
private void output() throws IOException {
// write the distribution into the output file
write("Size\tNumFiles\n");
for(int i = 0; i < distribution.length; i++)
write(((long)i * step) + "\t" + distribution[i] + "\n");
write((formatOutput ? "Size Range" : "Size") + "\tNumFiles\n");
for (int i = 0; i < distribution.length; i++) {
if (distribution[i] > 0) {
if (formatOutput) {
write((i == 0 ? "[" : "(")
+ StringUtils.byteDesc(((long) (i == 0 ? 0 : i - 1) * step))
+ ", "
+ StringUtils.byteDesc((long)
(i == distribution.length - 1 ? maxFileSize : i * step))
+ "]\t"
+ distribution[i] + "\n");
} else {
write(((long) i * step) + "\t" + distribution[i] + "\n");
}
}
}
System.out.println("totalFiles = " + totalFiles);
System.out.println("totalDirectories = " + totalDirectories);
System.out.println("totalBlocks = " + totalBlocks);

View File

@ -46,61 +46,63 @@ import org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.PositionTrackingIn
public class OfflineImageViewer {
public static final Log LOG = LogFactory.getLog(OfflineImageViewer.class);
private final static String usage =
"Usage: bin/hdfs oiv_legacy [OPTIONS] -i INPUTFILE -o OUTPUTFILE\n" +
"Offline Image Viewer\n" +
"View a Hadoop fsimage INPUTFILE using the specified PROCESSOR,\n" +
"saving the results in OUTPUTFILE.\n" +
"\n" +
"The oiv utility will attempt to parse correctly formed image files\n" +
"and will abort fail with mal-formed image files.\n" +
"\n" +
"The tool works offline and does not require a running cluster in\n" +
"order to process an image file.\n" +
"\n" +
"The following image processors are available:\n" +
" * Ls: The default image processor generates an lsr-style listing\n" +
" of the files in the namespace, with the same fields in the same\n" +
" order. Note that in order to correctly determine file sizes,\n" +
" this formatter cannot skip blocks and will override the\n" +
" -skipBlocks option.\n" +
" * Indented: This processor enumerates over all of the elements in\n" +
" the fsimage file, using levels of indentation to delineate\n" +
" sections within the file.\n" +
" * Delimited: Generate a text file with all of the elements common\n" +
" to both inodes and inodes-under-construction, separated by a\n" +
" delimiter. The default delimiter is \u0001, though this may be\n" +
" changed via the -delimiter argument. This processor also overrides\n" +
" the -skipBlocks option for the same reason as the Ls processor\n" +
" * XML: This processor creates an XML document with all elements of\n" +
" the fsimage enumerated, suitable for further analysis by XML\n" +
" tools.\n" +
" * FileDistribution: This processor analyzes the file size\n" +
" distribution in the image.\n" +
" -maxSize specifies the range [0, maxSize] of file sizes to be\n" +
" analyzed (128GB by default).\n" +
" -step defines the granularity of the distribution. (2MB by default)\n" +
" * NameDistribution: This processor analyzes the file names\n" +
" in the image and prints total number of file names and how frequently\n" +
" file names are reused.\n" +
"\n" +
"Required command line arguments:\n" +
"-i,--inputFile <arg> FSImage file to process.\n" +
"-o,--outputFile <arg> Name of output file. If the specified\n" +
" file exists, it will be overwritten.\n" +
"\n" +
"Optional command line arguments:\n" +
"-p,--processor <arg> Select which type of processor to apply\n" +
" against image file." +
" (Ls|XML|Delimited|Indented|FileDistribution).\n" +
"-h,--help Display usage information and exit\n" +
"-printToScreen For processors that write to a file, also\n" +
" output to screen. On large image files this\n" +
" will dramatically increase processing time.\n" +
"-skipBlocks Skip inodes' blocks information. May\n" +
" significantly decrease output.\n" +
" (default = false).\n" +
"-delimiter <arg> Delimiting string to use with Delimited processor\n";
private final static String usage =
"Usage: bin/hdfs oiv_legacy [OPTIONS] -i INPUTFILE -o OUTPUTFILE\n"
+ "Offline Image Viewer\n"
+ "View a Hadoop fsimage INPUTFILE using the specified PROCESSOR,\n"
+ "saving the results in OUTPUTFILE.\n"
+ "\n"
+ "The oiv utility will attempt to parse correctly formed image files\n"
+ "and will abort fail with mal-formed image files.\n"
+ "\n"
+ "The tool works offline and does not require a running cluster in\n"
+ "order to process an image file.\n"
+ "\n"
+ "The following image processors are available:\n"
+ " * Ls: The default image processor generates an lsr-style listing\n"
+ " of the files in the namespace, with the same fields in the same\n"
+ " order. Note that in order to correctly determine file sizes,\n"
+ " this formatter cannot skip blocks and will override the\n"
+ " -skipBlocks option.\n"
+ " * Indented: This processor enumerates over all of the elements in\n"
+ " the fsimage file, using levels of indentation to delineate\n"
+ " sections within the file.\n"
+ " * Delimited: Generate a text file with all of the elements common\n"
+ " to both inodes and inodes-under-construction, separated by a\n"
+ " delimiter. The default delimiter is \u0001, though this may be\n"
+ " changed via the -delimiter argument. This processor also overrides\n"
+ " the -skipBlocks option for the same reason as the Ls processor\n"
+ " * XML: This processor creates an XML document with all elements of\n"
+ " the fsimage enumerated, suitable for further analysis by XML\n"
+ " tools.\n"
+ " * FileDistribution: This processor analyzes the file size\n"
+ " distribution in the image.\n"
+ " -maxSize specifies the range [0, maxSize] of file sizes to be\n"
+ " analyzed (128GB by default).\n"
+ " -step defines the granularity of the distribution. (2MB by default)\n"
+ " -format formats the output result in a human-readable fashion\n"
+ " rather than a number of bytes. (false by default)\n"
+ " * NameDistribution: This processor analyzes the file names\n"
+ " in the image and prints total number of file names and how frequently\n"
+ " file names are reused.\n"
+ "\n"
+ "Required command line arguments:\n"
+ "-i,--inputFile <arg> FSImage file to process.\n"
+ "-o,--outputFile <arg> Name of output file. If the specified\n"
+ " file exists, it will be overwritten.\n"
+ "\n"
+ "Optional command line arguments:\n"
+ "-p,--processor <arg> Select which type of processor to apply\n"
+ " against image file."
+ " (Ls|XML|Delimited|Indented|FileDistribution).\n"
+ "-h,--help Display usage information and exit\n"
+ "-printToScreen For processors that write to a file, also\n"
+ " output to screen. On large image files this\n"
+ " will dramatically increase processing time.\n"
+ "-skipBlocks Skip inodes' blocks information. May\n"
+ " significantly decrease output.\n"
+ " (default = false).\n"
+ "-delimiter <arg> Delimiting string to use with Delimited processor\n";
private final boolean skipBlocks;
private final String inputFile;
@ -188,6 +190,7 @@ public class OfflineImageViewer {
options.addOption("h", "help", false, "");
options.addOption("maxSize", true, "");
options.addOption("step", true, "");
options.addOption("format", false, "");
options.addOption("skipBlocks", false, "");
options.addOption("printToScreen", false, "");
options.addOption("delimiter", true, "");
@ -253,7 +256,8 @@ public class OfflineImageViewer {
} else if (processor.equals("FileDistribution")) {
long maxSize = Long.parseLong(cmd.getOptionValue("maxSize", "0"));
int step = Integer.parseInt(cmd.getOptionValue("step", "0"));
v = new FileDistributionVisitor(outputFile, maxSize, step);
boolean formatOutput = cmd.hasOption("format");
v = new FileDistributionVisitor(outputFile, maxSize, step, formatOutput);
} else if (processor.equals("NameDistribution")) {
v = new NameDistributionVisitor(outputFile, printToScreen);
} else {

View File

@ -67,6 +67,8 @@ public class OfflineImageViewerPB {
+ " -maxSize specifies the range [0, maxSize] of file sizes to be\n"
+ " analyzed (128GB by default).\n"
+ " -step defines the granularity of the distribution. (2MB by default)\n"
+ " -format formats the output result in a human-readable fashion\n"
+ " rather than a number of bytes. (false by default)\n"
+ " * Web: Run a viewer to expose read-only WebHDFS API.\n"
+ " -addr specifies the address to listen. (localhost:5978 by default)\n"
+ " * Delimited (experimental): Generate a text file with all of the elements common\n"
@ -111,6 +113,7 @@ public class OfflineImageViewerPB {
options.addOption("h", "help", false, "");
options.addOption("maxSize", true, "");
options.addOption("step", true, "");
options.addOption("format", false, "");
options.addOption("addr", true, "");
options.addOption("delimiter", true, "");
options.addOption("t", "temp", true, "");
@ -172,43 +175,44 @@ public class OfflineImageViewerPB {
try (PrintStream out = outputFile.equals("-") ?
System.out : new PrintStream(outputFile, "UTF-8")) {
switch (processor) {
case "FileDistribution":
long maxSize = Long.parseLong(cmd.getOptionValue("maxSize", "0"));
int step = Integer.parseInt(cmd.getOptionValue("step", "0"));
new FileDistributionCalculator(conf, maxSize, step, out).visit(
new RandomAccessFile(inputFile, "r"));
break;
case "XML":
new PBImageXmlWriter(conf, out).visit(
new RandomAccessFile(inputFile, "r"));
break;
case "ReverseXML":
try {
OfflineImageReconstructor.run(inputFile, outputFile);
} catch (Exception e) {
System.err.println("OfflineImageReconstructor failed: " +
e.getMessage());
e.printStackTrace(System.err);
System.exit(1);
}
break;
case "Web":
String addr = cmd.getOptionValue("addr", "localhost:5978");
try (WebImageViewer viewer = new WebImageViewer(
NetUtils.createSocketAddr(addr))) {
viewer.start(inputFile);
}
break;
case "Delimited":
try (PBImageDelimitedTextWriter writer =
new PBImageDelimitedTextWriter(out, delimiter, tempPath)) {
writer.visit(new RandomAccessFile(inputFile, "r"));
}
break;
default:
System.err.println("Invalid processor specified : " + processor);
printUsage();
return -1;
case "FileDistribution":
long maxSize = Long.parseLong(cmd.getOptionValue("maxSize", "0"));
int step = Integer.parseInt(cmd.getOptionValue("step", "0"));
boolean formatOutput = cmd.hasOption("format");
new FileDistributionCalculator(conf, maxSize, step, formatOutput, out)
.visit(new RandomAccessFile(inputFile, "r"));
break;
case "XML":
new PBImageXmlWriter(conf, out).visit(new RandomAccessFile(inputFile,
"r"));
break;
case "ReverseXML":
try {
OfflineImageReconstructor.run(inputFile, outputFile);
} catch (Exception e) {
System.err.println("OfflineImageReconstructor failed: "
+ e.getMessage());
e.printStackTrace(System.err);
System.exit(1);
}
break;
case "Web":
String addr = cmd.getOptionValue("addr", "localhost:5978");
try (WebImageViewer viewer =
new WebImageViewer(NetUtils.createSocketAddr(addr))) {
viewer.start(inputFile);
}
break;
case "Delimited":
try (PBImageDelimitedTextWriter writer =
new PBImageDelimitedTextWriter(out, delimiter, tempPath)) {
writer.visit(new RandomAccessFile(inputFile, "r"));
}
break;
default:
System.err.println("Invalid processor specified : " + processor);
printUsage();
return -1;
}
return 0;
} catch (EOFException e) {

View File

@ -650,7 +650,7 @@
<property>
<name>dfs.blockreport.initialDelay</name>
<value>0</value>
<value>0s</value>
<description>
Delay for first block report in seconds. Supports multiple time unit
suffixes (case insensitive), as described in dfs.heartbeat.interval.
@ -694,7 +694,7 @@
<property>
<name>dfs.datanode.directoryscan.interval</name>
<value>21600</value>
<value>21600s</value>
<description>Interval in seconds for Datanode to scan data directories and
reconcile the difference between blocks in memory and on the disk.
Supports multiple time unit suffixes (case insensitive), as described
@ -732,7 +732,7 @@
<property>
<name>dfs.heartbeat.interval</name>
<value>3</value>
<value>3s</value>
<description>
Determines datanode heartbeat interval in seconds.
Can use the following suffix (case insensitive):
@ -942,7 +942,7 @@
<property>
<name>dfs.namenode.decommission.interval</name>
<value>30</value>
<value>30s</value>
<description>Namenode periodicity in seconds to check if decommission is
complete. Supports multiple time unit suffixes (case insensitive), as described
in dfs.heartbeat.interval.
@ -973,7 +973,7 @@
<property>
<name>dfs.namenode.replication.interval</name>
<value>3</value>
<value>3s</value>
<description>The periodicity in seconds with which the namenode computes
replication work for datanodes. Supports multiple time unit suffixes (case insensitive),
as described in dfs.heartbeat.interval.
@ -1071,7 +1071,7 @@
<property>
<name>dfs.namenode.checkpoint.period</name>
<value>3600</value>
<value>3600s</value>
<description>
The number of seconds between two periodic checkpoints.
Supports multiple time unit suffixes (case insensitive), as described
@ -1090,7 +1090,7 @@
<property>
<name>dfs.namenode.checkpoint.check.period</name>
<value>60</value>
<value>60s</value>
<description>The SecondaryNameNode and CheckpointNode will poll the NameNode
every 'dfs.namenode.checkpoint.check.period' seconds to query the number
of uncheckpointed transactions. Supports multiple time unit suffixes (case insensitive),
@ -1433,7 +1433,7 @@
<property>
<name>dfs.client.datanode-restart.timeout</name>
<value>30</value>
<value>30s</value>
<description>
Expert only. The time to wait, in seconds, from reception of a
datanode shutdown notification for quick restart, until declaring
@ -1502,7 +1502,7 @@
<property>
<name>dfs.ha.log-roll.period</name>
<value>120</value>
<value>120s</value>
<description>
How often, in seconds, the StandbyNode should ask the active to
roll edit logs. Since the StandbyNode only reads from finalized
@ -1516,7 +1516,7 @@
<property>
<name>dfs.ha.tail-edits.period</name>
<value>60</value>
<value>60s</value>
<description>
How often, in seconds, the StandbyNode should check for new
finalized log segments in the shared edits log.
@ -2950,7 +2950,7 @@
<property>
<name>dfs.datanode.bp-ready.timeout</name>
<value>20</value>
<value>20s</value>
<description>
The maximum wait time for datanode to be ready before failing the
received request. Setting this to 0 fails requests right away if the
@ -4273,4 +4273,12 @@
a plan.
</description>
</property>
<property>
<name>dfs.lock.suppress.warning.interval</name>
<value>10s</value>
<description>Instrumentation reporting long critical sections will suppress
consecutive warnings within this interval.</description>
</property>
</configuration>
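Several of the interval properties above now carry an explicit time-unit suffix in their default values. A hedged sketch of how such a value is typically read back (the class name is illustrative, and Configuration.getTimeDuration is assumed to be the accessor used for suffixed durations):

import java.util.concurrent.TimeUnit;

import org.apache.hadoop.conf.Configuration;

/** Illustrative reader for a suffixed interval value. */
public class TimeUnitSuffixExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // "3s" and "3000ms" describe the same duration; a bare "3" would be
    // interpreted in the unit passed to getTimeDuration below.
    conf.set("dfs.heartbeat.interval", "3s");
    long heartbeatMs = conf.getTimeDuration(
        "dfs.heartbeat.interval", 3000, TimeUnit.MILLISECONDS);
    System.out.println("dfs.heartbeat.interval = " + heartbeatMs + " ms");
  }
}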

View File

@ -239,6 +239,7 @@ Usage: `hdfs oiv [OPTIONS] -i INPUT_FILE`
| `-addr` *address* | Specify the address(host:port) to listen. (localhost:5978 by default). This option is used with Web processor. |
| `-maxSize` *size* | Specify the range [0, maxSize] of file sizes to be analyzed in bytes (128GB by default). This option is used with FileDistribution processor. |
| `-step` *size* | Specify the granularity of the distribution in bytes (2MB by default). This option is used with FileDistribution processor. |
| `-format` | Format the output result in a human-readable fashion rather than as a number of bytes. (false by default). This option is used with FileDistribution processor. |
| `-delimiter` *arg* | Delimiting string to use with Delimited processor. |
| `-t`,`--temp` *temporary dir* | Use temporary dir to cache intermediate result to generate Delimited outputs. If not set, Delimited processor constructs the namespace in memory before outputting text. |
| `-h`,`--help` | Display the tool usage and help information and exit. |
@ -260,6 +261,9 @@ Usage: `hdfs oiv_legacy [OPTIONS] -i INPUT_FILE -o OUTPUT_FILE`
| COMMAND\_OPTION | Description |
|:---- |:---- |
| `-p`\|`--processor` *processor* | Specify the image processor to apply against the image file. Valid options are Ls (default), XML, Delimited, Indented, and FileDistribution. |
| `-maxSize` *size* | Specify the range [0, maxSize] of file sizes to be analyzed in bytes (128GB by default). This option is used with FileDistribution processor. |
| `-step` *size* | Specify the granularity of the distribution in bytes (2MB by default). This option is used with FileDistribution processor. |
| `-format` | Format the output result in a human-readable fashion rather than as a number of bytes. (false by default). This option is used with FileDistribution processor. |
| `-skipBlocks` | Do not enumerate individual blocks within files. This may save processing time and output file space on namespaces with very large files. The Ls processor reads the blocks to correctly determine file sizes and ignores this option. |
| `-printToScreen` | Pipe output of processor to console as well as specified file. On extremely large namespaces, this may increase processing time by an order of magnitude. |
| `-delimiter` *arg* | When used in conjunction with the Delimited processor, replaces the default tab delimiter with the string specified by *arg*. |
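To illustrate the `-format` flag documented in the tables above, a FileDistribution run over a small image might look like the following (the flags mirror those exercised in TestOfflineImageViewer; the file counts are hypothetical and the columns are tab-separated):

    hdfs oiv -p FileDistribution -maxSize 512 -step 8 -format -i fsimage -o -

    Size Range	NumFiles
    [0 B, 0 B]	1
    (0 B, 8 B]	2
    (8 B, 16 B]	1

Without `-format` the first column instead shows the raw start of each bucket in bytes (0, 8, 16, ...).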

View File

@ -50,10 +50,13 @@ The Offline Image Viewer provides several output processors:
..., s[n-1], maxSize], and the processor calculates how many files
in the system fall into each segment [s[i-1], s[i]). Note that
files larger than maxSize always fall into the very last segment.
The output file is formatted as a tab separated two column table:
Size and NumFiles. Where Size represents the start of the segment,
By default, the output file is formatted as a tab separated two column
table: Size and NumFiles, where Size represents the start of the segment,
and NumFiles is the number of files from the image whose size falls
in this segment.
in this segment. By specifying the option -format, the output file will be
formatted in a human-readable fashion rather than as raw byte counts in the
Size column. In addition, the Size column will be renamed to the
Size Range column.
4. Delimited (experimental): Generate a text file with all of the elements
common to both inodes and inodes-under-construction, separated by a
@ -150,6 +153,7 @@ Options
| `-addr` *address* | Specify the address(host:port) to listen. (localhost:5978 by default). This option is used with Web processor. |
| `-maxSize` *size* | Specify the range [0, maxSize] of file sizes to be analyzed in bytes (128GB by default). This option is used with FileDistribution processor. |
| `-step` *size* | Specify the granularity of the distribution in bytes (2MB by default). This option is used with FileDistribution processor. |
| `-format` | Format the output result in a human-readable fashion rather than as a number of bytes. (false by default). This option is used with FileDistribution processor. |
| `-delimiter` *arg* | Delimiting string to use with Delimited processor. |
| `-t`\|`--temp` *temporary dir* | Use temporary dir to cache intermediate result to generate Delimited outputs. If not set, Delimited processor constructs the namespace in memory before outputting text. |
| `-h`\|`--help` | Display the tool usage and help information and exit. |
@ -181,6 +185,9 @@ Due to the internal layout changes introduced by the ProtocolBuffer-based fsimag
| `-i`\|`--inputFile` *input file* | Specify the input fsimage file to process. Required. |
| `-o`\|`--outputFile` *output file* | Specify the output filename, if the specified output processor generates one. If the specified file already exists, it is silently overwritten. Required. |
| `-p`\|`--processor` *processor* | Specify the image processor to apply against the image file. Valid options are Ls (default), XML, Delimited, Indented, and FileDistribution. |
| `-maxSize` *size* | Specify the range [0, maxSize] of file sizes to be analyzed in bytes (128GB by default). This option is used with FileDistribution processor. |
| `-step` *size* | Specify the granularity of the distribution in bytes (2MB by default). This option is used with FileDistribution processor. |
| `-format` | Format the output result in a human-readable fashion rather than as a number of bytes. (false by default). This option is used with FileDistribution processor. |
| `-skipBlocks` | Do not enumerate individual blocks within files. This may save processing time and output file space on namespaces with very large files. The Ls processor reads the blocks to correctly determine file sizes and ignores this option. |
| `-printToScreen` | Pipe output of processor to console as well as specified file. On extremely large namespaces, this may increase processing time by an order of magnitude. |
| `-delimiter` *arg* | When used in conjunction with the Delimited processor, replaces the default tab delimiter with the string specified by *arg*. |

View File

@ -57,7 +57,8 @@ import static org.junit.Assert.assertTrue;
public class TestDFSStripedInputStream {
public static final Log LOG = LogFactory.getLog(TestDFSStripedInputStream.class);
public static final Log LOG =
LogFactory.getLog(TestDFSStripedInputStream.class);
private MiniDFSCluster cluster;
private Configuration conf = new Configuration();
@ -272,12 +273,16 @@ public class TestDFSStripedInputStream {
// |10 |
done += in.read(0, readBuffer, 0, delta);
assertEquals(delta, done);
assertArrayEquals(Arrays.copyOf(expected, done),
Arrays.copyOf(readBuffer, done));
// both head and trail cells are partial
// |c_0 |c_1 |c_2 |c_3 |c_4 |c_5 |
// |256K - 10|missing|256K|256K|256K - 10|not in range|
done += in.read(delta, readBuffer, delta,
CELLSIZE * (DATA_BLK_NUM - 1) - 2 * delta);
assertEquals(CELLSIZE * (DATA_BLK_NUM - 1) - delta, done);
assertArrayEquals(Arrays.copyOf(expected, done),
Arrays.copyOf(readBuffer, done));
// read the rest
done += in.read(done, readBuffer, done, readSize - done);
assertEquals(readSize, done);
@ -291,8 +296,8 @@ public class TestDFSStripedInputStream {
testStatefulRead(true, true);
}
private void testStatefulRead(boolean useByteBuffer, boolean cellMisalignPacket)
throws Exception {
private void testStatefulRead(boolean useByteBuffer,
boolean cellMisalignPacket) throws Exception {
final int numBlocks = 2;
final int fileSize = numBlocks * BLOCK_GROUP_SIZE;
if (cellMisalignPacket) {
@ -302,7 +307,8 @@ public class TestDFSStripedInputStream {
}
DFSTestUtil.createStripedFile(cluster, filePath, null, numBlocks,
NUM_STRIPE_PER_BLOCK, false);
LocatedBlocks lbs = fs.getClient().namenode.getBlockLocations(filePath.toString(), 0, fileSize);
LocatedBlocks lbs = fs.getClient().namenode.
getBlockLocations(filePath.toString(), 0, fileSize);
assert lbs.getLocatedBlocks().size() == numBlocks;
for (LocatedBlock lb : lbs.getLocatedBlocks()) {
@ -360,4 +366,111 @@ public class TestDFSStripedInputStream {
}
fs.delete(filePath, true);
}
@Test
public void testStatefulReadWithDNFailure() throws Exception {
final int numBlocks = 4;
final int failedDNIdx = DATA_BLK_NUM - 1;
DFSTestUtil.createStripedFile(cluster, filePath, null, numBlocks,
NUM_STRIPE_PER_BLOCK, false);
LocatedBlocks lbs = fs.getClient().namenode.getBlockLocations(
filePath.toString(), 0, BLOCK_GROUP_SIZE);
assert lbs.get(0) instanceof LocatedStripedBlock;
LocatedStripedBlock bg = (LocatedStripedBlock) (lbs.get(0));
for (int i = 0; i < DATA_BLK_NUM + PARITY_BLK_NUM; i++) {
Block blk = new Block(bg.getBlock().getBlockId() + i,
NUM_STRIPE_PER_BLOCK * CELLSIZE,
bg.getBlock().getGenerationStamp());
blk.setGenerationStamp(bg.getBlock().getGenerationStamp());
cluster.injectBlocks(i, Arrays.asList(blk),
bg.getBlock().getBlockPoolId());
}
DFSStripedInputStream in =
new DFSStripedInputStream(fs.getClient(), filePath.toString(), false,
ecPolicy, null);
int readSize = BLOCK_GROUP_SIZE;
byte[] readBuffer = new byte[readSize];
byte[] expected = new byte[readSize];
/** A variation of {@link DFSTestUtil#fillExpectedBuf} for striped blocks */
for (int i = 0; i < NUM_STRIPE_PER_BLOCK; i++) {
for (int j = 0; j < DATA_BLK_NUM; j++) {
for (int k = 0; k < CELLSIZE; k++) {
int posInBlk = i * CELLSIZE + k;
int posInFile = i * CELLSIZE * DATA_BLK_NUM + j * CELLSIZE + k;
expected[posInFile] = SimulatedFSDataset.simulatedByte(
new Block(bg.getBlock().getBlockId() + j), posInBlk);
}
}
}
ErasureCoderOptions coderOptions = new ErasureCoderOptions(
DATA_BLK_NUM, PARITY_BLK_NUM);
RawErasureDecoder rawDecoder = CodecUtil.createRawDecoder(conf,
ecPolicy.getCodecName(), coderOptions);
// Update the expected content for decoded data
int[] missingBlkIdx = new int[PARITY_BLK_NUM];
for (int i = 0; i < missingBlkIdx.length; i++) {
if (i == 0) {
missingBlkIdx[i] = failedDNIdx;
} else {
missingBlkIdx[i] = DATA_BLK_NUM + i;
}
}
cluster.stopDataNode(failedDNIdx);
for (int i = 0; i < NUM_STRIPE_PER_BLOCK; i++) {
byte[][] decodeInputs = new byte[DATA_BLK_NUM + PARITY_BLK_NUM][CELLSIZE];
byte[][] decodeOutputs = new byte[missingBlkIdx.length][CELLSIZE];
for (int j = 0; j < DATA_BLK_NUM; j++) {
int posInBuf = i * CELLSIZE * DATA_BLK_NUM + j * CELLSIZE;
if (j != failedDNIdx) {
System.arraycopy(expected, posInBuf, decodeInputs[j], 0, CELLSIZE);
}
}
for (int j = DATA_BLK_NUM; j < DATA_BLK_NUM + PARITY_BLK_NUM; j++) {
for (int k = 0; k < CELLSIZE; k++) {
int posInBlk = i * CELLSIZE + k;
decodeInputs[j][k] = SimulatedFSDataset.simulatedByte(
new Block(bg.getBlock().getBlockId() + j), posInBlk);
}
}
for (int m : missingBlkIdx) {
decodeInputs[m] = null;
}
rawDecoder.decode(decodeInputs, missingBlkIdx, decodeOutputs);
int posInBuf = i * CELLSIZE * DATA_BLK_NUM + failedDNIdx * CELLSIZE;
System.arraycopy(decodeOutputs[0], 0, expected, posInBuf, CELLSIZE);
}
int delta = 10;
int done = 0;
// read a small delta, shouldn't trigger decode
// |cell_0 |
// |10 |
done += in.read(readBuffer, 0, delta);
assertEquals(delta, done);
// both head and trail cells are partial
// |c_0 |c_1 |c_2 |c_3 |c_4 |c_5 |
// |256K - 10|missing|256K|256K|256K - 10|not in range|
while (done < (CELLSIZE * (DATA_BLK_NUM - 1) - 2 * delta)) {
int ret = in.read(readBuffer, delta,
CELLSIZE * (DATA_BLK_NUM - 1) - 2 * delta);
assertTrue(ret > 0);
done += ret;
}
assertEquals(CELLSIZE * (DATA_BLK_NUM - 1) - delta, done);
// read the rest
int restSize;
restSize = readSize - done;
while (done < restSize) {
int ret = in.read(readBuffer, done, restSize);
assertTrue(ret > 0);
done += ret;
}
assertEquals(readSize, done);
assertArrayEquals(expected, readBuffer);
}
}

View File

@ -0,0 +1,166 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.Lock;
import org.apache.hadoop.util.AutoCloseableLock;
import org.apache.hadoop.util.Timer;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TestName;
import static org.mockito.Mockito.*;
import static org.junit.Assert.*;
/**
* A test class for InstrumentedLock.
*/
public class TestInstrumentedLock {
static final Log LOG = LogFactory.getLog(TestInstrumentedLock.class);
@Rule public TestName name = new TestName();
/**
* Test exclusive access of the lock.
* @throws Exception
*/
@Test(timeout=10000)
public void testMultipleThread() throws Exception {
String testname = name.getMethodName();
InstrumentedLock lock = new InstrumentedLock(testname, LOG, 0, 300);
lock.lock();
try {
Thread competingThread = new Thread() {
@Override
public void run() {
assertFalse(lock.tryLock());
}
};
competingThread.start();
competingThread.join();
} finally {
lock.unlock();
}
}
/**
* Test the correctness with try-with-resource syntax.
* @throws Exception
*/
@Test(timeout=10000)
public void testTryWithResourceSyntax() throws Exception {
String testname = name.getMethodName();
final AtomicReference<Thread> lockThread = new AtomicReference<>(null);
Lock lock = new InstrumentedLock(testname, LOG, 0, 300) {
@Override
public void lock() {
super.lock();
lockThread.set(Thread.currentThread());
}
@Override
public void unlock() {
super.unlock();
lockThread.set(null);
}
};
AutoCloseableLock acl = new AutoCloseableLock(lock);
try (AutoCloseable localLock = acl.acquire()) {
assertEquals(acl, localLock);
Thread competingThread = new Thread() {
@Override
public void run() {
assertNotEquals(Thread.currentThread(), lockThread.get());
assertFalse(lock.tryLock());
}
};
competingThread.start();
competingThread.join();
assertEquals(Thread.currentThread(), lockThread.get());
}
assertNull(lockThread.get());
}
/**
* Test the lock logs warning when lock held time is greater than threshold
* and not log warning otherwise.
* @throws Exception
*/
@Test(timeout=10000)
public void testLockLongHoldingReport() throws Exception {
String testname = name.getMethodName();
final AtomicLong time = new AtomicLong(0);
Timer mclock = new Timer() {
@Override
public long monotonicNow() {
return time.get();
}
};
Lock mlock = mock(Lock.class);
final AtomicLong wlogged = new AtomicLong(0);
final AtomicLong wsuppresed = new AtomicLong(0);
InstrumentedLock lock = new InstrumentedLock(
testname, LOG, mlock, 2000, 300, mclock) {
@Override
void logWarning(long lockHeldTime, long suppressed) {
wlogged.incrementAndGet();
wsuppresed.set(suppressed);
}
};
// do not log warning when the lock held time is short
lock.lock(); // t = 0
time.set(200);
lock.unlock(); // t = 200
assertEquals(0, wlogged.get());
assertEquals(0, wsuppresed.get());
lock.lock(); // t = 200
time.set(700);
lock.unlock(); // t = 700
assertEquals(1, wlogged.get());
assertEquals(0, wsuppresed.get());
// even though the lock held time exceeds the threshold, the warning
// is suppressed because of the minimum logging gap
// (not recorded in wsuppressed until the next logged warning)
lock.lock(); // t = 700
time.set(1100);
lock.unlock(); // t = 1100
assertEquals(1, wlogged.get());
assertEquals(0, wsuppresed.get());
// log a warning message when the lock held time is greater than the
// threshold and the logging time gap is satisfied. It should also report
// the number of previously suppressed warnings.
time.set(2400);
lock.lock(); // t = 2400
time.set(2800);
lock.unlock(); // t = 2800
assertEquals(2, wlogged.get());
assertEquals(1, wsuppresed.get());
}
}
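For orientation, a minimal usage sketch of the InstrumentedLock exercised above; the four-argument constructor order (name, logger, minimum logging gap in ms, warning threshold in ms) is assumed from the tests, and the 10-second gap mirrors the new dfs.lock.suppress.warning.interval default:

package org.apache.hadoop.hdfs;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.util.AutoCloseableLock;

/** Illustrative only; kept in this package so the lock class is visible. */
public class InstrumentedLockSketch {
  private static final Log LOG =
      LogFactory.getLog(InstrumentedLockSketch.class);

  public static void main(String[] args) throws Exception {
    // Assumed argument order (per the tests above): name, log,
    // minimum gap between warnings (ms), lock-held warning threshold (ms).
    InstrumentedLock lock = new InstrumentedLock("dataset", LOG, 10000, 300);
    AutoCloseableLock acl = new AutoCloseableLock(lock);
    try (AutoCloseable ignored = acl.acquire()) {
      Thread.sleep(500); // a long critical section that would trigger a warning
    }
  }
}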

View File

@ -0,0 +1,119 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.security;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION;
import static org.mockito.Mockito.mock;
import java.net.InetSocketAddress;
import java.security.PrivilegedExceptionAction;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.Client;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.SaslInputStream;
import org.apache.hadoop.security.SaslRpcClient;
import org.apache.hadoop.security.SaslRpcServer;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.Test;
/** Unit tests for using Delegation Token over RPC. */
public class TestClientProtocolWithDelegationToken {
private static final String ADDRESS = "0.0.0.0";
public static final Log LOG = LogFactory
.getLog(TestClientProtocolWithDelegationToken.class);
private static final Configuration conf;
static {
conf = new Configuration();
conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
UserGroupInformation.setConfiguration(conf);
}
static {
GenericTestUtils.setLogLevel(Client.LOG, Level.ALL);
GenericTestUtils.setLogLevel(Server.LOG, Level.ALL);
GenericTestUtils.setLogLevel(SaslRpcClient.LOG, Level.ALL);
GenericTestUtils.setLogLevel(SaslRpcServer.LOG, Level.ALL);
GenericTestUtils.setLogLevel(SaslInputStream.LOG, Level.ALL);
}
@Test
public void testDelegationTokenRpc() throws Exception {
ClientProtocol mockNN = mock(ClientProtocol.class);
FSNamesystem mockNameSys = mock(FSNamesystem.class);
DelegationTokenSecretManager sm = new DelegationTokenSecretManager(
DFSConfigKeys.DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_DEFAULT,
DFSConfigKeys.DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_DEFAULT,
DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_DEFAULT,
3600000, mockNameSys);
sm.startThreads();
final Server server = new RPC.Builder(conf)
.setProtocol(ClientProtocol.class).setInstance(mockNN)
.setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true)
.setSecretManager(sm).build();
server.start();
final UserGroupInformation current = UserGroupInformation.getCurrentUser();
final InetSocketAddress addr = NetUtils.getConnectAddress(server);
String user = current.getUserName();
Text owner = new Text(user);
DelegationTokenIdentifier dtId = new DelegationTokenIdentifier(owner, owner, null);
Token<DelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>(
dtId, sm);
SecurityUtil.setTokenService(token, addr);
LOG.info("Service for token is " + token.getService());
current.addToken(token);
current.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
ClientProtocol proxy = null;
try {
proxy = RPC.getProxy(ClientProtocol.class,
ClientProtocol.versionID, addr, conf);
proxy.getServerDefaults();
} finally {
server.stop();
if (proxy != null) {
RPC.stopProxy(proxy);
}
}
return null;
}
});
}
}

View File

@ -41,18 +41,19 @@ import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector;
import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ConnectorFactory;
import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster;
import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode;
import org.apache.hadoop.hdfs.tools.DiskBalancerCLI;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import com.google.common.collect.Lists;
import static org.apache.hadoop.hdfs.tools.DiskBalancer.CANCEL;
import static org.apache.hadoop.hdfs.tools.DiskBalancer.HELP;
import static org.apache.hadoop.hdfs.tools.DiskBalancer.NODE;
import static org.apache.hadoop.hdfs.tools.DiskBalancer.PLAN;
import static org.apache.hadoop.hdfs.tools.DiskBalancer.QUERY;
import static org.apache.hadoop.hdfs.tools.DiskBalancer.REPORT;
import static org.apache.hadoop.hdfs.tools.DiskBalancerCLI.CANCEL;
import static org.apache.hadoop.hdfs.tools.DiskBalancerCLI.HELP;
import static org.apache.hadoop.hdfs.tools.DiskBalancerCLI.NODE;
import static org.apache.hadoop.hdfs.tools.DiskBalancerCLI.PLAN;
import static org.apache.hadoop.hdfs.tools.DiskBalancerCLI.QUERY;
import static org.apache.hadoop.hdfs.tools.DiskBalancerCLI.REPORT;
import org.junit.Rule;
import org.junit.rules.ExpectedException;
@ -387,8 +388,7 @@ public class TestDiskBalancerCommand {
private List<String> runCommandInternal(final String cmdLine) throws
Exception {
String[] cmds = StringUtils.split(cmdLine, ' ');
org.apache.hadoop.hdfs.tools.DiskBalancer db =
new org.apache.hadoop.hdfs.tools.DiskBalancer(conf);
DiskBalancerCLI db = new DiskBalancerCLI(conf);
ByteArrayOutputStream bufOut = new ByteArrayOutputStream();
PrintStream out = new PrintStream(bufOut);

View File

@ -237,7 +237,7 @@ public class TestOfflineImageViewer {
File truncatedFile = new File(tempDir, "truncatedFsImage");
PrintStream output = new PrintStream(NullOutputStream.NULL_OUTPUT_STREAM);
copyPartOfFile(originalFsimage, truncatedFile);
new FileDistributionCalculator(new Configuration(), 0, 0, output)
new FileDistributionCalculator(new Configuration(), 0, 0, false, output)
.visit(new RandomAccessFile(truncatedFile, "r"));
}
@ -259,7 +259,7 @@ public class TestOfflineImageViewer {
public void testFileDistributionCalculator() throws IOException {
ByteArrayOutputStream output = new ByteArrayOutputStream();
PrintStream o = new PrintStream(output);
new FileDistributionCalculator(new Configuration(), 0, 0, o)
new FileDistributionCalculator(new Configuration(), 0, 0, false, o)
.visit(new RandomAccessFile(originalFsimage, "r"));
o.close();
@ -620,4 +620,24 @@ public class TestOfflineImageViewer {
IOUtils.closeStream(out);
}
}
@Test
public void testOfflineImageViewerWithFormatOption() throws Exception {
final ByteArrayOutputStream bytes = new ByteArrayOutputStream();
final PrintStream out = new PrintStream(bytes);
final PrintStream oldOut = System.out;
try {
System.setOut(out);
int status =
OfflineImageViewerPB.run(new String[] {"-i",
originalFsimage.getAbsolutePath(), "-o", "-", "-p",
"FileDistribution", "-maxSize", "512", "-step", "8",
"-format"});
assertEquals(0, status);
Assert.assertTrue(bytes.toString().contains("(0 B, 8 B]"));
} finally {
System.setOut(oldOut);
IOUtils.closeStream(out);
}
}
}

View File

@ -36,6 +36,7 @@ import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.Timeout;
import java.nio.ByteBuffer;
import java.util.Random;
import static org.junit.Assert.assertEquals;
@ -242,7 +243,8 @@ public class TestStripedBlockUtil {
*/
@Test
public void testDivideByteRangeIntoStripes() {
byte[] assembled = new byte[BLK_GROUP_STRIPE_NUM * FULL_STRIPE_SIZE];
ByteBuffer assembled =
ByteBuffer.allocate(BLK_GROUP_STRIPE_NUM * FULL_STRIPE_SIZE);
for (int bgSize : blockGroupSizes) {
LocatedStripedBlock blockGroup = createDummyLocatedBlock(bgSize);
byte[][] internalBlkBufs = createInternalBlkBuffers(bgSize);
@ -252,7 +254,7 @@ public class TestStripedBlockUtil {
continue;
}
AlignedStripe[] stripes = divideByteRangeIntoStripes(EC_POLICY,
CELLSIZE, blockGroup, brStart, brStart + brSize - 1, assembled, 0);
CELLSIZE, blockGroup, brStart, brStart + brSize - 1, assembled);
for (AlignedStripe stripe : stripes) {
for (int i = 0; i < DATA_BLK_NUM; i++) {
@ -261,21 +263,21 @@ public class TestStripedBlockUtil {
continue;
}
int done = 0;
for (int j = 0; j < chunk.byteArray.getLengths().length; j++) {
System.arraycopy(internalBlkBufs[i],
(int) stripe.getOffsetInBlock() + done, assembled,
chunk.byteArray.getOffsets()[j],
chunk.byteArray.getLengths()[j]);
done += chunk.byteArray.getLengths()[j];
int len;
for (ByteBuffer slice : chunk.getChunkBuffer().getSlices()) {
len = slice.remaining();
slice.put(internalBlkBufs[i],
(int) stripe.getOffsetInBlock() + done, len);
done += len;
}
}
}
for (int i = 0; i < brSize; i++) {
if (hashIntToByte(brStart + i) != assembled[i]) {
if (hashIntToByte(brStart + i) != assembled.get(i)) {
System.out.println("Oops");
}
assertEquals("Byte at " + (brStart + i) + " should be the same",
hashIntToByte(brStart + i), assembled[i]);
hashIntToByte(brStart + i), assembled.get(i));
}
}
}

View File

@ -29,6 +29,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.WritableRpcEngine;
import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.Groups;
@ -97,6 +98,8 @@ public class HSAdminServer extends AbstractService implements HSAdminProtocol {
BlockingService refreshHSAdminProtocolService = HSAdminRefreshProtocolService
.newReflectiveBlockingService(refreshHSAdminProtocolXlator);
WritableRpcEngine.ensureInitialized();
clientRpcAddress = conf.getSocketAddr(
JHAdminConfig.MR_HISTORY_BIND_HOST,
JHAdminConfig.JHS_ADMIN_ADDRESS,