Merge trunk into HA branch.

Resolved some semantic conflicts in TestFileAppendRestart: we now log more OP_ADDs in the HA branch than we did in trunk.
Resolved some conflicts around the removal of VersionedProtocol, etc.


git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-1623@1295342 13f79535-47bb-0310-9956-ffa450edef68
Todd Lipcon 2012-02-29 23:47:49 +00:00
commit 1ba357553a
113 changed files with 1820 additions and 1288 deletions

View File

@ -41,14 +41,14 @@ public class TestAuthenticationFilter extends TestCase {
FilterConfig config = Mockito.mock(FilterConfig.class);
Mockito.when(config.getInitParameter(AuthenticationFilter.CONFIG_PREFIX)).thenReturn("");
Mockito.when(config.getInitParameter("a")).thenReturn("A");
Mockito.when(config.getInitParameterNames()).thenReturn(new Vector(Arrays.asList("a")).elements());
Mockito.when(config.getInitParameterNames()).thenReturn(new Vector<String>(Arrays.asList("a")).elements());
Properties props = filter.getConfiguration("", config);
assertEquals("A", props.getProperty("a"));
config = Mockito.mock(FilterConfig.class);
Mockito.when(config.getInitParameter(AuthenticationFilter.CONFIG_PREFIX)).thenReturn("foo");
Mockito.when(config.getInitParameter("foo.a")).thenReturn("A");
Mockito.when(config.getInitParameterNames()).thenReturn(new Vector(Arrays.asList("foo.a")).elements());
Mockito.when(config.getInitParameterNames()).thenReturn(new Vector<String>(Arrays.asList("foo.a")).elements());
props = filter.getConfiguration("foo.", config);
assertEquals("A", props.getProperty("a"));
}
@ -57,7 +57,7 @@ public class TestAuthenticationFilter extends TestCase {
AuthenticationFilter filter = new AuthenticationFilter();
try {
FilterConfig config = Mockito.mock(FilterConfig.class);
Mockito.when(config.getInitParameterNames()).thenReturn(new Vector().elements());
Mockito.when(config.getInitParameterNames()).thenReturn(new Vector<String>().elements());
filter.init(config);
fail();
} catch (ServletException ex) {
@ -119,7 +119,7 @@ public class TestAuthenticationFilter extends TestCase {
Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn("simple");
Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TOKEN_VALIDITY)).thenReturn("1000");
Mockito.when(config.getInitParameterNames()).thenReturn(
new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
AuthenticationFilter.AUTH_TOKEN_VALIDITY)).elements());
filter.init(config);
assertEquals(PseudoAuthenticationHandler.class, filter.getAuthenticationHandler().getClass());
@ -138,7 +138,7 @@ public class TestAuthenticationFilter extends TestCase {
Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn("simple");
Mockito.when(config.getInitParameter(AuthenticationFilter.SIGNATURE_SECRET)).thenReturn("secret");
Mockito.when(config.getInitParameterNames()).thenReturn(
new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
AuthenticationFilter.SIGNATURE_SECRET)).elements());
filter.init(config);
assertFalse(filter.isRandomSecret());
@ -154,7 +154,7 @@ public class TestAuthenticationFilter extends TestCase {
Mockito.when(config.getInitParameter(AuthenticationFilter.COOKIE_DOMAIN)).thenReturn(".foo.com");
Mockito.when(config.getInitParameter(AuthenticationFilter.COOKIE_PATH)).thenReturn("/bar");
Mockito.when(config.getInitParameterNames()).thenReturn(
new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
AuthenticationFilter.COOKIE_DOMAIN,
AuthenticationFilter.COOKIE_PATH)).elements());
filter.init(config);
@ -173,7 +173,7 @@ public class TestAuthenticationFilter extends TestCase {
Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(
DummyAuthenticationHandler.class.getName());
Mockito.when(config.getInitParameterNames()).thenReturn(
new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE)).elements());
new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE)).elements());
filter.init(config);
assertTrue(DummyAuthenticationHandler.init);
} finally {
@ -187,7 +187,7 @@ public class TestAuthenticationFilter extends TestCase {
FilterConfig config = Mockito.mock(FilterConfig.class);
Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn("kerberos");
Mockito.when(config.getInitParameterNames()).thenReturn(
new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE)).elements());
new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE)).elements());
filter.init(config);
} catch (ServletException ex) {
// Expected
@ -204,7 +204,7 @@ public class TestAuthenticationFilter extends TestCase {
Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(
DummyAuthenticationHandler.class.getName());
Mockito.when(config.getInitParameterNames()).thenReturn(
new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE)).elements());
new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE)).elements());
filter.init(config);
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
@ -225,7 +225,7 @@ public class TestAuthenticationFilter extends TestCase {
DummyAuthenticationHandler.class.getName());
Mockito.when(config.getInitParameter(AuthenticationFilter.SIGNATURE_SECRET)).thenReturn("secret");
Mockito.when(config.getInitParameterNames()).thenReturn(
new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
AuthenticationFilter.SIGNATURE_SECRET)).elements());
filter.init(config);
@ -254,7 +254,7 @@ public class TestAuthenticationFilter extends TestCase {
DummyAuthenticationHandler.class.getName());
Mockito.when(config.getInitParameter(AuthenticationFilter.SIGNATURE_SECRET)).thenReturn("secret");
Mockito.when(config.getInitParameterNames()).thenReturn(
new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
AuthenticationFilter.SIGNATURE_SECRET)).elements());
filter.init(config);
@ -288,7 +288,7 @@ public class TestAuthenticationFilter extends TestCase {
DummyAuthenticationHandler.class.getName());
Mockito.when(config.getInitParameter(AuthenticationFilter.SIGNATURE_SECRET)).thenReturn("secret");
Mockito.when(config.getInitParameterNames()).thenReturn(
new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
AuthenticationFilter.SIGNATURE_SECRET)).elements());
filter.init(config);
@ -321,7 +321,7 @@ public class TestAuthenticationFilter extends TestCase {
Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(
DummyAuthenticationHandler.class.getName());
Mockito.when(config.getInitParameterNames()).thenReturn(
new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE)).elements());
new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE)).elements());
filter.init(config);
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
@ -332,7 +332,7 @@ public class TestAuthenticationFilter extends TestCase {
FilterChain chain = Mockito.mock(FilterChain.class);
Mockito.doAnswer(
new Answer() {
new Answer<Object>() {
@Override
public Object answer(InvocationOnMock invocation) throws Throwable {
fail();
@ -358,7 +358,7 @@ public class TestAuthenticationFilter extends TestCase {
Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TOKEN_VALIDITY)).thenReturn("1000");
Mockito.when(config.getInitParameter(AuthenticationFilter.SIGNATURE_SECRET)).thenReturn("secret");
Mockito.when(config.getInitParameterNames()).thenReturn(
new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
AuthenticationFilter.AUTH_TOKEN_VALIDITY,
AuthenticationFilter.SIGNATURE_SECRET)).elements());
@ -366,7 +366,7 @@ public class TestAuthenticationFilter extends TestCase {
Mockito.when(config.getInitParameter(AuthenticationFilter.COOKIE_DOMAIN)).thenReturn(".foo.com");
Mockito.when(config.getInitParameter(AuthenticationFilter.COOKIE_PATH)).thenReturn("/bar");
Mockito.when(config.getInitParameterNames()).thenReturn(
new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
AuthenticationFilter.AUTH_TOKEN_VALIDITY,
AuthenticationFilter.SIGNATURE_SECRET,
AuthenticationFilter.COOKIE_DOMAIN,
@ -387,7 +387,7 @@ public class TestAuthenticationFilter extends TestCase {
final boolean[] calledDoFilter = new boolean[1];
Mockito.doAnswer(
new Answer() {
new Answer<Object>() {
@Override
public Object answer(InvocationOnMock invocation) throws Throwable {
calledDoFilter[0] = true;
@ -398,7 +398,7 @@ public class TestAuthenticationFilter extends TestCase {
final Cookie[] setCookie = new Cookie[1];
Mockito.doAnswer(
new Answer() {
new Answer<Object>() {
@Override
public Object answer(InvocationOnMock invocation) throws Throwable {
Object[] args = invocation.getArguments();
@ -451,7 +451,7 @@ public class TestAuthenticationFilter extends TestCase {
Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(
DummyAuthenticationHandler.class.getName());
Mockito.when(config.getInitParameterNames()).thenReturn(
new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE)).elements());
new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE)).elements());
filter.init(config);
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
@ -470,7 +470,7 @@ public class TestAuthenticationFilter extends TestCase {
FilterChain chain = Mockito.mock(FilterChain.class);
Mockito.doAnswer(
new Answer() {
new Answer<Object>() {
@Override
public Object answer(InvocationOnMock invocation) throws Throwable {
Object[] args = invocation.getArguments();
@ -496,7 +496,7 @@ public class TestAuthenticationFilter extends TestCase {
Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(
DummyAuthenticationHandler.class.getName());
Mockito.when(config.getInitParameterNames()).thenReturn(
new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE)).elements());
new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE)).elements());
filter.init(config);
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
@ -515,7 +515,7 @@ public class TestAuthenticationFilter extends TestCase {
FilterChain chain = Mockito.mock(FilterChain.class);
Mockito.doAnswer(
new Answer() {
new Answer<Object>() {
@Override
public Object answer(InvocationOnMock invocation) throws Throwable {
fail();
@ -526,7 +526,7 @@ public class TestAuthenticationFilter extends TestCase {
final Cookie[] setCookie = new Cookie[1];
Mockito.doAnswer(
new Answer() {
new Answer<Object>() {
@Override
public Object answer(InvocationOnMock invocation) throws Throwable {
Object[] args = invocation.getArguments();
@ -556,7 +556,7 @@ public class TestAuthenticationFilter extends TestCase {
Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(
DummyAuthenticationHandler.class.getName());
Mockito.when(config.getInitParameterNames()).thenReturn(
new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE)).elements());
new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE)).elements());
filter.init(config);
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
@ -575,7 +575,7 @@ public class TestAuthenticationFilter extends TestCase {
FilterChain chain = Mockito.mock(FilterChain.class);
Mockito.doAnswer(
new Answer() {
new Answer<Object>() {
@Override
public Object answer(InvocationOnMock invocation) throws Throwable {
fail();
@ -586,7 +586,7 @@ public class TestAuthenticationFilter extends TestCase {
final Cookie[] setCookie = new Cookie[1];
Mockito.doAnswer(
new Answer() {
new Answer<Object>() {
@Override
public Object answer(InvocationOnMock invocation) throws Throwable {
Object[] args = invocation.getArguments();

View File

@ -52,6 +52,11 @@ Trunk (unreleased changes)
HADOOP-8078. Add capability to turn on security in unit tests. (Jaimin Jetly
via jitendra)
HADOOP-7994. Remove getProtocolVersion and getProtocolSignature from the
client side translator and server side implementation. (jitendra)
HADOOP-7557 Make IPC header be extensible (sanjay radia)
BUG FIXES
HADOOP-8018. Hudson auto test for HDFS has started throwing javadoc
@ -157,6 +162,9 @@ Release 0.23.3 - UNRELEASED
HADOOP-8098. KerberosAuthenticatorHandler should use _HOST replacement to
resolve principal name (tucu)
HADOOP-8118. In metrics2.util.MBeans, change log level to trace for the
stack trace of InstanceAlreadyExistsException. (szetszwo)
OPTIMIZATIONS
BUG FIXES
@ -183,6 +191,13 @@ Release 0.23.3 - UNRELEASED
HADOOP-7931. o.a.h.ipc.WritableRpcEngine should have a way to force
initialization. (atm)
HADOOP-8104. Inconsistent Jackson versions (tucu)
HADOOP-7940. The Text.clear() method does not clear the bytes as intended. (Csaba Miklos via harsh)
HADOOP-8119. Fix javac warnings in TestAuthenticationFilter in hadoop-auth.
(szetszwo)
Release 0.23.2 - UNRELEASED
INCOMPATIBLE CHANGES

View File

@ -277,5 +277,9 @@
<Match>
<!-- protobuf generated code -->
<Class name="~org\.apache\.hadoop\.ipc\.protobuf\.ProtocolInfoProtos.*"/>
</Match>
<Match>
<!-- protobuf generated code -->
<Class name="~org\.apache\.hadoop\.ipc\.protobuf\.IpcConnectionContextProtos.*"/>
</Match>
</FindBugsFilter>

View File

@ -20,7 +20,6 @@ package org.apache.hadoop.ha;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.ipc.VersionedProtocol;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.KerberosInfo;
@ -36,7 +35,7 @@ import java.io.IOException;
serverPrincipal=CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY)
@InterfaceAudience.Public
@InterfaceStability.Evolving
public interface HAServiceProtocol extends VersionedProtocol {
public interface HAServiceProtocol {
/**
* Initial version of the protocol
*/

View File

@ -239,6 +239,7 @@ public class Text extends BinaryComparable
*/
public void clear() {
length = 0;
bytes = EMPTY_BYTES;
}
/*

View File

@ -51,6 +51,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.ipc.RpcPayloadHeader.*;
import org.apache.hadoop.ipc.protobuf.IpcConnectionContextProtos.IpcConnectionContextProto;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
@ -66,6 +67,7 @@ import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.security.token.TokenSelector;
import org.apache.hadoop.security.token.TokenInfo;
import org.apache.hadoop.util.ProtoUtil;
import org.apache.hadoop.util.ReflectionUtils;
/** A client for an IPC service. IPC calls take a single {@link Writable} as a
@ -211,7 +213,7 @@ public class Client {
private class Connection extends Thread {
private InetSocketAddress server; // server ip:port
private String serverPrincipal; // server's krb5 principal name
private ConnectionHeader header; // connection header
private IpcConnectionContextProto connectionContext; // connection context
private final ConnectionId remoteId; // connection id
private AuthMethod authMethod; // authentication method
private boolean useSasl;
@ -295,8 +297,8 @@ public class Client {
authMethod = AuthMethod.KERBEROS;
}
header =
new ConnectionHeader(RPC.getProtocolName(protocol), ticket, authMethod);
connectionContext = ProtoUtil.makeIpcConnectionContext(
RPC.getProtocolName(protocol), ticket, authMethod);
if (LOG.isDebugEnabled())
LOG.debug("Use " + authMethod + " authentication for protocol "
@ -563,7 +565,7 @@ public class Client {
setupConnection();
InputStream inStream = NetUtils.getInputStream(socket);
OutputStream outStream = NetUtils.getOutputStream(socket);
writeRpcHeader(outStream);
writeConnectionHeader(outStream);
if (useSasl) {
final InputStream in2 = inStream;
final OutputStream out2 = outStream;
@ -597,8 +599,11 @@ public class Client {
} else {
// fall back to simple auth because server told us so.
authMethod = AuthMethod.SIMPLE;
header = new ConnectionHeader(header.getProtocol(), header
.getUgi(), authMethod);
// remake the connectionContext
connectionContext = ProtoUtil.makeIpcConnectionContext(
connectionContext.getProtocol(),
ProtoUtil.getUgi(connectionContext.getUserInfo()),
authMethod);
useSasl = false;
}
}
@ -678,13 +683,26 @@ public class Client {
". Already tried " + curRetries + " time(s).");
}
/* Write the RPC header */
private void writeRpcHeader(OutputStream outStream) throws IOException {
/**
* Write the connection header - this is sent when the connection is established
* +----------------------------------+
* |  "hrpc" 4 bytes                  |
* +----------------------------------+
* |  Version (1 byte)                |
* +----------------------------------+
* |  Authmethod (1 byte)             |
* +----------------------------------+
* |  IpcSerializationType (1 byte)   |
* +----------------------------------+
*/
private void writeConnectionHeader(OutputStream outStream)
throws IOException {
DataOutputStream out = new DataOutputStream(new BufferedOutputStream(outStream));
// Write out the header, version and authentication method
out.write(Server.HEADER.array());
out.write(Server.CURRENT_VERSION);
authMethod.write(out);
Server.IpcSerializationType.PROTOBUF.write(out);
out.flush();
}
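// Illustration (a sketch, not part of the change itself): with CURRENT_VERSION = 7 and
// protobuf serialization, the preamble written above is seven bytes on the wire:
// 'h' 'r' 'p' 'c' | 0x07 | <authMethod code byte> | 0x00 (IpcSerializationType.PROTOBUF ordinal)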
@ -694,7 +712,7 @@ public class Client {
private void writeHeader() throws IOException {
// Write out the ConnectionHeader
DataOutputBuffer buf = new DataOutputBuffer();
header.write(buf);
connectionContext.writeTo(buf);
// Write out the payload length
int bufLen = buf.getLength();
@ -1261,18 +1279,18 @@ public class Client {
public static class ConnectionId {
InetSocketAddress address;
UserGroupInformation ticket;
Class<?> protocol;
final Class<?> protocol;
private static final int PRIME = 16777619;
private int rpcTimeout;
private String serverPrincipal;
private int maxIdleTime; //connections will be culled if it was idle for
private final int rpcTimeout;
private final String serverPrincipal;
private final int maxIdleTime; //connections will be culled if it was idle for
//maxIdleTime msecs
private int maxRetries; //the max. no. of retries for socket connections
private final int maxRetries; //the max. no. of retries for socket connections
// the max. no. of retries for socket connections on time out exceptions
private int maxRetriesOnSocketTimeouts;
private boolean tcpNoDelay; // if T then disable Nagle's Algorithm
private boolean doPing; //do we need to send ping message
private int pingInterval; // how often sends ping to the server in msecs
private final int maxRetriesOnSocketTimeouts;
private final boolean tcpNoDelay; // if T then disable Nagle's Algorithm
private final boolean doPing; //do we need to send ping message
private final int pingInterval; // how often sends ping to the server in msecs
ConnectionId(InetSocketAddress address, Class<?> protocol,
UserGroupInformation ticket, int rpcTimeout,

View File

@ -1,121 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
/**
* The IPC connection header sent by the client to the server
* on connection establishment.
*/
class ConnectionHeader implements Writable {
public static final Log LOG = LogFactory.getLog(ConnectionHeader.class);
private String protocol;
private UserGroupInformation ugi = null;
private AuthMethod authMethod;
public ConnectionHeader() {}
/**
* Create a new {@link ConnectionHeader} with the given <code>protocol</code>
* and {@link UserGroupInformation}.
* @param protocol protocol used for communication between the IPC client
* and the server
* @param ugi {@link UserGroupInformation} of the client communicating with
* the server
*/
public ConnectionHeader(String protocol, UserGroupInformation ugi, AuthMethod authMethod) {
this.protocol = protocol;
this.ugi = ugi;
this.authMethod = authMethod;
}
@Override
public void readFields(DataInput in) throws IOException {
protocol = Text.readString(in);
if (protocol.isEmpty()) {
protocol = null;
}
boolean ugiUsernamePresent = in.readBoolean();
if (ugiUsernamePresent) {
String username = in.readUTF();
boolean realUserNamePresent = in.readBoolean();
if (realUserNamePresent) {
String realUserName = in.readUTF();
UserGroupInformation realUserUgi = UserGroupInformation
.createRemoteUser(realUserName);
ugi = UserGroupInformation.createProxyUser(username, realUserUgi);
} else {
ugi = UserGroupInformation.createRemoteUser(username);
}
} else {
ugi = null;
}
}
@Override
public void write(DataOutput out) throws IOException {
Text.writeString(out, (protocol == null) ? "" : protocol);
if (ugi != null) {
if (authMethod == AuthMethod.KERBEROS) {
// Send effective user for Kerberos auth
out.writeBoolean(true);
out.writeUTF(ugi.getUserName());
out.writeBoolean(false);
} else if (authMethod == AuthMethod.DIGEST) {
// Don't send user for token auth
out.writeBoolean(false);
} else {
//Send both effective user and real user for simple auth
out.writeBoolean(true);
out.writeUTF(ugi.getUserName());
if (ugi.getRealUser() != null) {
out.writeBoolean(true);
out.writeUTF(ugi.getRealUser().getUserName());
} else {
out.writeBoolean(false);
}
}
} else {
out.writeBoolean(false);
}
}
public String getProtocol() {
return protocol;
}
public UserGroupInformation getUgi() {
return ugi;
}
public String toString() {
return protocol + "-" + ugi;
}
}

View File

@ -0,0 +1,34 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc;
import java.io.IOException;
/**
* IPC exception is thrown by the IPC layer when the IPC
* connection cannot be established.
*/
public class IpcException extends IOException {
private static final long serialVersionUID = 1L;
final String errMsg;
public IpcException(final String err) {
errMsg = err;
}
}

View File

@ -21,6 +21,7 @@ package org.apache.hadoop.ipc;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;
import java.net.BindException;
@ -74,6 +75,7 @@ import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
import org.apache.hadoop.ipc.RpcPayloadHeader.RpcPayloadOperation;
import org.apache.hadoop.ipc.metrics.RpcDetailedMetrics;
import org.apache.hadoop.ipc.metrics.RpcMetrics;
import org.apache.hadoop.ipc.protobuf.IpcConnectionContextProtos.IpcConnectionContextProto;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.SaslRpcServer;
@ -90,6 +92,7 @@ import org.apache.hadoop.security.authorize.ServiceAuthorizationManager;
import org.apache.hadoop.security.token.SecretManager;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.util.ProtoUtil;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.StringUtils;
@ -110,6 +113,22 @@ public abstract class Server {
*/
public static final ByteBuffer HEADER = ByteBuffer.wrap("hrpc".getBytes());
/**
* Serialization type for ConnectionContext and RpcPayloadHeader
*/
public enum IpcSerializationType {
// Add new serialization type to the end without affecting the enum order
PROTOBUF;
void write(DataOutput out) throws IOException {
out.writeByte(this.ordinal());
}
static IpcSerializationType fromByte(byte b) {
return IpcSerializationType.values()[b];
}
}
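// Illustration (a sketch, not part of the change itself): PROTOBUF is the first constant,
// so IpcSerializationType.PROTOBUF.write(out) emits the byte 0x00 and fromByte((byte) 0)
// maps it back; appending new constants only at the end, as the comment above requires,
// keeps these ordinals stable for older clients.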
/**
* If the user accidentally sends an HTTP GET to an IPC port, we detect this
* and send back a nicer response.
@ -133,7 +152,8 @@ public abstract class Server {
// 5 : Introduced use of {@link ArrayPrimitiveWritable$Internal}
// in ObjectWritable to efficiently transmit arrays of primitives
// 6 : Made RPC payload header explicit
public static final byte CURRENT_VERSION = 6;
// 7 : Changed Ipc Connection Header to use Protocol buffers
public static final byte CURRENT_VERSION = 7;
/**
* Initial and max size of response buffer
@ -968,9 +988,9 @@ public abstract class Server {
/** Reads calls from a connection and queues them for handling. */
public class Connection {
private boolean rpcHeaderRead = false; // if initial rpc header is read
private boolean headerRead = false; //if the connection header that
//follows version is read.
private boolean connectionHeaderRead = false; // connection header is read?
private boolean connectionContextRead = false; //if connection context that
//follows connection header is read
private SocketChannel channel;
private ByteBuffer data;
@ -986,14 +1006,14 @@ public abstract class Server {
private int remotePort;
private InetAddress addr;
ConnectionHeader header = new ConnectionHeader();
IpcConnectionContextProto connectionContext;
String protocolName;
boolean useSasl;
SaslServer saslServer;
private AuthMethod authMethod;
private boolean saslContextEstablished;
private boolean skipInitialSaslHandshake;
private ByteBuffer rpcHeaderBuffer;
private ByteBuffer connectionHeaderBuf = null;
private ByteBuffer unwrappedData;
private ByteBuffer unwrappedDataLengthBuffer;
@ -1241,17 +1261,17 @@ public abstract class Server {
return count;
}
if (!rpcHeaderRead) {
if (!connectionHeaderRead) {
//Every connection is expected to send the header.
if (rpcHeaderBuffer == null) {
rpcHeaderBuffer = ByteBuffer.allocate(2);
if (connectionHeaderBuf == null) {
connectionHeaderBuf = ByteBuffer.allocate(3);
}
count = channelRead(channel, rpcHeaderBuffer);
if (count < 0 || rpcHeaderBuffer.remaining() > 0) {
count = channelRead(channel, connectionHeaderBuf);
if (count < 0 || connectionHeaderBuf.remaining() > 0) {
return count;
}
int version = rpcHeaderBuffer.get(0);
byte[] method = new byte[] {rpcHeaderBuffer.get(1)};
int version = connectionHeaderBuf.get(0);
byte[] method = new byte[] {connectionHeaderBuf.get(1)};
authMethod = AuthMethod.read(new DataInputStream(
new ByteArrayInputStream(method)));
dataLengthBuffer.flip();
@ -1273,6 +1293,14 @@ public abstract class Server {
setupBadVersionResponse(version);
return -1;
}
IpcSerializationType serializationType = IpcSerializationType
.fromByte(connectionHeaderBuf.get(2));
if (serializationType != IpcSerializationType.PROTOBUF) {
respondUnsupportedSerialization(serializationType);
return -1;
}
dataLengthBuffer.clear();
if (authMethod == null) {
throw new IOException("Unable to read authentication method");
@ -1302,8 +1330,8 @@ public abstract class Server {
useSasl = true;
}
rpcHeaderBuffer = null;
rpcHeaderRead = true;
connectionHeaderBuf = null;
connectionHeaderRead = true;
continue;
}
@ -1334,7 +1362,7 @@ public abstract class Server {
skipInitialSaslHandshake = false;
continue;
}
boolean isHeaderRead = headerRead;
boolean isHeaderRead = connectionContextRead;
if (useSasl) {
saslReadAndProcess(data.array());
} else {
@ -1383,6 +1411,17 @@ public abstract class Server {
}
}
private void respondUnsupportedSerialization(IpcSerializationType st) throws IOException {
String errMsg = "Server IPC version " + CURRENT_VERSION
+ " do not support serilization " + st.toString();
ByteArrayOutputStream buffer = new ByteArrayOutputStream();
Call fakeCall = new Call(-1, null, this);
setupResponse(buffer, fakeCall, Status.FATAL, null,
IpcException.class.getName(), errMsg);
responder.doRespond(fakeCall);
}
private void setupHttpRequestOnIpcPortResponse() throws IOException {
Call fakeCall = new Call(0, null, this);
fakeCall.setResponse(ByteBuffer.wrap(
@ -1390,15 +1429,15 @@ public abstract class Server {
responder.doRespond(fakeCall);
}
/// Reads the connection header following version
private void processHeader(byte[] buf) throws IOException {
/** Reads the connection context following the connection header */
private void processConnectionContext(byte[] buf) throws IOException {
DataInputStream in =
new DataInputStream(new ByteArrayInputStream(buf));
header.readFields(in);
protocolName = header.getProtocol();
connectionContext = IpcConnectionContextProto.parseFrom(in);
protocolName = connectionContext.hasProtocol() ? connectionContext
.getProtocol() : null;
UserGroupInformation protocolUser = header.getUgi();
UserGroupInformation protocolUser = ProtoUtil.getUgi(connectionContext);
if (!useSasl) {
user = protocolUser;
if (user != null) {
@ -1472,14 +1511,14 @@ public abstract class Server {
private void processOneRpc(byte[] buf) throws IOException,
InterruptedException {
if (headerRead) {
if (connectionContextRead) {
processData(buf);
} else {
processHeader(buf);
headerRead = true;
processConnectionContext(buf);
connectionContextRead = true;
if (!authorizeConnection()) {
throw new AccessControlException("Connection from " + this
+ " for protocol " + header.getProtocol()
+ " for protocol " + connectionContext.getProtocol()
+ " is unauthorized for user " + user);
}
}
@ -1549,9 +1588,9 @@ public abstract class Server {
&& (authMethod != AuthMethod.DIGEST)) {
ProxyUsers.authorize(user, this.getHostAddress(), conf);
}
authorize(user, header, getHostInetAddress());
authorize(user, protocolName, getHostInetAddress());
if (LOG.isDebugEnabled()) {
LOG.debug("Successfully authorized " + header);
LOG.debug("Successfully authorized " + connectionContext);
}
rpcMetrics.incrAuthorizationSuccesses();
} catch (AuthorizationException ae) {
@ -1596,11 +1635,10 @@ public abstract class Server {
while (running) {
try {
final Call call = callQueue.take(); // pop the queue; maybe blocked here
if (LOG.isDebugEnabled())
if (LOG.isDebugEnabled()) {
LOG.debug(getName() + ": has Call#" + call.callId +
"for RpcKind " + call.rpcKind + " from " + call.connection);
}
String errorClass = null;
String error = null;
Writable value = null;
@ -1925,21 +1963,22 @@ public abstract class Server {
* Authorize the incoming client connection.
*
* @param user client user
* @param connection incoming connection
* @param protocolName - the protocol
* @param addr InetAddress of incoming connection
* @throws AuthorizationException when the client isn't authorized to talk the protocol
*/
public void authorize(UserGroupInformation user,
ConnectionHeader connection,
InetAddress addr
) throws AuthorizationException {
private void authorize(UserGroupInformation user, String protocolName,
InetAddress addr) throws AuthorizationException {
if (authorize) {
if (protocolName == null) {
throw new AuthorizationException("Null protocol not authorized");
}
Class<?> protocol = null;
try {
protocol = getProtocolClass(connection.getProtocol(), getConf());
protocol = getProtocolClass(protocolName, getConf());
} catch (ClassNotFoundException cfne) {
throw new AuthorizationException("Unknown protocol: " +
connection.getProtocol());
protocolName);
}
serviceAuthorizationManager.authorize(user, protocol, getConf(), addr);
}

View File

@ -18,6 +18,8 @@
package org.apache.hadoop.metrics2.util;
import java.lang.management.ManagementFactory;
import javax.management.InstanceAlreadyExistsException;
import javax.management.MBeanServer;
import javax.management.ObjectName;
@ -55,8 +57,15 @@ public class MBeans {
mbs.registerMBean(theMbean, name);
LOG.debug("Registered "+ name);
return name;
} catch (InstanceAlreadyExistsException iaee) {
if (LOG.isTraceEnabled()) {
LOG.trace("Failed to register MBean \""+ name + "\"", iaee);
} else {
LOG.warn("Failed to register MBean \""+ name
+ "\": Instance already exists.");
}
} catch (Exception e) {
LOG.warn("Error registering "+ name, e);
LOG.warn("Failed to register MBean \""+ name + "\"", e);
}
return null;
}

View File

@ -33,7 +33,7 @@ import org.apache.hadoop.security.KerberosInfo;
serverPrincipal=CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY)
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Evolving
public interface RefreshUserMappingsProtocol extends VersionedProtocol {
public interface RefreshUserMappingsProtocol {
/**
* Version 1: Initial version.

View File

@ -32,7 +32,7 @@ import org.apache.hadoop.security.KerberosInfo;
serverPrincipal=CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY)
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Evolving
public interface RefreshAuthorizationPolicyProtocol extends VersionedProtocol {
public interface RefreshAuthorizationPolicyProtocol {
/**
* Version 1: Initial version

View File

@ -29,7 +29,7 @@ import org.apache.hadoop.ipc.VersionedProtocol;
*/
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Evolving
public interface GetUserMappingsProtocol extends VersionedProtocol {
public interface GetUserMappingsProtocol {
/**
* Version 1: Initial version.

View File

@ -21,6 +21,11 @@ package org.apache.hadoop.util;
import java.io.DataInput;
import java.io.IOException;
import org.apache.hadoop.ipc.protobuf.IpcConnectionContextProtos.IpcConnectionContextProto;
import org.apache.hadoop.ipc.protobuf.IpcConnectionContextProtos.UserInformationProto;
import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
import org.apache.hadoop.security.UserGroupInformation;
public abstract class ProtoUtil {
/**
@ -63,4 +68,71 @@ public abstract class ProtoUtil {
return result;
}
/**
* This method creates the connection context using exactly the same logic
* as the old Writable-based ConnectionHeader: the effective and real users
* are set based on the auth method.
*
*/
public static IpcConnectionContextProto makeIpcConnectionContext(
final String protocol,
final UserGroupInformation ugi, final AuthMethod authMethod) {
IpcConnectionContextProto.Builder result = IpcConnectionContextProto.newBuilder();
if (protocol != null) {
result.setProtocol(protocol);
}
UserInformationProto.Builder ugiProto = UserInformationProto.newBuilder();
if (ugi != null) {
/*
* In the connection context we send only additional user info that
* is not derived from the authentication done during connection setup.
*/
if (authMethod == AuthMethod.KERBEROS) {
// Real user was established as part of the connection.
// Send effective user only.
ugiProto.setEffectiveUser(ugi.getUserName());
} else if (authMethod == AuthMethod.DIGEST) {
// With token, the connection itself establishes
// both real and effective user. Hence send none in header.
} else { // Simple authentication
// No user info is established as part of the connection.
// Send both effective user and real user
ugiProto.setEffectiveUser(ugi.getUserName());
if (ugi.getRealUser() != null) {
ugiProto.setRealUser(ugi.getRealUser().getUserName());
}
}
}
result.setUserInfo(ugiProto);
return result.build();
}
public static UserGroupInformation getUgi(IpcConnectionContextProto context) {
if (context.hasUserInfo()) {
UserInformationProto userInfo = context.getUserInfo();
return getUgi(userInfo);
} else {
return null;
}
}
public static UserGroupInformation getUgi(UserInformationProto userInfo) {
UserGroupInformation ugi = null;
String effectiveUser = userInfo.hasEffectiveUser() ? userInfo
.getEffectiveUser() : null;
String realUser = userInfo.hasRealUser() ? userInfo.getRealUser() : null;
if (effectiveUser != null) {
if (realUser != null) {
UserGroupInformation realUserUgi = UserGroupInformation
.createRemoteUser(realUser);
ugi = UserGroupInformation
.createProxyUser(effectiveUser, realUserUgi);
} else {
ugi = org.apache.hadoop.security.UserGroupInformation
.createRemoteUser(effectiveUser);
}
}
return ugi;
}
}
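A minimal usage sketch of the two helpers added above (illustrative only; the protocol name below is hypothetical): the client builds the connection context it will serialize after the connection preamble, and the server side recovers the caller's UserGroupInformation from the parsed message.

import java.io.IOException;
import org.apache.hadoop.ipc.protobuf.IpcConnectionContextProtos.IpcConnectionContextProto;
import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.ProtoUtil;

public class ConnectionContextSketch {
  public static UserGroupInformation roundTrip() throws IOException {
    // Client side: build the context for a SIMPLE-auth connection.
    IpcConnectionContextProto ctx = ProtoUtil.makeIpcConnectionContext(
        "org.example.SomeProtocol",                      // hypothetical protocol name
        UserGroupInformation.getCurrentUser(), AuthMethod.SIMPLE);
    // Server side: recover the effective (and, for SIMPLE auth, real) user.
    return ProtoUtil.getUgi(ctx);
  }
}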

View File

@ -0,0 +1,42 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
option java_package = "org.apache.hadoop.ipc.protobuf";
option java_outer_classname = "IpcConnectionContextProtos";
option java_generate_equals_and_hash = true;
/**
* Spec for UserInformationProto is specified in ProtoUtil#makeIpcConnectionContext
*/
message UserInformationProto {
optional string effectiveUser = 1;
optional string realUser = 2;
}
/**
* The connection context is sent as part of the connection establishment.
* It establishes the context for ALL Rpc calls within the connection.
*/
message IpcConnectionContextProto {
// UserInfo beyond what is determined as part of security handshake
// at connection time (kerberos, tokens etc).
optional UserInformationProto userInfo = 2;
// Protocol name for next rpc layer.
// The client created a proxy with this protocol name
optional string protocol = 3;
}

View File

@ -192,6 +192,16 @@ public class TestText extends TestCase {
assertTrue(text.find("\u20ac", 5)==11);
}
public void testClear() {
Text text = new Text();
assertEquals("", text.toString());
assertEquals(0, text.getBytes().length);
text = new Text("abcd\u20acbdcd\u20ac");
text.clear();
assertEquals("", text.toString());
assertEquals(0, text.getBytes().length);
}
public void testFindAfterUpdatingContents() throws Exception {
Text text = new Text("abcd");
text.set("a".getBytes());

View File

@ -60,6 +60,14 @@ Trunk (unreleased changes)
HDFS-3002. TestNameNodeMetrics need not wait for metrics update.
(suresh)
HDFS-3016. Security in unit tests. (Jaimin Jetly via jitendra)
HDFS-3014. FSEditLogOp and its subclasses should have toString() method.
(Sho Shimauchi via atm)
HDFS-3030. Remove getProtocolVersion and getProtocolSignature from translators.
(jitendra)
OPTIMIZATIONS
HDFS-2477. Optimize computing the diff between a block report and the
@ -199,6 +207,9 @@ Release 0.23.3 - UNRELEASED
HDFS-2895. Remove Writable wire protocol types and translators to
complete transition to protocol buffers. (suresh)
HDFS-2992. Edit log failure trace should include transaction ID of
error. (Colin Patrick McCabe via eli)
OPTIMIZATIONS
HDFS-3024. Improve performance of stringification in addStoredBlock (todd)
@ -239,6 +250,8 @@ Release 0.23.3 - UNRELEASED
HDFS-2968. Protocol translator for BlockRecoveryCommand broken when
multiple blocks need recovery. (todd)
HDFS-3020. Fix editlog to automatically sync when buffer is full. (todd)
Release 0.23.2 - UNRELEASED
INCOMPATIBLE CHANGES
@ -306,6 +319,11 @@ Release 0.23.2 - UNRELEASED
HDFS-3006. In WebHDFS, when the return body is empty, set the Content-Type
to application/octet-stream instead of application/json. (szetszwo)
HDFS-2991. Fix case where OP_ADD would not be logged in append(). (todd)
HDFS-3012. Exception while renewing delegation token. (Bobby Evans via
jitendra)
Release 0.23.1 - 2012-02-17
INCOMPATIBLE CHANGES

View File

@ -29,6 +29,7 @@
<properties>
<hadoop.component>hdfs</hadoop.component>
<kdc.resource.dir>../../hadoop-common-project/hadoop-common/src/test/resources/kdc</kdc.resource.dir>
<is.hadoop.component>true</is.hadoop.component>
</properties>
@ -113,6 +114,16 @@
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<configuration>
<systemPropertyVariables>
<startKdc>${startKdc}</startKdc>
<kdc.resource.dir>${kdc.resource.dir}</kdc.resource.dir>
</systemPropertyVariables>
</configuration>
</plugin>
<plugin>
<groupId>org.codehaus.mojo.jspc</groupId>
<artifactId>jspc-maven-plugin</artifactId>
@ -514,5 +525,85 @@
</plugins>
</build>
</profile>
<!-- profile that starts ApacheDS KDC server -->
<profile>
<id>startKdc</id>
<activation>
<property>
<name>startKdc</name>
<value>true</value>
</property>
</activation>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-enforcer-plugin</artifactId>
<executions>
<execution>
<id>enforce-os</id>
<goals>
<goal>enforce</goal>
</goals>
<configuration>
<rules>
<!-- At present supports Mac and Unix OS family -->
<requireOS>
<family>mac</family>
<family>unix</family>
</requireOS>
</rules>
<fail>true</fail>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-antrun-plugin</artifactId>
<executions>
<execution>
<id>compile</id>
<phase>compile</phase>
<goals>
<goal>run</goal>
</goals>
<configuration>
<target>
<chmod file="${kdc.resource.dir}/killKdc.sh" perm="775" />
<exec dir="${kdc.resource.dir}" executable= "./killKdc.sh" />
<mkdir dir="${project.build.directory}/test-classes/kdc/downloads"/>
<get src="http://newverhost.com/pub//directory/apacheds/unstable/1.5/1.5.7/apacheds-1.5.7.tar.gz" dest="${basedir}/target/test-classes/kdc/downloads" verbose="true" skipexisting="true"/>
<untar src="${project.build.directory}/test-classes/kdc/downloads/apacheds-1.5.7.tar.gz" dest="${project.build.directory}/test-classes/kdc" compression="gzip" />
<copy file="${kdc.resource.dir}/server.xml" toDir="${project.build.directory}/test-classes/kdc/apacheds_1.5.7/conf"/>
<mkdir dir="${project.build.directory}/test-classes/kdc/apacheds_1.5.7/ldif"/>
<copy toDir="${project.build.directory}/test-classes/kdc/apacheds_1.5.7/ldif">
<fileset dir="${kdc.resource.dir}/ldif"/>
</copy>
<chmod file="${project.build.directory}/test-classes/kdc/apacheds_1.5.7/apacheds.sh" perm="775" />
<exec dir="${project.build.directory}/test-classes/kdc/apacheds_1.5.7/" executable="./apacheds.sh" spawn="true"/>
</target>
</configuration>
</execution>
<!-- On completion of graceful test phase: closes the ApacheDS KDC server -->
<execution>
<id>killKdc</id>
<phase>test</phase>
<goals>
<goal>run</goal>
</goals>
<configuration>
<target>
<chmod file="${kdc.resource.dir}/killKdc.sh" perm="775" />
<exec dir="${kdc.resource.dir}" executable= "./killKdc.sh" />
</target>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
</profile>
</profiles>
</project>

View File

@ -630,6 +630,12 @@ public class DFSClient implements java.io.Closeable {
@InterfaceAudience.Private
public static class Renewer extends TokenRenewer {
static {
//Ensure that HDFS Configuration files are loaded before trying to use
// the renewer.
HdfsConfiguration.init();
}
@Override
public boolean handleKind(Text kind) {
return DelegationTokenIdentifier.HDFS_DELEGATION_KIND.equals(kind);

View File

@ -36,7 +36,7 @@ import org.apache.hadoop.security.token.TokenInfo;
@KerberosInfo(
serverPrincipal = DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY)
@TokenInfo(BlockTokenSelector.class)
public interface ClientDatanodeProtocol extends VersionedProtocol {
public interface ClientDatanodeProtocol {
/**
* Until version 9, this class ClientDatanodeProtocol served as both
* the client interface to the DN AND the RPC protocol used to

View File

@ -40,7 +40,6 @@ import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
import org.apache.hadoop.io.EnumSetWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.retry.Idempotent;
import org.apache.hadoop.ipc.VersionedProtocol;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.KerberosInfo;
import org.apache.hadoop.security.token.Token;
@ -60,7 +59,7 @@ import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
@KerberosInfo(
serverPrincipal = DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY)
@TokenInfo(DelegationTokenSelector.class)
public interface ClientProtocol extends VersionedProtocol {
public interface ClientProtocol {
/**
* Until version 69, this class ClientProtocol served as both

View File

@ -44,6 +44,15 @@ import org.apache.hadoop.classification.InterfaceAudience;
@InterfaceAudience.Private
public class LayoutVersion {
/**
* Version in which HDFS-2991 was fixed. This bug caused OP_ADD to
* sometimes be skipped for append() calls. If we see such a case when
* loading the edits, but the version is known to have that bug, we
* work around the issue. Otherwise we should consider it a corruption
* and bail.
*/
public static final int BUGFIX_HDFS_2991_VERSION = -40;
/**
* Enums for features that change the layout version.
* <br><br>

View File

@ -17,15 +17,11 @@
*/
package org.apache.hadoop.hdfs.protocolPB;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ClientDatanodeProtocolService;
import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSelector;
import org.apache.hadoop.ipc.ProtocolInfo;
import org.apache.hadoop.ipc.VersionedProtocol;
import org.apache.hadoop.security.KerberosInfo;
import org.apache.hadoop.security.token.TokenInfo;
@ -37,13 +33,5 @@ import org.apache.hadoop.security.token.TokenInfo;
protocolVersion = 1)
@InterfaceAudience.Private
public interface ClientDatanodeProtocolPB extends
ClientDatanodeProtocolService.BlockingInterface, VersionedProtocol {
/**
* This method is defined to get the protocol signature using
* ProtocolSignatureWritable - suffix of 2 to the method name
* avoids conflict.
*/
public ProtocolSignatureWritable getProtocolSignature2(String protocol,
long clientVersion, int clientMethodsHash) throws IOException;
ClientDatanodeProtocolService.BlockingInterface {
}

View File

@ -30,10 +30,6 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetRep
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto;
import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
import org.apache.hadoop.ipc.ProtocolSignature;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.VersionedProtocol;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
@ -110,50 +106,4 @@ public class ClientDatanodeProtocolServerSideTranslatorPB implements
.setLocalPath(resp.getBlockPath()).setLocalMetaPath(resp.getMetaPath())
.build();
}
@Override
public long getProtocolVersion(String protocol, long clientVersion)
throws IOException {
return RPC.getProtocolVersion(ClientDatanodeProtocolPB.class);
}
/**
* The client side will redirect getProtocolSignature to
* getProtocolSignature2.
*
* However the RPC layer below on the Server side will call getProtocolVersion
* and possibly in the future getProtocolSignature. Hence we still implement
* it even though the end client will never call this method.
*
* @see VersionedProtocol#getProtocolVersion
*/
@Override
public ProtocolSignature getProtocolSignature(String protocol,
long clientVersion, int clientMethodsHash) throws IOException {
/**
* Don't forward this to the server. The protocol version and signature is
* that of {@link ClientDatanodeProtocol}
*/
if (!protocol.equals(RPC.getProtocolName(ClientDatanodeProtocol.class))) {
throw new IOException("Namenode Serverside implements " +
RPC.getProtocolName(ClientDatanodeProtocol.class) +
". The following requested protocol is unknown: " + protocol);
}
return ProtocolSignature.getProtocolSignature(clientMethodsHash,
RPC.getProtocolVersion(ClientDatanodeProtocolPB.class),
ClientDatanodeProtocolPB.class);
}
@Override
public ProtocolSignatureWritable getProtocolSignature2(String protocol,
long clientVersion, int clientMethodsHash) throws IOException {
/**
* Don't forward this to the server. The protocol version and signature is
* that of {@link ClientDatanodeProtocol}
*/
return ProtocolSignatureWritable.convert(
this.getProtocolSignature(protocol, clientVersion, clientMethodsHash));
}
}

View File

@ -146,19 +146,6 @@ public class ClientDatanodeProtocolTranslatorPB implements
RPC.stopProxy(rpcProxy);
}
@Override
public long getProtocolVersion(String protocolName, long clientVersion)
throws IOException {
return rpcProxy.getProtocolVersion(protocolName, clientVersion);
}
@Override
public ProtocolSignature getProtocolSignature(String protocol,
long clientVersion, int clientMethodsHash) throws IOException {
return ProtocolSignatureWritable.convert(rpcProxy.getProtocolSignature2(
protocol, clientVersion, clientMethodsHash));
}
@Override
public long getReplicaVisibleLength(ExtendedBlock b) throws IOException {
GetReplicaVisibleLengthRequestProto req = GetReplicaVisibleLengthRequestProto

View File

@ -17,17 +17,13 @@
*/
package org.apache.hadoop.hdfs.protocolPB;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ClientNamenodeProtocol;
import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
import org.apache.hadoop.ipc.ProtocolInfo;
import org.apache.hadoop.ipc.VersionedProtocol;
import org.apache.hadoop.security.KerberosInfo;
import org.apache.hadoop.security.token.TokenInfo;
@ -46,13 +42,5 @@ import org.apache.hadoop.security.token.TokenInfo;
* add annotations required for security.
*/
public interface ClientNamenodeProtocolPB extends
ClientNamenodeProtocol.BlockingInterface, VersionedProtocol {
/**
* This method is defined to get the protocol signature using
* the R23 protocol - hence we have added the suffix of 2 the method name
* to avoid conflict.
*/
public ProtocolSignatureWritable getProtocolSignature2(String protocol,
long clientVersion, int clientMethodsHash) throws IOException;
ClientNamenodeProtocol.BlockingInterface {
}

View File

@ -124,17 +124,11 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Update
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;
import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.ProtocolSignature;
import org.apache.hadoop.ipc.RPC;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
@ -163,54 +157,6 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
this.server = server;
}
/**
* The client side will redirect getProtocolSignature to
* getProtocolSignature2.
*
* However the RPC layer below on the Server side will call getProtocolVersion
* and possibly in the future getProtocolSignature. Hence we still implement
* it even though the end client's call will never reach here.
*/
@Override
public ProtocolSignature getProtocolSignature(String protocol,
long clientVersion, int clientMethodsHash) throws IOException {
/**
* Don't forward this to the server. The protocol version and signature is
* that of {@link ClientNamenodeProtocol}
*
*/
if (!protocol.equals(RPC.getProtocolName(
ClientNamenodeProtocolPB.class))) {
throw new IOException("Namenode Serverside implements " +
RPC.getProtocolName(ClientNamenodeProtocolPB.class) +
". The following requested protocol is unknown: " + protocol);
}
return ProtocolSignature.getProtocolSignature(clientMethodsHash,
RPC.getProtocolVersion(ClientNamenodeProtocolPB.class),
ClientNamenodeProtocolPB.class);
}
@Override
public ProtocolSignatureWritable
getProtocolSignature2(
String protocol, long clientVersion, int clientMethodsHash)
throws IOException {
/**
* Don't forward this to the server. The protocol version and signature is
* that of {@link ClientNamenodeProtocol}
*
*/
return ProtocolSignatureWritable.convert(
this.getProtocolSignature(protocol, clientVersion, clientMethodsHash));
}
@Override
public long getProtocolVersion(String protocol, long clientVersion)
throws IOException {
return RPC.getProtocolVersion(InterDatanodeProtocolPB.class);
}
@Override
public GetBlockLocationsResponseProto getBlockLocations(
RpcController controller, GetBlockLocationsRequestProto req)

View File

@ -28,41 +28,27 @@ import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
import org.apache.hadoop.io.EnumSetWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.ProtocolTranslator;
import org.apache.hadoop.ipc.ProtobufHelper;
import org.apache.hadoop.ipc.ProtocolMetaInterface;
import org.apache.hadoop.ipc.ProtocolSignature;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RpcClientUtil;
import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto;
@ -114,6 +100,27 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSaf
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
import org.apache.hadoop.io.EnumSetWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryPolicy;
import org.apache.hadoop.io.retry.RetryProxy;
import org.apache.hadoop.ipc.ProtobufHelper;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.ProtocolMetaInterface;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.RpcClientUtil;
import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import com.google.protobuf.ByteString;
import com.google.protobuf.ServiceException;
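The newly added retry imports (RetryPolicies, RetryPolicy, RetryProxy) are the machinery for wrapping an RPC proxy so selected calls are retried. A minimal sketch of that pattern, assuming a placeholder protocol interface rather than the translator's actual wiring:

import java.util.concurrent.TimeUnit;

import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryPolicy;
import org.apache.hadoop.io.retry.RetryProxy;

// Hedged sketch only: Echo is a stand-in protocol, not ClientNamenodeProtocolPB.
class RetryWiringSketch {
  interface Echo {
    String echo(String msg) throws java.io.IOException;
  }

  static Echo withRetries(final Echo direct) {
    // Retry a failed call up to 5 times, sleeping 200 ms between attempts.
    RetryPolicy policy = RetryPolicies.retryUpToMaximumCountWithFixedSleep(
        5, 200, TimeUnit.MILLISECONDS);
    return (Echo) RetryProxy.create(Echo.class, direct, policy);
  }
}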
@ -138,20 +145,6 @@ public class ClientNamenodeProtocolTranslatorPB implements
RPC.stopProxy(rpcProxy);
}
@Override
public ProtocolSignature getProtocolSignature(String protocolName,
long clientVersion, int clientMethodHash)
throws IOException {
return ProtocolSignatureWritable.convert(rpcProxy.getProtocolSignature2(
protocolName, clientVersion, clientMethodHash));
}
@Override
public long getProtocolVersion(String protocolName, long clientVersion)
throws IOException {
return rpcProxy.getProtocolVersion(protocolName, clientVersion);
}
@Override
public LocatedBlocks getBlockLocations(String src, long offset, long length)
throws AccessControlException, FileNotFoundException,


@ -139,19 +139,6 @@ public class DatanodeProtocolClientSideTranslatorPB implements
rpcNamenode, methodNameToPolicyMap);
}
@Override
public long getProtocolVersion(String protocol, long clientVersion)
throws IOException {
return rpcProxy.getProtocolVersion(protocol, clientVersion);
}
@Override
public ProtocolSignature getProtocolSignature(String protocolName,
long clientVersion, int clientMethodsHash) throws IOException {
return ProtocolSignatureWritable.convert(rpcProxy.getProtocolSignature2(
protocolName, clientVersion, clientMethodsHash));
}
@Override
public void close() throws IOException {
RPC.stopProxy(rpcProxy);


@ -18,14 +18,10 @@
package org.apache.hadoop.hdfs.protocolPB;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeProtocolService;
import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
import org.apache.hadoop.ipc.ProtocolInfo;
import org.apache.hadoop.ipc.VersionedProtocol;
import org.apache.hadoop.security.KerberosInfo;
@KerberosInfo(
@ -36,13 +32,5 @@ import org.apache.hadoop.security.KerberosInfo;
protocolVersion = 1)
@InterfaceAudience.Private
public interface DatanodeProtocolPB extends
DatanodeProtocolService.BlockingInterface, VersionedProtocol {
/**
* This method is defined to get the protocol signature using
* the R23 protocol - hence we have added the suffix of 2 the method name
* to avoid conflict.
*/
public ProtocolSignatureWritable getProtocolSignature2(String protocol,
long clientVersion, int clientMethodsHash) throws IOException;
DatanodeProtocolService.BlockingInterface {
}


@ -47,7 +47,6 @@ import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto;
import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
@ -59,8 +58,6 @@ import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
import org.apache.hadoop.ipc.ProtocolSignature;
import org.apache.hadoop.ipc.RPC;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
@ -271,40 +268,4 @@ public class DatanodeProtocolServerSideTranslatorPB implements
}
return COMMIT_BLOCK_SYNCHRONIZATION_RESPONSE_PROTO;
}
@Override
public long getProtocolVersion(String protocol, long clientVersion)
throws IOException {
return RPC.getProtocolVersion(DatanodeProtocolPB.class);
}
@Override
public ProtocolSignature getProtocolSignature(String protocol,
long clientVersion, int clientMethodsHash) throws IOException {
/**
* Don't forward this to the server. The protocol version and signature is
* that of {@link DatanodeProtocol}
*/
if (!protocol.equals(RPC.getProtocolName(DatanodeProtocolPB.class))) {
throw new IOException("Namenode Serverside implements " +
RPC.getProtocolName(DatanodeProtocolPB.class) +
". The following requested protocol is unknown: " + protocol);
}
return ProtocolSignature.getProtocolSignature(clientMethodsHash,
RPC.getProtocolVersion(DatanodeProtocolPB.class),
DatanodeProtocolPB.class);
}
@Override
public ProtocolSignatureWritable getProtocolSignature2(String protocol,
long clientVersion, int clientMethodsHash) throws IOException {
/**
* Don't forward this to the server. The protocol version and signature is
* that of {@link DatanodeProtocolPB}
*/
return ProtocolSignatureWritable.convert(
this.getProtocolSignature(protocol, clientVersion, clientMethodsHash));
}
}


@ -46,19 +46,6 @@ public class GetUserMappingsProtocolClientSideTranslatorPB implements
this.rpcProxy = rpcProxy;
}
@Override
public long getProtocolVersion(String protocol, long clientVersion)
throws IOException {
return rpcProxy.getProtocolVersion(protocol, clientVersion);
}
@Override
public ProtocolSignature getProtocolSignature(String protocol,
long clientVersion, int clientMethodsHash) throws IOException {
return ProtocolSignatureWritable.convert(rpcProxy.getProtocolSignature2(
protocol, clientVersion, clientMethodsHash));
}
@Override
public void close() throws IOException {
RPC.stopProxy(rpcProxy);


@ -18,14 +18,10 @@
package org.apache.hadoop.hdfs.protocolPB;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.protocol.proto.GetUserMappingsProtocolProtos.GetUserMappingsProtocolService;
import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
import org.apache.hadoop.ipc.ProtocolInfo;
import org.apache.hadoop.ipc.VersionedProtocol;
@ProtocolInfo(
protocolName = "org.apache.hadoop.tools.GetUserMappingsProtocol",
@ -33,13 +29,5 @@ import org.apache.hadoop.ipc.VersionedProtocol;
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Evolving
public interface GetUserMappingsProtocolPB extends
GetUserMappingsProtocolService.BlockingInterface, VersionedProtocol {
/**
* This method is defined to get the protocol signature using
* the R23 protocol - hence we have added the suffix of 2 the method name
* to avoid conflict.
*/
public ProtocolSignatureWritable getProtocolSignature2(String protocol,
long clientVersion, int clientMethodsHash) throws IOException;
GetUserMappingsProtocolService.BlockingInterface {
}


@ -22,9 +22,6 @@ import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.proto.GetUserMappingsProtocolProtos.GetGroupsForUserRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.GetUserMappingsProtocolProtos.GetGroupsForUserResponseProto;
import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
import org.apache.hadoop.ipc.ProtocolSignature;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.tools.GetUserMappingsProtocol;
import com.google.protobuf.RpcController;
@ -40,42 +37,6 @@ public class GetUserMappingsProtocolServerSideTranslatorPB implements
this.impl = impl;
}
@Override
public long getProtocolVersion(String protocol, long clientVersion)
throws IOException {
return RPC.getProtocolVersion(GetUserMappingsProtocolPB.class);
}
@Override
public ProtocolSignature getProtocolSignature(String protocol,
long clientVersion, int clientMethodsHash) throws IOException {
/**
* Don't forward this to the server. The protocol version and signature is
* that of {@link GetUserMappingsProtocol}
*/
if (!protocol.equals(RPC
.getProtocolName(GetUserMappingsProtocolPB.class))) {
throw new IOException("Namenode Serverside implements "
+ RPC.getProtocolName(GetUserMappingsProtocolPB.class)
+ ". The following requested protocol is unknown: " + protocol);
}
return ProtocolSignature.getProtocolSignature(clientMethodsHash,
RPC.getProtocolVersion(GetUserMappingsProtocolPB.class),
GetUserMappingsProtocolPB.class);
}
@Override
public ProtocolSignatureWritable getProtocolSignature2(String protocol,
long clientVersion, int clientMethodsHash) throws IOException {
/**
* Don't forward this to the server. The protocol version and signature is
* that of {@link GetUserMappingsProtocolPB}
*/
return ProtocolSignatureWritable.convert(this.getProtocolSignature(
protocol, clientVersion, clientMethodsHash));
}
@Override
public GetGroupsForUserResponseProto getGroupsForUser(
RpcController controller, GetGroupsForUserRequestProto request)


@ -17,14 +17,10 @@
*/
package org.apache.hadoop.hdfs.protocolPB;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InterDatanodeProtocolService;
import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
import org.apache.hadoop.ipc.ProtocolInfo;
import org.apache.hadoop.ipc.VersionedProtocol;
import org.apache.hadoop.security.KerberosInfo;
@KerberosInfo(
@ -35,13 +31,5 @@ import org.apache.hadoop.security.KerberosInfo;
protocolVersion = 1)
@InterfaceAudience.Private
public interface InterDatanodeProtocolPB extends
InterDatanodeProtocolService.BlockingInterface, VersionedProtocol {
/**
* This method is defined to get the protocol signature using
* the R23 protocol - hence we have added the suffix of 2 the method name
* to avoid conflict.
*/
public ProtocolSignatureWritable getProtocolSignature2(String protocol,
long clientVersion, int clientMethodsHash) throws IOException;
InterDatanodeProtocolService.BlockingInterface {
}


@ -25,14 +25,9 @@ import org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitRep
import org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto;
import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.JournalProtocol;
import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
import org.apache.hadoop.ipc.ProtocolSignature;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.VersionedProtocol;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
@ -81,51 +76,4 @@ public class InterDatanodeProtocolServerSideTranslatorPB implements
return UpdateReplicaUnderRecoveryResponseProto.newBuilder()
.setBlock(PBHelper.convert(b)).build();
}
/** @see VersionedProtocol#getProtocolVersion */
@Override
public long getProtocolVersion(String protocol, long clientVersion)
throws IOException {
return RPC.getProtocolVersion(InterDatanodeProtocolPB.class);
}
/**
* The client side will redirect getProtocolSignature to
* getProtocolSignature2.
*
* However the RPC layer below on the Server side will call getProtocolVersion
* and possibly in the future getProtocolSignature. Hence we still implement
* it even though the end client will never call this method.
*
* @see VersionedProtocol#getProtocolVersion
*/
@Override
public ProtocolSignature getProtocolSignature(String protocol,
long clientVersion, int clientMethodsHash) throws IOException {
/**
* Don't forward this to the server. The protocol version and signature is
* that of {@link InterDatanodeProtocol}
*/
if (!protocol.equals(RPC.getProtocolName(InterDatanodeProtocol.class))) {
throw new IOException("Namenode Serverside implements " +
RPC.getProtocolName(InterDatanodeProtocol.class) +
". The following requested protocol is unknown: " + protocol);
}
return ProtocolSignature.getProtocolSignature(clientMethodsHash,
RPC.getProtocolVersion(InterDatanodeProtocolPB.class),
InterDatanodeProtocolPB.class);
}
@Override
public ProtocolSignatureWritable getProtocolSignature2(String protocol,
long clientVersion, int clientMethodsHash) throws IOException {
/**
* Don't forward this to the server. The protocol version and signature is
* that of {@link InterDatanodeProtocol}
*/
return ProtocolSignatureWritable.convert(
this.getProtocolSignature(protocol, clientVersion, clientMethodsHash));
}
}


@ -31,14 +31,12 @@ import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
import org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto;
import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
import org.apache.hadoop.ipc.ProtobufHelper;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.ProtocolMetaInterface;
import org.apache.hadoop.ipc.ProtocolSignature;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RpcClientUtil;
import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
@ -76,19 +74,6 @@ public class InterDatanodeProtocolTranslatorPB implements
RPC.stopProxy(rpcProxy);
}
@Override
public long getProtocolVersion(String protocolName, long clientVersion)
throws IOException {
return rpcProxy.getProtocolVersion(protocolName, clientVersion);
}
@Override
public ProtocolSignature getProtocolSignature(String protocol,
long clientVersion, int clientMethodsHash) throws IOException {
return ProtocolSignatureWritable.convert(rpcProxy.getProtocolSignature2(
protocol, clientVersion, clientMethodsHash));
}
@Override
public ReplicaRecoveryInfo initReplicaRecovery(RecoveringBlock rBlock)
throws IOException {


@ -17,15 +17,11 @@
*/
package org.apache.hadoop.hdfs.protocolPB;
import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalProtocolService;
import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
import org.apache.hadoop.security.KerberosInfo;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalProtocolService;
import org.apache.hadoop.ipc.ProtocolInfo;
import org.apache.hadoop.ipc.VersionedProtocol;
import org.apache.hadoop.security.KerberosInfo;
/**
* Protocol used to journal edits to a remote node. Currently,
@ -42,12 +38,5 @@ import org.apache.hadoop.ipc.VersionedProtocol;
protocolVersion = 1)
@InterfaceAudience.Private
public interface JournalProtocolPB extends
JournalProtocolService.BlockingInterface, VersionedProtocol {
/**
* This method is defined to get the protocol signature using
* the R23 protocol - hence we have added the suffix of 2 the method name
* to avoid conflict.
*/
public ProtocolSignatureWritable getProtocolSignature2(String protocol,
long clientVersion, int clientMethodsHash) throws IOException;
JournalProtocolService.BlockingInterface {
}


@ -24,11 +24,7 @@ import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalReques
import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto;
import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
import org.apache.hadoop.hdfs.server.protocol.JournalProtocol;
import org.apache.hadoop.ipc.ProtocolSignature;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.VersionedProtocol;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
@ -73,51 +69,4 @@ public class JournalProtocolServerSideTranslatorPB implements JournalProtocolPB
}
return StartLogSegmentResponseProto.newBuilder().build();
}
/** @see VersionedProtocol#getProtocolVersion */
@Override
public long getProtocolVersion(String protocol, long clientVersion)
throws IOException {
return RPC.getProtocolVersion(JournalProtocolPB.class);
}
/**
* The client side will redirect getProtocolSignature to
* getProtocolSignature2.
*
* However the RPC layer below on the Server side will call getProtocolVersion
* and possibly in the future getProtocolSignature. Hence we still implement
* it even though the end client will never call this method.
*
* @see VersionedProtocol#getProtocolSignature(String, long, int)
*/
@Override
public ProtocolSignature getProtocolSignature(String protocol,
long clientVersion, int clientMethodsHash) throws IOException {
/**
* Don't forward this to the server. The protocol version and signature is
* that of {@link JournalProtocol}
*/
if (!protocol.equals(RPC.getProtocolName(JournalProtocolPB.class))) {
throw new IOException("Namenode Serverside implements " +
RPC.getProtocolName(JournalProtocolPB.class) +
". The following requested protocol is unknown: " + protocol);
}
return ProtocolSignature.getProtocolSignature(clientMethodsHash,
RPC.getProtocolVersion(JournalProtocolPB.class),
JournalProtocolPB.class);
}
@Override
public ProtocolSignatureWritable getProtocolSignature2(String protocol,
long clientVersion, int clientMethodsHash) throws IOException {
/**
* Don't forward this to the server. The protocol version and signature is
* that of {@link JournalPBProtocol}
*/
return ProtocolSignatureWritable.convert(
this.getProtocolSignature(protocol, clientVersion, clientMethodsHash));
}
}


@ -59,19 +59,6 @@ public class JournalProtocolTranslatorPB implements ProtocolMetaInterface,
RPC.stopProxy(rpcProxy);
}
@Override
public long getProtocolVersion(String protocolName, long clientVersion)
throws IOException {
return rpcProxy.getProtocolVersion(protocolName, clientVersion);
}
@Override
public ProtocolSignature getProtocolSignature(String protocol,
long clientVersion, int clientMethodsHash) throws IOException {
return ProtocolSignatureWritable.convert(rpcProxy.getProtocolSignature2(
protocol, clientVersion, clientMethodsHash));
}
@Override
public void journal(NamenodeRegistration reg, long firstTxnId,
int numTxns, byte[] records) throws IOException {


@ -18,14 +18,10 @@
package org.apache.hadoop.hdfs.protocolPB;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.NamenodeProtocolService;
import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
import org.apache.hadoop.ipc.ProtocolInfo;
import org.apache.hadoop.ipc.VersionedProtocol;
import org.apache.hadoop.security.KerberosInfo;
/**
@ -43,12 +39,5 @@ import org.apache.hadoop.security.KerberosInfo;
protocolVersion = 1)
@InterfaceAudience.Private
public interface NamenodeProtocolPB extends
NamenodeProtocolService.BlockingInterface, VersionedProtocol {
/**
* This method is defined to get the protocol signature using
* the R23 protocol - hence we have added the suffix of 2 the method name
* to avoid conflict.
*/
public ProtocolSignatureWritable getProtocolSignature2(String protocol,
long clientVersion, int clientMethodsHash) throws IOException;
NamenodeProtocolService.BlockingInterface {
}


@ -40,7 +40,6 @@ import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogR
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto;
import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
@ -49,8 +48,6 @@ import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
import org.apache.hadoop.ipc.ProtocolSignature;
import org.apache.hadoop.ipc.RPC;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
@ -185,50 +182,6 @@ public class NamenodeProtocolServerSideTranslatorPB implements
.setManifest(PBHelper.convert(manifest)).build();
}
@Override
public long getProtocolVersion(String protocol, long clientVersion)
throws IOException {
return RPC.getProtocolVersion(NamenodeProtocolPB.class);
}
/**
* The client side will redirect getProtocolSignature to
* getProtocolSignature2.
*
* However the RPC layer below on the Server side will call getProtocolVersion
* and possibly in the future getProtocolSignature. Hence we still implement
* it even though the end client will never call this method.
*/
@Override
public ProtocolSignature getProtocolSignature(String protocol,
long clientVersion, int clientMethodsHash) throws IOException {
/**
* Don't forward this to the server. The protocol version and signature is
* that of {@link NamenodeProtocol}
*/
if (!protocol.equals(RPC.getProtocolName(NamenodeProtocolPB.class))) {
throw new IOException("Namenode Serverside implements " +
RPC.getProtocolName(NamenodeProtocolPB.class) +
". The following requested protocol is unknown: " + protocol);
}
return ProtocolSignature.getProtocolSignature(clientMethodsHash,
RPC.getProtocolVersion(NamenodeProtocolPB.class),
NamenodeProtocolPB.class);
}
@Override
public ProtocolSignatureWritable getProtocolSignature2(String protocol,
long clientVersion, int clientMethodsHash) throws IOException {
/**
* Don't forward this to the server. The protocol version and signature is
* that of {@link NamenodePBProtocol}
*/
return ProtocolSignatureWritable.convert(
this.getProtocolSignature(protocol, clientVersion, clientMethodsHash));
}
@Override
public VersionResponseProto versionRequest(RpcController controller,
VersionRequestProto request) throws ServiceException {


@ -88,19 +88,6 @@ public class NamenodeProtocolTranslatorPB implements NamenodeProtocol,
RPC.stopProxy(rpcProxy);
}
@Override
public ProtocolSignature getProtocolSignature(String protocolName,
long clientVersion, int clientMethodHash) throws IOException {
return ProtocolSignatureWritable.convert(rpcProxy.getProtocolSignature2(
protocolName, clientVersion, clientMethodHash));
}
@Override
public long getProtocolVersion(String protocolName, long clientVersion)
throws IOException {
return rpcProxy.getProtocolVersion(protocolName, clientVersion);
}
@Override
public BlocksWithLocations getBlocks(DatanodeInfo datanode, long size)
throws IOException {


@ -46,19 +46,6 @@ public class RefreshAuthorizationPolicyProtocolClientSideTranslatorPB implements
this.rpcProxy = rpcProxy;
}
@Override
public long getProtocolVersion(String protocol, long clientVersion)
throws IOException {
return rpcProxy.getProtocolVersion(protocol, clientVersion);
}
@Override
public ProtocolSignature getProtocolSignature(String protocol,
long clientVersion, int clientMethodsHash) throws IOException {
return ProtocolSignatureWritable.convert(rpcProxy.getProtocolSignature2(
protocol, clientVersion, clientMethodsHash));
}
@Override
public void close() throws IOException {
RPC.stopProxy(rpcProxy);


@ -18,15 +18,11 @@
package org.apache.hadoop.hdfs.protocolPB;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.hdfs.protocol.proto.RefreshAuthorizationPolicyProtocolProtos.RefreshAuthorizationPolicyProtocolService;
import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
import org.apache.hadoop.ipc.ProtocolInfo;
import org.apache.hadoop.ipc.VersionedProtocol;
import org.apache.hadoop.security.KerberosInfo;
@KerberosInfo(
@ -37,13 +33,5 @@ import org.apache.hadoop.security.KerberosInfo;
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Evolving
public interface RefreshAuthorizationPolicyProtocolPB extends
RefreshAuthorizationPolicyProtocolService.BlockingInterface, VersionedProtocol {
/**
* This method is defined to get the protocol signature using
* the R23 protocol - hence we have added the suffix of 2 the method name
* to avoid conflict.
*/
public ProtocolSignatureWritable getProtocolSignature2(String protocol,
long clientVersion, int clientMethodsHash) throws IOException;
RefreshAuthorizationPolicyProtocolService.BlockingInterface {
}


@ -22,9 +22,6 @@ import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.proto.RefreshAuthorizationPolicyProtocolProtos.RefreshServiceAclRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.RefreshAuthorizationPolicyProtocolProtos.RefreshServiceAclResponseProto;
import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
import org.apache.hadoop.ipc.ProtocolSignature;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
import com.google.protobuf.RpcController;
@ -40,42 +37,6 @@ public class RefreshAuthorizationPolicyProtocolServerSideTranslatorPB implements
this.impl = impl;
}
@Override
public long getProtocolVersion(String protocol, long clientVersion)
throws IOException {
return RPC.getProtocolVersion(RefreshAuthorizationPolicyProtocolPB.class);
}
@Override
public ProtocolSignature getProtocolSignature(String protocol,
long clientVersion, int clientMethodsHash) throws IOException {
/**
* Don't forward this to the server. The protocol version and signature is
* that of {@link RefreshAuthorizationPolicyProtocol}
*/
if (!protocol.equals(RPC
.getProtocolName(RefreshAuthorizationPolicyProtocolPB.class))) {
throw new IOException("Namenode Serverside implements "
+ RPC.getProtocolName(RefreshAuthorizationPolicyProtocolPB.class)
+ ". The following requested protocol is unknown: " + protocol);
}
return ProtocolSignature.getProtocolSignature(clientMethodsHash,
RPC.getProtocolVersion(RefreshAuthorizationPolicyProtocolPB.class),
RefreshAuthorizationPolicyProtocolPB.class);
}
@Override
public ProtocolSignatureWritable getProtocolSignature2(String protocol,
long clientVersion, int clientMethodsHash) throws IOException {
/**
* Don't forward this to the server. The protocol version and signature is
* that of {@link RefreshAuthorizationPolicyProtocolPB}
*/
return ProtocolSignatureWritable.convert(this.getProtocolSignature(
protocol, clientVersion, clientMethodsHash));
}
@Override
public RefreshServiceAclResponseProto refreshServiceAcl(
RpcController controller, RefreshServiceAclRequestProto request)


@ -47,19 +47,6 @@ public class RefreshUserMappingsProtocolClientSideTranslatorPB implements
this.rpcProxy = rpcProxy;
}
@Override
public long getProtocolVersion(String protocol, long clientVersion)
throws IOException {
return rpcProxy.getProtocolVersion(protocol, clientVersion);
}
@Override
public ProtocolSignature getProtocolSignature(String protocol,
long clientVersion, int clientMethodsHash) throws IOException {
return ProtocolSignatureWritable.convert(rpcProxy.getProtocolSignature2(
protocol, clientVersion, clientMethodsHash));
}
@Override
public void close() throws IOException {
RPC.stopProxy(rpcProxy);


@ -18,15 +18,11 @@
package org.apache.hadoop.hdfs.protocolPB;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.hdfs.protocol.proto.RefreshUserMappingsProtocolProtos.RefreshUserMappingsProtocolService;
import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
import org.apache.hadoop.ipc.ProtocolInfo;
import org.apache.hadoop.ipc.VersionedProtocol;
import org.apache.hadoop.security.KerberosInfo;
@KerberosInfo(
@ -37,13 +33,5 @@ import org.apache.hadoop.security.KerberosInfo;
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Evolving
public interface RefreshUserMappingsProtocolPB extends
RefreshUserMappingsProtocolService.BlockingInterface, VersionedProtocol {
/**
* This method is defined to get the protocol signature using
* the R23 protocol - hence we have added the suffix of 2 the method name
* to avoid conflict.
*/
public ProtocolSignatureWritable getProtocolSignature2(String protocol,
long clientVersion, int clientMethodsHash) throws IOException;
RefreshUserMappingsProtocolService.BlockingInterface {
}


@ -24,9 +24,6 @@ import org.apache.hadoop.hdfs.protocol.proto.RefreshUserMappingsProtocolProtos.R
import org.apache.hadoop.hdfs.protocol.proto.RefreshUserMappingsProtocolProtos.RefreshSuperUserGroupsConfigurationResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.RefreshUserMappingsProtocolProtos.RefreshUserToGroupsMappingsRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.RefreshUserMappingsProtocolProtos.RefreshUserToGroupsMappingsResponseProto;
import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
import org.apache.hadoop.ipc.ProtocolSignature;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.security.RefreshUserMappingsProtocol;
import com.google.protobuf.RpcController;
@ -66,40 +63,4 @@ public class RefreshUserMappingsProtocolServerSideTranslatorPB implements Refres
return RefreshSuperUserGroupsConfigurationResponseProto.newBuilder()
.build();
}
@Override
public long getProtocolVersion(String protocol, long clientVersion)
throws IOException {
return RPC.getProtocolVersion(RefreshUserMappingsProtocolPB.class);
}
@Override
public ProtocolSignature getProtocolSignature(String protocol,
long clientVersion, int clientMethodsHash) throws IOException {
/**
* Don't forward this to the server. The protocol version and signature is
* that of {@link RefreshUserMappingsProtocol}
*/
if (!protocol.equals(RPC
.getProtocolName(RefreshUserMappingsProtocolPB.class))) {
throw new IOException("Namenode Serverside implements "
+ RPC.getProtocolName(RefreshUserMappingsProtocolPB.class)
+ ". The following requested protocol is unknown: " + protocol);
}
return ProtocolSignature.getProtocolSignature(clientMethodsHash,
RPC.getProtocolVersion(RefreshUserMappingsProtocolPB.class),
RefreshUserMappingsProtocolPB.class);
}
@Override
public ProtocolSignatureWritable getProtocolSignature2(String protocol,
long clientVersion, int clientMethodsHash) throws IOException {
/**
* Don't forward this to the server. The protocol version and signature is
* that of {@link RefreshUserMappingsProtocolPB}
*/
return ProtocolSignatureWritable.convert(this.getProtocolSignature(
protocol, clientVersion, clientMethodsHash));
}
}


@ -626,9 +626,12 @@ public class DataNode extends Configured
// DatanodeProtocol namenode,
SecureResources resources
) throws IOException {
if(UserGroupInformation.isSecurityEnabled() && resources == null)
throw new RuntimeException("Cannot start secure cluster without " +
"privileged resources.");
if(UserGroupInformation.isSecurityEnabled() && resources == null) {
if (!conf.getBoolean("ignore.secure.ports.for.testing", false)) {
throw new RuntimeException("Cannot start secure cluster without "
+ "privileged resources.");
}
}
// settings global for all BPs in the Data Node
this.secureResources = resources;
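The widened conditional above adds an escape hatch: when security is enabled but no privileged resources were handed in, startup still fails unless the test-only override key is set. A hedged sketch of how a test might use it; only the key name comes from the patch, the helper class is illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;

// Sketch only: builds a Configuration that opts out of the privileged-port check.
class SecureTestConfSketch {
  static Configuration insecureTestConf() {
    Configuration conf = new HdfsConfiguration();
    // Defaults to false, so production secure clusters still require SecureResources.
    conf.setBoolean("ignore.secure.ports.for.testing", true);
    return conf;
  }
}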
@ -1780,25 +1783,6 @@ public class DataNode extends Configured
return new ExtendedBlock(oldBlock.getBlockPoolId(), r);
}
@Override
public long getProtocolVersion(String protocol, long clientVersion
) throws IOException {
if (protocol.equals(InterDatanodeProtocol.class.getName())) {
return InterDatanodeProtocol.versionID;
} else if (protocol.equals(ClientDatanodeProtocol.class.getName())) {
return ClientDatanodeProtocol.versionID;
}
throw new IOException("Unknown protocol to " + getClass().getSimpleName()
+ ": " + protocol);
}
@Override
public ProtocolSignature getProtocolSignature(String protocol,
long clientVersion, int clientMethodsHash) throws IOException {
return ProtocolSignature.getProtocolSignature(
this, protocol, clientVersion, clientMethodsHash);
}
/** A convenient class used in block recovery */
static class BlockRecord {
final DatanodeID id;


@ -41,8 +41,8 @@ import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.StandbyException;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;
@ -229,15 +229,6 @@ public class BackupNode extends NameNode {
nnRpcAddress = nn.nnRpcAddress;
}
@Override
public long getProtocolVersion(String protocol, long clientVersion)
throws IOException {
if (protocol.equals(JournalProtocol.class.getName())) {
return JournalProtocol.versionID;
}
return super.getProtocolVersion(protocol, clientVersion);
}
/////////////////////////////////////////////////////
// BackupNodeProtocol implementation for backup node.
/////////////////////////////////////////////////////


@ -86,7 +86,7 @@ class EditsDoubleBuffer {
}
boolean shouldForceSync() {
return bufReady.size() >= initBufferSize;
return bufCurrent.size() >= initBufferSize;
}
DataOutputBuffer getCurrentBuf() {
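The one-line fix above makes shouldForceSync() look at bufCurrent, the buffer still accepting writes, rather than bufReady, which is normally empty between flushes. A simplified sketch of the double-buffer shape (assumed names, not the real EditsDoubleBuffer) showing why the old check rarely fired:

import java.io.ByteArrayOutputStream;
import java.io.IOException;

class DoubleBufferSketch {
  private final int initBufferSize;
  private ByteArrayOutputStream bufCurrent; // edits accumulate here
  private ByteArrayOutputStream bufReady;   // handed off to the flush thread

  DoubleBufferSketch(int initBufferSize) {
    this.initBufferSize = initBufferSize;
    this.bufCurrent = new ByteArrayOutputStream(initBufferSize);
    this.bufReady = new ByteArrayOutputStream(initBufferSize);
  }

  void writeOp(byte[] serializedOp) throws IOException {
    bufCurrent.write(serializedOp);
  }

  // Post-fix behaviour: force a sync once the in-progress buffer outgrows its
  // initial size. Checking bufReady instead almost never returned true, since
  // bufReady is drained right after each swap.
  boolean shouldForceSync() {
    return bufCurrent.size() >= initBufferSize;
  }

  void setReadyToFlush() {
    ByteArrayOutputStream tmp = bufReady;
    bufReady = bufCurrent;
    bufCurrent = tmp;
    bufCurrent.reset();
  }
}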


@ -249,8 +249,6 @@ public class FSDirectory implements Closeable {
+" to the file system");
return null;
}
// add create file record to log, record new generation stamp
fsImage.getEditLog().logOpenFile(path, newNode);
if(NameNode.stateChangeLog.isDebugEnabled()) {
NameNode.stateChangeLog.debug("DIR* FSDirectory.addFile: "


@ -822,6 +822,14 @@ public class FSEditLog {
this.journalSet.setRuntimeForTesting(runtime);
}
/**
* Used only by tests.
*/
@VisibleForTesting
void setMetricsForTests(NameNodeMetrics metrics) {
this.metrics = metrics;
}
/**
* Return a manifest of what finalized edit logs are available
*/


@ -112,9 +112,8 @@ public class FSEditLogLoader {
long recentOpcodeOffsets[] = new long[4];
Arrays.fill(recentOpcodeOffsets, -1);
try {
long txId = expectedStartingTxId - 1;
try {
try {
while (true) {
FSEditLogOp op;
@ -123,7 +122,8 @@ public class FSEditLogLoader {
break;
}
} catch (IOException ioe) {
String errorMessage = formatEditLogReplayError(in, recentOpcodeOffsets);
long badTxId = txId + 1; // because txId hasn't been incremented yet
String errorMessage = formatEditLogReplayError(in, recentOpcodeOffsets, badTxId);
FSImage.LOG.error(errorMessage);
throw new EditLogInputException(errorMessage,
ioe, numEdits);
@ -131,12 +131,12 @@ public class FSEditLogLoader {
recentOpcodeOffsets[(int)(numEdits % recentOpcodeOffsets.length)] =
in.getPosition();
if (LayoutVersion.supports(Feature.STORED_TXIDS, logVersion)) {
long thisTxId = op.txid;
if (thisTxId != txId + 1) {
long expectedTxId = txId + 1;
txId = op.txid;
if (txId != expectedTxId) {
throw new IOException("Expected transaction ID " +
(txId + 1) + " but got " + thisTxId);
expectedTxId + " but got " + txId);
}
txId = thisTxId;
}
incrOpCount(op.opCode, opCounts);
@ -145,7 +145,7 @@ public class FSEditLogLoader {
} catch (Throwable t) {
// Catch Throwable because in the case of a truly corrupt edits log, any
// sort of error might be thrown (NumberFormat, NullPointer, EOF, etc.)
String errorMessage = formatEditLogReplayError(in, recentOpcodeOffsets);
String errorMessage = formatEditLogReplayError(in, recentOpcodeOffsets, txId);
FSImage.LOG.error(errorMessage);
throw new IOException(errorMessage, t);
}
@ -265,12 +265,22 @@ public class FSEditLogLoader {
updateBlocks(fsDir, addCloseOp, oldFile);
// Now close the file
INodeFileUnderConstruction ucFile = (INodeFileUnderConstruction) oldFile;
if (!oldFile.isUnderConstruction() &&
logVersion <= LayoutVersion.BUGFIX_HDFS_2991_VERSION) {
// There was a bug (HDFS-2991) in hadoop < 0.23.1 where OP_CLOSE
// could show up twice in a row. But after that version, this
// should be fixed, so we should treat it as an error.
throw new IOException(
"File is not under construction: " + addCloseOp.path);
}
// One might expect that you could use removeLease(holder, path) here,
// but OP_CLOSE doesn't serialize the holder. So, remove by path.
if (oldFile.isUnderConstruction()) {
INodeFileUnderConstruction ucFile = (INodeFileUnderConstruction) oldFile;
fsNamesys.leaseManager.removeLeaseWithPrefixPath(addCloseOp.path);
INodeFile newFile = ucFile.convertToInodeFile();
fsDir.replaceNode(addCloseOp.path, ucFile, newFile);
}
break;
}
case OP_SET_REPLICATION: {
@ -431,9 +441,10 @@ public class FSEditLogLoader {
}
private static String formatEditLogReplayError(EditLogInputStream in,
long recentOpcodeOffsets[]) {
long recentOpcodeOffsets[], long txid) {
StringBuilder sb = new StringBuilder();
sb.append("Error replaying edit log at offset " + in.getPosition());
sb.append(" on transaction ID ").append(txid);
if (recentOpcodeOffsets[0] != -1) {
Arrays.sort(recentOpcodeOffsets);
sb.append("\nRecent opcode offsets:");


@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.namenode;
import java.util.zip.CheckedInputStream;
import java.util.zip.Checksum;
import java.util.Arrays;
import java.util.EnumMap;
import org.apache.hadoop.fs.ChecksumException;
@ -305,6 +306,36 @@ public abstract class FSEditLogOp {
}
return blocks;
}
public String stringifyMembers() {
StringBuilder builder = new StringBuilder();
builder.append("[length=");
builder.append(length);
builder.append(", path=");
builder.append(path);
builder.append(", replication=");
builder.append(replication);
builder.append(", mtime=");
builder.append(mtime);
builder.append(", atime=");
builder.append(atime);
builder.append(", blockSize=");
builder.append(blockSize);
builder.append(", blocks=");
builder.append(Arrays.toString(blocks));
builder.append(", permissions=");
builder.append(permissions);
builder.append(", clientName=");
builder.append(clientName);
builder.append(", clientMachine=");
builder.append(clientMachine);
builder.append(", opCode=");
builder.append(opCode);
builder.append(", txid=");
builder.append(txid);
builder.append("]");
return builder.toString();
}
}
static class AddOp extends AddCloseOp {
@ -315,6 +346,14 @@ public abstract class FSEditLogOp {
static AddOp getInstance() {
return (AddOp)opInstances.get().get(OP_ADD);
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("AddOp ");
builder.append(stringifyMembers());
return builder.toString();
}
}
static class CloseOp extends AddCloseOp {
@ -325,6 +364,14 @@ public abstract class FSEditLogOp {
static CloseOp getInstance() {
return (CloseOp)opInstances.get().get(OP_CLOSE);
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("CloseOp ");
builder.append(stringifyMembers());
return builder.toString();
}
}
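The toString() overrides added here and throughout the rest of this file follow one pattern: dump every field of the op plus opCode and txid. The practical payoff is that replay and test code can log an op object directly. A small illustrative sketch of that usage; the stand-in op and logging call are assumptions, not taken from the patch:

import java.util.Arrays;

// Illustrative only: a fake op showing the log output the real overrides produce.
class ToStringSketch {
  static class FakeAddOp {
    final String path = "/tmp/test-file";
    final long txid = 42;
    final long[] blocks = { 1001, 1002 };

    @Override
    public String toString() {
      return "AddOp [path=" + path + ", blocks=" + Arrays.toString(blocks)
          + ", txid=" + txid + "]";
    }
  }

  public static void main(String[] args) {
    // With the real overrides in place, debugging code can simply do
    // LOG.debug("Replaying " + op) and get a readable record like this:
    System.out.println(new FakeAddOp());
  }
}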
static class SetReplicationOp extends FSEditLogOp {
@ -366,6 +413,21 @@ public abstract class FSEditLogOp {
this.replication = readShort(in);
}
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("SetReplicationOp [path=");
builder.append(path);
builder.append(", replication=");
builder.append(replication);
builder.append(", opCode=");
builder.append(opCode);
builder.append(", txid=");
builder.append(txid);
builder.append("]");
return builder.toString();
}
}
static class ConcatDeleteOp extends FSEditLogOp {
@ -440,6 +502,25 @@ public abstract class FSEditLogOp {
this.timestamp = readLong(in);
}
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("ConcatDeleteOp [length=");
builder.append(length);
builder.append(", trg=");
builder.append(trg);
builder.append(", srcs=");
builder.append(Arrays.toString(srcs));
builder.append(", timestamp=");
builder.append(timestamp);
builder.append(", opCode=");
builder.append(opCode);
builder.append(", txid=");
builder.append(txid);
builder.append("]");
return builder.toString();
}
}
static class RenameOldOp extends FSEditLogOp {
@ -497,6 +578,25 @@ public abstract class FSEditLogOp {
this.timestamp = readLong(in);
}
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("RenameOldOp [length=");
builder.append(length);
builder.append(", src=");
builder.append(src);
builder.append(", dst=");
builder.append(dst);
builder.append(", timestamp=");
builder.append(timestamp);
builder.append(", opCode=");
builder.append(opCode);
builder.append(", txid=");
builder.append(txid);
builder.append("]");
return builder.toString();
}
}
static class DeleteOp extends FSEditLogOp {
@ -545,6 +645,23 @@ public abstract class FSEditLogOp {
this.timestamp = readLong(in);
}
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("DeleteOp [length=");
builder.append(length);
builder.append(", path=");
builder.append(path);
builder.append(", timestamp=");
builder.append(timestamp);
builder.append(", opCode=");
builder.append(opCode);
builder.append(", txid=");
builder.append(txid);
builder.append("]");
return builder.toString();
}
}
static class MkdirOp extends FSEditLogOp {
@ -623,6 +740,25 @@ public abstract class FSEditLogOp {
this.permissions = null;
}
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("MkdirOp [length=");
builder.append(length);
builder.append(", path=");
builder.append(path);
builder.append(", timestamp=");
builder.append(timestamp);
builder.append(", permissions=");
builder.append(permissions);
builder.append(", opCode=");
builder.append(opCode);
builder.append(", txid=");
builder.append(txid);
builder.append("]");
return builder.toString();
}
}
static class SetGenstampOp extends FSEditLogOp {
@ -652,6 +788,19 @@ public abstract class FSEditLogOp {
throws IOException {
this.genStamp = FSImageSerialization.readLong(in);
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("SetGenstampOp [genStamp=");
builder.append(genStamp);
builder.append(", opCode=");
builder.append(opCode);
builder.append(", txid=");
builder.append(txid);
builder.append("]");
return builder.toString();
}
}
@SuppressWarnings("deprecation")
@ -676,6 +825,17 @@ public abstract class FSEditLogOp {
//Datanodes are not persistent any more.
FSImageSerialization.DatanodeImage.skipOne(in);
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("DatanodeAddOp [opCode=");
builder.append(opCode);
builder.append(", txid=");
builder.append(txid);
builder.append("]");
return builder.toString();
}
}
@SuppressWarnings("deprecation")
@ -701,6 +861,17 @@ public abstract class FSEditLogOp {
nodeID.readFields(in);
//Datanodes are not persistent any more.
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("DatanodeRemoveOp [opCode=");
builder.append(opCode);
builder.append(", txid=");
builder.append(txid);
builder.append("]");
return builder.toString();
}
}
static class SetPermissionsOp extends FSEditLogOp {
@ -738,6 +909,21 @@ public abstract class FSEditLogOp {
this.src = FSImageSerialization.readString(in);
this.permissions = FsPermission.read(in);
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("SetPermissionsOp [src=");
builder.append(src);
builder.append(", permissions=");
builder.append(permissions);
builder.append(", opCode=");
builder.append(opCode);
builder.append(", txid=");
builder.append(txid);
builder.append("]");
return builder.toString();
}
}
static class SetOwnerOp extends FSEditLogOp {
@ -783,6 +969,23 @@ public abstract class FSEditLogOp {
this.username = FSImageSerialization.readString_EmptyAsNull(in);
this.groupname = FSImageSerialization.readString_EmptyAsNull(in);
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("SetOwnerOp [src=");
builder.append(src);
builder.append(", username=");
builder.append(username);
builder.append(", groupname=");
builder.append(groupname);
builder.append(", opCode=");
builder.append(opCode);
builder.append(", txid=");
builder.append(txid);
builder.append("]");
return builder.toString();
}
}
static class SetNSQuotaOp extends FSEditLogOp {
@ -809,6 +1012,21 @@ public abstract class FSEditLogOp {
this.src = FSImageSerialization.readString(in);
this.nsQuota = FSImageSerialization.readLong(in);
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("SetNSQuotaOp [src=");
builder.append(src);
builder.append(", nsQuota=");
builder.append(nsQuota);
builder.append(", opCode=");
builder.append(opCode);
builder.append(", txid=");
builder.append(txid);
builder.append("]");
return builder.toString();
}
}
static class ClearNSQuotaOp extends FSEditLogOp {
@ -833,6 +1051,19 @@ public abstract class FSEditLogOp {
throws IOException {
this.src = FSImageSerialization.readString(in);
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("ClearNSQuotaOp [src=");
builder.append(src);
builder.append(", opCode=");
builder.append(opCode);
builder.append(", txid=");
builder.append(txid);
builder.append("]");
return builder.toString();
}
}
static class SetQuotaOp extends FSEditLogOp {
@ -878,6 +1109,23 @@ public abstract class FSEditLogOp {
this.nsQuota = FSImageSerialization.readLong(in);
this.dsQuota = FSImageSerialization.readLong(in);
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("SetQuotaOp [src=");
builder.append(src);
builder.append(", nsQuota=");
builder.append(nsQuota);
builder.append(", dsQuota=");
builder.append(dsQuota);
builder.append(", opCode=");
builder.append(opCode);
builder.append(", txid=");
builder.append(txid);
builder.append("]");
return builder.toString();
}
}
static class TimesOp extends FSEditLogOp {
@ -936,6 +1184,25 @@ public abstract class FSEditLogOp {
this.atime = readLong(in);
}
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("TimesOp [length=");
builder.append(length);
builder.append(", path=");
builder.append(path);
builder.append(", mtime=");
builder.append(mtime);
builder.append(", atime=");
builder.append(atime);
builder.append(", opCode=");
builder.append(opCode);
builder.append(", txid=");
builder.append(txid);
builder.append("]");
return builder.toString();
}
}
static class SymlinkOp extends FSEditLogOp {
@ -1011,6 +1278,29 @@ public abstract class FSEditLogOp {
}
this.permissionStatus = PermissionStatus.read(in);
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("SymlinkOp [length=");
builder.append(length);
builder.append(", path=");
builder.append(path);
builder.append(", value=");
builder.append(value);
builder.append(", mtime=");
builder.append(mtime);
builder.append(", atime=");
builder.append(atime);
builder.append(", permissionStatus=");
builder.append(permissionStatus);
builder.append(", opCode=");
builder.append(opCode);
builder.append(", txid=");
builder.append(txid);
builder.append("]");
return builder.toString();
}
}
static class RenameOp extends FSEditLogOp {
@ -1097,6 +1387,27 @@ public abstract class FSEditLogOp {
}
return new BytesWritable(bytes);
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("RenameOp [length=");
builder.append(length);
builder.append(", src=");
builder.append(src);
builder.append(", dst=");
builder.append(dst);
builder.append(", timestamp=");
builder.append(timestamp);
builder.append(", options=");
builder.append(Arrays.toString(options));
builder.append(", opCode=");
builder.append(opCode);
builder.append(", txid=");
builder.append(txid);
builder.append("]");
return builder.toString();
}
}
static class ReassignLeaseOp extends FSEditLogOp {
@ -1142,6 +1453,23 @@ public abstract class FSEditLogOp {
this.path = FSImageSerialization.readString(in);
this.newHolder = FSImageSerialization.readString(in);
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("ReassignLeaseOp [leaseHolder=");
builder.append(leaseHolder);
builder.append(", path=");
builder.append(path);
builder.append(", newHolder=");
builder.append(newHolder);
builder.append(", opCode=");
builder.append(opCode);
builder.append(", txid=");
builder.append(txid);
builder.append("]");
return builder.toString();
}
}
static class GetDelegationTokenOp extends FSEditLogOp {
@ -1185,6 +1513,21 @@ public abstract class FSEditLogOp {
this.expiryTime = readLong(in);
}
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("GetDelegationTokenOp [token=");
builder.append(token);
builder.append(", expiryTime=");
builder.append(expiryTime);
builder.append(", opCode=");
builder.append(opCode);
builder.append(", txid=");
builder.append(txid);
builder.append("]");
return builder.toString();
}
}
static class RenewDelegationTokenOp extends FSEditLogOp {
@ -1228,6 +1571,21 @@ public abstract class FSEditLogOp {
this.expiryTime = readLong(in);
}
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("RenewDelegationTokenOp [token=");
builder.append(token);
builder.append(", expiryTime=");
builder.append(expiryTime);
builder.append(", opCode=");
builder.append(opCode);
builder.append(", txid=");
builder.append(txid);
builder.append("]");
return builder.toString();
}
}
static class CancelDelegationTokenOp extends FSEditLogOp {
@ -1259,6 +1617,19 @@ public abstract class FSEditLogOp {
this.token = new DelegationTokenIdentifier();
this.token.readFields(in);
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("CancelDelegationTokenOp [token=");
builder.append(token);
builder.append(", opCode=");
builder.append(opCode);
builder.append(", txid=");
builder.append(txid);
builder.append("]");
return builder.toString();
}
}
static class UpdateMasterKeyOp extends FSEditLogOp {
@ -1289,6 +1660,19 @@ public abstract class FSEditLogOp {
this.key = new DelegationKey();
this.key.readFields(in);
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("UpdateMasterKeyOp [key=");
builder.append(key);
builder.append(", opCode=");
builder.append(opCode);
builder.append(", txid=");
builder.append(txid);
builder.append("]");
return builder.toString();
}
}
static class LogSegmentOp extends FSEditLogOp {
@ -1311,6 +1695,17 @@ public abstract class FSEditLogOp {
void writeFields(DataOutputStream out) throws IOException {
// no data stored
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("LogSegmentOp [opCode=");
builder.append(opCode);
builder.append(", txid=");
builder.append(txid);
builder.append("]");
return builder.toString();
}
}
static class InvalidOp extends FSEditLogOp {
@ -1331,6 +1726,17 @@ public abstract class FSEditLogOp {
throws IOException {
// nothing to read
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("InvalidOp [opCode=");
builder.append(opCode);
builder.append(", txid=");
builder.append(txid);
builder.append("]");
return builder.toString();
}
}
static private short readShort(DataInputStream in) throws IOException {


@ -1639,6 +1639,9 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
"Unable to add file to namespace.");
}
leaseManager.addLease(newNode.getClientName(), src);
// record file record in log, record new generation stamp
getEditLog().logOpenFile(src, newNode);
if (NameNode.stateChangeLog.isDebugEnabled()) {
NameNode.stateChangeLog.debug("DIR* NameSystem.startFile: "
+"add "+src+" to namespace for "+holder);
@ -1684,11 +1687,11 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
dir.replaceNode(src, node, cons);
leaseManager.addLease(cons.getClientName(), src);
LocatedBlock ret = blockManager.convertLastBlockToUnderConstruction(cons);
if (writeToEditLog) {
getEditLog().logOpenFile(src, cons);
}
return blockManager.convertLastBlockToUnderConstruction(cons);
return ret;
}
/**


@ -17,6 +17,10 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HANDLER_COUNT_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HANDLER_COUNT_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_HANDLER_COUNT_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_HANDLER_COUNT_KEY;
import static org.apache.hadoop.hdfs.protocol.HdfsConstants.MAX_PATH_DEPTH;
import static org.apache.hadoop.hdfs.protocol.HdfsConstants.MAX_PATH_LENGTH;
@ -37,8 +41,6 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
import org.apache.hadoop.ha.HAServiceProtocol;
import org.apache.hadoop.ha.HealthCheckFailedException;
import org.apache.hadoop.ha.ServiceFailedException;
@ -50,44 +52,43 @@ import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HDFSPolicyProvider;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ClientNamenodeProtocol;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.NamenodeProtocolService;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeProtocolService;
import org.apache.hadoop.hdfs.protocol.proto.GetUserMappingsProtocolProtos.GetUserMappingsProtocolService;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.NamenodeProtocolService;
import org.apache.hadoop.hdfs.protocol.proto.RefreshAuthorizationPolicyProtocolProtos.RefreshAuthorizationPolicyProtocolService;
import org.apache.hadoop.hdfs.protocol.proto.RefreshUserMappingsProtocolProtos.RefreshUserMappingsProtocolService;
import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB;
import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB;
import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolPB;
import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolServerSideTranslatorPB;
import org.apache.hadoop.hdfs.protocolPB.GetUserMappingsProtocolPB;
import org.apache.hadoop.hdfs.protocolPB.GetUserMappingsProtocolServerSideTranslatorPB;
import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolPB;
import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolServerSideTranslatorPB;
import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB;
import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB;
import org.apache.hadoop.hdfs.protocolPB.RefreshAuthorizationPolicyProtocolPB;
import org.apache.hadoop.hdfs.protocolPB.RefreshAuthorizationPolicyProtocolServerSideTranslatorPB;
import org.apache.hadoop.hdfs.protocolPB.RefreshUserMappingsProtocolPB;
import org.apache.hadoop.hdfs.protocolPB.RefreshUserMappingsProtocolServerSideTranslatorPB;
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
@ -99,7 +100,6 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand;
import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
@ -112,7 +112,6 @@ import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
import org.apache.hadoop.io.EnumSetWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.ProtocolSignature;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
import org.apache.hadoop.ipc.Server;
@ -120,14 +119,11 @@ import org.apache.hadoop.ipc.WritableRpcEngine;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.Groups;
import org.apache.hadoop.security.RefreshUserMappingsProtocol;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AuthorizationException;
import org.apache.hadoop.security.authorize.ProxyUsers;
import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
import org.apache.hadoop.tools.GetUserMappingsProtocol;
import org.apache.hadoop.security.token.Token;
import com.google.protobuf.BlockingService;
@ -300,36 +296,6 @@ class NameNodeRpcServer implements NamenodeProtocols {
return clientRpcAddress;
}
@Override // VersionedProtocol
public ProtocolSignature getProtocolSignature(String protocol,
long clientVersion, int clientMethodsHash) throws IOException {
return ProtocolSignature.getProtocolSignature(
this, protocol, clientVersion, clientMethodsHash);
}
@Override
public long getProtocolVersion(String protocol,
long clientVersion) throws IOException {
if (protocol.equals(ClientProtocol.class.getName())) {
throw new IOException("Old Namenode Client protocol is not supported:" +
protocol + "Switch your clientside to " + ClientNamenodeProtocol.class);
} else if (protocol.equals(DatanodeProtocol.class.getName())){
return DatanodeProtocol.versionID;
} else if (protocol.equals(NamenodeProtocol.class.getName())){
return NamenodeProtocol.versionID;
} else if (protocol.equals(RefreshAuthorizationPolicyProtocol.class.getName())){
return RefreshAuthorizationPolicyProtocol.versionID;
} else if (protocol.equals(RefreshUserMappingsProtocol.class.getName())){
return RefreshUserMappingsProtocol.versionID;
} else if (protocol.equals(GetUserMappingsProtocol.class.getName())){
return GetUserMappingsProtocol.versionID;
} else if (protocol.equals(HAServiceProtocol.class.getName())) {
return HAServiceProtocol.versionID;
} else {
throw new IOException("Unknown protocol to name node: " + protocol);
}
}
/////////////////////////////////////////////////////
// NamenodeProtocol
/////////////////////////////////////////////////////

View File

@ -25,7 +25,6 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.ipc.VersionedProtocol;
import org.apache.hadoop.security.KerberosInfo;
/**********************************************************************
@ -40,7 +39,7 @@ import org.apache.hadoop.security.KerberosInfo;
serverPrincipal = DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY,
clientPrincipal = DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY)
@InterfaceAudience.Private
public interface DatanodeProtocol extends VersionedProtocol {
public interface DatanodeProtocol {
/**
* This class is used by both the Namenode (client) and BackupNode (server)
* to insulate from the protocol serialization.

View File

@ -35,7 +35,7 @@ import org.apache.hadoop.security.KerberosInfo;
serverPrincipal = DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY,
clientPrincipal = DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY)
@InterfaceAudience.Private
public interface InterDatanodeProtocol extends VersionedProtocol {
public interface InterDatanodeProtocol {
public static final Log LOG = LogFactory.getLog(InterDatanodeProtocol.class);
/**

View File

@ -32,7 +32,7 @@ import org.apache.hadoop.security.KerberosInfo;
serverPrincipal = DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY,
clientPrincipal = DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY)
@InterfaceAudience.Private
public interface JournalProtocol extends VersionedProtocol {
public interface JournalProtocol {
/**
*
* This class is used by both the Namenode (client) and BackupNode (server)

View File

@ -36,7 +36,7 @@ import org.apache.hadoop.security.KerberosInfo;
serverPrincipal = DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY,
clientPrincipal = DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY)
@InterfaceAudience.Private
public interface NamenodeProtocol extends VersionedProtocol {
public interface NamenodeProtocol {
/**
* Until version 6L, this class served as both
* the client interface to the NN AND the RPC protocol used to

View File

@ -0,0 +1,176 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import java.io.File;
import java.io.IOException;
import java.util.EnumMap;
import java.util.Random;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.namenode.FSEditLog;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes;
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
import org.apache.hadoop.hdfs.server.namenode.NNStorage;
import org.apache.hadoop.hdfs.util.Holder;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Test;
/**
* Unit test to make sure that Append properly logs the right
* things to the edit log, such that files aren't lost or truncated
* on restart.
*/
public class TestFileAppendRestart {
private static final int BLOCK_SIZE = 4096;
private static final String HADOOP_23_BROKEN_APPEND_TGZ =
"image-with-buggy-append.tgz";
private void writeAndAppend(FileSystem fs, Path p,
int lengthForCreate, int lengthForAppend) throws IOException {
// Creating a file with 4096 blockSize to write multiple blocks
FSDataOutputStream stream = fs.create(
p, true, BLOCK_SIZE, (short) 1, BLOCK_SIZE);
try {
AppendTestUtil.write(stream, 0, lengthForCreate);
stream.close();
stream = fs.append(p);
AppendTestUtil.write(stream, lengthForCreate, lengthForAppend);
stream.close();
} finally {
IOUtils.closeStream(stream);
}
int totalLength = lengthForCreate + lengthForAppend;
assertEquals(totalLength, fs.getFileStatus(p).getLen());
}
/**
* Regression test for HDFS-2991. Creates and appends to files
* where blocks start/end on block boundaries.
*/
@Test
public void testAppendRestart() throws Exception {
final Configuration conf = new HdfsConfiguration();
// Turn off persistent IPC, so that the DFSClient can survive NN restart
conf.setInt(
CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
0);
MiniDFSCluster cluster = null;
FSDataOutputStream stream = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
FileSystem fs = cluster.getFileSystem();
File editLog =
new File(FSImageTestUtil.getNameNodeCurrentDirs(cluster, 0).get(0),
NNStorage.getInProgressEditsFileName(1));
EnumMap<FSEditLogOpCodes, Holder<Integer>> counts;
Path p1 = new Path("/block-boundaries");
writeAndAppend(fs, p1, BLOCK_SIZE, BLOCK_SIZE);
counts = FSImageTestUtil.countEditLogOpTypes(editLog);
// OP_ADD to create file
// OP_ADD for first block
// OP_CLOSE to close file
// OP_ADD to reopen file
// OP_ADD for second block
// OP_CLOSE to close file
assertEquals(4, (int)counts.get(FSEditLogOpCodes.OP_ADD).held);
assertEquals(2, (int)counts.get(FSEditLogOpCodes.OP_CLOSE).held);
Path p2 = new Path("/not-block-boundaries");
writeAndAppend(fs, p2, BLOCK_SIZE/2, BLOCK_SIZE);
counts = FSImageTestUtil.countEditLogOpTypes(editLog);
// OP_ADD to create file
// OP_ADD for first block
// OP_CLOSE to close file
// OP_ADD to re-establish the lease
// OP_ADD from the updatePipeline call (increments genstamp of last block)
// OP_ADD at the start of the second block
// OP_CLOSE to close file
// Total: 5 OP_ADDs and 2 OP_CLOSEs in addition to the ones above
assertEquals(9, (int)counts.get(FSEditLogOpCodes.OP_ADD).held);
assertEquals(4, (int)counts.get(FSEditLogOpCodes.OP_CLOSE).held);
cluster.restartNameNode();
AppendTestUtil.check(fs, p1, 2*BLOCK_SIZE);
AppendTestUtil.check(fs, p2, 3*BLOCK_SIZE/2);
} finally {
IOUtils.closeStream(stream);
if (cluster != null) { cluster.shutdown(); }
}
}
/**
* Earlier versions of HDFS had a bug (HDFS-2991) which caused
* append(), when called exactly at a block boundary,
* to not log an OP_ADD. This test ensures that we can read such
* logs correctly, by loading a namesystem image created with
* 0.23.1-rc2, a version which exhibits the issue.
*/
@Test
public void testLoadLogsFromBuggyEarlierVersions() throws IOException {
final Configuration conf = new HdfsConfiguration();
String tarFile = System.getProperty("test.cache.data", "build/test/cache")
+ "/" + HADOOP_23_BROKEN_APPEND_TGZ;
String testDir = System.getProperty("test.build.data", "build/test/data");
File dfsDir = new File(testDir, "image-with-buggy-append");
if (dfsDir.exists() && !FileUtil.fullyDelete(dfsDir)) {
throw new IOException("Could not delete dfs directory '" + dfsDir + "'");
}
FileUtil.unTar(new File(tarFile), new File(testDir));
File nameDir = new File(dfsDir, "name");
GenericTestUtils.assertExists(nameDir);
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameDir.getAbsolutePath());
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
.format(false)
.manageDataDfsDirs(false)
.manageNameDfsDirs(false)
.numDataNodes(0)
.waitSafeMode(false)
.startupOption(StartupOption.UPGRADE)
.build();
try {
FileSystem fs = cluster.getFileSystem();
Path testPath = new Path("/tmp/io_data/test_io_0");
assertEquals(2*1024*1024, fs.getFileStatus(testPath).getLen());
} finally {
cluster.shutdown();
}
}
}

View File

@ -18,39 +18,31 @@
package org.apache.hadoop.hdfs.security;
import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION;
import static org.mockito.Matchers.anyInt;
import static org.mockito.Matchers.anyLong;
import static org.mockito.Matchers.anyString;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.when;
import java.net.InetSocketAddress;
import java.security.PrivilegedExceptionAction;
import org.apache.commons.logging.*;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.Client;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.ProtocolSignature;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.Client;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.SaslInputStream;
import org.apache.hadoop.security.SaslRpcClient;
import org.apache.hadoop.security.SaslRpcServer;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.log4j.Level;
import org.junit.Test;
@ -80,12 +72,6 @@ public class TestClientProtocolWithDelegationToken {
public void testDelegationTokenRpc() throws Exception {
ClientProtocol mockNN = mock(ClientProtocol.class);
FSNamesystem mockNameSys = mock(FSNamesystem.class);
when(mockNN.getProtocolVersion(anyString(), anyLong())).thenReturn(
ClientProtocol.versionID);
doReturn(ProtocolSignature.getProtocolSignature(
mockNN, ClientProtocol.class.getName(),
ClientProtocol.versionID, 0))
.when(mockNN).getProtocolSignature(anyString(), anyLong(), anyInt());
DelegationTokenSecretManager sm = new DelegationTokenSecretManager(
DFSConfigKeys.DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_DEFAULT,

View File

@ -23,13 +23,8 @@ import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyInt;
import static org.mockito.Matchers.anyLong;
import static org.mockito.Matchers.anyString;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
@ -51,12 +46,12 @@ import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ClientDatanodeProtocolService;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto;
@ -65,7 +60,6 @@ import org.apache.hadoop.hdfs.protocolPB.PBHelper;
import org.apache.hadoop.io.TestWritable;
import org.apache.hadoop.ipc.Client;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.ProtocolSignature;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.net.NetUtils;
@ -222,13 +216,6 @@ public class TestBlockToken {
private Server createMockDatanode(BlockTokenSecretManager sm,
Token<BlockTokenIdentifier> token) throws IOException, ServiceException {
ClientDatanodeProtocolPB mockDN = mock(ClientDatanodeProtocolPB.class);
when(mockDN.getProtocolVersion(anyString(), anyLong())).thenReturn(
RPC.getProtocolVersion(ClientDatanodeProtocolPB.class));
doReturn(
ProtocolSignature.getProtocolSignature(mockDN,
ClientDatanodeProtocolPB.class.getName(),
RPC.getProtocolVersion(ClientDatanodeProtocolPB.class), 0)).when(
mockDN).getProtocolSignature(anyString(), anyLong(), anyInt());
BlockTokenIdentifier id = sm.createIdentifier();
id.readFields(new DataInputStream(new ByteArrayInputStream(token

View File

@ -26,6 +26,7 @@ import java.net.URI;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.EnumMap;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@ -44,6 +45,7 @@ import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile;
import org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
import org.apache.hadoop.hdfs.util.Holder;
import org.apache.hadoop.hdfs.util.MD5FileUtils;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.conf.Configuration;
@ -195,6 +197,7 @@ public abstract class FSImageTestUtil {
return editLog;
}
/**
* Create an aborted in-progress log in the given directory, containing
* only a specified number of "mkdirs" operations.
@ -216,6 +219,35 @@ public abstract class FSImageTestUtil {
editLog.abortCurrentLogSegment();
}
/**
* @param editLog a path of an edit log file
* @return the count of each type of operation in the log file
* @throws Exception if there is an error reading it
*/
public static EnumMap<FSEditLogOpCodes,Holder<Integer>> countEditLogOpTypes(
File editLog) throws Exception {
EnumMap<FSEditLogOpCodes, Holder<Integer>> opCounts =
new EnumMap<FSEditLogOpCodes, Holder<Integer>>(FSEditLogOpCodes.class);
EditLogInputStream elis = new EditLogFileInputStream(editLog);
try {
FSEditLogOp op;
while ((op = elis.readOp()) != null) {
Holder<Integer> i = opCounts.get(op.opCode);
if (i == null) {
i = new Holder<Integer>(0);
opCounts.put(op.opCode, i);
}
i.held++;
}
} finally {
IOUtils.closeStream(elis);
}
return opCounts;
}
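A minimal usage sketch for the helper above, assuming a hypothetical edits file path (TestFileAppendRestart earlier in this change is the real caller):
import java.io.File;
import java.util.EnumMap;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes;
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
import org.apache.hadoop.hdfs.util.Holder;
public class CountEditLogOpsExample {
  public static void main(String[] args) throws Exception {
    // Hypothetical path to an in-progress edits file under a NN storage dir.
    File edits = new File("/tmp/name/current/edits_inprogress_0000000000000000001");
    EnumMap<FSEditLogOpCodes, Holder<Integer>> counts =
        FSImageTestUtil.countEditLogOpTypes(edits);
    Holder<Integer> adds = counts.get(FSEditLogOpCodes.OP_ADD);
    System.out.println("OP_ADD ops seen: " + (adds == null ? 0 : adds.held));
  }
}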
/**
* Assert that all of the given directories have the same newest filename
* for fsimage and that they hold the same data.

View File

@ -50,6 +50,7 @@ import org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
import org.apache.hadoop.hdfs.server.namenode.NNStorage;
import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.StringUtils;
import org.apache.log4j.Level;
@ -819,6 +820,40 @@ public class TestEditLog extends TestCase {
}
}
/**
* Regression test for HDFS-1112/HDFS-3020. Ensures that, even if
* logSync isn't called periodically, the edit log will sync itself.
*/
public void testAutoSync() throws Exception {
File logDir = new File(TEST_DIR, "testAutoSync");
logDir.mkdirs();
FSEditLog log = FSImageTestUtil.createStandaloneEditLog(logDir);
String oneKB = StringUtils.byteToHexString(
new byte[500]);
try {
log.openForWrite();
NameNodeMetrics mockMetrics = Mockito.mock(NameNodeMetrics.class);
log.setMetricsForTests(mockMetrics);
for (int i = 0; i < 400; i++) {
log.logDelete(oneKB, 1L);
}
// After ~400KB, we're still within the 512KB buffer size
Mockito.verify(mockMetrics, Mockito.times(0)).addSync(Mockito.anyLong());
// After ~400KB more, we should have done an automatic sync
for (int i = 0; i < 400; i++) {
log.logDelete(oneKB, 1L);
}
Mockito.verify(mockMetrics, Mockito.times(1)).addSync(Mockito.anyLong());
} finally {
log.close();
}
}
/**
* Tests the getEditLogManifest function using mock storage for a number
* of different situations.

View File

@ -90,15 +90,17 @@ public class TestFSEditLogLoader {
}
rwf.close();
String expectedErrorMessage = "^Error replaying edit log at offset \\d+\n";
expectedErrorMessage += "Recent opcode offsets: (\\d+\\s*){4}$";
StringBuilder bld = new StringBuilder();
bld.append("^Error replaying edit log at offset \\d+");
bld.append(" on transaction ID \\d+\n");
bld.append("Recent opcode offsets: (\\d+\\s*){4}$");
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES)
.format(false).build();
fail("should not be able to start");
} catch (IOException e) {
assertTrue("error message contains opcodes message",
e.getMessage().matches(expectedErrorMessage));
e.getMessage().matches(bld.toString()));
}
}
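For context, the tightened pattern above expects the startup failure to report the transaction ID as well as the byte offset; a hedged example of a message it would match (all numbers made up) is "Error replaying edit log at offset 1234 on transaction ID 7" followed by a "Recent opcode offsets: 1000 1050 1100 1234" line.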

View File

@ -0,0 +1,97 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import junit.framework.Assert;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.security.TestUGIWithSecurityOn;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
import org.junit.Assume;
import org.junit.Before;
import org.junit.Test;
public class TestSecureNameNode {
final static private int NUM_OF_DATANODES = 0;
@Before
public void testKdcRunning() {
// Tests are skipped if KDC is not running
Assume.assumeTrue(TestUGIWithSecurityOn.isKdcRunning());
}
@Test
public void testName() throws IOException, InterruptedException {
MiniDFSCluster cluster = null;
try {
String keyTabDir = System.getProperty("kdc.resource.dir") + "/keytabs";
String nn1KeytabPath = keyTabDir + "/nn1.keytab";
String user1KeyTabPath = keyTabDir + "/user1.keytab";
Configuration conf = new HdfsConfiguration();
conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
"kerberos");
conf.set(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY,
"nn1/localhost@EXAMPLE.COM");
conf.set(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY, nn1KeytabPath);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_OF_DATANODES)
.build();
final MiniDFSCluster clusterRef = cluster;
cluster.waitActive();
FileSystem fsForCurrentUser = cluster.getFileSystem();
fsForCurrentUser.mkdirs(new Path("/tmp"));
fsForCurrentUser.setPermission(new Path("/tmp"), new FsPermission(
(short) 511));
UserGroupInformation ugi = UserGroupInformation
.loginUserFromKeytabAndReturnUGI("user1@EXAMPLE.COM", user1KeyTabPath);
FileSystem fs = ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
@Override
public FileSystem run() throws Exception {
return clusterRef.getFileSystem();
}
});
try {
Path p = new Path("/users");
fs.mkdirs(p);
Assert.fail("user1 must not be allowed to write in /");
} catch (IOException expected) {
}
Path p = new Path("/tmp/alpha");
fs.mkdirs(p);
Assert.assertNotNull(fs.listStatus(p));
Assert.assertEquals(AuthenticationMethod.KERBEROS,
ugi.getAuthenticationMethod());
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
}

View File

@ -15,14 +15,23 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
[libdefaults]
default_realm = APACHE.ORG
udp_preference_limit = 1
extra_addresses = 127.0.0.1
default_realm = EXAMPLE.COM
allow_weak_crypto = true
default_tkt_enctypes = des-cbc-md5 des-cbc-crc des3-cbc-sha1
default_tgs_enctypes = des-cbc-md5 des-cbc-crc des3-cbc-sha1
[realms]
APACHE.ORG = {
admin_server = localhost:88
kdc = localhost:88
EXAMPLE.COM = {
kdc = localhost:60088
}
[domain_realm]
localhost = APACHE.ORG
.example.com = EXAMPLE.COM
example.com = EXAMPLE.COM
[login]
krb4_convert = true
krb4_get_tickets = false

View File

@ -110,6 +110,11 @@ Release 0.23.3 - UNRELEASED
MAPREDUCE-2942. TestNMAuditLogger.testNMAuditLoggerWithIP failing (Thomas
Graves via mahadev)
MAPREDUCE-3933. Failures because MALLOC_ARENA_MAX is not set (ahmed via tucu)
MAPREDUCE-3728. ShuffleHandler can't access results when configured in a
secure mode (ahmed via tucu)
Release 0.23.2 - UNRELEASED
INCOMPATIBLE CHANGES
@ -197,9 +202,33 @@ Release 0.23.2 - UNRELEASED
MAPREDUCE-3922. Fixed build to not compile 32bit container-executor binary
by default on all platforms. (Hitesh Shah via vinodkv)
MAPREDUCE-3790 Broken pipe on streaming job can lead to truncated output for
MAPREDUCE-3790. Broken pipe on streaming job can lead to truncated output for
a successful job (Jason Lowe via bobby)
MAPREDUCE-3816. capacity scheduler web ui bar graphs for used capacity wrong
(tgraves via bobby)
MAPREDUCE-3930. Fixed an NPE while accessing the AM page/webservice for a
task attempt without an assigned container. (Robert Joseph Evans via
sseth)
MAPREDUCE-3931. Changed PB implementation of LocalResource to take locks
so that race conditions don't fail tasks by inadvertently changing the
timestamps. (Siddarth Seth via vinodkv)
MAPREDUCE-3687. If AM dies before it returns new tracking URL, proxy
redirects to http://N/A/ and doesn't return error code (Ravi Prakash via
bobby)
MAPREDUCE-3920. Revise yarn default port number selection
(Dave Thompson via tgraves)
MAPREDUCE-3903. Add support for mapreduce admin users. (Thomas Graves via
sseth)
MAPREDUCE-3706. Fix circular redirect error in job-attempts page. (bobby
via acmurthy)
Release 0.23.1 - 2012-02-17
INCOMPATIBLE CHANGES

View File

@ -141,7 +141,7 @@ public class LocalContainerAllocator extends RMCommunicator
nodeId.setPort(1234);
container.setNodeId(nodeId);
container.setContainerToken(null);
container.setNodeHttpAddress("localhost:9999");
container.setNodeHttpAddress("localhost:8042");
// send the container-assigned event to task attempt
if (event.getAttemptID().getTaskId().getTaskType() == TaskType.MAP) {

View File

@ -104,7 +104,7 @@ public class MRApp extends MRAppMaster {
public static String NM_HOST = "localhost";
public static int NM_PORT = 1234;
public static int NM_HTTP_PORT = 9999;
public static int NM_HTTP_PORT = 8042;
private static final RecordFactory recordFactory =
RecordFactoryProvider.getRecordFactory(null);

View File

@ -155,7 +155,7 @@ public class MRAppBenchmark {
nodeId.setPort(1234);
container.setNodeId(nodeId);
container.setContainerToken(null);
container.setNodeHttpAddress("localhost:9999");
container.setNodeHttpAddress("localhost:8042");
getContext().getEventHandler()
.handle(
new TaskAttemptContainerAssignedEvent(event

View File

@ -98,7 +98,7 @@ public class MockJobs extends MockApps {
public static final String NM_HOST = "localhost";
public static final int NM_PORT = 1234;
public static final int NM_HTTP_PORT = 9999;
public static final int NM_HTTP_PORT = 8042;
static final int DT = 1000000; // ms
@ -284,7 +284,7 @@ public class MockJobs extends MockApps {
@Override
public String getNodeHttpAddress() {
return "localhost:9999";
return "localhost:8042";
}
@Override

View File

@ -20,6 +20,8 @@ package org.apache.hadoop.mapred;
import java.util.HashMap;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.JobACL;
@ -31,9 +33,12 @@ import org.apache.hadoop.security.authorize.AccessControlList;
@InterfaceAudience.Private
public class JobACLsManager {
static final Log LOG = LogFactory.getLog(JobACLsManager.class);
Configuration conf;
private final AccessControlList adminAcl;
public JobACLsManager(Configuration conf) {
adminAcl = new AccessControlList(conf.get(MRConfig.MR_ADMINS, " "));
this.conf = conf;
}
@ -71,6 +76,18 @@ public class JobACLsManager {
return acls;
}
/**
* Is the calling user an admin for the mapreduce cluster
* i.e. member of mapreduce.cluster.administrators
* @return true, if user is an admin
*/
boolean isMRAdmin(UserGroupInformation callerUGI) {
if (adminAcl.isUserAllowed(callerUGI)) {
return true;
}
return false;
}
/**
* If authorization is enabled, checks whether the user (in the callerUGI)
* is authorized to perform the operation specified by 'jobOperation' on
@ -89,13 +106,18 @@ public class JobACLsManager {
public boolean checkAccess(UserGroupInformation callerUGI,
JobACL jobOperation, String jobOwner, AccessControlList jobACL) {
if (LOG.isDebugEnabled()) {
LOG.debug("checkAccess job acls, jobOwner: " + jobOwner + " jobacl: "
+ jobOperation.toString() + " user: " + callerUGI.getShortUserName());
}
String user = callerUGI.getShortUserName();
if (!areACLsEnabled()) {
return true;
}
// Allow Job-owner for any operation on the job
if (user.equals(jobOwner)
if (isMRAdmin(callerUGI)
|| user.equals(jobOwner)
|| jobACL.isUserAllowed(callerUGI)) {
return true;
}

View File

@ -0,0 +1,142 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.JobACL;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.junit.Test;
/**
* Test the job acls manager
*/
public class TestJobAclsManager {
@Test
public void testClusterAdmins() {
Map<JobACL, AccessControlList> tmpJobACLs = new HashMap<JobACL, AccessControlList>();
Configuration conf = new Configuration();
String jobOwner = "testuser";
conf.set(JobACL.VIEW_JOB.getAclName(), jobOwner);
conf.set(JobACL.MODIFY_JOB.getAclName(), jobOwner);
conf.setBoolean(MRConfig.MR_ACLS_ENABLED, true);
String clusterAdmin = "testuser2";
conf.set(MRConfig.MR_ADMINS, clusterAdmin);
JobACLsManager aclsManager = new JobACLsManager(conf);
tmpJobACLs = aclsManager.constructJobACLs(conf);
final Map<JobACL, AccessControlList> jobACLs = tmpJobACLs;
UserGroupInformation callerUGI = UserGroupInformation.createUserForTesting(
clusterAdmin, new String[] {});
// cluster admin should have access
boolean val = aclsManager.checkAccess(callerUGI, JobACL.VIEW_JOB, jobOwner,
jobACLs.get(JobACL.VIEW_JOB));
assertTrue("cluster admin should have view access", val);
val = aclsManager.checkAccess(callerUGI, JobACL.MODIFY_JOB, jobOwner,
jobACLs.get(JobACL.MODIFY_JOB));
assertTrue("cluster admin should have modify access", val);
}
@Test
public void testClusterNoAdmins() {
Map<JobACL, AccessControlList> tmpJobACLs = new HashMap<JobACL, AccessControlList>();
Configuration conf = new Configuration();
String jobOwner = "testuser";
conf.set(JobACL.VIEW_JOB.getAclName(), "");
conf.setBoolean(MRConfig.MR_ACLS_ENABLED, true);
String noAdminUser = "testuser2";
JobACLsManager aclsManager = new JobACLsManager(conf);
tmpJobACLs = aclsManager.constructJobACLs(conf);
final Map<JobACL, AccessControlList> jobACLs = tmpJobACLs;
UserGroupInformation callerUGI = UserGroupInformation.createUserForTesting(
noAdminUser, new String[] {});
// random user should not have access
boolean val = aclsManager.checkAccess(callerUGI, JobACL.VIEW_JOB, jobOwner,
jobACLs.get(JobACL.VIEW_JOB));
assertFalse("random user should not have view access", val);
val = aclsManager.checkAccess(callerUGI, JobACL.MODIFY_JOB, jobOwner,
jobACLs.get(JobACL.MODIFY_JOB));
assertFalse("random user should not have modify access", val);
callerUGI = UserGroupInformation.createUserForTesting(jobOwner,
new String[] {});
// Owner should have access
val = aclsManager.checkAccess(callerUGI, JobACL.VIEW_JOB, jobOwner,
jobACLs.get(JobACL.VIEW_JOB));
assertTrue("owner should have view access", val);
val = aclsManager.checkAccess(callerUGI, JobACL.MODIFY_JOB, jobOwner,
jobACLs.get(JobACL.MODIFY_JOB));
assertTrue("owner should have modify access", val);
}
@Test
public void testAclsOff() {
Map<JobACL, AccessControlList> tmpJobACLs = new HashMap<JobACL, AccessControlList>();
Configuration conf = new Configuration();
String jobOwner = "testuser";
conf.set(JobACL.VIEW_JOB.getAclName(), jobOwner);
conf.setBoolean(MRConfig.MR_ACLS_ENABLED, false);
String noAdminUser = "testuser2";
JobACLsManager aclsManager = new JobACLsManager(conf);
tmpJobACLs = aclsManager.constructJobACLs(conf);
final Map<JobACL, AccessControlList> jobACLs = tmpJobACLs;
UserGroupInformation callerUGI = UserGroupInformation.createUserForTesting(
noAdminUser, new String[] {});
// acls off so anyone should have access
boolean val = aclsManager.checkAccess(callerUGI, JobACL.VIEW_JOB, jobOwner,
jobACLs.get(JobACL.VIEW_JOB));
assertTrue("acls off so anyone should have access", val);
}
@Test
public void testGroups() {
Map<JobACL, AccessControlList> tmpJobACLs = new HashMap<JobACL, AccessControlList>();
Configuration conf = new Configuration();
String jobOwner = "testuser";
conf.set(JobACL.VIEW_JOB.getAclName(), jobOwner);
conf.setBoolean(MRConfig.MR_ACLS_ENABLED, true);
String user = "testuser2";
String adminGroup = "adminGroup";
conf.set(MRConfig.MR_ADMINS, " " + adminGroup);
JobACLsManager aclsManager = new JobACLsManager(conf);
tmpJobACLs = aclsManager.constructJobACLs(conf);
final Map<JobACL, AccessControlList> jobACLs = tmpJobACLs;
UserGroupInformation callerUGI = UserGroupInformation.createUserForTesting(
user, new String[] {adminGroup});
// acls are on; a member of the admin group should have access
boolean val = aclsManager.checkAccess(callerUGI, JobACL.VIEW_JOB, jobOwner,
jobACLs.get(JobACL.VIEW_JOB));
assertTrue("user in admin group should have access", val);
}
}

View File

@ -54,7 +54,7 @@ public class TestMaster {
}
// Change master address to a valid value
conf.set(MRConfig.MASTER_ADDRESS, "bar.com:9999");
conf.set(MRConfig.MASTER_ADDRESS, "bar.com:8042");
masterHostname = Master.getMasterAddress(conf).getHostName();
assertEquals(masterHostname, "bar.com");

View File

@ -192,7 +192,6 @@ public class HistoryClientService extends AbstractService {
throw RPCUtil.getRemoteException("Unknown job " + jobID);
}
JobACL operation = JobACL.VIEW_JOB;
//TODO disable check access for now.
checkAccess(job, operation);
return job;
}
@ -324,9 +323,7 @@ public class HistoryClientService extends AbstractService {
private void checkAccess(Job job, JobACL jobOperation)
throws YarnRemoteException {
if (!UserGroupInformation.isSecurityEnabled()) {
return;
}
UserGroupInformation callerUGI;
try {
callerUGI = UserGroupInformation.getCurrentUser();

View File

@ -92,7 +92,7 @@ public class TestYarnClientProtocolProvider extends TestCase {
rmDTToken.setIdentifier(ByteBuffer.wrap(new byte[2]));
rmDTToken.setKind("Testclusterkind");
rmDTToken.setPassword(ByteBuffer.wrap("testcluster".getBytes()));
rmDTToken.setService("0.0.0.0:8040");
rmDTToken.setService("0.0.0.0:8032");
getDTResponse.setRMDelegationToken(rmDTToken);
ClientRMProtocol cRMProtocol = mock(ClientRMProtocol.class);
when(cRMProtocol.getDelegationToken(any(

View File

@ -32,14 +32,14 @@ import org.apache.hadoop.yarn.util.ProtoUtils;
public class LocalResourcePBImpl extends ProtoBase<LocalResourceProto> implements LocalResource {
public class LocalResourcePBImpl extends ProtoBase<LocalResourceProto>
implements LocalResource {
LocalResourceProto proto = LocalResourceProto.getDefaultInstance();
LocalResourceProto.Builder builder = null;
boolean viaProto = false;
private URL url = null;
public LocalResourcePBImpl() {
builder = LocalResourceProto.newBuilder();
}
@ -49,59 +49,54 @@ public class LocalResourcePBImpl extends ProtoBase<LocalResourceProto> implement
viaProto = true;
}
public LocalResourceProto getProto() {
mergeLocalToProto();
public synchronized LocalResourceProto getProto() {
mergeLocalToBuilder();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
private void mergeLocalToBuilder() {
if (this.url != null) {
private synchronized void mergeLocalToBuilder() {
LocalResourceProtoOrBuilder l = viaProto ? proto : builder;
if (this.url != null
&& !(l.getResource().equals(((URLPBImpl) url).getProto()))) {
maybeInitBuilder();
l = builder;
builder.setResource(convertToProtoFormat(this.url));
}
}
private void mergeLocalToProto() {
if (viaProto)
maybeInitBuilder();
mergeLocalToBuilder();
proto = builder.build();
viaProto = true;
}
private void maybeInitBuilder() {
private synchronized void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = LocalResourceProto.newBuilder(proto);
}
viaProto = false;
}
@Override
public long getSize() {
public synchronized long getSize() {
LocalResourceProtoOrBuilder p = viaProto ? proto : builder;
return (p.getSize());
}
@Override
public void setSize(long size) {
public synchronized void setSize(long size) {
maybeInitBuilder();
builder.setSize((size));
}
@Override
public long getTimestamp() {
public synchronized long getTimestamp() {
LocalResourceProtoOrBuilder p = viaProto ? proto : builder;
return (p.getTimestamp());
}
@Override
public void setTimestamp(long timestamp) {
public synchronized void setTimestamp(long timestamp) {
maybeInitBuilder();
builder.setTimestamp((timestamp));
}
@Override
public LocalResourceType getType() {
public synchronized LocalResourceType getType() {
LocalResourceProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasType()) {
return null;
@ -110,7 +105,7 @@ public class LocalResourcePBImpl extends ProtoBase<LocalResourceProto> implement
}
@Override
public void setType(LocalResourceType type) {
public synchronized void setType(LocalResourceType type) {
maybeInitBuilder();
if (type == null) {
builder.clearType();
@ -119,7 +114,7 @@ public class LocalResourcePBImpl extends ProtoBase<LocalResourceProto> implement
builder.setType(convertToProtoFormat(type));
}
@Override
public URL getResource() {
public synchronized URL getResource() {
LocalResourceProtoOrBuilder p = viaProto ? proto : builder;
if (this.url != null) {
return this.url;
@ -132,14 +127,14 @@ public class LocalResourcePBImpl extends ProtoBase<LocalResourceProto> implement
}
@Override
public void setResource(URL resource) {
public synchronized void setResource(URL resource) {
maybeInitBuilder();
if (resource == null)
builder.clearResource();
this.url = resource;
}
@Override
public LocalResourceVisibility getVisibility() {
public synchronized LocalResourceVisibility getVisibility() {
LocalResourceProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasVisibility()) {
return null;
@ -148,7 +143,7 @@ public class LocalResourcePBImpl extends ProtoBase<LocalResourceProto> implement
}
@Override
public void setVisibility(LocalResourceVisibility visibility) {
public synchronized void setVisibility(LocalResourceVisibility visibility) {
maybeInitBuilder();
if (visibility == null) {
builder.clearVisibility();
@ -180,7 +175,4 @@ public class LocalResourcePBImpl extends ProtoBase<LocalResourceProto> implement
private LocalResourceVisibility convertFromProtoFormat(LocalResourceVisibilityProto e) {
return ProtoUtils.convertFromProtoFormat(e);
}
}

View File

@ -104,7 +104,6 @@
<configuration>
<environmentVariables>
<JAVA_HOME>${java.home}</JAVA_HOME>
<MALLOC_ARENA_MAX>4</MALLOC_ARENA_MAX>
</environmentVariables>
</configuration>
</plugin>

View File

@ -88,7 +88,7 @@ public class YarnConfiguration extends Configuration {
/** The address of the applications manager interface in the RM.*/
public static final String RM_ADDRESS =
RM_PREFIX + "address";
public static final int DEFAULT_RM_PORT = 8040;
public static final int DEFAULT_RM_PORT = 8032;
public static final String DEFAULT_RM_ADDRESS =
"0.0.0.0:" + DEFAULT_RM_PORT;
@ -123,7 +123,7 @@ public class YarnConfiguration extends Configuration {
public static final String RM_RESOURCE_TRACKER_ADDRESS =
RM_PREFIX + "resource-tracker.address";
public static final int DEFAULT_RM_RESOURCE_TRACKER_PORT = 8025;
public static final int DEFAULT_RM_RESOURCE_TRACKER_PORT = 8031;
public static final String DEFAULT_RM_RESOURCE_TRACKER_ADDRESS =
"0.0.0.0:" + DEFAULT_RM_RESOURCE_TRACKER_PORT;
@ -140,7 +140,7 @@ public class YarnConfiguration extends Configuration {
/** Are acls enabled.*/
public static final String YARN_ACL_ENABLE =
YARN_PREFIX + "acl.enable";
public static final boolean DEFAULT_YARN_ACL_ENABLE = true;
public static final boolean DEFAULT_YARN_ACL_ENABLE = false;
/** ACL of who can be admin of YARN cluster.*/
public static final String YARN_ADMIN_ACL =
@ -153,7 +153,7 @@ public class YarnConfiguration extends Configuration {
/** The address of the RM admin interface.*/
public static final String RM_ADMIN_ADDRESS =
RM_PREFIX + "admin.address";
public static final int DEFAULT_RM_ADMIN_PORT = 8141;
public static final int DEFAULT_RM_ADMIN_PORT = 8033;
public static final String DEFAULT_RM_ADMIN_ADDRESS = "0.0.0.0:" +
DEFAULT_RM_ADMIN_PORT;
@ -285,7 +285,7 @@ public class YarnConfiguration extends Configuration {
/** Address where the localizer IPC is.*/
public static final String NM_LOCALIZER_ADDRESS =
NM_PREFIX + "localizer.address";
public static final int DEFAULT_NM_LOCALIZER_PORT = 4344;
public static final int DEFAULT_NM_LOCALIZER_PORT = 8040;
public static final String DEFAULT_NM_LOCALIZER_ADDRESS = "0.0.0.0:" +
DEFAULT_NM_LOCALIZER_PORT;
@ -366,7 +366,7 @@ public class YarnConfiguration extends Configuration {
/** NM Webapp address.**/
public static final String NM_WEBAPP_ADDRESS = NM_PREFIX + "webapp.address";
public static final int DEFAULT_NM_WEBAPP_PORT = 9999;
public static final int DEFAULT_NM_WEBAPP_PORT = 8042;
public static final String DEFAULT_NM_WEBAPP_ADDRESS = "0.0.0.0:" +
DEFAULT_NM_WEBAPP_PORT;
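A small, hedged sketch (not part of this change) showing how the renumbered defaults surface through the API; with no yarn-site.xml overrides the lookups below resolve to the new values:
import org.apache.hadoop.yarn.conf.YarnConfiguration;
public class ShowYarnDefaults {
  public static void main(String[] args) {
    YarnConfiguration conf = new YarnConfiguration();
    // Without site overrides these resolve to the revised defaults.
    System.out.println(conf.get(YarnConfiguration.RM_ADDRESS,
        YarnConfiguration.DEFAULT_RM_ADDRESS));        // 0.0.0.0:8032
    System.out.println(conf.get(YarnConfiguration.NM_WEBAPP_ADDRESS,
        YarnConfiguration.DEFAULT_NM_WEBAPP_ADDRESS)); // 0.0.0.0:8042
  }
}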

View File

@ -142,7 +142,7 @@ public class ConverterUtils {
}
public static String toString(ContainerId cId) {
return cId.toString();
return cId == null ? null : cId.toString();
}
public static NodeId toNodeId(String nodeIdStr) {

View File

@ -61,7 +61,7 @@
<property>
<description>The address of the applications manager interface in the RM.</description>
<name>yarn.resourcemanager.address</name>
<value>0.0.0.0:8040</value>
<value>0.0.0.0:8032</value>
</property>
<property>
@ -101,7 +101,7 @@
<property>
<name>yarn.resourcemanager.resource-tracker.address</name>
<value>0.0.0.0:8025</value>
<value>0.0.0.0:8031</value>
</property>
<property>
@ -119,7 +119,7 @@
<property>
<description>The address of the RM admin interface.</description>
<name>yarn.resourcemanager.admin.address</name>
<value>0.0.0.0:8141</value>
<value>0.0.0.0:8033</value>
</property>
<property>
@ -274,7 +274,7 @@
<property>
<description>Address where the localizer IPC is.</description>
<name>yarn.nodemanager.localizer.address</name>
<value>0.0.0.0:4344</value>
<value>0.0.0.0:8040</value>
</property>
<property>
@ -355,7 +355,7 @@
<property>
<description>NM Webapp address.</description>
<name>yarn.nodemanager.webapp.address</name>
<value>0.0.0.0:9999</value>
<value>0.0.0.0:8042</value>
</property>
<property>

View File

@ -27,10 +27,10 @@ import org.junit.Test;
public class TestNodeId {
@Test
public void testNodeId() {
NodeId nodeId1 = createNodeId("10.18.52.124", 45454);
NodeId nodeId2 = createNodeId("10.18.52.125", 45452);
NodeId nodeId3 = createNodeId("10.18.52.124", 45454);
NodeId nodeId4 = createNodeId("10.18.52.124", 45453);
NodeId nodeId1 = createNodeId("10.18.52.124", 8041);
NodeId nodeId2 = createNodeId("10.18.52.125", 8038);
NodeId nodeId3 = createNodeId("10.18.52.124", 8041);
NodeId nodeId4 = createNodeId("10.18.52.124", 8039);
Assert.assertTrue(nodeId1.equals(nodeId3));
Assert.assertFalse(nodeId1.equals(nodeId2));
@ -44,7 +44,7 @@ public class TestNodeId {
Assert.assertFalse(nodeId1.hashCode() == nodeId2.hashCode());
Assert.assertFalse(nodeId3.hashCode() == nodeId4.hashCode());
Assert.assertEquals("10.18.52.124:45454", nodeId1.toString());
Assert.assertEquals("10.18.52.124:8041", nodeId1.toString());
}
private NodeId createNodeId(String host, int port) {

View File

@ -22,6 +22,7 @@ import static org.junit.Assert.*;
import java.net.URISyntaxException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.URL;
import org.junit.Test;
@ -35,4 +36,17 @@ public class TestConverterUtils {
assertEquals(expectedPath, actualPath);
}
@Test
public void testContainerId() throws URISyntaxException {
ContainerId id = BuilderUtils.newContainerId(0, 0, 0, 0);
String cid = ConverterUtils.toString(id);
assertEquals("container_0_0000_00_000000", cid);
ContainerId gen = ConverterUtils.toContainerId(cid);
assertEquals(gen, id);
}
@Test
public void testContainerIdNull() throws URISyntaxException {
assertNull(ConverterUtils.toString((ContainerId)null));
}
}

View File

@ -371,8 +371,6 @@ public class ContainerLocalizer {
Path appFileCacheDir = new Path(appBase, FILECACHE);
appsFileCacheDirs[i] = appFileCacheDir.toString();
lfs.mkdir(appFileCacheDir, null, false);
// $x/usercache/$user/appcache/$appId/output
lfs.mkdir(new Path(appBase, OUTPUTDIR), null, false);
}
conf.setStrings(String.format(APPCACHE_CTXT_FMT, appId), appsFileCacheDirs);
conf.setStrings(String.format(USERCACHE_CTXT_FMT, appId), usersFileCacheDirs);

View File

@ -140,7 +140,7 @@ public class TestDefaultContainerExecutor {
// final String appId = "app_RM_0";
// final Path logDir = new Path(basedir, "logs");
// final Path nmLocal = new Path(basedir, "nmPrivate/" + user + "/" + appId);
// final InetSocketAddress nmAddr = new InetSocketAddress("foobar", 4344);
// final InetSocketAddress nmAddr = new InetSocketAddress("foobar", 8040);
// System.out.println("NMLOCAL: " + nmLocal);
// Random r = new Random();
//

View File

@ -80,7 +80,7 @@ public class TestPBLocalizerRPC {
@Test
public void testLocalizerRPC() throws Exception {
InetSocketAddress locAddr = new InetSocketAddress("0.0.0.0", 4344);
InetSocketAddress locAddr = new InetSocketAddress("0.0.0.0", 8040);
LocalizerService server = new LocalizerService(locAddr);
try {
server.start();

View File

@ -89,7 +89,7 @@ public class TestContainerLocalizer {
final String user = "yak";
final String appId = "app_RM_0";
final String cId = "container_0";
final InetSocketAddress nmAddr = new InetSocketAddress("foobar", 4344);
final InetSocketAddress nmAddr = new InetSocketAddress("foobar", 8040);
final List<Path> localDirs = new ArrayList<Path>();
for (int i = 0; i < 4; ++i) {
localDirs.add(lfs.makeQualified(new Path(basedir, i + "")));
@ -177,9 +177,6 @@ public class TestContainerLocalizer {
// $x/usercache/$user/appcache/$appId/filecache
Path appcache = new Path(appDir, ContainerLocalizer.FILECACHE);
verify(spylfs).mkdir(eq(appcache), isA(FsPermission.class), eq(false));
// $x/usercache/$user/appcache/$appId/output
Path appOutput = new Path(appDir, ContainerLocalizer.OUTPUTDIR);
verify(spylfs).mkdir(eq(appOutput), isA(FsPermission.class), eq(false));
}
// verify tokens read at expected location

View File

@ -87,7 +87,7 @@ public class TestNMWebServices extends JerseyTest {
protected void configureServlets() {
nmContext = new NodeManager.NMContext();
nmContext.getNodeId().setHost("testhost.foo.com");
nmContext.getNodeId().setPort(9999);
nmContext.getNodeId().setPort(8042);
resourceView = new ResourceView() {
@Override
public long getVmemAllocatedForContainers() {
@ -330,7 +330,7 @@ public class TestNMWebServices extends JerseyTest {
String hadoopVersion, String resourceManagerVersionBuiltOn,
String resourceManagerBuildVersion, String resourceManagerVersion) {
WebServicesTestUtils.checkStringMatch("id", "testhost.foo.com:9999", id);
WebServicesTestUtils.checkStringMatch("id", "testhost.foo.com:8042", id);
WebServicesTestUtils.checkStringMatch("healthReport", "Healthy",
healthReport);
assertEquals("totalVmemAllocatedContainersMB incorrect", 15872,

View File

@ -93,7 +93,7 @@ public class TestNMWebServicesContainers extends JerseyTest {
protected void configureServlets() {
nmContext = new NodeManager.NMContext();
nmContext.getNodeId().setHost("testhost.foo.com");
nmContext.getNodeId().setPort(9999);
nmContext.getNodeId().setPort(8042);
resourceView = new ResourceView() {
@Override
public long getVmemAllocatedForContainers() {

View File

@ -91,10 +91,16 @@ extends org.apache.hadoop.yarn.server.resourcemanager.scheduler.Queue {
public float getAbsoluteMaximumCapacity();
/**
* Get the currently utilized capacity of the queue
* relative to it's parent queue.
* @return the currently utilized capacity of the queue
* relative to it's parent queue
* Get the current absolute used capacity of the queue
* relative to the entire cluster.
* @return queue absolute used capacity
*/
public float getAbsoluteUsedCapacity();
/**
* Get the current used capacity of the queue
* and its children (if any).
* @return queue used capacity
*/
public float getUsedCapacity();
@ -104,6 +110,12 @@ extends org.apache.hadoop.yarn.server.resourcemanager.scheduler.Queue {
*/
public void setUsedCapacity(float usedCapacity);
/**
* Set absolute used capacity of the queue.
* @param absUsedCapacity absolute used capacity of the queue
*/
public void setAbsoluteUsedCapacity(float absUsedCapacity);
/**
* Get the currently utilized resources in the cluster
* by the queue and children (if any).
@ -111,21 +123,6 @@ extends org.apache.hadoop.yarn.server.resourcemanager.scheduler.Queue {
*/
public Resource getUsedResources();
/**
* Get the current <em>utilization</em> of the queue
* and it's children (if any).
* Utilization is defined as the ratio of
* <em>used-capacity over configured-capacity</em> of the queue.
* @return queue utilization
*/
public float getUtilization();
/**
* Get the current <em>utilization</em> of the queue.
* @param utilization queue utilization
*/
public void setUtilization(float utilization);
/**
* Get the current run-state of the queue
* @return current run-state

View File

@ -23,20 +23,24 @@ import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources;
class CSQueueUtils {
final static float EPSILON = 0.0001f;
public static void checkMaxCapacity(String queueName,
float capacity, float maximumCapacity) {
if (maximumCapacity < 0.0f || maximumCapacity > 1.0f ||
maximumCapacity < capacity) {
if (maximumCapacity < 0.0f || maximumCapacity > 1.0f) {
throw new IllegalArgumentException(
"Illegal value of maximumCapacity " + maximumCapacity +
" used in call to setMaxCapacity for queue " + queueName);
}
if (maximumCapacity < capacity) {
throw new IllegalArgumentException(
"Illegal call to setMaxCapacity. " +
"Queue '" + queueName + "' has " +
"capacity (" + capacity + ") greater than " +
"maximumCapacity (" + maximumCapacity + ")" );
}
public static void checkAbsoluteCapacities(String queueName,
float absCapacity, float absMaxCapacity) {
if (absMaxCapacity < (absCapacity - EPSILON)) {
throw new IllegalArgumentException("Illegal call to setMaxCapacity. "
+ "Queue '" + queueName + "' has " + "an absolute capacity (" + absCapacity
+ ") greater than " + "its absolute maximumCapacity (" + absMaxCapacity
+ ")");
}
}
@ -75,18 +79,16 @@ class CSQueueUtils {
final int usedMemory = childQueue.getUsedResources().getMemory();
float queueLimit = 0.0f;
float utilization = 0.0f;
float absoluteUsedCapacity = 0.0f;
float usedCapacity = 0.0f;
if (clusterMemory > 0) {
queueLimit = clusterMemory * childQueue.getAbsoluteCapacity();
final float parentAbsoluteCapacity =
(parentQueue == null) ? 1.0f : parentQueue.getAbsoluteCapacity();
utilization = (usedMemory / queueLimit);
usedCapacity = (usedMemory / (clusterMemory * parentAbsoluteCapacity));
absoluteUsedCapacity = ((float)usedMemory / (float)clusterMemory);
usedCapacity = (usedMemory / queueLimit);
}
childQueue.setUtilization(utilization);
childQueue.setUsedCapacity(usedCapacity);
childQueue.setAbsoluteUsedCapacity(absoluteUsedCapacity);
int available =
Math.max((roundUp(minimumAllocation, (int)queueLimit) - usedMemory), 0);
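A worked sketch of the two metrics computed above, using made-up figures (not part of the patch): a 100 GB cluster, a queue whose absolute capacity is 0.25, and 10 GB currently in use.
public class CapacityMathExample {
  public static void main(String[] args) {
    float clusterMemory = 100 * 1024;   // MB, hypothetical
    float absoluteCapacity = 0.25f;     // queue's share of the whole cluster
    float usedMemory = 10 * 1024;       // MB currently used by the queue
    float queueLimit = clusterMemory * absoluteCapacity;      // 25600 MB
    float usedCapacity = usedMemory / queueLimit;             // 0.4: fraction of the queue's own limit
    float absoluteUsedCapacity = usedMemory / clusterMemory;  // 0.1: fraction of the whole cluster
    System.out.println(usedCapacity + " " + absoluteUsedCapacity);
  }
}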

View File

@ -91,9 +91,9 @@ implements ResourceScheduler, CapacitySchedulerContext {
static final Comparator<CSQueue> queueComparator = new Comparator<CSQueue>() {
@Override
public int compare(CSQueue q1, CSQueue q2) {
if (q1.getUtilization() < q2.getUtilization()) {
if (q1.getUsedCapacity() < q2.getUsedCapacity()) {
return -1;
} else if (q1.getUtilization() > q2.getUtilization()) {
} else if (q1.getUsedCapacity() > q2.getUsedCapacity()) {
return 1;
}
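The ordering quantity itself is unchanged: the old "utilization" is simply renamed usedCapacity (used memory over the queue's own absolute guarantee), so the comparator still sorts the least-loaded queue relative to its guarantee first. A self-contained, hypothetical sketch of that ordering (QueueOrderSketch and its values are made up):

// Hypothetical example: ascending usedCapacity ordering, as in the comparator above.
import java.util.Arrays;
import java.util.Comparator;

public class QueueOrderSketch {
  static class Q {
    final String name;
    final float usedCapacity;
    Q(String name, float usedCapacity) { this.name = name; this.usedCapacity = usedCapacity; }
  }

  public static void main(String[] args) {
    Q[] qs = { new Q("a", 0.9f), new Q("b", 0.3f), new Q("c", 1.5f) };
    Arrays.sort(qs, new Comparator<Q>() {
      @Override
      public int compare(Q q1, Q q2) {
        if (q1.usedCapacity < q2.usedCapacity) {
          return -1;
        } else if (q1.usedCapacity > q2.usedCapacity) {
          return 1;
        }
        return 0;
      }
    });
    for (Q q : qs) {
      System.out.println(q.name + " usedCapacity=" + q.usedCapacity); // prints b, then a, then c
    }
  }
}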

View File

@ -80,6 +80,7 @@ public class LeafQueue implements CSQueue {
private float absoluteCapacity;
private float maximumCapacity;
private float absoluteMaxCapacity;
private float absoluteUsedCapacity = 0.0f;
private int userLimit;
private float userLimitFactor;
@ -91,7 +92,6 @@ public class LeafQueue implements CSQueue {
private int maxActiveApplicationsPerUser;
private Resource usedResources = Resources.createResource(0);
private float utilization = 0.0f;
private float usedCapacity = 0.0f;
private volatile int numContainers;
@ -210,9 +210,11 @@ public class LeafQueue implements CSQueue {
{
// Sanity check
CSQueueUtils.checkMaxCapacity(getQueueName(), capacity, maximumCapacity);
float absCapacity = parent.getAbsoluteCapacity() * capacity;
CSQueueUtils.checkAbsoluteCapacities(getQueueName(), absCapacity, absoluteMaxCapacity);
this.capacity = capacity;
this.absoluteCapacity = parent.getAbsoluteCapacity() * capacity;
this.absoluteCapacity = absCapacity;
this.maximumCapacity = maximumCapacity;
this.absoluteMaxCapacity = absoluteMaxCapacity;
@ -274,12 +276,11 @@ public class LeafQueue implements CSQueue {
"(int)(maxActiveApplications * (userLimit / 100.0f) * " +
"userLimitFactor)," +
"1) ]" + "\n" +
"utilization = " + utilization +
" [= usedResourcesMemory / " +
"(clusterResourceMemory * absoluteCapacity)]" + "\n" +
"usedCapacity = " + usedCapacity +
" [= usedResourcesMemory / " +
"(clusterResourceMemory * parent.absoluteCapacity)]" + "\n" +
"(clusterResourceMemory * absoluteCapacity)]" + "\n" +
"absoluteUsedCapacity = " + absoluteUsedCapacity +
" [= usedResourcesMemory / clusterResourceMemory]" + "\n" +
"maxAMResourcePercent = " + maxAMResourcePercent +
" [= configuredMaximumAMResourcePercent ]" + "\n" +
"minimumAllocationFactor = " + minimumAllocationFactor +
@ -313,6 +314,11 @@ public class LeafQueue implements CSQueue {
return absoluteMaxCapacity;
}
@Override
public synchronized float getAbsoluteUsedCapacity() {
return absoluteUsedCapacity;
}
@Override
public CSQueue getParent() {
return parent;
@ -383,24 +389,21 @@ public class LeafQueue implements CSQueue {
return usedResources;
}
@Override
public synchronized float getUtilization() {
return utilization;
}
@Override
public List<CSQueue> getChildQueues() {
return null;
}
public synchronized void setUtilization(float utilization) {
this.utilization = utilization;
}
@Override
public synchronized void setUsedCapacity(float usedCapacity) {
this.usedCapacity = usedCapacity;
}
@Override
public synchronized void setAbsoluteUsedCapacity(float absUsedCapacity) {
this.absoluteUsedCapacity = absUsedCapacity;
}
/**
* Set maximum capacity - used only for testing.
* @param maximumCapacity new max capacity
@ -408,10 +411,11 @@ public class LeafQueue implements CSQueue {
synchronized void setMaxCapacity(float maximumCapacity) {
// Sanity check
CSQueueUtils.checkMaxCapacity(getQueueName(), capacity, maximumCapacity);
float absMaxCapacity = CSQueueUtils.computeAbsoluteMaximumCapacity(maximumCapacity, parent);
CSQueueUtils.checkAbsoluteCapacities(getQueueName(), absoluteCapacity, absMaxCapacity);
this.maximumCapacity = maximumCapacity;
this.absoluteMaxCapacity =
CSQueueUtils.computeAbsoluteMaximumCapacity(maximumCapacity, parent);
this.absoluteMaxCapacity = absMaxCapacity;
}
/**
@ -516,7 +520,7 @@ public class LeafQueue implements CSQueue {
"absoluteCapacity=" + absoluteCapacity + ", " +
"usedResources=" + usedResources.getMemory() + "MB, " +
"usedCapacity=" + getUsedCapacity() + ", " +
"utilization=" + getUtilization() + ", " +
"absoluteUsedCapacity=" + getAbsoluteUsedCapacity() + ", " +
"numApps=" + getNumApplications() + ", " +
"numContainers=" + getNumContainers();
}
@ -1228,7 +1232,8 @@ public class LeafQueue implements CSQueue {
" container=" + container +
" containerId=" + container.getId() +
" queue=" + this +
" util=" + getUtilization() +
" usedCapacity=" + getUsedCapacity() +
" absoluteUsedCapacity=" + getAbsoluteUsedCapacity() +
" used=" + usedResources +
" cluster=" + clusterResource);
@ -1241,7 +1246,8 @@ public class LeafQueue implements CSQueue {
" application=" + application.getApplicationId() +
" resource=" + request.getCapability() +
" queue=" + this.toString() +
" util=" + getUtilization() +
" usedCapacity=" + getUsedCapacity() +
" absoluteUsedCapacity=" + getAbsoluteUsedCapacity() +
" used=" + usedResources +
" cluster=" + clusterResource);
@ -1307,7 +1313,8 @@ public class LeafQueue implements CSQueue {
" container=" + container +
" resource=" + container.getResource() +
" queue=" + this +
" util=" + getUtilization() +
" usedCapacity=" + getUsedCapacity() +
" absoluteUsedCapacity=" + getAbsoluteUsedCapacity() +
" used=" + usedResources +
" cluster=" + clusterResource);
}
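For orientation, the absolute values validated in setupQueueConfigs and setMaxCapacity above compose multiplicatively down the queue hierarchy. The two-level example below is purely illustrative (queue names and percentages are made up):

// Hypothetical illustration of how absolute capacities compose down a queue tree.
public class AbsoluteCapacitySketch {
  public static void main(String[] args) {
    float rootAbsoluteCapacity = 1.0f;   // root always owns the whole cluster
    float parentCapacity = 0.5f;         // 'parent' is configured with 50% of root
    float leafCapacity = 0.4f;           // 'leaf' is configured with 40% of 'parent'

    float parentAbsoluteCapacity = rootAbsoluteCapacity * parentCapacity; // 0.5 of the cluster
    float leafAbsoluteCapacity = parentAbsoluteCapacity * leafCapacity;   // 0.2 of the cluster

    // The patch validates such derived absolute capacities against the queue's
    // absoluteMaxCapacity (with the EPSILON tolerance) before committing either field.
    System.out.println("parentAbsoluteCapacity=" + parentAbsoluteCapacity
        + " leafAbsoluteCapacity=" + leafAbsoluteCapacity);
  }
}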

View File

@ -67,9 +67,9 @@ public class ParentQueue implements CSQueue {
private float maximumCapacity;
private float absoluteCapacity;
private float absoluteMaxCapacity;
private float absoluteUsedCapacity = 0.0f;
private float usedCapacity = 0.0f;
private float utilization = 0.0f;
private final Set<CSQueue> childQueues;
private final Comparator<CSQueue> queueComparator;
@ -158,9 +158,11 @@ public class ParentQueue implements CSQueue {
) {
// Sanity check
CSQueueUtils.checkMaxCapacity(getQueueName(), capacity, maximumCapacity);
CSQueueUtils.checkAbsoluteCapacities(getQueueName(), absoluteCapacity, absoluteMaxCapacity);
this.capacity = capacity;
this.absoluteCapacity = absoluteCapacity;
this.maximumCapacity = maximumCapacity;
this.absoluteMaxCapacity = absoluteMaxCapacity;
@ -243,6 +245,11 @@ public class ParentQueue implements CSQueue {
return absoluteMaxCapacity;
}
@Override
public synchronized float getAbsoluteUsedCapacity() {
return absoluteUsedCapacity;
}
@Override
public float getMaximumCapacity() {
return maximumCapacity;
@ -264,11 +271,6 @@ public class ParentQueue implements CSQueue {
return usedResources;
}
@Override
public synchronized float getUtilization() {
return utilization;
}
@Override
public synchronized List<CSQueue> getChildQueues() {
return new ArrayList<CSQueue>(childQueues);
@ -351,7 +353,6 @@ public class ParentQueue implements CSQueue {
"absoluteCapacity=" + absoluteCapacity + ", " +
"usedResources=" + usedResources.getMemory() + "MB, " +
"usedCapacity=" + getUsedCapacity() + ", " +
"utilization=" + getUtilization() + ", " +
"numApps=" + getNumApplications() + ", " +
"numContainers=" + getNumContainers();
}
@ -490,12 +491,14 @@ public class ParentQueue implements CSQueue {
" #applications: " + getNumApplications());
}
@Override
public synchronized void setUsedCapacity(float usedCapacity) {
this.usedCapacity = usedCapacity;
}
public synchronized void setUtilization(float utilization) {
this.utilization = utilization;
@Override
public synchronized void setAbsoluteUsedCapacity(float absUsedCapacity) {
this.absoluteUsedCapacity = absUsedCapacity;
}
/**
@ -505,10 +508,11 @@ public class ParentQueue implements CSQueue {
synchronized void setMaxCapacity(float maximumCapacity) {
// Sanity check
CSQueueUtils.checkMaxCapacity(getQueueName(), capacity, maximumCapacity);
float absMaxCapacity = CSQueueUtils.computeAbsoluteMaximumCapacity(maximumCapacity, parent);
CSQueueUtils.checkAbsoluteCapacities(getQueueName(), absoluteCapacity, absMaxCapacity);
this.maximumCapacity = maximumCapacity;
this.absoluteMaxCapacity =
CSQueueUtils.computeAbsoluteMaximumCapacity(maximumCapacity, parent);
this.absoluteMaxCapacity = absMaxCapacity;
}
@Override
@ -545,7 +549,8 @@ public class ParentQueue implements CSQueue {
LOG.info("assignedContainer" +
" queue=" + getQueueName() +
" util=" + getUtilization() +
" usedCapacity=" + getUsedCapacity() +
" absoluteUsedCapacity=" + getAbsoluteUsedCapacity() +
" used=" + usedResources +
" cluster=" + clusterResource);
@ -556,7 +561,8 @@ public class ParentQueue implements CSQueue {
if (LOG.isDebugEnabled()) {
LOG.debug("ParentQ=" + getQueueName()
+ " assignedSoFarInThisIteration=" + assignment.getResource()
+ " utilization=" + getUtilization());
+ " usedCapacity=" + getUsedCapacity()
+ " absoluteUsedCapacity=" + getAbsoluteUsedCapacity());
}
// Do not assign more than one container if this isn't the root queue
@ -639,7 +645,7 @@ public class ParentQueue implements CSQueue {
String getChildQueuesToPrint() {
StringBuilder sb = new StringBuilder();
for (CSQueue q : childQueues) {
sb.append(q.getQueuePath() + "(" + q.getUtilization() + "), ");
sb.append(q.getQueuePath() + "(" + q.getUsedCapacity() + "), ");
}
return sb.toString();
}
@ -663,7 +669,8 @@ public class ParentQueue implements CSQueue {
LOG.info("completedContainer" +
" queue=" + getQueueName() +
" util=" + getUtilization() +
" usedCapacity=" + getUsedCapacity() +
" absoluteUsedCapacity=" + getAbsoluteUsedCapacity() +
" used=" + usedResources +
" cluster=" + clusterResource);
}

View File

@ -67,12 +67,9 @@ class CapacitySchedulerPage extends RmView {
protected void render(Block html) {
ResponseInfo ri = info("\'" + lqinfo.getQueuePath().substring(5) + "\' Queue Status").
_("Queue State:", lqinfo.getQueueState()).
_("Capacity:", percent(lqinfo.getCapacity() / 100)).
_("Max Capacity:", percent(lqinfo.getMaxCapacity() / 100)).
_("Used Capacity:", percent(lqinfo.getUsedCapacity() / 100)).
_("Absolute Capacity:", percent(lqinfo.getAbsoluteCapacity() / 100)).
_("Absolute Max Capacity:", percent(lqinfo.getAbsoluteMaxCapacity() / 100)).
_("Utilization:", percent(lqinfo.getUtilization() / 100)).
_("Used Resources:", lqinfo.getUsedResources().toString()).
_("Num Active Applications:", Integer.toString(lqinfo.getNumActiveApplications())).
_("Num Pending Applications:", Integer.toString(lqinfo.getNumPendingApplications())).
@ -81,8 +78,10 @@ class CapacitySchedulerPage extends RmView {
_("Max Applications Per User:", Integer.toString(lqinfo.getMaxApplicationsPerUser())).
_("Max Active Applications:", Integer.toString(lqinfo.getMaxActiveApplications())).
_("Max Active Applications Per User:", Integer.toString(lqinfo.getMaxActiveApplicationsPerUser())).
_("User Limit:", Integer.toString(lqinfo.getUserLimit()) + "%").
_("User Limit Factor:", String.format("%.1f", lqinfo.getUserLimitFactor()));
_("Configured Capacity:", percent(lqinfo.getCapacity() / 100)).
_("Configured Max Capacity:", percent(lqinfo.getMaxCapacity() / 100)).
_("Configured Minimum User Limit Percent:", Integer.toString(lqinfo.getUserLimit()) + "%").
_("Configured User Limit Factor:", String.format("%.1f", lqinfo.getUserLimitFactor()));
html._(InfoBlock.class);
@ -103,20 +102,20 @@ class CapacitySchedulerPage extends RmView {
ArrayList<CapacitySchedulerQueueInfo> subQueues =
(csqinfo.qinfo == null) ? csqinfo.csinfo.getSubQueues()
: csqinfo.qinfo.getSubQueues();
UL<Hamlet> ul = html.ul();
UL<Hamlet> ul = html.ul("#pq");
for (CapacitySchedulerQueueInfo info : subQueues) {
float used = info.getUsedCapacity() / 100;
float set = info.getCapacity() / 100;
float max = info.getMaxCapacity() / 100;
float absCap = info.getAbsoluteCapacity() / 100;
float absMaxCap = info.getAbsoluteMaxCapacity() / 100;
float absUsedCap = info.getAbsoluteUsedCapacity() / 100;
LI<UL<Hamlet>> li = ul.
li().
a(_Q).$style(width(max * Q_MAX_WIDTH)).
$title(join("capacity:", percent(set), " used:", percent(used),
" max capacity:", percent(max))).
span().$style(join(Q_GIVEN, ";font-size:1px;", width(set/max))).
a(_Q).$style(width(absMaxCap * Q_MAX_WIDTH)).
$title(join("Absolute Capacity:", percent(absCap))).
span().$style(join(Q_GIVEN, ";font-size:1px;", width(absCap/absMaxCap))).
_('.')._().
span().$style(join(width(used*set/max),
";font-size:1px;left:0%;", used > 1 ? Q_OVER : Q_UNDER)).
span().$style(join(width(absUsedCap/absMaxCap),
";font-size:1px;left:0%;", absUsedCap > absCap ? Q_OVER : Q_UNDER)).
_('.')._().
span(".q", info.getQueuePath().substring(5))._().
span().$class("qstats").$style(left(Q_STATS_POS)).
@ -180,7 +179,6 @@ class CapacitySchedulerPage extends RmView {
_().
li().
a(_Q).$style(width(Q_MAX_WIDTH)).
$title(join("used:", percent(used))).
span().$style(join(width(used), ";left:0%;",
used > 1 ? Q_OVER : Q_UNDER))._(".")._().
span(".q", "root")._().
@ -211,8 +209,7 @@ class CapacitySchedulerPage extends RmView {
_("$(function() {",
" $('#cs a span').addClass('ui-corner-all').css('position', 'absolute');",
" $('#cs').bind('loaded.jstree', function (e, data) {",
" data.inst.open_all();",
" data.inst.close_node('#lq', true);",
" data.inst.open_node('#pq', true);",
" }).",
" jstree({",
" core: { animation: 188, html_titles: true },",

Some files were not shown because too many files have changed in this diff.