Merge trunk into HA branch.
Resolved some semantic conflicts in TestFileAppendRestart - we now log more OP_ADDs in the HA branch than we did in trunk. Resolved some conflicts around removal of VersionedProtocol, etc.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-1623@1295342 13f79535-47bb-0310-9956-ffa450edef68
Commit: 1ba357553a
@@ -41,14 +41,14 @@ public class TestAuthenticationFilter extends TestCase {
 FilterConfig config = Mockito.mock(FilterConfig.class);
 Mockito.when(config.getInitParameter(AuthenticationFilter.CONFIG_PREFIX)).thenReturn("");
 Mockito.when(config.getInitParameter("a")).thenReturn("A");
-Mockito.when(config.getInitParameterNames()).thenReturn(new Vector(Arrays.asList("a")).elements());
+Mockito.when(config.getInitParameterNames()).thenReturn(new Vector<String>(Arrays.asList("a")).elements());
 Properties props = filter.getConfiguration("", config);
 assertEquals("A", props.getProperty("a"));

 config = Mockito.mock(FilterConfig.class);
 Mockito.when(config.getInitParameter(AuthenticationFilter.CONFIG_PREFIX)).thenReturn("foo");
 Mockito.when(config.getInitParameter("foo.a")).thenReturn("A");
-Mockito.when(config.getInitParameterNames()).thenReturn(new Vector(Arrays.asList("foo.a")).elements());
+Mockito.when(config.getInitParameterNames()).thenReturn(new Vector<String>(Arrays.asList("foo.a")).elements());
 props = filter.getConfiguration("foo.", config);
 assertEquals("A", props.getProperty("a"));
 }
@@ -57,7 +57,7 @@ public class TestAuthenticationFilter extends TestCase {
 AuthenticationFilter filter = new AuthenticationFilter();
 try {
 FilterConfig config = Mockito.mock(FilterConfig.class);
-Mockito.when(config.getInitParameterNames()).thenReturn(new Vector().elements());
+Mockito.when(config.getInitParameterNames()).thenReturn(new Vector<String>().elements());
 filter.init(config);
 fail();
 } catch (ServletException ex) {
@@ -119,7 +119,7 @@ public class TestAuthenticationFilter extends TestCase {
 Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn("simple");
 Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TOKEN_VALIDITY)).thenReturn("1000");
 Mockito.when(config.getInitParameterNames()).thenReturn(
-new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
+new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
 AuthenticationFilter.AUTH_TOKEN_VALIDITY)).elements());
 filter.init(config);
 assertEquals(PseudoAuthenticationHandler.class, filter.getAuthenticationHandler().getClass());
@@ -138,7 +138,7 @@ public class TestAuthenticationFilter extends TestCase {
 Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn("simple");
 Mockito.when(config.getInitParameter(AuthenticationFilter.SIGNATURE_SECRET)).thenReturn("secret");
 Mockito.when(config.getInitParameterNames()).thenReturn(
-new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
+new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
 AuthenticationFilter.SIGNATURE_SECRET)).elements());
 filter.init(config);
 assertFalse(filter.isRandomSecret());
@@ -154,7 +154,7 @@ public class TestAuthenticationFilter extends TestCase {
 Mockito.when(config.getInitParameter(AuthenticationFilter.COOKIE_DOMAIN)).thenReturn(".foo.com");
 Mockito.when(config.getInitParameter(AuthenticationFilter.COOKIE_PATH)).thenReturn("/bar");
 Mockito.when(config.getInitParameterNames()).thenReturn(
-new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
+new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
 AuthenticationFilter.COOKIE_DOMAIN,
 AuthenticationFilter.COOKIE_PATH)).elements());
 filter.init(config);
@@ -173,7 +173,7 @@ public class TestAuthenticationFilter extends TestCase {
 Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(
 DummyAuthenticationHandler.class.getName());
 Mockito.when(config.getInitParameterNames()).thenReturn(
-new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE)).elements());
+new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE)).elements());
 filter.init(config);
 assertTrue(DummyAuthenticationHandler.init);
 } finally {
@@ -187,7 +187,7 @@ public class TestAuthenticationFilter extends TestCase {
 FilterConfig config = Mockito.mock(FilterConfig.class);
 Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn("kerberos");
 Mockito.when(config.getInitParameterNames()).thenReturn(
-new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE)).elements());
+new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE)).elements());
 filter.init(config);
 } catch (ServletException ex) {
 // Expected
@@ -204,7 +204,7 @@ public class TestAuthenticationFilter extends TestCase {
 Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(
 DummyAuthenticationHandler.class.getName());
 Mockito.when(config.getInitParameterNames()).thenReturn(
-new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE)).elements());
+new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE)).elements());
 filter.init(config);

 HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
@@ -225,7 +225,7 @@ public class TestAuthenticationFilter extends TestCase {
 DummyAuthenticationHandler.class.getName());
 Mockito.when(config.getInitParameter(AuthenticationFilter.SIGNATURE_SECRET)).thenReturn("secret");
 Mockito.when(config.getInitParameterNames()).thenReturn(
-new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
+new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
 AuthenticationFilter.SIGNATURE_SECRET)).elements());
 filter.init(config);

@@ -254,7 +254,7 @@ public class TestAuthenticationFilter extends TestCase {
 DummyAuthenticationHandler.class.getName());
 Mockito.when(config.getInitParameter(AuthenticationFilter.SIGNATURE_SECRET)).thenReturn("secret");
 Mockito.when(config.getInitParameterNames()).thenReturn(
-new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
+new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
 AuthenticationFilter.SIGNATURE_SECRET)).elements());
 filter.init(config);

@@ -288,7 +288,7 @@ public class TestAuthenticationFilter extends TestCase {
 DummyAuthenticationHandler.class.getName());
 Mockito.when(config.getInitParameter(AuthenticationFilter.SIGNATURE_SECRET)).thenReturn("secret");
 Mockito.when(config.getInitParameterNames()).thenReturn(
-new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
+new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
 AuthenticationFilter.SIGNATURE_SECRET)).elements());
 filter.init(config);

@@ -321,7 +321,7 @@ public class TestAuthenticationFilter extends TestCase {
 Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(
 DummyAuthenticationHandler.class.getName());
 Mockito.when(config.getInitParameterNames()).thenReturn(
-new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE)).elements());
+new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE)).elements());
 filter.init(config);

 HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
@@ -332,7 +332,7 @@ public class TestAuthenticationFilter extends TestCase {
 FilterChain chain = Mockito.mock(FilterChain.class);

 Mockito.doAnswer(
-new Answer() {
+new Answer<Object>() {
 @Override
 public Object answer(InvocationOnMock invocation) throws Throwable {
 fail();
@@ -358,7 +358,7 @@ public class TestAuthenticationFilter extends TestCase {
 Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TOKEN_VALIDITY)).thenReturn("1000");
 Mockito.when(config.getInitParameter(AuthenticationFilter.SIGNATURE_SECRET)).thenReturn("secret");
 Mockito.when(config.getInitParameterNames()).thenReturn(
-new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
+new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
 AuthenticationFilter.AUTH_TOKEN_VALIDITY,
 AuthenticationFilter.SIGNATURE_SECRET)).elements());

@@ -366,7 +366,7 @@ public class TestAuthenticationFilter extends TestCase {
 Mockito.when(config.getInitParameter(AuthenticationFilter.COOKIE_DOMAIN)).thenReturn(".foo.com");
 Mockito.when(config.getInitParameter(AuthenticationFilter.COOKIE_PATH)).thenReturn("/bar");
 Mockito.when(config.getInitParameterNames()).thenReturn(
-new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
+new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
 AuthenticationFilter.AUTH_TOKEN_VALIDITY,
 AuthenticationFilter.SIGNATURE_SECRET,
 AuthenticationFilter.COOKIE_DOMAIN,
@@ -387,7 +387,7 @@ public class TestAuthenticationFilter extends TestCase {
 final boolean[] calledDoFilter = new boolean[1];

 Mockito.doAnswer(
-new Answer() {
+new Answer<Object>() {
 @Override
 public Object answer(InvocationOnMock invocation) throws Throwable {
 calledDoFilter[0] = true;
@@ -398,7 +398,7 @@ public class TestAuthenticationFilter extends TestCase {

 final Cookie[] setCookie = new Cookie[1];
 Mockito.doAnswer(
-new Answer() {
+new Answer<Object>() {
 @Override
 public Object answer(InvocationOnMock invocation) throws Throwable {
 Object[] args = invocation.getArguments();
@@ -451,7 +451,7 @@ public class TestAuthenticationFilter extends TestCase {
 Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(
 DummyAuthenticationHandler.class.getName());
 Mockito.when(config.getInitParameterNames()).thenReturn(
-new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE)).elements());
+new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE)).elements());
 filter.init(config);

 HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
@@ -470,7 +470,7 @@ public class TestAuthenticationFilter extends TestCase {
 FilterChain chain = Mockito.mock(FilterChain.class);

 Mockito.doAnswer(
-new Answer() {
+new Answer<Object>() {
 @Override
 public Object answer(InvocationOnMock invocation) throws Throwable {
 Object[] args = invocation.getArguments();
@@ -496,7 +496,7 @@ public class TestAuthenticationFilter extends TestCase {
 Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(
 DummyAuthenticationHandler.class.getName());
 Mockito.when(config.getInitParameterNames()).thenReturn(
-new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE)).elements());
+new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE)).elements());
 filter.init(config);

 HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
@@ -515,7 +515,7 @@ public class TestAuthenticationFilter extends TestCase {
 FilterChain chain = Mockito.mock(FilterChain.class);

 Mockito.doAnswer(
-new Answer() {
+new Answer<Object>() {
 @Override
 public Object answer(InvocationOnMock invocation) throws Throwable {
 fail();
@@ -526,7 +526,7 @@ public class TestAuthenticationFilter extends TestCase {

 final Cookie[] setCookie = new Cookie[1];
 Mockito.doAnswer(
-new Answer() {
+new Answer<Object>() {
 @Override
 public Object answer(InvocationOnMock invocation) throws Throwable {
 Object[] args = invocation.getArguments();
@@ -556,7 +556,7 @@ public class TestAuthenticationFilter extends TestCase {
 Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(
 DummyAuthenticationHandler.class.getName());
 Mockito.when(config.getInitParameterNames()).thenReturn(
-new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE)).elements());
+new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE)).elements());
 filter.init(config);

 HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
@@ -575,7 +575,7 @@ public class TestAuthenticationFilter extends TestCase {
 FilterChain chain = Mockito.mock(FilterChain.class);

 Mockito.doAnswer(
-new Answer() {
+new Answer<Object>() {
 @Override
 public Object answer(InvocationOnMock invocation) throws Throwable {
 fail();
@@ -586,7 +586,7 @@ public class TestAuthenticationFilter extends TestCase {

 final Cookie[] setCookie = new Cookie[1];
 Mockito.doAnswer(
-new Answer() {
+new Answer<Object>() {
 @Override
 public Object answer(InvocationOnMock invocation) throws Throwable {
 Object[] args = invocation.getArguments();
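The change repeated throughout the hunks above is a generics cleanup: raw Vector and Answer become Vector<String> and Answer<Object>, which silences the unchecked-conversion javac warnings tracked by HADOOP-8119 without changing runtime behavior. A minimal standalone sketch of the warning being removed (illustrative only; the class and method names below are invented, not part of this patch):

    import java.util.Arrays;
    import java.util.Enumeration;
    import java.util.Vector;

    // Hypothetical demo class, not from the Hadoop tree.
    public class RawTypeWarningDemo {
      // Raw Vector: javac reports an unchecked warning unless it is suppressed.
      @SuppressWarnings("unchecked")
      static Enumeration<String> rawTyped() {
        return new Vector(Arrays.asList("a", "b")).elements();
      }

      // Parameterized Vector<String>: same runtime behavior, no warning.
      static Enumeration<String> parameterized() {
        return new Vector<String>(Arrays.asList("a", "b")).elements();
      }

      public static void main(String[] args) {
        Enumeration<String> e = parameterized();
        while (e.hasMoreElements()) {
          System.out.println(e.nextElement());
        }
      }
    }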
@@ -52,6 +52,11 @@ Trunk (unreleased changes)
 HADOOP-8078. Add capability to turn on security in unit tests. (Jaimin Jetly
 via jitendra)

+HADOOP-7994. Remove getProtocolVersion and getProtocolSignature from the
+client side translator and server side implementation. (jitendra)
+
+HADOOP-7557 Make IPC header be extensible (sanjay radia)
+
 BUG FIXES

 HADOOP-8018. Hudson auto test for HDFS has started throwing javadoc
@@ -157,6 +162,9 @@ Release 0.23.3 - UNRELEASED
 HADOOP-8098. KerberosAuthenticatorHandler should use _HOST replacement to
 resolve principal name (tucu)

+HADOOP-8118. In metrics2.util.MBeans, change log level to trace for the
+stack trace of InstanceAlreadyExistsException. (szetszwo)
+
 OPTIMIZATIONS

 BUG FIXES
@@ -183,6 +191,13 @@ Release 0.23.3 - UNRELEASED
 HADOOP-7931. o.a.h.ipc.WritableRpcEngine should have a way to force
 initialization. (atm)

+HADOOP-8104. Inconsistent Jackson versions (tucu)
+
+HADOOP-7940. The Text.clear() method does not clear the bytes as intended. (Csaba Miklos via harsh)
+
+HADOOP-8119. Fix javac warnings in TestAuthenticationFilter in hadoop-auth.
+(szetszwo)
+
 Release 0.23.2 - UNRELEASED

 INCOMPATIBLE CHANGES
@@ -277,5 +277,9 @@
 <Match>
 <!-- protobuf generated code -->
 <Class name="~org\.apache\.hadoop\.ipc\.protobuf\.ProtocolInfoProtos.*"/>
+</Match>
+<Match>
+<!-- protobuf generated code -->
+<Class name="~org\.apache\.hadoop\.ipc\.protobuf\.IpcConnectionContextProtos.*"/>
 </Match>
 </FindBugsFilter>
@@ -20,7 +20,6 @@ package org.apache.hadoop.ha;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.ipc.VersionedProtocol;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.KerberosInfo;

@@ -36,7 +35,7 @@ import java.io.IOException;
 serverPrincipal=CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY)
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
-public interface HAServiceProtocol extends VersionedProtocol {
+public interface HAServiceProtocol {
 /**
 * Initial version of the protocol
 */
@@ -239,6 +239,7 @@ public class Text extends BinaryComparable
 */
 public void clear() {
 length = 0;
+bytes = EMPTY_BYTES;
 }

 /*
@@ -51,6 +51,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.ipc.RpcPayloadHeader.*;
+import org.apache.hadoop.ipc.protobuf.IpcConnectionContextProtos.IpcConnectionContextProto;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Writable;
@@ -66,6 +67,7 @@ import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.security.token.TokenSelector;
 import org.apache.hadoop.security.token.TokenInfo;
+import org.apache.hadoop.util.ProtoUtil;
 import org.apache.hadoop.util.ReflectionUtils;

 /** A client for an IPC service. IPC calls take a single {@link Writable} as a
@@ -211,7 +213,7 @@ public class Client {
 private class Connection extends Thread {
 private InetSocketAddress server; // server ip:port
 private String serverPrincipal; // server's krb5 principal name
-private ConnectionHeader header; // connection header
+private IpcConnectionContextProto connectionContext; // connection context
 private final ConnectionId remoteId; // connection id
 private AuthMethod authMethod; // authentication method
 private boolean useSasl;
@@ -295,8 +297,8 @@ public class Client {
 authMethod = AuthMethod.KERBEROS;
 }

-header =
-new ConnectionHeader(RPC.getProtocolName(protocol), ticket, authMethod);
+connectionContext = ProtoUtil.makeIpcConnectionContext(
+RPC.getProtocolName(protocol), ticket, authMethod);

 if (LOG.isDebugEnabled())
 LOG.debug("Use " + authMethod + " authentication for protocol "
@@ -563,7 +565,7 @@ public class Client {
 setupConnection();
 InputStream inStream = NetUtils.getInputStream(socket);
 OutputStream outStream = NetUtils.getOutputStream(socket);
-writeRpcHeader(outStream);
+writeConnectionHeader(outStream);
 if (useSasl) {
 final InputStream in2 = inStream;
 final OutputStream out2 = outStream;
@@ -597,8 +599,11 @@ public class Client {
 } else {
 // fall back to simple auth because server told us so.
 authMethod = AuthMethod.SIMPLE;
-header = new ConnectionHeader(header.getProtocol(), header
-.getUgi(), authMethod);
+// remake the connectionContext
+connectionContext = ProtoUtil.makeIpcConnectionContext(
+connectionContext.getProtocol(),
+ProtoUtil.getUgi(connectionContext.getUserInfo()),
+authMethod);
 useSasl = false;
 }
 }
@@ -678,13 +683,26 @@ public class Client {
 ". Already tried " + curRetries + " time(s).");
 }

-/* Write the RPC header */
-private void writeRpcHeader(OutputStream outStream) throws IOException {
+/**
+* Write the connection header - this is sent when connection is established
+* +----------------------------------+
+* |  "hrpc" 4 bytes                  |
+* +----------------------------------+
+* |  Version (1 bytes)               |
+* +----------------------------------+
+* |  Authmethod (1 byte)             |
+* +----------------------------------+
+* |  IpcSerializationType (1 byte)   |
+* +----------------------------------+
+*/
+private void writeConnectionHeader(OutputStream outStream)
+throws IOException {
 DataOutputStream out = new DataOutputStream(new BufferedOutputStream(outStream));
 // Write out the header, version and authentication method
 out.write(Server.HEADER.array());
 out.write(Server.CURRENT_VERSION);
 authMethod.write(out);
+Server.IpcSerializationType.PROTOBUF.write(out);
 out.flush();
 }

@@ -694,7 +712,7 @@ public class Client {
 private void writeHeader() throws IOException {
 // Write out the ConnectionHeader
 DataOutputBuffer buf = new DataOutputBuffer();
-header.write(buf);
+connectionContext.writeTo(buf);

 // Write out the payload length
 int bufLen = buf.getLength();
@@ -1261,18 +1279,18 @@ public class Client {
 public static class ConnectionId {
 InetSocketAddress address;
 UserGroupInformation ticket;
-Class<?> protocol;
+final Class<?> protocol;
 private static final int PRIME = 16777619;
-private int rpcTimeout;
-private String serverPrincipal;
-private int maxIdleTime; //connections will be culled if it was idle for
+private final int rpcTimeout;
+private final String serverPrincipal;
+private final int maxIdleTime; //connections will be culled if it was idle for
 //maxIdleTime msecs
-private int maxRetries; //the max. no. of retries for socket connections
+private final int maxRetries; //the max. no. of retries for socket connections
 // the max. no. of retries for socket connections on time out exceptions
-private int maxRetriesOnSocketTimeouts;
-private boolean tcpNoDelay; // if T then disable Nagle's Algorithm
-private boolean doPing; //do we need to send ping message
-private int pingInterval; // how often sends ping to the server in msecs
+private final int maxRetriesOnSocketTimeouts;
+private final boolean tcpNoDelay; // if T then disable Nagle's Algorithm
+private final boolean doPing; //do we need to send ping message
+private final int pingInterval; // how often sends ping to the server in msecs

 ConnectionId(InetSocketAddress address, Class<?> protocol,
 UserGroupInformation ticket, int rpcTimeout,
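For reference, a minimal sketch of the 7-byte connection preamble that the new writeConnectionHeader() javadoc above describes: the "hrpc" magic, the RPC version, the auth-method byte, and the new IpcSerializationType byte. This is illustrative only and does not use Hadoop classes; the AUTH_SIMPLE value below is a placeholder, not Hadoop's actual auth-method code.

    import java.io.ByteArrayOutputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import java.nio.charset.StandardCharsets;

    // Standalone illustration of the preamble layout; constants are assumptions.
    public class ConnectionPreambleSketch {
      static final byte[] MAGIC = "hrpc".getBytes(StandardCharsets.UTF_8);
      static final byte VERSION = 7;                 // bumped from 6 by this change
      static final byte AUTH_SIMPLE = 0;             // placeholder auth-method code
      static final byte SERIALIZATION_PROTOBUF = 0;  // ordinal of IpcSerializationType.PROTOBUF

      static byte[] buildPreamble() throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);
        out.write(MAGIC);                      // 4 bytes: "hrpc"
        out.writeByte(VERSION);                // 1 byte: RPC version
        out.writeByte(AUTH_SIMPLE);            // 1 byte: authentication method
        out.writeByte(SERIALIZATION_PROTOBUF); // 1 byte: serialization type (new in this patch)
        out.flush();
        return bytes.toByteArray();
      }

      public static void main(String[] args) throws IOException {
        System.out.println(buildPreamble().length + " bytes"); // prints "7 bytes"
      }
    }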
@@ -1,121 +0,0 @@
-/**
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements. See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership. The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License. You may obtain a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-package org.apache.hadoop.ipc;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
-
-/**
-* The IPC connection header sent by the client to the server
-* on connection establishment.
-*/
-class ConnectionHeader implements Writable {
-public static final Log LOG = LogFactory.getLog(ConnectionHeader.class);
-
-private String protocol;
-private UserGroupInformation ugi = null;
-private AuthMethod authMethod;
-
-public ConnectionHeader() {}
-
-/**
-* Create a new {@link ConnectionHeader} with the given <code>protocol</code>
-* and {@link UserGroupInformation}.
-* @param protocol protocol used for communication between the IPC client
-* and the server
-* @param ugi {@link UserGroupInformation} of the client communicating with
-* the server
-*/
-public ConnectionHeader(String protocol, UserGroupInformation ugi, AuthMethod authMethod) {
-this.protocol = protocol;
-this.ugi = ugi;
-this.authMethod = authMethod;
-}
-
-@Override
-public void readFields(DataInput in) throws IOException {
-protocol = Text.readString(in);
-if (protocol.isEmpty()) {
-protocol = null;
-}
-
-boolean ugiUsernamePresent = in.readBoolean();
-if (ugiUsernamePresent) {
-String username = in.readUTF();
-boolean realUserNamePresent = in.readBoolean();
-if (realUserNamePresent) {
-String realUserName = in.readUTF();
-UserGroupInformation realUserUgi = UserGroupInformation
-.createRemoteUser(realUserName);
-ugi = UserGroupInformation.createProxyUser(username, realUserUgi);
-} else {
-ugi = UserGroupInformation.createRemoteUser(username);
-}
-} else {
-ugi = null;
-}
-}
-
-@Override
-public void write(DataOutput out) throws IOException {
-Text.writeString(out, (protocol == null) ? "" : protocol);
-if (ugi != null) {
-if (authMethod == AuthMethod.KERBEROS) {
-// Send effective user for Kerberos auth
-out.writeBoolean(true);
-out.writeUTF(ugi.getUserName());
-out.writeBoolean(false);
-} else if (authMethod == AuthMethod.DIGEST) {
-// Don't send user for token auth
-out.writeBoolean(false);
-} else {
-//Send both effective user and real user for simple auth
-out.writeBoolean(true);
-out.writeUTF(ugi.getUserName());
-if (ugi.getRealUser() != null) {
-out.writeBoolean(true);
-out.writeUTF(ugi.getRealUser().getUserName());
-} else {
-out.writeBoolean(false);
-}
-}
-} else {
-out.writeBoolean(false);
-}
-}
-
-public String getProtocol() {
-return protocol;
-}
-
-public UserGroupInformation getUgi() {
-return ugi;
-}
-
-public String toString() {
-return protocol + "-" + ugi;
-}
-}
@@ -0,0 +1,34 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.ipc;
+
+import java.io.IOException;
+
+/**
+* IPC exception is thrown by IPC layer when the IPC
+* connection cannot be established.
+*/
+public class IpcException extends IOException {
+private static final long serialVersionUID = 1L;
+
+final String errMsg;
+public IpcException(final String err) {
+errMsg = err;
+}
+}
@@ -21,6 +21,7 @@ package org.apache.hadoop.ipc;
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
 import java.io.DataInputStream;
+import java.io.DataOutput;
 import java.io.DataOutputStream;
 import java.io.IOException;
 import java.net.BindException;
@@ -74,6 +75,7 @@ import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
 import org.apache.hadoop.ipc.RpcPayloadHeader.RpcPayloadOperation;
 import org.apache.hadoop.ipc.metrics.RpcDetailedMetrics;
 import org.apache.hadoop.ipc.metrics.RpcMetrics;
+import org.apache.hadoop.ipc.protobuf.IpcConnectionContextProtos.IpcConnectionContextProto;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.SaslRpcServer;
@@ -90,6 +92,7 @@ import org.apache.hadoop.security.authorize.ServiceAuthorizationManager;
 import org.apache.hadoop.security.token.SecretManager;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.util.ProtoUtil;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;

@@ -110,6 +113,22 @@ public abstract class Server {
 */
 public static final ByteBuffer HEADER = ByteBuffer.wrap("hrpc".getBytes());

+/**
+* Serialization type for ConnectionContext and RpcPayloadHeader
+*/
+public enum IpcSerializationType {
+// Add new serialization type to the end without affecting the enum order
+PROTOBUF;
+
+void write(DataOutput out) throws IOException {
+out.writeByte(this.ordinal());
+}
+
+static IpcSerializationType fromByte(byte b) {
+return IpcSerializationType.values()[b];
+}
+}
+
 /**
 * If the user accidentally sends an HTTP GET to an IPC port, we detect this
 * and send back a nicer response.
@@ -133,7 +152,8 @@ public abstract class Server {
 // 5 : Introduced use of {@link ArrayPrimitiveWritable$Internal}
 // in ObjectWritable to efficiently transmit arrays of primitives
 // 6 : Made RPC payload header explicit
-public static final byte CURRENT_VERSION = 6;
+// 7 : Changed Ipc Connection Header to use Protocol buffers
+public static final byte CURRENT_VERSION = 7;

 /**
 * Initial and max size of response buffer
@@ -968,9 +988,9 @@ public abstract class Server {

 /** Reads calls from a connection and queues them for handling. */
 public class Connection {
-private boolean rpcHeaderRead = false; // if initial rpc header is read
-private boolean headerRead = false; //if the connection header that
-//follows version is read.
+private boolean connectionHeaderRead = false; // connection header is read?
+private boolean connectionContextRead = false; //if connection context that
+//follows connection header is read

 private SocketChannel channel;
 private ByteBuffer data;
@@ -986,14 +1006,14 @@ public abstract class Server {
 private int remotePort;
 private InetAddress addr;

-ConnectionHeader header = new ConnectionHeader();
+IpcConnectionContextProto connectionContext;
 String protocolName;
 boolean useSasl;
 SaslServer saslServer;
 private AuthMethod authMethod;
 private boolean saslContextEstablished;
 private boolean skipInitialSaslHandshake;
-private ByteBuffer rpcHeaderBuffer;
+private ByteBuffer connectionHeaderBuf = null;
 private ByteBuffer unwrappedData;
 private ByteBuffer unwrappedDataLengthBuffer;

@@ -1241,17 +1261,17 @@ public abstract class Server {
 return count;
 }

-if (!rpcHeaderRead) {
+if (!connectionHeaderRead) {
 //Every connection is expected to send the header.
-if (rpcHeaderBuffer == null) {
-rpcHeaderBuffer = ByteBuffer.allocate(2);
+if (connectionHeaderBuf == null) {
+connectionHeaderBuf = ByteBuffer.allocate(3);
 }
-count = channelRead(channel, rpcHeaderBuffer);
-if (count < 0 || rpcHeaderBuffer.remaining() > 0) {
+count = channelRead(channel, connectionHeaderBuf);
+if (count < 0 || connectionHeaderBuf.remaining() > 0) {
 return count;
 }
-int version = rpcHeaderBuffer.get(0);
-byte[] method = new byte[] {rpcHeaderBuffer.get(1)};
+int version = connectionHeaderBuf.get(0);
+byte[] method = new byte[] {connectionHeaderBuf.get(1)};
 authMethod = AuthMethod.read(new DataInputStream(
 new ByteArrayInputStream(method)));
 dataLengthBuffer.flip();
@@ -1273,6 +1293,14 @@ public abstract class Server {
 setupBadVersionResponse(version);
 return -1;
 }
+
+IpcSerializationType serializationType = IpcSerializationType
+.fromByte(connectionHeaderBuf.get(2));
+if (serializationType != IpcSerializationType.PROTOBUF) {
+respondUnsupportedSerialization(serializationType);
+return -1;
+}
+
 dataLengthBuffer.clear();
 if (authMethod == null) {
 throw new IOException("Unable to read authentication method");
@@ -1302,8 +1330,8 @@ public abstract class Server {
 useSasl = true;
 }

-rpcHeaderBuffer = null;
-rpcHeaderRead = true;
+connectionHeaderBuf = null;
+connectionHeaderRead = true;
 continue;
 }

@@ -1334,7 +1362,7 @@ public abstract class Server {
 skipInitialSaslHandshake = false;
 continue;
 }
-boolean isHeaderRead = headerRead;
+boolean isHeaderRead = connectionContextRead;
 if (useSasl) {
 saslReadAndProcess(data.array());
 } else {
@@ -1383,6 +1411,17 @@ public abstract class Server {
 }
 }

+private void respondUnsupportedSerialization(IpcSerializationType st) throws IOException {
+String errMsg = "Server IPC version " + CURRENT_VERSION
++ " do not support serilization " + st.toString();
+ByteArrayOutputStream buffer = new ByteArrayOutputStream();
+
+Call fakeCall = new Call(-1, null, this);
+setupResponse(buffer, fakeCall, Status.FATAL, null,
+IpcException.class.getName(), errMsg);
+responder.doRespond(fakeCall);
+}
+
 private void setupHttpRequestOnIpcPortResponse() throws IOException {
 Call fakeCall = new Call(0, null, this);
 fakeCall.setResponse(ByteBuffer.wrap(
@@ -1390,15 +1429,15 @@ public abstract class Server {
 responder.doRespond(fakeCall);
 }

-/// Reads the connection header following version
-private void processHeader(byte[] buf) throws IOException {
+/** Reads the connection context following the connection header */
+private void processConnectionContext(byte[] buf) throws IOException {
 DataInputStream in =
 new DataInputStream(new ByteArrayInputStream(buf));
-header.readFields(in);
-protocolName = header.getProtocol();
+connectionContext = IpcConnectionContextProto.parseFrom(in);
+protocolName = connectionContext.hasProtocol() ? connectionContext
+.getProtocol() : null;

-UserGroupInformation protocolUser = header.getUgi();
+UserGroupInformation protocolUser = ProtoUtil.getUgi(connectionContext);
 if (!useSasl) {
 user = protocolUser;
 if (user != null) {
@@ -1472,15 +1511,15 @@ public abstract class Server {

 private void processOneRpc(byte[] buf) throws IOException,
 InterruptedException {
-if (headerRead) {
+if (connectionContextRead) {
 processData(buf);
 } else {
-processHeader(buf);
-headerRead = true;
+processConnectionContext(buf);
+connectionContextRead = true;
 if (!authorizeConnection()) {
 throw new AccessControlException("Connection from " + this
-+ " for protocol " + header.getProtocol()
++ " for protocol " + connectionContext.getProtocol()
 + " is unauthorized for user " + user);
 }
 }
 }
@@ -1549,9 +1588,9 @@ public abstract class Server {
 && (authMethod != AuthMethod.DIGEST)) {
 ProxyUsers.authorize(user, this.getHostAddress(), conf);
 }
-authorize(user, header, getHostInetAddress());
+authorize(user, protocolName, getHostInetAddress());
 if (LOG.isDebugEnabled()) {
-LOG.debug("Successfully authorized " + header);
+LOG.debug("Successfully authorized " + connectionContext);
 }
 rpcMetrics.incrAuthorizationSuccesses();
 } catch (AuthorizationException ae) {
@@ -1596,11 +1635,10 @@ public abstract class Server {
 while (running) {
 try {
 final Call call = callQueue.take(); // pop the queue; maybe blocked here
-if (LOG.isDebugEnabled())
+if (LOG.isDebugEnabled()) {
 LOG.debug(getName() + ": has Call#" + call.callId +
 "for RpcKind " + call.rpcKind + " from " + call.connection);
+}
 String errorClass = null;
 String error = null;
 Writable value = null;
@@ -1925,21 +1963,22 @@ public abstract class Server {
 * Authorize the incoming client connection.
 *
 * @param user client user
-* @param connection incoming connection
+* @param protocolName - the protocol
 * @param addr InetAddress of incoming connection
 * @throws AuthorizationException when the client isn't authorized to talk the protocol
 */
-public void authorize(UserGroupInformation user,
-ConnectionHeader connection,
-InetAddress addr
-) throws AuthorizationException {
+private void authorize(UserGroupInformation user, String protocolName,
+InetAddress addr) throws AuthorizationException {
 if (authorize) {
+if (protocolName == null) {
+throw new AuthorizationException("Null protocol not authorized");
+}
 Class<?> protocol = null;
 try {
-protocol = getProtocolClass(connection.getProtocol(), getConf());
+protocol = getProtocolClass(protocolName, getConf());
 } catch (ClassNotFoundException cfne) {
 throw new AuthorizationException("Unknown protocol: " +
-connection.getProtocol());
+protocolName);
 }
 serviceAuthorizationManager.authorize(user, protocol, getConf(), addr);
 }
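On the server side the connection preamble buffer grows from 2 to 3 bytes, and the third byte is mapped back to IpcSerializationType through its ordinal, which is why the enum's comment insists that new constants only be appended at the end. A small standalone sketch of that ordinal round trip (assumed names, not Hadoop code):

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInput;
    import java.io.DataInputStream;
    import java.io.DataOutput;
    import java.io.DataOutputStream;
    import java.io.IOException;

    public class SerializationTypeSketch {
      // Mirror of the pattern used by the new enum: the wire byte is the ordinal,
      // so reordering constants would change the meaning of bytes already written.
      enum SerializationType { PROTOBUF /* append new types here only */ }

      static void write(DataOutput out, SerializationType t) throws IOException {
        out.writeByte(t.ordinal());
      }

      static SerializationType read(DataInput in) throws IOException {
        return SerializationType.values()[in.readByte()];
      }

      public static void main(String[] args) throws IOException {
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        write(new DataOutputStream(buf), SerializationType.PROTOBUF);
        SerializationType t =
            read(new DataInputStream(new ByteArrayInputStream(buf.toByteArray())));
        System.out.println(t); // PROTOBUF
      }
    }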
@@ -18,6 +18,8 @@
 package org.apache.hadoop.metrics2.util;

 import java.lang.management.ManagementFactory;
+
+import javax.management.InstanceAlreadyExistsException;
 import javax.management.MBeanServer;
 import javax.management.ObjectName;

@@ -55,8 +57,15 @@ public class MBeans {
 mbs.registerMBean(theMbean, name);
 LOG.debug("Registered "+ name);
 return name;
+} catch (InstanceAlreadyExistsException iaee) {
+if (LOG.isTraceEnabled()) {
+LOG.trace("Failed to register MBean \""+ name + "\"", iaee);
+} else {
+LOG.warn("Failed to register MBean \""+ name
++ "\": Instance already exists.");
+}
 } catch (Exception e) {
-LOG.warn("Error registering "+ name, e);
+LOG.warn("Failed to register MBean \""+ name + "\"", e);
 }
 return null;
 }
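The MBeans.register() change above (HADOOP-8118) quiets duplicate registrations: an InstanceAlreadyExistsException now gets its stack trace only at trace level and a one-line warning otherwise. A hedged standalone illustration of the same pattern (the MBean interface and ObjectName below are invented for the example):

    import java.lang.management.ManagementFactory;
    import javax.management.InstanceAlreadyExistsException;
    import javax.management.MBeanServer;
    import javax.management.ObjectName;

    public class QuietMBeanRegistration {
      public interface DemoMXBean { int getValue(); }
      public static class Demo implements DemoMXBean { public int getValue() { return 42; } }

      public static void main(String[] args) throws Exception {
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        ObjectName name = new ObjectName("example:type=Demo"); // hypothetical name
        mbs.registerMBean(new Demo(), name);
        try {
          mbs.registerMBean(new Demo(), name); // second registration collides
        } catch (InstanceAlreadyExistsException iaee) {
          // mirrors the new MBeans.register() handling: short message, no stack trace
          System.out.println("Failed to register MBean \"" + name + "\": Instance already exists.");
        }
      }
    }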
@@ -33,7 +33,7 @@ import org.apache.hadoop.security.KerberosInfo;
 serverPrincipal=CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY)
 @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
 @InterfaceStability.Evolving
-public interface RefreshUserMappingsProtocol extends VersionedProtocol {
+public interface RefreshUserMappingsProtocol {

 /**
 * Version 1: Initial version.
@@ -32,7 +32,7 @@ import org.apache.hadoop.security.KerberosInfo;
 serverPrincipal=CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY)
 @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
 @InterfaceStability.Evolving
-public interface RefreshAuthorizationPolicyProtocol extends VersionedProtocol {
+public interface RefreshAuthorizationPolicyProtocol {

 /**
 * Version 1: Initial version
@@ -29,7 +29,7 @@ import org.apache.hadoop.ipc.VersionedProtocol;
  */
 @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
 @InterfaceStability.Evolving
-public interface GetUserMappingsProtocol extends VersionedProtocol {
+public interface GetUserMappingsProtocol {

   /**
    * Version 1: Initial version.

@@ -21,6 +21,11 @@ package org.apache.hadoop.util;
 import java.io.DataInput;
 import java.io.IOException;

+import org.apache.hadoop.ipc.protobuf.IpcConnectionContextProtos.IpcConnectionContextProto;
+import org.apache.hadoop.ipc.protobuf.IpcConnectionContextProtos.UserInformationProto;
+import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
+import org.apache.hadoop.security.UserGroupInformation;
+
 public abstract class ProtoUtil {

   /**

|
@ -63,4 +68,71 @@ public abstract class ProtoUtil {
|
||||||
return result;
|
return result;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This method creates the connection context using exactly the same logic
|
||||||
|
* as the old connection context as was done for writable where
|
||||||
|
* the effective and real users are set based on the auth method.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
public static IpcConnectionContextProto makeIpcConnectionContext(
|
||||||
|
final String protocol,
|
||||||
|
final UserGroupInformation ugi, final AuthMethod authMethod) {
|
||||||
|
IpcConnectionContextProto.Builder result = IpcConnectionContextProto.newBuilder();
|
||||||
|
if (protocol != null) {
|
||||||
|
result.setProtocol(protocol);
|
||||||
|
}
|
||||||
|
UserInformationProto.Builder ugiProto = UserInformationProto.newBuilder();
|
||||||
|
if (ugi != null) {
|
||||||
|
/*
|
||||||
|
* In the connection context we send only additional user info that
|
||||||
|
* is not derived from the authentication done during connection setup.
|
||||||
|
*/
|
||||||
|
if (authMethod == AuthMethod.KERBEROS) {
|
||||||
|
// Real user was established as part of the connection.
|
||||||
|
// Send effective user only.
|
||||||
|
ugiProto.setEffectiveUser(ugi.getUserName());
|
||||||
|
} else if (authMethod == AuthMethod.DIGEST) {
|
||||||
|
// With token, the connection itself establishes
|
||||||
|
// both real and effective user. Hence send none in header.
|
||||||
|
} else { // Simple authentication
|
||||||
|
// No user info is established as part of the connection.
|
||||||
|
// Send both effective user and real user
|
||||||
|
ugiProto.setEffectiveUser(ugi.getUserName());
|
||||||
|
if (ugi.getRealUser() != null) {
|
||||||
|
ugiProto.setRealUser(ugi.getRealUser().getUserName());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
result.setUserInfo(ugiProto);
|
||||||
|
return result.build();
|
||||||
|
}
|
||||||
|
|
||||||
|
public static UserGroupInformation getUgi(IpcConnectionContextProto context) {
|
||||||
|
if (context.hasUserInfo()) {
|
||||||
|
UserInformationProto userInfo = context.getUserInfo();
|
||||||
|
return getUgi(userInfo);
|
||||||
|
} else {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
public static UserGroupInformation getUgi(UserInformationProto userInfo) {
|
||||||
|
UserGroupInformation ugi = null;
|
||||||
|
String effectiveUser = userInfo.hasEffectiveUser() ? userInfo
|
||||||
|
.getEffectiveUser() : null;
|
||||||
|
String realUser = userInfo.hasRealUser() ? userInfo.getRealUser() : null;
|
||||||
|
if (effectiveUser != null) {
|
||||||
|
if (realUser != null) {
|
||||||
|
UserGroupInformation realUserUgi = UserGroupInformation
|
||||||
|
.createRemoteUser(realUser);
|
||||||
|
ugi = UserGroupInformation
|
||||||
|
.createProxyUser(effectiveUser, realUserUgi);
|
||||||
|
} else {
|
||||||
|
ugi = org.apache.hadoop.security.UserGroupInformation
|
||||||
|
.createRemoteUser(effectiveUser);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return ugi;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
|
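A minimal sketch of how the two new ProtoUtil helpers above are meant to pair up, assuming SIMPLE authentication and placeholder user names; the protocol string is only an example of a client proxy's protocol name.

  import org.apache.hadoop.ipc.protobuf.IpcConnectionContextProtos.IpcConnectionContextProto;
  import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
  import org.apache.hadoop.security.UserGroupInformation;
  import org.apache.hadoop.util.ProtoUtil;

  public class ConnectionContextExample {
    public static void main(String[] args) {
      // Placeholder principals; with SIMPLE auth both effective and real user travel
      // in the header, exactly as the comments in makeIpcConnectionContext describe.
      UserGroupInformation realUser = UserGroupInformation.createRemoteUser("oozie");
      UserGroupInformation effectiveUser =
          UserGroupInformation.createProxyUser("alice", realUser);

      IpcConnectionContextProto context = ProtoUtil.makeIpcConnectionContext(
          "org.apache.hadoop.hdfs.protocol.ClientProtocol",
          effectiveUser, AuthMethod.SIMPLE);

      // On the receiving side the same information is turned back into a UGI.
      UserGroupInformation decoded = ProtoUtil.getUgi(context);
      System.out.println(decoded);  // the proxy-user UGI reconstructed from the header
    }
  }
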
@@ -0,0 +1,42 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+option java_package = "org.apache.hadoop.ipc.protobuf";
+option java_outer_classname = "IpcConnectionContextProtos";
+option java_generate_equals_and_hash = true;
+
+/**
+ * Spec for UserInformationProto is specified in ProtoUtil#makeIpcConnectionContext
+ */
+message UserInformationProto {
+  optional string effectiveUser = 1;
+  optional string realUser = 2;
+}
+
+/**
+ * The connection context is sent as part of the connection establishment.
+ * It establishes the context for ALL Rpc calls within the connection.
+ */
+message IpcConnectionContextProto {
+  // UserInfo beyond what is determined as part of security handshake
+  // at connection time (kerberos, tokens etc).
+  optional UserInformationProto userInfo = 2;
+
+  // Protocol name for next rpc layer.
+  // The client created a proxy with this protocol name
+  optional string protocol = 3;
+}

@@ -192,6 +192,16 @@ public class TestText extends TestCase {
     assertTrue(text.find("\u20ac", 5)==11);
   }

+  public void testClear() {
+    Text text = new Text();
+    assertEquals("", text.toString());
+    assertEquals(0, text.getBytes().length);
+    text = new Text("abcd\u20acbdcd\u20ac");
+    text.clear();
+    assertEquals("", text.toString());
+    assertEquals(0, text.getBytes().length);
+  }
+
   public void testFindAfterUpdatingContents() throws Exception {
     Text text = new Text("abcd");
     text.set("a".getBytes());

@@ -60,6 +60,14 @@ Trunk (unreleased changes)
     HDFS-3002. TestNameNodeMetrics need not wait for metrics update.
     (suresh)

+    HDFS-3016. Security in unit tests. (Jaimin Jetly via jitendra)
+
+    HDFS-3014. FSEditLogOp and its subclasses should have toString() method.
+    (Sho Shimauchi via atm)
+
+    HDFS-3030. Remove getProtocolVersion and getProtocolSignature from translators.
+    (jitendra)
+
   OPTIMIZATIONS

     HDFS-2477. Optimize computing the diff between a block report and the
@@ -199,6 +207,9 @@ Release 0.23.3 - UNRELEASED
     HDFS-2895. Remove Writable wire protocol types and translators to
     complete transition to protocol buffers. (suresh)

+    HDFS-2992. Edit log failure trace should include transaction ID of
+    error. (Colin Patrick McCabe via eli)
+
   OPTIMIZATIONS

     HDFS-3024. Improve performance of stringification in addStoredBlock (todd)
@@ -239,6 +250,8 @@ Release 0.23.3 - UNRELEASED
     HDFS-2968. Protocol translator for BlockRecoveryCommand broken when
     multiple blocks need recovery. (todd)

+    HDFS-3020. Fix editlog to automatically sync when buffer is full. (todd)
+
 Release 0.23.2 - UNRELEASED

   INCOMPATIBLE CHANGES
@@ -306,6 +319,11 @@ Release 0.23.2 - UNRELEASED
     HDFS-3006. In WebHDFS, when the return body is empty, set the Content-Type
     to application/octet-stream instead of application/json. (szetszwo)

+    HDFS-2991. Fix case where OP_ADD would not be logged in append(). (todd)
+
+    HDFS-3012. Exception while renewing delegation token. (Bobby Evans via
+    jitendra)
+
 Release 0.23.1 - 2012-02-17

   INCOMPATIBLE CHANGES

@@ -29,6 +29,7 @@

   <properties>
     <hadoop.component>hdfs</hadoop.component>
+    <kdc.resource.dir>../../hadoop-common-project/hadoop-common/src/test/resources/kdc</kdc.resource.dir>
     <is.hadoop.component>true</is.hadoop.component>
   </properties>

@@ -113,6 +114,16 @@

   <build>
     <plugins>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-surefire-plugin</artifactId>
+        <configuration>
+          <systemPropertyVariables>
+            <startKdc>${startKdc}</startKdc>
+            <kdc.resource.dir>${kdc.resource.dir}</kdc.resource.dir>
+          </systemPropertyVariables>
+        </configuration>
+      </plugin>
       <plugin>
         <groupId>org.codehaus.mojo.jspc</groupId>
         <artifactId>jspc-maven-plugin</artifactId>
@@ -514,5 +525,85 @@
         </plugins>
       </build>
     </profile>
+
+    <!-- profile that starts ApacheDS KDC server -->
+    <profile>
+      <id>startKdc</id>
+      <activation>
+        <property>
+          <name>startKdc</name>
+          <value>true</value>
+        </property>
+      </activation>
+      <build>
+        <plugins>
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-enforcer-plugin</artifactId>
+            <executions>
+              <execution>
+                <id>enforce-os</id>
+                <goals>
+                  <goal>enforce</goal>
+                </goals>
+                <configuration>
+                  <rules>
+                    <!-- At present supports Mac and Unix OS family -->
+                    <requireOS>
+                      <family>mac</family>
+                      <family>unix</family>
+                    </requireOS>
+                  </rules>
+                  <fail>true</fail>
+                </configuration>
+              </execution>
+            </executions>
+          </plugin>
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-antrun-plugin</artifactId>
+            <executions>
+              <execution>
+                <id>compile</id>
+                <phase>compile</phase>
+                <goals>
+                  <goal>run</goal>
+                </goals>
+                <configuration>
+                  <target>
+                    <chmod file="${kdc.resource.dir}/killKdc.sh" perm="775" />
+                    <exec dir="${kdc.resource.dir}" executable= "./killKdc.sh" />
+                    <mkdir dir="${project.build.directory}/test-classes/kdc/downloads"/>
+                    <get src="http://newverhost.com/pub//directory/apacheds/unstable/1.5/1.5.7/apacheds-1.5.7.tar.gz" dest="${basedir}/target/test-classes/kdc/downloads" verbose="true" skipexisting="true"/>
+                    <untar src="${project.build.directory}/test-classes/kdc/downloads/apacheds-1.5.7.tar.gz" dest="${project.build.directory}/test-classes/kdc" compression="gzip" />
+                    <copy file="${kdc.resource.dir}/server.xml" toDir="${project.build.directory}/test-classes/kdc/apacheds_1.5.7/conf"/>
+                    <mkdir dir="${project.build.directory}/test-classes/kdc/apacheds_1.5.7/ldif"/>
+                    <copy toDir="${project.build.directory}/test-classes/kdc/apacheds_1.5.7/ldif">
+                      <fileset dir="${kdc.resource.dir}/ldif"/>
+                    </copy>
+                    <chmod file="${project.build.directory}/test-classes/kdc/apacheds_1.5.7/apacheds.sh" perm="775" />
+                    <exec dir="${project.build.directory}/test-classes/kdc/apacheds_1.5.7/" executable="./apacheds.sh" spawn="true"/>
+                  </target>
+                </configuration>
+              </execution>
+              <!-- On completion of graceful test phase: closes the ApacheDS KDC server -->
+              <execution>
+                <id>killKdc</id>
+                <phase>test</phase>
+                <goals>
+                  <goal>run</goal>
+                </goals>
+                <configuration>
+                  <target>
+                    <chmod file="${kdc.resource.dir}/killKdc.sh" perm="775" />
+                    <exec dir="${kdc.resource.dir}" executable= "./killKdc.sh" />
+                  </target>
+                </configuration>
+              </execution>
+            </executions>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
   </profiles>
 </project>

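The profile above is activated by setting the startKdc property (for example, running the tests with -DstartKdc=true), and the surefire block forwards startKdc and kdc.resource.dir into the test JVM as system properties. A hedged sketch of how a secure test might consume them; the class and method names are hypothetical, not part of this change.

  import java.io.File;

  public class KdcTestSupportExample {

    // True only when the build ran with -DstartKdc=true and the profile started ApacheDS.
    public static boolean isKdcRunning() {
      return Boolean.parseBoolean(System.getProperty("startKdc", "false"));
    }

    // Location of kdc.resource.dir as forwarded by the surefire configuration above.
    public static File kdcResourceDir() {
      return new File(System.getProperty("kdc.resource.dir", "target/test-classes/kdc"));
    }
  }
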
@@ -630,6 +630,12 @@ public class DFSClient implements java.io.Closeable {
   @InterfaceAudience.Private
   public static class Renewer extends TokenRenewer {

+    static {
+      //Ensure that HDFS Configuration files are loaded before trying to use
+      // the renewer.
+      HdfsConfiguration.init();
+    }
+
     @Override
     public boolean handleKind(Text kind) {
       return DelegationTokenIdentifier.HDFS_DELEGATION_KIND.equals(kind);

@@ -36,7 +36,7 @@ import org.apache.hadoop.security.token.TokenInfo;
 @KerberosInfo(
     serverPrincipal = DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY)
 @TokenInfo(BlockTokenSelector.class)
-public interface ClientDatanodeProtocol extends VersionedProtocol {
+public interface ClientDatanodeProtocol {
   /**
    * Until version 9, this class ClientDatanodeProtocol served as both
    * the client interface to the DN AND the RPC protocol used to

@@ -40,7 +40,6 @@ import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.retry.Idempotent;
-import org.apache.hadoop.ipc.VersionedProtocol;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.KerberosInfo;
 import org.apache.hadoop.security.token.Token;
@@ -60,7 +59,7 @@ import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
 @KerberosInfo(
     serverPrincipal = DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY)
 @TokenInfo(DelegationTokenSelector.class)
-public interface ClientProtocol extends VersionedProtocol {
+public interface ClientProtocol {

   /**
    * Until version 69, this class ClientProtocol served as both

@@ -44,6 +44,15 @@ import org.apache.hadoop.classification.InterfaceAudience;
 @InterfaceAudience.Private
 public class LayoutVersion {

+  /**
+   * Version in which HDFS-2991 was fixed. This bug caused OP_ADD to
+   * sometimes be skipped for append() calls. If we see such a case when
+   * loading the edits, but the version is known to have that bug, we
+   * workaround the issue. Otherwise we should consider it a corruption
+   * and bail.
+   */
+  public static final int BUGFIX_HDFS_2991_VERSION = -40;
+
   /**
    * Enums for features that change the layout version.
    * <br><br>

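Layout versions are negative and decrease as features are added, so an edits stream written by a release that still has the HDFS-2991 bug carries a logVersion numerically greater than BUGFIX_HDFS_2991_VERSION. A hedged sketch of that comparison; the method is illustrative, not the actual edits-loading code, and the import assumes LayoutVersion's usual package.

  import org.apache.hadoop.hdfs.protocol.LayoutVersion;

  public class Hdfs2991WorkaroundExample {
    // Returns true when a missing OP_ADD for an append() can be tolerated because
    // the edits were written by a release that predates the HDFS-2991 fix.
    public static boolean canTolerateMissingOpAdd(int logVersion) {
      return logVersion > LayoutVersion.BUGFIX_HDFS_2991_VERSION;
    }
  }
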
@@ -17,15 +17,11 @@
  */
 package org.apache.hadoop.hdfs.protocolPB;

-import java.io.IOException;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ClientDatanodeProtocolService;
-import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenSelector;
 import org.apache.hadoop.ipc.ProtocolInfo;
-import org.apache.hadoop.ipc.VersionedProtocol;
 import org.apache.hadoop.security.KerberosInfo;
 import org.apache.hadoop.security.token.TokenInfo;

@@ -37,13 +33,5 @@ import org.apache.hadoop.security.token.TokenInfo;
     protocolVersion = 1)
 @InterfaceAudience.Private
 public interface ClientDatanodeProtocolPB extends
-    ClientDatanodeProtocolService.BlockingInterface, VersionedProtocol {
-
-  /**
-   * This method is defined to get the protocol signature using
-   * ProtocolSignatureWritable - suffix of 2 to the method name
-   * avoids conflict.
-   */
-  public ProtocolSignatureWritable getProtocolSignature2(String protocol,
-      long clientVersion, int clientMethodsHash) throws IOException;
+    ClientDatanodeProtocolService.BlockingInterface {
 }

@@ -30,10 +30,6 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetRep
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto;
-import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
-import org.apache.hadoop.ipc.ProtocolSignature;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.ipc.VersionedProtocol;

 import com.google.protobuf.RpcController;
 import com.google.protobuf.ServiceException;
@@ -110,50 +106,4 @@ public class ClientDatanodeProtocolServerSideTranslatorPB implements
         .setLocalPath(resp.getBlockPath()).setLocalMetaPath(resp.getMetaPath())
         .build();
   }
-
-  @Override
-  public long getProtocolVersion(String protocol, long clientVersion)
-      throws IOException {
-    return RPC.getProtocolVersion(ClientDatanodeProtocolPB.class);
-  }
-
-  /**
-   * The client side will redirect getProtocolSignature to
-   * getProtocolSignature2.
-   *
-   * However the RPC layer below on the Server side will call getProtocolVersion
-   * and possibly in the future getProtocolSignature. Hence we still implement
-   * it even though the end client will never call this method.
-   *
-   * @see VersionedProtocol#getProtocolVersion
-   */
-  @Override
-  public ProtocolSignature getProtocolSignature(String protocol,
-      long clientVersion, int clientMethodsHash) throws IOException {
-    /**
-     * Don't forward this to the server. The protocol version and signature is
-     * that of {@link ClientDatanodeProtocol}
-     */
-    if (!protocol.equals(RPC.getProtocolName(ClientDatanodeProtocol.class))) {
-      throw new IOException("Namenode Serverside implements " +
-          RPC.getProtocolName(ClientDatanodeProtocol.class) +
-          ". The following requested protocol is unknown: " + protocol);
-    }
-
-    return ProtocolSignature.getProtocolSignature(clientMethodsHash,
-        RPC.getProtocolVersion(ClientDatanodeProtocolPB.class),
-        ClientDatanodeProtocolPB.class);
-  }
-
-
-  @Override
-  public ProtocolSignatureWritable getProtocolSignature2(String protocol,
-      long clientVersion, int clientMethodsHash) throws IOException {
-    /**
-     * Don't forward this to the server. The protocol version and signature is
-     * that of {@link ClientDatanodeProtocol}
-     */
-    return ProtocolSignatureWritable.convert(
-        this.getProtocolSignature(protocol, clientVersion, clientMethodsHash));
-  }
 }

@@ -146,19 +146,6 @@ public class ClientDatanodeProtocolTranslatorPB implements
     RPC.stopProxy(rpcProxy);
   }

-  @Override
-  public long getProtocolVersion(String protocolName, long clientVersion)
-      throws IOException {
-    return rpcProxy.getProtocolVersion(protocolName, clientVersion);
-  }
-
-  @Override
-  public ProtocolSignature getProtocolSignature(String protocol,
-      long clientVersion, int clientMethodsHash) throws IOException {
-    return ProtocolSignatureWritable.convert(rpcProxy.getProtocolSignature2(
-        protocol, clientVersion, clientMethodsHash));
-  }
-
   @Override
   public long getReplicaVisibleLength(ExtendedBlock b) throws IOException {
     GetReplicaVisibleLengthRequestProto req = GetReplicaVisibleLengthRequestProto

@@ -17,17 +17,13 @@
  */
 package org.apache.hadoop.hdfs.protocolPB;

-import java.io.IOException;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ClientNamenodeProtocol;
-import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
 import org.apache.hadoop.ipc.ProtocolInfo;
-import org.apache.hadoop.ipc.VersionedProtocol;
 import org.apache.hadoop.security.KerberosInfo;
 import org.apache.hadoop.security.token.TokenInfo;

@@ -46,13 +42,5 @@ import org.apache.hadoop.security.token.TokenInfo;
  * add annotations required for security.
  */
 public interface ClientNamenodeProtocolPB extends
-  ClientNamenodeProtocol.BlockingInterface, VersionedProtocol {
-
-  /**
-   * This method is defined to get the protocol signature using
-   * the R23 protocol - hence we have added the suffix of 2 the method name
-   * to avoid conflict.
-   */
-  public ProtocolSignatureWritable getProtocolSignature2(String protocol,
-      long clientVersion, int clientMethodsHash) throws IOException;
+  ClientNamenodeProtocol.BlockingInterface {
 }

@@ -124,17 +124,11 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Update
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;
-import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
 import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.ipc.ProtocolSignature;
-import org.apache.hadoop.ipc.RPC;

 import com.google.protobuf.RpcController;
 import com.google.protobuf.ServiceException;
@@ -163,54 +157,6 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
     this.server = server;
   }

-  /**
-   * The client side will redirect getProtocolSignature to
-   * getProtocolSignature2.
-   *
-   * However the RPC layer below on the Server side will call getProtocolVersion
-   * and possibly in the future getProtocolSignature. Hence we still implement
-   * it even though the end client's call will never reach here.
-   */
-  @Override
-  public ProtocolSignature getProtocolSignature(String protocol,
-      long clientVersion, int clientMethodsHash) throws IOException {
-    /**
-     * Don't forward this to the server. The protocol version and signature is
-     * that of {@link ClientNamenodeProtocol}
-     *
-     */
-    if (!protocol.equals(RPC.getProtocolName(
-        ClientNamenodeProtocolPB.class))) {
-      throw new IOException("Namenode Serverside implements " +
-          RPC.getProtocolName(ClientNamenodeProtocolPB.class) +
-          ". The following requested protocol is unknown: " + protocol);
-    }
-
-    return ProtocolSignature.getProtocolSignature(clientMethodsHash,
-        RPC.getProtocolVersion(ClientNamenodeProtocolPB.class),
-        ClientNamenodeProtocolPB.class);
-  }
-
-  @Override
-  public ProtocolSignatureWritable
-          getProtocolSignature2(
-      String protocol, long clientVersion, int clientMethodsHash)
-      throws IOException {
-    /**
-     * Don't forward this to the server. The protocol version and signature is
-     * that of {@link ClientNamenodeProtocol}
-     *
-     */
-    return ProtocolSignatureWritable.convert(
-        this.getProtocolSignature(protocol, clientVersion, clientMethodsHash));
-  }
-
-  @Override
-  public long getProtocolVersion(String protocol, long clientVersion)
-      throws IOException {
-    return RPC.getProtocolVersion(InterDatanodeProtocolPB.class);
-  }
-
   @Override
   public GetBlockLocationsResponseProto getBlockLocations(
       RpcController controller, GetBlockLocationsRequestProto req)

@@ -28,41 +28,27 @@ import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FsServerDefaults;
+import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.UnresolvedLinkException;
-import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
-import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
-import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
-import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
-import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
-import org.apache.hadoop.io.EnumSetWritable;
-import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.ProtocolTranslator;
-import org.apache.hadoop.ipc.ProtobufHelper;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-import org.apache.hadoop.ipc.ProtocolMetaInterface;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.ipc.ProtocolSignature;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
-import org.apache.hadoop.ipc.RpcClientUtil;
-import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
-import org.apache.hadoop.security.AccessControlException;
-import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto;
@@ -114,6 +100,27 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSaf
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto;
+import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
+import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
+import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
+import org.apache.hadoop.io.EnumSetWritable;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.retry.RetryPolicies;
+import org.apache.hadoop.io.retry.RetryPolicy;
+import org.apache.hadoop.io.retry.RetryProxy;
+import org.apache.hadoop.ipc.ProtobufHelper;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
+import org.apache.hadoop.ipc.ProtocolMetaInterface;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.ipc.RpcClientUtil;
+import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;

 import com.google.protobuf.ByteString;
 import com.google.protobuf.ServiceException;
@@ -138,20 +145,6 @@ public class ClientNamenodeProtocolTranslatorPB implements
     RPC.stopProxy(rpcProxy);
   }

-  @Override
-  public ProtocolSignature getProtocolSignature(String protocolName,
-      long clientVersion, int clientMethodHash)
-      throws IOException {
-    return ProtocolSignatureWritable.convert(rpcProxy.getProtocolSignature2(
-        protocolName, clientVersion, clientMethodHash));
-  }
-
-  @Override
-  public long getProtocolVersion(String protocolName, long clientVersion)
-      throws IOException {
-    return rpcProxy.getProtocolVersion(protocolName, clientVersion);
-  }
-
   @Override
   public LocatedBlocks getBlockLocations(String src, long offset, long length)
       throws AccessControlException, FileNotFoundException,

@@ -138,19 +138,6 @@ public class DatanodeProtocolClientSideTranslatorPB implements
     return (DatanodeProtocolPB) RetryProxy.create(DatanodeProtocolPB.class,
         rpcNamenode, methodNameToPolicyMap);
   }

-  @Override
-  public long getProtocolVersion(String protocol, long clientVersion)
-      throws IOException {
-    return rpcProxy.getProtocolVersion(protocol, clientVersion);
-  }
-
-  @Override
-  public ProtocolSignature getProtocolSignature(String protocolName,
-      long clientVersion, int clientMethodsHash) throws IOException {
-    return ProtocolSignatureWritable.convert(rpcProxy.getProtocolSignature2(
-        protocolName, clientVersion, clientMethodsHash));
-  }
-
   @Override
   public void close() throws IOException {

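The retained context above shows the datanode-side translator wrapping its protobuf proxy with RetryProxy.create and a per-method policy map. A minimal, hedged sketch of that pattern with an invented interface; only RetryProxy and RetryPolicies are the real Hadoop retry APIs, the rest is illustrative.

  import java.util.HashMap;
  import java.util.Map;
  import java.util.concurrent.TimeUnit;

  import org.apache.hadoop.io.retry.RetryPolicies;
  import org.apache.hadoop.io.retry.RetryPolicy;
  import org.apache.hadoop.io.retry.RetryProxy;

  public class RetryProxyExample {
    // Illustrative service interface; in the translator the wrapped type is the
    // generated protocol-buffer interface (e.g. DatanodeProtocolPB).
    public interface Echo {
      String echo(String msg);
    }

    public static Echo wrap(final Echo direct) {
      RetryPolicy fiveTimes = RetryPolicies.retryUpToMaximumCountWithFixedSleep(
          5, 200, TimeUnit.MILLISECONDS);
      Map<String, RetryPolicy> methodNameToPolicyMap = new HashMap<String, RetryPolicy>();
      methodNameToPolicyMap.put("echo", fiveTimes);
      // Every call to echo() on the returned proxy is retried according to the map.
      return (Echo) RetryProxy.create(Echo.class, direct, methodNameToPolicyMap);
    }
  }
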
@@ -18,14 +18,10 @@

 package org.apache.hadoop.hdfs.protocolPB;

-import java.io.IOException;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeProtocolService;
-import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
 import org.apache.hadoop.ipc.ProtocolInfo;
-import org.apache.hadoop.ipc.VersionedProtocol;
 import org.apache.hadoop.security.KerberosInfo;

 @KerberosInfo(
@@ -36,13 +32,5 @@ import org.apache.hadoop.security.KerberosInfo;
     protocolVersion = 1)
 @InterfaceAudience.Private
 public interface DatanodeProtocolPB extends
-    DatanodeProtocolService.BlockingInterface, VersionedProtocol {
-
-  /**
-   * This method is defined to get the protocol signature using
-   * the R23 protocol - hence we have added the suffix of 2 the method name
-   * to avoid conflict.
-   */
-  public ProtocolSignatureWritable getProtocolSignature2(String protocol,
-      long clientVersion, int clientMethodsHash) throws IOException;
+    DatanodeProtocolService.BlockingInterface {
 }

@@ -47,7 +47,6 @@ import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto;
-import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
@@ -59,8 +58,6 @@ import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
 import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
-import org.apache.hadoop.ipc.ProtocolSignature;
-import org.apache.hadoop.ipc.RPC;

 import com.google.protobuf.RpcController;
 import com.google.protobuf.ServiceException;
@@ -271,40 +268,4 @@ public class DatanodeProtocolServerSideTranslatorPB implements
     }
     return COMMIT_BLOCK_SYNCHRONIZATION_RESPONSE_PROTO;
   }
-
-  @Override
-  public long getProtocolVersion(String protocol, long clientVersion)
-      throws IOException {
-    return RPC.getProtocolVersion(DatanodeProtocolPB.class);
-  }
-
-  @Override
-  public ProtocolSignature getProtocolSignature(String protocol,
-      long clientVersion, int clientMethodsHash) throws IOException {
-    /**
-     * Don't forward this to the server. The protocol version and signature is
-     * that of {@link DatanodeProtocol}
-     */
-    if (!protocol.equals(RPC.getProtocolName(DatanodeProtocolPB.class))) {
-      throw new IOException("Namenode Serverside implements " +
-          RPC.getProtocolName(DatanodeProtocolPB.class) +
-          ". The following requested protocol is unknown: " + protocol);
-    }
-
-    return ProtocolSignature.getProtocolSignature(clientMethodsHash,
-        RPC.getProtocolVersion(DatanodeProtocolPB.class),
-        DatanodeProtocolPB.class);
-  }
-
-  @Override
-  public ProtocolSignatureWritable getProtocolSignature2(String protocol,
-      long clientVersion, int clientMethodsHash) throws IOException {
-    /**
-     * Don't forward this to the server. The protocol version and signature is
-     * that of {@link DatanodeProtocolPB}
-     */
-    return ProtocolSignatureWritable.convert(
-        this.getProtocolSignature(protocol, clientVersion, clientMethodsHash));
-  }
-
 }

@@ -46,19 +46,6 @@ public class GetUserMappingsProtocolClientSideTranslatorPB implements
     this.rpcProxy = rpcProxy;
   }

-  @Override
-  public long getProtocolVersion(String protocol, long clientVersion)
-      throws IOException {
-    return rpcProxy.getProtocolVersion(protocol, clientVersion);
-  }
-
-  @Override
-  public ProtocolSignature getProtocolSignature(String protocol,
-      long clientVersion, int clientMethodsHash) throws IOException {
-    return ProtocolSignatureWritable.convert(rpcProxy.getProtocolSignature2(
-        protocol, clientVersion, clientMethodsHash));
-  }
-
   @Override
   public void close() throws IOException {
     RPC.stopProxy(rpcProxy);

@@ -18,14 +18,10 @@

 package org.apache.hadoop.hdfs.protocolPB;

-import java.io.IOException;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.protocol.proto.GetUserMappingsProtocolProtos.GetUserMappingsProtocolService;
-import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
 import org.apache.hadoop.ipc.ProtocolInfo;
-import org.apache.hadoop.ipc.VersionedProtocol;

 @ProtocolInfo(
     protocolName = "org.apache.hadoop.tools.GetUserMappingsProtocol",
@@ -33,13 +29,5 @@ import org.apache.hadoop.ipc.VersionedProtocol;
 @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
 @InterfaceStability.Evolving
 public interface GetUserMappingsProtocolPB extends
-  GetUserMappingsProtocolService.BlockingInterface, VersionedProtocol {
-
-  /**
-   * This method is defined to get the protocol signature using
-   * the R23 protocol - hence we have added the suffix of 2 the method name
-   * to avoid conflict.
-   */
-  public ProtocolSignatureWritable getProtocolSignature2(String protocol,
-      long clientVersion, int clientMethodsHash) throws IOException;
+  GetUserMappingsProtocolService.BlockingInterface {
 }

@@ -22,9 +22,6 @@ import java.io.IOException;

 import org.apache.hadoop.hdfs.protocol.proto.GetUserMappingsProtocolProtos.GetGroupsForUserRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.GetUserMappingsProtocolProtos.GetGroupsForUserResponseProto;
-import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
-import org.apache.hadoop.ipc.ProtocolSignature;
-import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.tools.GetUserMappingsProtocol;

 import com.google.protobuf.RpcController;
@@ -40,42 +37,6 @@ public class GetUserMappingsProtocolServerSideTranslatorPB implements
     this.impl = impl;
   }

-  @Override
-  public long getProtocolVersion(String protocol, long clientVersion)
-      throws IOException {
-    return RPC.getProtocolVersion(GetUserMappingsProtocolPB.class);
-  }
-
-  @Override
-  public ProtocolSignature getProtocolSignature(String protocol,
-      long clientVersion, int clientMethodsHash) throws IOException {
-    /**
-     * Don't forward this to the server. The protocol version and signature is
-     * that of {@link GetUserMappingsProtocol}
-     */
-    if (!protocol.equals(RPC
-        .getProtocolName(GetUserMappingsProtocolPB.class))) {
-      throw new IOException("Namenode Serverside implements "
-          + RPC.getProtocolName(GetUserMappingsProtocolPB.class)
-          + ". The following requested protocol is unknown: " + protocol);
-    }
-
-    return ProtocolSignature.getProtocolSignature(clientMethodsHash,
-        RPC.getProtocolVersion(GetUserMappingsProtocolPB.class),
-        GetUserMappingsProtocolPB.class);
-  }
-
-  @Override
-  public ProtocolSignatureWritable getProtocolSignature2(String protocol,
-      long clientVersion, int clientMethodsHash) throws IOException {
-    /**
-     * Don't forward this to the server. The protocol version and signature is
-     * that of {@link GetUserMappingsProtocolPB}
-     */
-    return ProtocolSignatureWritable.convert(this.getProtocolSignature(
-        protocol, clientVersion, clientMethodsHash));
-  }
-
   @Override
   public GetGroupsForUserResponseProto getGroupsForUser(
       RpcController controller, GetGroupsForUserRequestProto request)

@@ -17,14 +17,10 @@
  */
 package org.apache.hadoop.hdfs.protocolPB;

-import java.io.IOException;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InterDatanodeProtocolService;
-import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
 import org.apache.hadoop.ipc.ProtocolInfo;
-import org.apache.hadoop.ipc.VersionedProtocol;
 import org.apache.hadoop.security.KerberosInfo;

 @KerberosInfo(
@@ -35,13 +31,5 @@ import org.apache.hadoop.security.KerberosInfo;
     protocolVersion = 1)
 @InterfaceAudience.Private
 public interface InterDatanodeProtocolPB extends
-    InterDatanodeProtocolService.BlockingInterface, VersionedProtocol {
-
-  /**
-   * This method is defined to get the protocol signature using
-   * the R23 protocol - hence we have added the suffix of 2 the method name
-   * to avoid conflict.
-   */
-  public ProtocolSignatureWritable getProtocolSignature2(String protocol,
-      long clientVersion, int clientMethodsHash) throws IOException;
+    InterDatanodeProtocolService.BlockingInterface {
 }

@@ -25,14 +25,9 @@ import org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitRep
 import org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto;
-import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
 import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
-import org.apache.hadoop.hdfs.server.protocol.JournalProtocol;
 import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
-import org.apache.hadoop.ipc.ProtocolSignature;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.ipc.VersionedProtocol;

 import com.google.protobuf.RpcController;
 import com.google.protobuf.ServiceException;
@@ -81,51 +76,4 @@ public class InterDatanodeProtocolServerSideTranslatorPB implements
     return UpdateReplicaUnderRecoveryResponseProto.newBuilder()
         .setBlock(PBHelper.convert(b)).build();
   }
-
-  /** @see VersionedProtocol#getProtocolVersion */
-  @Override
-  public long getProtocolVersion(String protocol, long clientVersion)
-      throws IOException {
-    return RPC.getProtocolVersion(InterDatanodeProtocolPB.class);
-  }
-
-  /**
-   * The client side will redirect getProtocolSignature to
-   * getProtocolSignature2.
-   *
-   * However the RPC layer below on the Server side will call getProtocolVersion
-   * and possibly in the future getProtocolSignature. Hence we still implement
-   * it even though the end client will never call this method.
-   *
-   * @see VersionedProtocol#getProtocolVersion
-   */
-  @Override
-  public ProtocolSignature getProtocolSignature(String protocol,
-      long clientVersion, int clientMethodsHash) throws IOException {
-    /**
-     * Don't forward this to the server. The protocol version and signature is
-     * that of {@link InterDatanodeProtocol}
-     */
-    if (!protocol.equals(RPC.getProtocolName(InterDatanodeProtocol.class))) {
-      throw new IOException("Namenode Serverside implements " +
-          RPC.getProtocolName(InterDatanodeProtocol.class) +
-          ". The following requested protocol is unknown: " + protocol);
-    }
-
-    return ProtocolSignature.getProtocolSignature(clientMethodsHash,
-        RPC.getProtocolVersion(InterDatanodeProtocolPB.class),
-        InterDatanodeProtocolPB.class);
-  }
-
-
-  @Override
-  public ProtocolSignatureWritable getProtocolSignature2(String protocol,
-      long clientVersion, int clientMethodsHash) throws IOException {
-    /**
-     * Don't forward this to the server. The protocol version and signature is
-     * that of {@link InterDatanodeProtocol}
-     */
-    return ProtocolSignatureWritable.convert(
-        this.getProtocolSignature(protocol, clientVersion, clientMethodsHash));
-  }
 }

@ -31,14 +31,12 @@ import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
|
||||||
import org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto;
|
import org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto;
|
||||||
import org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto;
|
import org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto;
|
||||||
import org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto;
|
import org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto;
|
||||||
import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
|
|
||||||
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
|
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
|
||||||
import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
|
import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
|
||||||
import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
|
import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
|
||||||
import org.apache.hadoop.ipc.ProtobufHelper;
|
import org.apache.hadoop.ipc.ProtobufHelper;
|
||||||
import org.apache.hadoop.ipc.ProtobufRpcEngine;
|
import org.apache.hadoop.ipc.ProtobufRpcEngine;
|
||||||
import org.apache.hadoop.ipc.ProtocolMetaInterface;
|
import org.apache.hadoop.ipc.ProtocolMetaInterface;
|
||||||
import org.apache.hadoop.ipc.ProtocolSignature;
|
|
||||||
import org.apache.hadoop.ipc.RPC;
|
import org.apache.hadoop.ipc.RPC;
|
||||||
import org.apache.hadoop.ipc.RpcClientUtil;
|
import org.apache.hadoop.ipc.RpcClientUtil;
|
||||||
import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
|
import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
|
||||||
|
@ -76,19 +74,6 @@ public class InterDatanodeProtocolTranslatorPB implements
|
||||||
RPC.stopProxy(rpcProxy);
|
RPC.stopProxy(rpcProxy);
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
|
||||||
public long getProtocolVersion(String protocolName, long clientVersion)
|
|
||||||
throws IOException {
|
|
||||||
return rpcProxy.getProtocolVersion(protocolName, clientVersion);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public ProtocolSignature getProtocolSignature(String protocol,
|
|
||||||
long clientVersion, int clientMethodsHash) throws IOException {
|
|
||||||
return ProtocolSignatureWritable.convert(rpcProxy.getProtocolSignature2(
|
|
||||||
protocol, clientVersion, clientMethodsHash));
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public ReplicaRecoveryInfo initReplicaRecovery(RecoveringBlock rBlock)
|
public ReplicaRecoveryInfo initReplicaRecovery(RecoveringBlock rBlock)
|
||||||
throws IOException {
|
throws IOException {
|
||||||
|
|
|
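For orientation, the translator hunks above delete the client-side getProtocolVersion/getProtocolSignature forwarding; what remains is a thin wrapper that only translates real protocol calls onto the proxy. A rough, self-contained sketch of that wrapper pattern in plain Java (the request/response and proxy types here are invented for illustration; the real class talks to a protobuf-generated proxy and closes it via RPC.stopProxy):

import java.io.Closeable;
import java.io.IOException;

// Invented stand-ins for the protobuf-generated request/response and proxy types.
class RecoverRequest { final String blockId; RecoverRequest(String blockId) { this.blockId = blockId; } }
class RecoverResponse { final boolean ok; RecoverResponse(boolean ok) { this.ok = ok; } }
interface GeneratedProxy { RecoverResponse recover(RecoverRequest req) throws IOException; }

// The translator owns the proxy, converts argument types, and exposes only the
// protocol's real methods; no version or signature plumbing remains.
class ProtocolTranslatorSketch implements Closeable {
  private final GeneratedProxy rpcProxy;

  ProtocolTranslatorSketch(GeneratedProxy rpcProxy) { this.rpcProxy = rpcProxy; }

  boolean recoverBlock(String blockId) throws IOException {
    return rpcProxy.recover(new RecoverRequest(blockId)).ok;  // translate and forward
  }

  @Override
  public void close() { /* the real class stops the RPC proxy here */ }
}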
@ -17,15 +17,11 @@
|
||||||
*/
|
*/
|
||||||
package org.apache.hadoop.hdfs.protocolPB;
|
package org.apache.hadoop.hdfs.protocolPB;
|
||||||
|
|
||||||
import java.io.IOException;
|
|
||||||
|
|
||||||
import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalProtocolService;
|
|
||||||
import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
|
|
||||||
import org.apache.hadoop.security.KerberosInfo;
|
|
||||||
import org.apache.hadoop.classification.InterfaceAudience;
|
import org.apache.hadoop.classification.InterfaceAudience;
|
||||||
import org.apache.hadoop.hdfs.DFSConfigKeys;
|
import org.apache.hadoop.hdfs.DFSConfigKeys;
|
||||||
|
import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalProtocolService;
|
||||||
import org.apache.hadoop.ipc.ProtocolInfo;
|
import org.apache.hadoop.ipc.ProtocolInfo;
|
||||||
import org.apache.hadoop.ipc.VersionedProtocol;
|
import org.apache.hadoop.security.KerberosInfo;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Protocol used to journal edits to a remote node. Currently,
|
* Protocol used to journal edits to a remote node. Currently,
|
||||||
|
@ -42,12 +38,5 @@ import org.apache.hadoop.ipc.VersionedProtocol;
|
||||||
protocolVersion = 1)
|
protocolVersion = 1)
|
||||||
@InterfaceAudience.Private
|
@InterfaceAudience.Private
|
||||||
public interface JournalProtocolPB extends
|
public interface JournalProtocolPB extends
|
||||||
JournalProtocolService.BlockingInterface, VersionedProtocol {
|
JournalProtocolService.BlockingInterface {
|
||||||
/**
|
|
||||||
* This method is defined to get the protocol signature using
|
|
||||||
* the R23 protocol - hence we have added the suffix of 2 to the method name
|
|
||||||
* to avoid conflict.
|
|
||||||
*/
|
|
||||||
public ProtocolSignatureWritable getProtocolSignature2(String protocol,
|
|
||||||
long clientVersion, int clientMethodsHash) throws IOException;
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -24,11 +24,7 @@ import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalReques
|
||||||
import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto;
|
import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto;
|
||||||
import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto;
|
import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto;
|
||||||
import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto;
|
import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto;
|
||||||
import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
|
|
||||||
import org.apache.hadoop.hdfs.server.protocol.JournalProtocol;
|
import org.apache.hadoop.hdfs.server.protocol.JournalProtocol;
|
||||||
import org.apache.hadoop.ipc.ProtocolSignature;
|
|
||||||
import org.apache.hadoop.ipc.RPC;
|
|
||||||
import org.apache.hadoop.ipc.VersionedProtocol;
|
|
||||||
|
|
||||||
import com.google.protobuf.RpcController;
|
import com.google.protobuf.RpcController;
|
||||||
import com.google.protobuf.ServiceException;
|
import com.google.protobuf.ServiceException;
|
||||||
|
@ -73,51 +69,4 @@ public class JournalProtocolServerSideTranslatorPB implements JournalProtocolPB
|
||||||
}
|
}
|
||||||
return StartLogSegmentResponseProto.newBuilder().build();
|
return StartLogSegmentResponseProto.newBuilder().build();
|
||||||
}
|
}
|
||||||
|
|
||||||
/** @see VersionedProtocol#getProtocolVersion */
|
|
||||||
@Override
|
|
||||||
public long getProtocolVersion(String protocol, long clientVersion)
|
|
||||||
throws IOException {
|
|
||||||
return RPC.getProtocolVersion(JournalProtocolPB.class);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* The client side will redirect getProtocolSignature to
|
|
||||||
* getProtocolSignature2.
|
|
||||||
*
|
|
||||||
* However the RPC layer below on the Server side will call getProtocolVersion
|
|
||||||
* and possibly in the future getProtocolSignature. Hence we still implement
|
|
||||||
* it even though the end client will never call this method.
|
|
||||||
*
|
|
||||||
* @see VersionedProtocol#getProtocolSignature(String, long, int)
|
|
||||||
*/
|
|
||||||
@Override
|
|
||||||
public ProtocolSignature getProtocolSignature(String protocol,
|
|
||||||
long clientVersion, int clientMethodsHash) throws IOException {
|
|
||||||
/**
|
|
||||||
* Don't forward this to the server. The protocol version and signature is
|
|
||||||
* that of {@link JournalProtocol}
|
|
||||||
*/
|
|
||||||
if (!protocol.equals(RPC.getProtocolName(JournalProtocolPB.class))) {
|
|
||||||
throw new IOException("Namenode Serverside implements " +
|
|
||||||
RPC.getProtocolName(JournalProtocolPB.class) +
|
|
||||||
". The following requested protocol is unknown: " + protocol);
|
|
||||||
}
|
|
||||||
|
|
||||||
return ProtocolSignature.getProtocolSignature(clientMethodsHash,
|
|
||||||
RPC.getProtocolVersion(JournalProtocolPB.class),
|
|
||||||
JournalProtocolPB.class);
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public ProtocolSignatureWritable getProtocolSignature2(String protocol,
|
|
||||||
long clientVersion, int clientMethodsHash) throws IOException {
|
|
||||||
/**
|
|
||||||
* Don't forward this to the server. The protocol version and signature is
|
|
||||||
* that of {@link JournalPBProtocol}
|
|
||||||
*/
|
|
||||||
return ProtocolSignatureWritable.convert(
|
|
||||||
this.getProtocolSignature(protocol, clientVersion, clientMethodsHash));
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -59,19 +59,6 @@ public class JournalProtocolTranslatorPB implements ProtocolMetaInterface,
|
||||||
RPC.stopProxy(rpcProxy);
|
RPC.stopProxy(rpcProxy);
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
|
||||||
public long getProtocolVersion(String protocolName, long clientVersion)
|
|
||||||
throws IOException {
|
|
||||||
return rpcProxy.getProtocolVersion(protocolName, clientVersion);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public ProtocolSignature getProtocolSignature(String protocol,
|
|
||||||
long clientVersion, int clientMethodsHash) throws IOException {
|
|
||||||
return ProtocolSignatureWritable.convert(rpcProxy.getProtocolSignature2(
|
|
||||||
protocol, clientVersion, clientMethodsHash));
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void journal(NamenodeRegistration reg, long firstTxnId,
|
public void journal(NamenodeRegistration reg, long firstTxnId,
|
||||||
int numTxns, byte[] records) throws IOException {
|
int numTxns, byte[] records) throws IOException {
|
||||||
|
|
|
@ -18,14 +18,10 @@
|
||||||
|
|
||||||
package org.apache.hadoop.hdfs.protocolPB;
|
package org.apache.hadoop.hdfs.protocolPB;
|
||||||
|
|
||||||
import java.io.IOException;
|
|
||||||
|
|
||||||
import org.apache.hadoop.classification.InterfaceAudience;
|
import org.apache.hadoop.classification.InterfaceAudience;
|
||||||
import org.apache.hadoop.hdfs.DFSConfigKeys;
|
import org.apache.hadoop.hdfs.DFSConfigKeys;
|
||||||
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.NamenodeProtocolService;
|
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.NamenodeProtocolService;
|
||||||
import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
|
|
||||||
import org.apache.hadoop.ipc.ProtocolInfo;
|
import org.apache.hadoop.ipc.ProtocolInfo;
|
||||||
import org.apache.hadoop.ipc.VersionedProtocol;
|
|
||||||
import org.apache.hadoop.security.KerberosInfo;
|
import org.apache.hadoop.security.KerberosInfo;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -43,12 +39,5 @@ import org.apache.hadoop.security.KerberosInfo;
|
||||||
protocolVersion = 1)
|
protocolVersion = 1)
|
||||||
@InterfaceAudience.Private
|
@InterfaceAudience.Private
|
||||||
public interface NamenodeProtocolPB extends
|
public interface NamenodeProtocolPB extends
|
||||||
NamenodeProtocolService.BlockingInterface, VersionedProtocol {
|
NamenodeProtocolService.BlockingInterface {
|
||||||
/**
|
|
||||||
* This method is defined to get the protocol signature using
|
|
||||||
* the R23 protocol - hence we have added the suffix of 2 to the method name
|
|
||||||
* to avoid conflict.
|
|
||||||
*/
|
|
||||||
public ProtocolSignatureWritable getProtocolSignature2(String protocol,
|
|
||||||
long clientVersion, int clientMethodsHash) throws IOException;
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -40,7 +40,6 @@ import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogR
|
||||||
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto;
|
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto;
|
||||||
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto;
|
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto;
|
||||||
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto;
|
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto;
|
||||||
import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
|
|
||||||
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
|
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
|
||||||
import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
|
import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
|
||||||
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
|
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
|
||||||
|
@ -49,8 +48,6 @@ import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
|
||||||
import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
|
import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
|
||||||
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
|
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
|
||||||
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
|
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
|
||||||
import org.apache.hadoop.ipc.ProtocolSignature;
|
|
||||||
import org.apache.hadoop.ipc.RPC;
|
|
||||||
|
|
||||||
import com.google.protobuf.RpcController;
|
import com.google.protobuf.RpcController;
|
||||||
import com.google.protobuf.ServiceException;
|
import com.google.protobuf.ServiceException;
|
||||||
|
@ -184,50 +181,6 @@ public class NamenodeProtocolServerSideTranslatorPB implements
|
||||||
return GetEditLogManifestResponseProto.newBuilder()
|
return GetEditLogManifestResponseProto.newBuilder()
|
||||||
.setManifest(PBHelper.convert(manifest)).build();
|
.setManifest(PBHelper.convert(manifest)).build();
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
|
||||||
public long getProtocolVersion(String protocol, long clientVersion)
|
|
||||||
throws IOException {
|
|
||||||
return RPC.getProtocolVersion(NamenodeProtocolPB.class);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* The client side will redirect getProtocolSignature to
|
|
||||||
* getProtocolSignature2.
|
|
||||||
*
|
|
||||||
* However the RPC layer below on the Server side will call getProtocolVersion
|
|
||||||
* and possibly in the future getProtocolSignature. Hence we still implement
|
|
||||||
* it even though the end client will never call this method.
|
|
||||||
*/
|
|
||||||
@Override
|
|
||||||
public ProtocolSignature getProtocolSignature(String protocol,
|
|
||||||
long clientVersion, int clientMethodsHash) throws IOException {
|
|
||||||
/**
|
|
||||||
* Don't forward this to the server. The protocol version and signature is
|
|
||||||
* that of {@link NamenodeProtocol}
|
|
||||||
*/
|
|
||||||
if (!protocol.equals(RPC.getProtocolName(NamenodeProtocolPB.class))) {
|
|
||||||
throw new IOException("Namenode Serverside implements " +
|
|
||||||
RPC.getProtocolName(NamenodeProtocolPB.class) +
|
|
||||||
". The following requested protocol is unknown: " + protocol);
|
|
||||||
}
|
|
||||||
|
|
||||||
return ProtocolSignature.getProtocolSignature(clientMethodsHash,
|
|
||||||
RPC.getProtocolVersion(NamenodeProtocolPB.class),
|
|
||||||
NamenodeProtocolPB.class);
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public ProtocolSignatureWritable getProtocolSignature2(String protocol,
|
|
||||||
long clientVersion, int clientMethodsHash) throws IOException {
|
|
||||||
/**
|
|
||||||
* Don't forward this to the server. The protocol version and signature is
|
|
||||||
* that of {@link NamenodePBProtocol}
|
|
||||||
*/
|
|
||||||
return ProtocolSignatureWritable.convert(
|
|
||||||
this.getProtocolSignature(protocol, clientVersion, clientMethodsHash));
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public VersionResponseProto versionRequest(RpcController controller,
|
public VersionResponseProto versionRequest(RpcController controller,
|
||||||
|
|
|
@ -88,19 +88,6 @@ public class NamenodeProtocolTranslatorPB implements NamenodeProtocol,
|
||||||
RPC.stopProxy(rpcProxy);
|
RPC.stopProxy(rpcProxy);
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
|
||||||
public ProtocolSignature getProtocolSignature(String protocolName,
|
|
||||||
long clientVersion, int clientMethodHash) throws IOException {
|
|
||||||
return ProtocolSignatureWritable.convert(rpcProxy.getProtocolSignature2(
|
|
||||||
protocolName, clientVersion, clientMethodHash));
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public long getProtocolVersion(String protocolName, long clientVersion)
|
|
||||||
throws IOException {
|
|
||||||
return rpcProxy.getProtocolVersion(protocolName, clientVersion);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public BlocksWithLocations getBlocks(DatanodeInfo datanode, long size)
|
public BlocksWithLocations getBlocks(DatanodeInfo datanode, long size)
|
||||||
throws IOException {
|
throws IOException {
|
||||||
|
|
|
@ -46,19 +46,6 @@ public class RefreshAuthorizationPolicyProtocolClientSideTranslatorPB implements
|
||||||
this.rpcProxy = rpcProxy;
|
this.rpcProxy = rpcProxy;
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
|
||||||
public long getProtocolVersion(String protocol, long clientVersion)
|
|
||||||
throws IOException {
|
|
||||||
return rpcProxy.getProtocolVersion(protocol, clientVersion);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public ProtocolSignature getProtocolSignature(String protocol,
|
|
||||||
long clientVersion, int clientMethodsHash) throws IOException {
|
|
||||||
return ProtocolSignatureWritable.convert(rpcProxy.getProtocolSignature2(
|
|
||||||
protocol, clientVersion, clientMethodsHash));
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void close() throws IOException {
|
public void close() throws IOException {
|
||||||
RPC.stopProxy(rpcProxy);
|
RPC.stopProxy(rpcProxy);
|
||||||
|
|
|
@ -18,15 +18,11 @@
|
||||||
|
|
||||||
package org.apache.hadoop.hdfs.protocolPB;
|
package org.apache.hadoop.hdfs.protocolPB;
|
||||||
|
|
||||||
import java.io.IOException;
|
|
||||||
|
|
||||||
import org.apache.hadoop.classification.InterfaceAudience;
|
import org.apache.hadoop.classification.InterfaceAudience;
|
||||||
import org.apache.hadoop.classification.InterfaceStability;
|
import org.apache.hadoop.classification.InterfaceStability;
|
||||||
import org.apache.hadoop.fs.CommonConfigurationKeys;
|
import org.apache.hadoop.fs.CommonConfigurationKeys;
|
||||||
import org.apache.hadoop.hdfs.protocol.proto.RefreshAuthorizationPolicyProtocolProtos.RefreshAuthorizationPolicyProtocolService;
|
import org.apache.hadoop.hdfs.protocol.proto.RefreshAuthorizationPolicyProtocolProtos.RefreshAuthorizationPolicyProtocolService;
|
||||||
import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
|
|
||||||
import org.apache.hadoop.ipc.ProtocolInfo;
|
import org.apache.hadoop.ipc.ProtocolInfo;
|
||||||
import org.apache.hadoop.ipc.VersionedProtocol;
|
|
||||||
import org.apache.hadoop.security.KerberosInfo;
|
import org.apache.hadoop.security.KerberosInfo;
|
||||||
|
|
||||||
@KerberosInfo(
|
@KerberosInfo(
|
||||||
|
@ -37,13 +33,5 @@ import org.apache.hadoop.security.KerberosInfo;
|
||||||
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
|
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
|
||||||
@InterfaceStability.Evolving
|
@InterfaceStability.Evolving
|
||||||
public interface RefreshAuthorizationPolicyProtocolPB extends
|
public interface RefreshAuthorizationPolicyProtocolPB extends
|
||||||
RefreshAuthorizationPolicyProtocolService.BlockingInterface, VersionedProtocol {
|
RefreshAuthorizationPolicyProtocolService.BlockingInterface {
|
||||||
|
|
||||||
/**
|
|
||||||
* This method is defined to get the protocol signature using
|
|
||||||
* the R23 protocol - hence we have added the suffix of 2 to the method name
|
|
||||||
* to avoid conflict.
|
|
||||||
*/
|
|
||||||
public ProtocolSignatureWritable getProtocolSignature2(String protocol,
|
|
||||||
long clientVersion, int clientMethodsHash) throws IOException;
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -22,9 +22,6 @@ import java.io.IOException;
|
||||||
|
|
||||||
import org.apache.hadoop.hdfs.protocol.proto.RefreshAuthorizationPolicyProtocolProtos.RefreshServiceAclRequestProto;
|
import org.apache.hadoop.hdfs.protocol.proto.RefreshAuthorizationPolicyProtocolProtos.RefreshServiceAclRequestProto;
|
||||||
import org.apache.hadoop.hdfs.protocol.proto.RefreshAuthorizationPolicyProtocolProtos.RefreshServiceAclResponseProto;
|
import org.apache.hadoop.hdfs.protocol.proto.RefreshAuthorizationPolicyProtocolProtos.RefreshServiceAclResponseProto;
|
||||||
import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
|
|
||||||
import org.apache.hadoop.ipc.ProtocolSignature;
|
|
||||||
import org.apache.hadoop.ipc.RPC;
|
|
||||||
import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
|
import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
|
||||||
|
|
||||||
import com.google.protobuf.RpcController;
|
import com.google.protobuf.RpcController;
|
||||||
|
@ -40,42 +37,6 @@ public class RefreshAuthorizationPolicyProtocolServerSideTranslatorPB implements
|
||||||
this.impl = impl;
|
this.impl = impl;
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
|
||||||
public long getProtocolVersion(String protocol, long clientVersion)
|
|
||||||
throws IOException {
|
|
||||||
return RPC.getProtocolVersion(RefreshAuthorizationPolicyProtocolPB.class);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public ProtocolSignature getProtocolSignature(String protocol,
|
|
||||||
long clientVersion, int clientMethodsHash) throws IOException {
|
|
||||||
/**
|
|
||||||
* Don't forward this to the server. The protocol version and signature is
|
|
||||||
* that of {@link RefreshAuthorizationPolicyProtocol}
|
|
||||||
*/
|
|
||||||
if (!protocol.equals(RPC
|
|
||||||
.getProtocolName(RefreshAuthorizationPolicyProtocolPB.class))) {
|
|
||||||
throw new IOException("Namenode Serverside implements "
|
|
||||||
+ RPC.getProtocolName(RefreshAuthorizationPolicyProtocolPB.class)
|
|
||||||
+ ". The following requested protocol is unknown: " + protocol);
|
|
||||||
}
|
|
||||||
|
|
||||||
return ProtocolSignature.getProtocolSignature(clientMethodsHash,
|
|
||||||
RPC.getProtocolVersion(RefreshAuthorizationPolicyProtocolPB.class),
|
|
||||||
RefreshAuthorizationPolicyProtocolPB.class);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public ProtocolSignatureWritable getProtocolSignature2(String protocol,
|
|
||||||
long clientVersion, int clientMethodsHash) throws IOException {
|
|
||||||
/**
|
|
||||||
* Don't forward this to the server. The protocol version and signature is
|
|
||||||
* that of {@link RefreshAuthorizationPolicyProtocolPB}
|
|
||||||
*/
|
|
||||||
return ProtocolSignatureWritable.convert(this.getProtocolSignature(
|
|
||||||
protocol, clientVersion, clientMethodsHash));
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public RefreshServiceAclResponseProto refreshServiceAcl(
|
public RefreshServiceAclResponseProto refreshServiceAcl(
|
||||||
RpcController controller, RefreshServiceAclRequestProto request)
|
RpcController controller, RefreshServiceAclRequestProto request)
|
||||||
|
|
|
@ -47,19 +47,6 @@ public class RefreshUserMappingsProtocolClientSideTranslatorPB implements
|
||||||
this.rpcProxy = rpcProxy;
|
this.rpcProxy = rpcProxy;
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
|
||||||
public long getProtocolVersion(String protocol, long clientVersion)
|
|
||||||
throws IOException {
|
|
||||||
return rpcProxy.getProtocolVersion(protocol, clientVersion);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public ProtocolSignature getProtocolSignature(String protocol,
|
|
||||||
long clientVersion, int clientMethodsHash) throws IOException {
|
|
||||||
return ProtocolSignatureWritable.convert(rpcProxy.getProtocolSignature2(
|
|
||||||
protocol, clientVersion, clientMethodsHash));
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void close() throws IOException {
|
public void close() throws IOException {
|
||||||
RPC.stopProxy(rpcProxy);
|
RPC.stopProxy(rpcProxy);
|
||||||
|
|
|
@ -18,15 +18,11 @@
|
||||||
|
|
||||||
package org.apache.hadoop.hdfs.protocolPB;
|
package org.apache.hadoop.hdfs.protocolPB;
|
||||||
|
|
||||||
import java.io.IOException;
|
|
||||||
|
|
||||||
import org.apache.hadoop.classification.InterfaceAudience;
|
import org.apache.hadoop.classification.InterfaceAudience;
|
||||||
import org.apache.hadoop.classification.InterfaceStability;
|
import org.apache.hadoop.classification.InterfaceStability;
|
||||||
import org.apache.hadoop.fs.CommonConfigurationKeys;
|
import org.apache.hadoop.fs.CommonConfigurationKeys;
|
||||||
import org.apache.hadoop.hdfs.protocol.proto.RefreshUserMappingsProtocolProtos.RefreshUserMappingsProtocolService;
|
import org.apache.hadoop.hdfs.protocol.proto.RefreshUserMappingsProtocolProtos.RefreshUserMappingsProtocolService;
|
||||||
import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
|
|
||||||
import org.apache.hadoop.ipc.ProtocolInfo;
|
import org.apache.hadoop.ipc.ProtocolInfo;
|
||||||
import org.apache.hadoop.ipc.VersionedProtocol;
|
|
||||||
import org.apache.hadoop.security.KerberosInfo;
|
import org.apache.hadoop.security.KerberosInfo;
|
||||||
|
|
||||||
@KerberosInfo(
|
@KerberosInfo(
|
||||||
|
@ -37,13 +33,5 @@ import org.apache.hadoop.security.KerberosInfo;
|
||||||
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
|
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
|
||||||
@InterfaceStability.Evolving
|
@InterfaceStability.Evolving
|
||||||
public interface RefreshUserMappingsProtocolPB extends
|
public interface RefreshUserMappingsProtocolPB extends
|
||||||
RefreshUserMappingsProtocolService.BlockingInterface, VersionedProtocol {
|
RefreshUserMappingsProtocolService.BlockingInterface {
|
||||||
|
|
||||||
/**
|
|
||||||
* This method is defined to get the protocol signature using
|
|
||||||
* the R23 protocol - hence we have added the suffix of 2 to the method name
|
|
||||||
* to avoid conflict.
|
|
||||||
*/
|
|
||||||
public ProtocolSignatureWritable getProtocolSignature2(String protocol,
|
|
||||||
long clientVersion, int clientMethodsHash) throws IOException;
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -24,9 +24,6 @@ import org.apache.hadoop.hdfs.protocol.proto.RefreshUserMappingsProtocolProtos.R
|
||||||
import org.apache.hadoop.hdfs.protocol.proto.RefreshUserMappingsProtocolProtos.RefreshSuperUserGroupsConfigurationResponseProto;
|
import org.apache.hadoop.hdfs.protocol.proto.RefreshUserMappingsProtocolProtos.RefreshSuperUserGroupsConfigurationResponseProto;
|
||||||
import org.apache.hadoop.hdfs.protocol.proto.RefreshUserMappingsProtocolProtos.RefreshUserToGroupsMappingsRequestProto;
|
import org.apache.hadoop.hdfs.protocol.proto.RefreshUserMappingsProtocolProtos.RefreshUserToGroupsMappingsRequestProto;
|
||||||
import org.apache.hadoop.hdfs.protocol.proto.RefreshUserMappingsProtocolProtos.RefreshUserToGroupsMappingsResponseProto;
|
import org.apache.hadoop.hdfs.protocol.proto.RefreshUserMappingsProtocolProtos.RefreshUserToGroupsMappingsResponseProto;
|
||||||
import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
|
|
||||||
import org.apache.hadoop.ipc.ProtocolSignature;
|
|
||||||
import org.apache.hadoop.ipc.RPC;
|
|
||||||
import org.apache.hadoop.security.RefreshUserMappingsProtocol;
|
import org.apache.hadoop.security.RefreshUserMappingsProtocol;
|
||||||
|
|
||||||
import com.google.protobuf.RpcController;
|
import com.google.protobuf.RpcController;
|
||||||
|
@ -66,40 +63,4 @@ public class RefreshUserMappingsProtocolServerSideTranslatorPB implements Refres
|
||||||
return RefreshSuperUserGroupsConfigurationResponseProto.newBuilder()
|
return RefreshSuperUserGroupsConfigurationResponseProto.newBuilder()
|
||||||
.build();
|
.build();
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
|
||||||
public long getProtocolVersion(String protocol, long clientVersion)
|
|
||||||
throws IOException {
|
|
||||||
return RPC.getProtocolVersion(RefreshUserMappingsProtocolPB.class);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public ProtocolSignature getProtocolSignature(String protocol,
|
|
||||||
long clientVersion, int clientMethodsHash) throws IOException {
|
|
||||||
/**
|
|
||||||
* Don't forward this to the server. The protocol version and signature is
|
|
||||||
* that of {@link RefreshUserMappingsProtocol}
|
|
||||||
*/
|
|
||||||
if (!protocol.equals(RPC
|
|
||||||
.getProtocolName(RefreshUserMappingsProtocolPB.class))) {
|
|
||||||
throw new IOException("Namenode Serverside implements "
|
|
||||||
+ RPC.getProtocolName(RefreshUserMappingsProtocolPB.class)
|
|
||||||
+ ". The following requested protocol is unknown: " + protocol);
|
|
||||||
}
|
|
||||||
|
|
||||||
return ProtocolSignature.getProtocolSignature(clientMethodsHash,
|
|
||||||
RPC.getProtocolVersion(RefreshUserMappingsProtocolPB.class),
|
|
||||||
RefreshUserMappingsProtocolPB.class);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public ProtocolSignatureWritable getProtocolSignature2(String protocol,
|
|
||||||
long clientVersion, int clientMethodsHash) throws IOException {
|
|
||||||
/**
|
|
||||||
* Don't forward this to the server. The protocol version and signature is
|
|
||||||
* that of {@link RefreshUserMappingsProtocolPB}
|
|
||||||
*/
|
|
||||||
return ProtocolSignatureWritable.convert(this.getProtocolSignature(
|
|
||||||
protocol, clientVersion, clientMethodsHash));
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -626,9 +626,12 @@ public class DataNode extends Configured
|
||||||
// DatanodeProtocol namenode,
|
// DatanodeProtocol namenode,
|
||||||
SecureResources resources
|
SecureResources resources
|
||||||
) throws IOException {
|
) throws IOException {
|
||||||
if(UserGroupInformation.isSecurityEnabled() && resources == null)
|
if(UserGroupInformation.isSecurityEnabled() && resources == null) {
|
||||||
throw new RuntimeException("Cannot start secure cluster without " +
|
if (!conf.getBoolean("ignore.secure.ports.for.testing", false)) {
|
||||||
"privileged resources.");
|
throw new RuntimeException("Cannot start secure cluster without "
|
||||||
|
+ "privileged resources.");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// settings global for all BPs in the Data Node
|
// settings global for all BPs in the Data Node
|
||||||
this.secureResources = resources;
|
this.secureResources = resources;
|
||||||
|
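The DataNode hunk above turns the unconditional failure into one that can be bypassed by the test-only key ignore.secure.ports.for.testing, which this diff introduces. A compact sketch of that guard, using a plain properties map as a stand-in for Hadoop's Configuration object:

import java.util.Map;

class SecureStartupCheckSketch {
  /**
   * Fail fast when security is on but no privileged resources were handed in,
   * unless a test explicitly opted out via the test-only override key.
   */
  static void checkSecureResources(boolean securityEnabled, Object secureResources,
                                   Map<String, String> conf) {
    if (securityEnabled && secureResources == null) {
      boolean ignoreForTesting =
          Boolean.parseBoolean(conf.getOrDefault("ignore.secure.ports.for.testing", "false"));
      if (!ignoreForTesting) {
        throw new RuntimeException("Cannot start secure cluster without privileged resources.");
      }
    }
  }
}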
@ -1780,25 +1783,6 @@ public class DataNode extends Configured
|
||||||
return new ExtendedBlock(oldBlock.getBlockPoolId(), r);
|
return new ExtendedBlock(oldBlock.getBlockPoolId(), r);
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
|
||||||
public long getProtocolVersion(String protocol, long clientVersion
|
|
||||||
) throws IOException {
|
|
||||||
if (protocol.equals(InterDatanodeProtocol.class.getName())) {
|
|
||||||
return InterDatanodeProtocol.versionID;
|
|
||||||
} else if (protocol.equals(ClientDatanodeProtocol.class.getName())) {
|
|
||||||
return ClientDatanodeProtocol.versionID;
|
|
||||||
}
|
|
||||||
throw new IOException("Unknown protocol to " + getClass().getSimpleName()
|
|
||||||
+ ": " + protocol);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public ProtocolSignature getProtocolSignature(String protocol,
|
|
||||||
long clientVersion, int clientMethodsHash) throws IOException {
|
|
||||||
return ProtocolSignature.getProtocolSignature(
|
|
||||||
this, protocol, clientVersion, clientMethodsHash);
|
|
||||||
}
|
|
||||||
|
|
||||||
/** A convenient class used in block recovery */
|
/** A convenient class used in block recovery */
|
||||||
static class BlockRecord {
|
static class BlockRecord {
|
||||||
final DatanodeID id;
|
final DatanodeID id;
|
||||||
|
|
|
@ -41,8 +41,8 @@ import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
|
||||||
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
|
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
|
||||||
import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
|
import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
|
||||||
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
|
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
|
||||||
import org.apache.hadoop.ipc.RPC;
|
|
||||||
import org.apache.hadoop.ipc.StandbyException;
|
import org.apache.hadoop.ipc.StandbyException;
|
||||||
|
import org.apache.hadoop.ipc.RPC;
|
||||||
import org.apache.hadoop.net.NetUtils;
|
import org.apache.hadoop.net.NetUtils;
|
||||||
import org.apache.hadoop.security.UserGroupInformation;
|
import org.apache.hadoop.security.UserGroupInformation;
|
||||||
|
|
||||||
|
@ -229,15 +229,6 @@ public class BackupNode extends NameNode {
|
||||||
nnRpcAddress = nn.nnRpcAddress;
|
nnRpcAddress = nn.nnRpcAddress;
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
|
||||||
public long getProtocolVersion(String protocol, long clientVersion)
|
|
||||||
throws IOException {
|
|
||||||
if (protocol.equals(JournalProtocol.class.getName())) {
|
|
||||||
return JournalProtocol.versionID;
|
|
||||||
}
|
|
||||||
return super.getProtocolVersion(protocol, clientVersion);
|
|
||||||
}
|
|
||||||
|
|
||||||
/////////////////////////////////////////////////////
|
/////////////////////////////////////////////////////
|
||||||
// BackupNodeProtocol implementation for backup node.
|
// BackupNodeProtocol implementation for backup node.
|
||||||
/////////////////////////////////////////////////////
|
/////////////////////////////////////////////////////
|
||||||
|
|
|
@ -86,7 +86,7 @@ class EditsDoubleBuffer {
|
||||||
}
|
}
|
||||||
|
|
||||||
boolean shouldForceSync() {
|
boolean shouldForceSync() {
|
||||||
return bufReady.size() >= initBufferSize;
|
return bufCurrent.size() >= initBufferSize;
|
||||||
}
|
}
|
||||||
|
|
||||||
DataOutputBuffer getCurrentBuf() {
|
DataOutputBuffer getCurrentBuf() {
|
||||||
|
|
|
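The one-line EditsDoubleBuffer fix above matters because the ready buffer has already been handed off to the sync path; only the buffer still accepting writes can grow past its initial size. A stripped-down double buffer in plain Java showing why the check belongs on the current buffer (types and sizing are simplified relative to the real class):

import java.io.ByteArrayOutputStream;

class DoubleBufferSketch {
  private final int initBufferSize;
  private ByteArrayOutputStream bufCurrent;  // accepting new edits
  private ByteArrayOutputStream bufReady;    // waiting to be flushed by the sync thread

  DoubleBufferSketch(int initBufferSize) {
    this.initBufferSize = initBufferSize;
    this.bufCurrent = new ByteArrayOutputStream(initBufferSize);
    this.bufReady = new ByteArrayOutputStream(initBufferSize);
  }

  void write(byte[] edit) {
    bufCurrent.write(edit, 0, edit.length);
  }

  /** Swap buffers so the accumulated edits can be flushed. */
  void setReadyToFlush() {
    ByteArrayOutputStream tmp = bufReady;
    bufReady = bufCurrent;
    bufCurrent = tmp;
    bufCurrent.reset();
  }

  /**
   * Force a sync when the buffer still being written has outgrown its initial
   * size; checking bufReady (the pre-fix behaviour) would look at data already
   * handed off for flushing rather than what is still accumulating.
   */
  boolean shouldForceSync() {
    return bufCurrent.size() >= initBufferSize;
  }
}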
@ -249,8 +249,6 @@ public class FSDirectory implements Closeable {
|
||||||
+" to the file system");
|
+" to the file system");
|
||||||
return null;
|
return null;
|
||||||
}
|
}
|
||||||
// add create file record to log, record new generation stamp
|
|
||||||
fsImage.getEditLog().logOpenFile(path, newNode);
|
|
||||||
|
|
||||||
if(NameNode.stateChangeLog.isDebugEnabled()) {
|
if(NameNode.stateChangeLog.isDebugEnabled()) {
|
||||||
NameNode.stateChangeLog.debug("DIR* FSDirectory.addFile: "
|
NameNode.stateChangeLog.debug("DIR* FSDirectory.addFile: "
|
||||||
|
|
|
@ -821,6 +821,14 @@ public class FSEditLog {
|
||||||
this.runtime = runtime;
|
this.runtime = runtime;
|
||||||
this.journalSet.setRuntimeForTesting(runtime);
|
this.journalSet.setRuntimeForTesting(runtime);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Used only by tests.
|
||||||
|
*/
|
||||||
|
@VisibleForTesting
|
||||||
|
void setMetricsForTests(NameNodeMetrics metrics) {
|
||||||
|
this.metrics = metrics;
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Return a manifest of what finalized edit logs are available
|
* Return a manifest of what finalized edit logs are available
|
||||||
|
|
|
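The small FSEditLog hunk above adds a test-only setter guarded by @VisibleForTesting, which the diff already imports from Guava. A minimal sketch of that injection pattern, assuming Guava is on the classpath (as it is for Hadoop); the Metrics type below is a placeholder, not the real NameNodeMetrics:

import com.google.common.annotations.VisibleForTesting;

class EditLogMetricsSketch {
  /** Placeholder for the metrics object used in production. */
  static class Metrics { }

  private Metrics metrics = new Metrics();

  /** Production code keeps using the field; tests may swap in a mock. */
  @VisibleForTesting
  void setMetricsForTests(Metrics metrics) {
    this.metrics = metrics;
  }

  Metrics getMetrics() {
    return metrics;
  }
}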
@ -112,9 +112,8 @@ public class FSEditLogLoader {
|
||||||
long recentOpcodeOffsets[] = new long[4];
|
long recentOpcodeOffsets[] = new long[4];
|
||||||
Arrays.fill(recentOpcodeOffsets, -1);
|
Arrays.fill(recentOpcodeOffsets, -1);
|
||||||
|
|
||||||
|
long txId = expectedStartingTxId - 1;
|
||||||
try {
|
try {
|
||||||
long txId = expectedStartingTxId - 1;
|
|
||||||
|
|
||||||
try {
|
try {
|
||||||
while (true) {
|
while (true) {
|
||||||
FSEditLogOp op;
|
FSEditLogOp op;
|
||||||
|
@ -123,7 +122,8 @@ public class FSEditLogLoader {
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
} catch (IOException ioe) {
|
} catch (IOException ioe) {
|
||||||
String errorMessage = formatEditLogReplayError(in, recentOpcodeOffsets);
|
long badTxId = txId + 1; // because txId hasn't been incremented yet
|
||||||
|
String errorMessage = formatEditLogReplayError(in, recentOpcodeOffsets, badTxId);
|
||||||
FSImage.LOG.error(errorMessage);
|
FSImage.LOG.error(errorMessage);
|
||||||
throw new EditLogInputException(errorMessage,
|
throw new EditLogInputException(errorMessage,
|
||||||
ioe, numEdits);
|
ioe, numEdits);
|
||||||
|
@ -131,12 +131,12 @@ public class FSEditLogLoader {
|
||||||
recentOpcodeOffsets[(int)(numEdits % recentOpcodeOffsets.length)] =
|
recentOpcodeOffsets[(int)(numEdits % recentOpcodeOffsets.length)] =
|
||||||
in.getPosition();
|
in.getPosition();
|
||||||
if (LayoutVersion.supports(Feature.STORED_TXIDS, logVersion)) {
|
if (LayoutVersion.supports(Feature.STORED_TXIDS, logVersion)) {
|
||||||
long thisTxId = op.txid;
|
long expectedTxId = txId + 1;
|
||||||
if (thisTxId != txId + 1) {
|
txId = op.txid;
|
||||||
|
if (txId != expectedTxId) {
|
||||||
throw new IOException("Expected transaction ID " +
|
throw new IOException("Expected transaction ID " +
|
||||||
(txId + 1) + " but got " + thisTxId);
|
expectedTxId + " but got " + txId);
|
||||||
}
|
}
|
||||||
txId = thisTxId;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
incrOpCount(op.opCode, opCounts);
|
incrOpCount(op.opCode, opCounts);
|
||||||
|
@ -145,7 +145,7 @@ public class FSEditLogLoader {
|
||||||
} catch (Throwable t) {
|
} catch (Throwable t) {
|
||||||
// Catch Throwable because in the case of a truly corrupt edits log, any
|
// Catch Throwable because in the case of a truly corrupt edits log, any
|
||||||
// sort of error might be thrown (NumberFormat, NullPointer, EOF, etc.)
|
// sort of error might be thrown (NumberFormat, NullPointer, EOF, etc.)
|
||||||
String errorMessage = formatEditLogReplayError(in, recentOpcodeOffsets);
|
String errorMessage = formatEditLogReplayError(in, recentOpcodeOffsets, txId);
|
||||||
FSImage.LOG.error(errorMessage);
|
FSImage.LOG.error(errorMessage);
|
||||||
throw new IOException(errorMessage, t);
|
throw new IOException(errorMessage, t);
|
||||||
}
|
}
|
||||||
|
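The FSEditLogLoader hunks above hoist txId out of the try block and compare each stored op id against expectedTxId = txId + 1, so the failing transaction id can be reported (it also feeds the new formatEditLogReplayError argument further down). A self-contained sketch of that bookkeeping over a plain list of stored ids:

import java.io.IOException;
import java.util.List;

class TxIdCheckSketch {
  /**
   * Walk ops whose stored ids must be strictly consecutive, starting at
   * expectedStartingTxId. Returns the last txid applied, or throws naming
   * the offending id, which is what the error-message change enables.
   */
  static long applyOps(List<Long> storedTxIds, long expectedStartingTxId) throws IOException {
    long txId = expectedStartingTxId - 1;
    for (long storedTxId : storedTxIds) {
      long expectedTxId = txId + 1;
      txId = storedTxId;
      if (txId != expectedTxId) {
        throw new IOException("Expected transaction ID " + expectedTxId + " but got " + txId);
      }
      // apply the op here ...
    }
    return txId;
  }

  public static void main(String[] args) throws IOException {
    System.out.println(applyOps(List.of(5L, 6L, 7L), 5));  // prints 7
  }
}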
@ -265,12 +265,22 @@ public class FSEditLogLoader {
|
||||||
updateBlocks(fsDir, addCloseOp, oldFile);
|
updateBlocks(fsDir, addCloseOp, oldFile);
|
||||||
|
|
||||||
// Now close the file
|
// Now close the file
|
||||||
INodeFileUnderConstruction ucFile = (INodeFileUnderConstruction) oldFile;
|
if (!oldFile.isUnderConstruction() &&
|
||||||
|
logVersion <= LayoutVersion.BUGFIX_HDFS_2991_VERSION) {
|
||||||
|
// There was a bug (HDFS-2991) in hadoop < 0.23.1 where OP_CLOSE
|
||||||
|
// could show up twice in a row. But after that version, this
|
||||||
|
// should be fixed, so we should treat it as an error.
|
||||||
|
throw new IOException(
|
||||||
|
"File is not under construction: " + addCloseOp.path);
|
||||||
|
}
|
||||||
// One might expect that you could use removeLease(holder, path) here,
|
// One might expect that you could use removeLease(holder, path) here,
|
||||||
// but OP_CLOSE doesn't serialize the holder. So, remove by path.
|
// but OP_CLOSE doesn't serialize the holder. So, remove by path.
|
||||||
fsNamesys.leaseManager.removeLeaseWithPrefixPath(addCloseOp.path);
|
if (oldFile.isUnderConstruction()) {
|
||||||
INodeFile newFile = ucFile.convertToInodeFile();
|
INodeFileUnderConstruction ucFile = (INodeFileUnderConstruction) oldFile;
|
||||||
fsDir.replaceNode(addCloseOp.path, ucFile, newFile);
|
fsNamesys.leaseManager.removeLeaseWithPrefixPath(addCloseOp.path);
|
||||||
|
INodeFile newFile = ucFile.convertToInodeFile();
|
||||||
|
fsDir.replaceNode(addCloseOp.path, ucFile, newFile);
|
||||||
|
}
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
case OP_SET_REPLICATION: {
|
case OP_SET_REPLICATION: {
|
||||||
|
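The OP_CLOSE hunk above only tolerates a close for a file that is no longer under construction when the log was written by a release that still had the HDFS-2991 duplicate-OP_CLOSE bug; for post-fix layout versions it is treated as corruption. A small sketch of that decision, with the layout-version comparison reduced to two integers (HDFS layout versions are negative, with newer releases more negative; the constant below is an illustrative value, not the real one):

import java.io.IOException;

class DuplicateCloseCheckSketch {
  // Illustrative stand-in for LayoutVersion.BUGFIX_HDFS_2991_VERSION.
  static final int BUGFIX_HDFS_2991_VERSION = -40;

  /**
   * Decide whether a close op for a file that is not under construction is
   * tolerable (old, buggy log) or a sign of corruption (post-fix log).
   */
  static void checkClose(boolean underConstruction, int logVersion, String path)
      throws IOException {
    if (!underConstruction && logVersion <= BUGFIX_HDFS_2991_VERSION) {
      throw new IOException("File is not under construction: " + path);
    }
    // Otherwise either the file really is under construction, or the log is
    // old enough that a duplicate OP_CLOSE is expected and silently tolerated.
  }
}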
@ -431,9 +441,10 @@ public class FSEditLogLoader {
|
||||||
}
|
}
|
||||||
|
|
||||||
private static String formatEditLogReplayError(EditLogInputStream in,
|
private static String formatEditLogReplayError(EditLogInputStream in,
|
||||||
long recentOpcodeOffsets[]) {
|
long recentOpcodeOffsets[], long txid) {
|
||||||
StringBuilder sb = new StringBuilder();
|
StringBuilder sb = new StringBuilder();
|
||||||
sb.append("Error replaying edit log at offset " + in.getPosition());
|
sb.append("Error replaying edit log at offset " + in.getPosition());
|
||||||
|
sb.append(" on transaction ID ").append(txid);
|
||||||
if (recentOpcodeOffsets[0] != -1) {
|
if (recentOpcodeOffsets[0] != -1) {
|
||||||
Arrays.sort(recentOpcodeOffsets);
|
Arrays.sort(recentOpcodeOffsets);
|
||||||
sb.append("\nRecent opcode offsets:");
|
sb.append("\nRecent opcode offsets:");
|
||||||
|
|
|
@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.namenode;
|
||||||
|
|
||||||
import java.util.zip.CheckedInputStream;
|
import java.util.zip.CheckedInputStream;
|
||||||
import java.util.zip.Checksum;
|
import java.util.zip.Checksum;
|
||||||
|
import java.util.Arrays;
|
||||||
import java.util.EnumMap;
|
import java.util.EnumMap;
|
||||||
|
|
||||||
import org.apache.hadoop.fs.ChecksumException;
|
import org.apache.hadoop.fs.ChecksumException;
|
||||||
|
@ -305,6 +306,36 @@ public abstract class FSEditLogOp {
|
||||||
}
|
}
|
||||||
return blocks;
|
return blocks;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
public String stringifyMembers() {
|
||||||
|
StringBuilder builder = new StringBuilder();
|
||||||
|
builder.append("[length=");
|
||||||
|
builder.append(length);
|
||||||
|
builder.append(", path=");
|
||||||
|
builder.append(path);
|
||||||
|
builder.append(", replication=");
|
||||||
|
builder.append(replication);
|
||||||
|
builder.append(", mtime=");
|
||||||
|
builder.append(mtime);
|
||||||
|
builder.append(", atime=");
|
||||||
|
builder.append(atime);
|
||||||
|
builder.append(", blockSize=");
|
||||||
|
builder.append(blockSize);
|
||||||
|
builder.append(", blocks=");
|
||||||
|
builder.append(Arrays.toString(blocks));
|
||||||
|
builder.append(", permissions=");
|
||||||
|
builder.append(permissions);
|
||||||
|
builder.append(", clientName=");
|
||||||
|
builder.append(clientName);
|
||||||
|
builder.append(", clientMachine=");
|
||||||
|
builder.append(clientMachine);
|
||||||
|
builder.append(", opCode=");
|
||||||
|
builder.append(opCode);
|
||||||
|
builder.append(", txid=");
|
||||||
|
builder.append(txid);
|
||||||
|
builder.append("]");
|
||||||
|
return builder.toString();
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static class AddOp extends AddCloseOp {
|
static class AddOp extends AddCloseOp {
|
||||||
|
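The long run of hunks that follows adds a toString() to each edit-log op; the add/close ops share a stringifyMembers() helper and each subclass only prepends its own name. A condensed sketch of that shape (the field set is trimmed down; the real classes carry many more members):

class OpToStringSketch {
  /** Shared by AddOp and CloseOp in the real code. */
  abstract static class AddCloseOp {
    String path = "/tmp/f";
    long txid = 42;

    String stringifyMembers() {
      StringBuilder builder = new StringBuilder();
      builder.append("[path=").append(path)
             .append(", txid=").append(txid)
             .append("]");
      return builder.toString();
    }
  }

  static class AddOp extends AddCloseOp {
    @Override
    public String toString() {
      return "AddOp " + stringifyMembers();
    }
  }

  static class CloseOp extends AddCloseOp {
    @Override
    public String toString() {
      return "CloseOp " + stringifyMembers();
    }
  }

  public static void main(String[] args) {
    System.out.println(new AddOp());   // AddOp [path=/tmp/f, txid=42]
    System.out.println(new CloseOp()); // CloseOp [path=/tmp/f, txid=42]
  }
}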
@ -315,6 +346,14 @@ public abstract class FSEditLogOp {
|
||||||
static AddOp getInstance() {
|
static AddOp getInstance() {
|
||||||
return (AddOp)opInstances.get().get(OP_ADD);
|
return (AddOp)opInstances.get().get(OP_ADD);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public String toString() {
|
||||||
|
StringBuilder builder = new StringBuilder();
|
||||||
|
builder.append("AddOp ");
|
||||||
|
builder.append(stringifyMembers());
|
||||||
|
return builder.toString();
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static class CloseOp extends AddCloseOp {
|
static class CloseOp extends AddCloseOp {
|
||||||
|
@ -325,6 +364,14 @@ public abstract class FSEditLogOp {
|
||||||
static CloseOp getInstance() {
|
static CloseOp getInstance() {
|
||||||
return (CloseOp)opInstances.get().get(OP_CLOSE);
|
return (CloseOp)opInstances.get().get(OP_CLOSE);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public String toString() {
|
||||||
|
StringBuilder builder = new StringBuilder();
|
||||||
|
builder.append("CloseOp ");
|
||||||
|
builder.append(stringifyMembers());
|
||||||
|
return builder.toString();
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static class SetReplicationOp extends FSEditLogOp {
|
static class SetReplicationOp extends FSEditLogOp {
|
||||||
|
@ -366,6 +413,21 @@ public abstract class FSEditLogOp {
|
||||||
this.replication = readShort(in);
|
this.replication = readShort(in);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public String toString() {
|
||||||
|
StringBuilder builder = new StringBuilder();
|
||||||
|
builder.append("SetReplicationOp [path=");
|
||||||
|
builder.append(path);
|
||||||
|
builder.append(", replication=");
|
||||||
|
builder.append(replication);
|
||||||
|
builder.append(", opCode=");
|
||||||
|
builder.append(opCode);
|
||||||
|
builder.append(", txid=");
|
||||||
|
builder.append(txid);
|
||||||
|
builder.append("]");
|
||||||
|
return builder.toString();
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static class ConcatDeleteOp extends FSEditLogOp {
|
static class ConcatDeleteOp extends FSEditLogOp {
|
||||||
|
@ -440,6 +502,25 @@ public abstract class FSEditLogOp {
|
||||||
this.timestamp = readLong(in);
|
this.timestamp = readLong(in);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public String toString() {
|
||||||
|
StringBuilder builder = new StringBuilder();
|
||||||
|
builder.append("ConcatDeleteOp [length=");
|
||||||
|
builder.append(length);
|
||||||
|
builder.append(", trg=");
|
||||||
|
builder.append(trg);
|
||||||
|
builder.append(", srcs=");
|
||||||
|
builder.append(Arrays.toString(srcs));
|
||||||
|
builder.append(", timestamp=");
|
||||||
|
builder.append(timestamp);
|
||||||
|
builder.append(", opCode=");
|
||||||
|
builder.append(opCode);
|
||||||
|
builder.append(", txid=");
|
||||||
|
builder.append(txid);
|
||||||
|
builder.append("]");
|
||||||
|
return builder.toString();
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static class RenameOldOp extends FSEditLogOp {
|
static class RenameOldOp extends FSEditLogOp {
|
||||||
|
@ -497,6 +578,25 @@ public abstract class FSEditLogOp {
|
||||||
this.timestamp = readLong(in);
|
this.timestamp = readLong(in);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public String toString() {
|
||||||
|
StringBuilder builder = new StringBuilder();
|
||||||
|
builder.append("RenameOldOp [length=");
|
||||||
|
builder.append(length);
|
||||||
|
builder.append(", src=");
|
||||||
|
builder.append(src);
|
||||||
|
builder.append(", dst=");
|
||||||
|
builder.append(dst);
|
||||||
|
builder.append(", timestamp=");
|
||||||
|
builder.append(timestamp);
|
||||||
|
builder.append(", opCode=");
|
||||||
|
builder.append(opCode);
|
||||||
|
builder.append(", txid=");
|
||||||
|
builder.append(txid);
|
||||||
|
builder.append("]");
|
||||||
|
return builder.toString();
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static class DeleteOp extends FSEditLogOp {
|
static class DeleteOp extends FSEditLogOp {
|
||||||
|
@ -545,6 +645,23 @@ public abstract class FSEditLogOp {
|
||||||
this.timestamp = readLong(in);
|
this.timestamp = readLong(in);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public String toString() {
|
||||||
|
StringBuilder builder = new StringBuilder();
|
||||||
|
builder.append("DeleteOp [length=");
|
||||||
|
builder.append(length);
|
||||||
|
builder.append(", path=");
|
||||||
|
builder.append(path);
|
||||||
|
builder.append(", timestamp=");
|
||||||
|
builder.append(timestamp);
|
||||||
|
builder.append(", opCode=");
|
||||||
|
builder.append(opCode);
|
||||||
|
builder.append(", txid=");
|
||||||
|
builder.append(txid);
|
||||||
|
builder.append("]");
|
||||||
|
return builder.toString();
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static class MkdirOp extends FSEditLogOp {
|
static class MkdirOp extends FSEditLogOp {
|
||||||
|
@ -623,6 +740,25 @@ public abstract class FSEditLogOp {
|
||||||
this.permissions = null;
|
this.permissions = null;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public String toString() {
|
||||||
|
StringBuilder builder = new StringBuilder();
|
||||||
|
builder.append("MkdirOp [length=");
|
||||||
|
builder.append(length);
|
||||||
|
builder.append(", path=");
|
||||||
|
builder.append(path);
|
||||||
|
builder.append(", timestamp=");
|
||||||
|
builder.append(timestamp);
|
||||||
|
builder.append(", permissions=");
|
||||||
|
builder.append(permissions);
|
||||||
|
builder.append(", opCode=");
|
||||||
|
builder.append(opCode);
|
||||||
|
builder.append(", txid=");
|
||||||
|
builder.append(txid);
|
||||||
|
builder.append("]");
|
||||||
|
return builder.toString();
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static class SetGenstampOp extends FSEditLogOp {
|
static class SetGenstampOp extends FSEditLogOp {
|
||||||
|
@ -652,6 +788,19 @@ public abstract class FSEditLogOp {
|
||||||
throws IOException {
|
throws IOException {
|
||||||
this.genStamp = FSImageSerialization.readLong(in);
|
this.genStamp = FSImageSerialization.readLong(in);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public String toString() {
|
||||||
|
StringBuilder builder = new StringBuilder();
|
||||||
|
builder.append("SetGenstampOp [genStamp=");
|
||||||
|
builder.append(genStamp);
|
||||||
|
builder.append(", opCode=");
|
||||||
|
builder.append(opCode);
|
||||||
|
builder.append(", txid=");
|
||||||
|
builder.append(txid);
|
||||||
|
builder.append("]");
|
||||||
|
return builder.toString();
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@SuppressWarnings("deprecation")
|
@SuppressWarnings("deprecation")
|
||||||
|
@ -676,6 +825,17 @@ public abstract class FSEditLogOp {
|
||||||
//Datanodes are not persistent any more.
|
//Datanodes are not persistent any more.
|
||||||
FSImageSerialization.DatanodeImage.skipOne(in);
|
FSImageSerialization.DatanodeImage.skipOne(in);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public String toString() {
|
||||||
|
StringBuilder builder = new StringBuilder();
|
||||||
|
builder.append("DatanodeAddOp [opCode=");
|
||||||
|
builder.append(opCode);
|
||||||
|
builder.append(", txid=");
|
||||||
|
builder.append(txid);
|
||||||
|
builder.append("]");
|
||||||
|
return builder.toString();
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@SuppressWarnings("deprecation")
|
@SuppressWarnings("deprecation")
|
||||||
|
@ -701,6 +861,17 @@ public abstract class FSEditLogOp {
|
||||||
nodeID.readFields(in);
|
nodeID.readFields(in);
|
||||||
//Datanodes are not persistent any more.
|
//Datanodes are not persistent any more.
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public String toString() {
|
||||||
|
StringBuilder builder = new StringBuilder();
|
||||||
|
builder.append("DatanodeRemoveOp [opCode=");
|
||||||
|
builder.append(opCode);
|
||||||
|
builder.append(", txid=");
|
||||||
|
builder.append(txid);
|
||||||
|
builder.append("]");
|
||||||
|
return builder.toString();
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static class SetPermissionsOp extends FSEditLogOp {
|
static class SetPermissionsOp extends FSEditLogOp {
|
||||||
|
@@ -738,6 +909,21 @@ public abstract class FSEditLogOp {
       this.src = FSImageSerialization.readString(in);
       this.permissions = FsPermission.read(in);
     }
+
+    @Override
+    public String toString() {
+      StringBuilder builder = new StringBuilder();
+      builder.append("SetPermissionsOp [src=");
+      builder.append(src);
+      builder.append(", permissions=");
+      builder.append(permissions);
+      builder.append(", opCode=");
+      builder.append(opCode);
+      builder.append(", txid=");
+      builder.append(txid);
+      builder.append("]");
+      return builder.toString();
+    }
   }

   static class SetOwnerOp extends FSEditLogOp {
@@ -783,6 +969,23 @@ public abstract class FSEditLogOp {
       this.username = FSImageSerialization.readString_EmptyAsNull(in);
       this.groupname = FSImageSerialization.readString_EmptyAsNull(in);
     }
+
+    @Override
+    public String toString() {
+      StringBuilder builder = new StringBuilder();
+      builder.append("SetOwnerOp [src=");
+      builder.append(src);
+      builder.append(", username=");
+      builder.append(username);
+      builder.append(", groupname=");
+      builder.append(groupname);
+      builder.append(", opCode=");
+      builder.append(opCode);
+      builder.append(", txid=");
+      builder.append(txid);
+      builder.append("]");
+      return builder.toString();
+    }
   }

   static class SetNSQuotaOp extends FSEditLogOp {
@@ -809,6 +1012,21 @@ public abstract class FSEditLogOp {
       this.src = FSImageSerialization.readString(in);
       this.nsQuota = FSImageSerialization.readLong(in);
     }
+
+    @Override
+    public String toString() {
+      StringBuilder builder = new StringBuilder();
+      builder.append("SetNSQuotaOp [src=");
+      builder.append(src);
+      builder.append(", nsQuota=");
+      builder.append(nsQuota);
+      builder.append(", opCode=");
+      builder.append(opCode);
+      builder.append(", txid=");
+      builder.append(txid);
+      builder.append("]");
+      return builder.toString();
+    }
   }

   static class ClearNSQuotaOp extends FSEditLogOp {
@@ -833,6 +1051,19 @@ public abstract class FSEditLogOp {
         throws IOException {
       this.src = FSImageSerialization.readString(in);
     }
+
+    @Override
+    public String toString() {
+      StringBuilder builder = new StringBuilder();
+      builder.append("ClearNSQuotaOp [src=");
+      builder.append(src);
+      builder.append(", opCode=");
+      builder.append(opCode);
+      builder.append(", txid=");
+      builder.append(txid);
+      builder.append("]");
+      return builder.toString();
+    }
   }

   static class SetQuotaOp extends FSEditLogOp {
@@ -878,6 +1109,23 @@ public abstract class FSEditLogOp {
       this.nsQuota = FSImageSerialization.readLong(in);
       this.dsQuota = FSImageSerialization.readLong(in);
     }
+
+    @Override
+    public String toString() {
+      StringBuilder builder = new StringBuilder();
+      builder.append("SetQuotaOp [src=");
+      builder.append(src);
+      builder.append(", nsQuota=");
+      builder.append(nsQuota);
+      builder.append(", dsQuota=");
+      builder.append(dsQuota);
+      builder.append(", opCode=");
+      builder.append(opCode);
+      builder.append(", txid=");
+      builder.append(txid);
+      builder.append("]");
+      return builder.toString();
+    }
   }

   static class TimesOp extends FSEditLogOp {
@@ -936,6 +1184,25 @@ public abstract class FSEditLogOp {
         this.atime = readLong(in);
       }
     }
+
+    @Override
+    public String toString() {
+      StringBuilder builder = new StringBuilder();
+      builder.append("TimesOp [length=");
+      builder.append(length);
+      builder.append(", path=");
+      builder.append(path);
+      builder.append(", mtime=");
+      builder.append(mtime);
+      builder.append(", atime=");
+      builder.append(atime);
+      builder.append(", opCode=");
+      builder.append(opCode);
+      builder.append(", txid=");
+      builder.append(txid);
+      builder.append("]");
+      return builder.toString();
+    }
   }

   static class SymlinkOp extends FSEditLogOp {
@@ -1011,6 +1278,29 @@ public abstract class FSEditLogOp {
       }
       this.permissionStatus = PermissionStatus.read(in);
     }
+
+    @Override
+    public String toString() {
+      StringBuilder builder = new StringBuilder();
+      builder.append("SymlinkOp [length=");
+      builder.append(length);
+      builder.append(", path=");
+      builder.append(path);
+      builder.append(", value=");
+      builder.append(value);
+      builder.append(", mtime=");
+      builder.append(mtime);
+      builder.append(", atime=");
+      builder.append(atime);
+      builder.append(", permissionStatus=");
+      builder.append(permissionStatus);
+      builder.append(", opCode=");
+      builder.append(opCode);
+      builder.append(", txid=");
+      builder.append(txid);
+      builder.append("]");
+      return builder.toString();
+    }
   }

   static class RenameOp extends FSEditLogOp {
@@ -1097,6 +1387,27 @@ public abstract class FSEditLogOp {
       }
       return new BytesWritable(bytes);
     }
+
+    @Override
+    public String toString() {
+      StringBuilder builder = new StringBuilder();
+      builder.append("RenameOp [length=");
+      builder.append(length);
+      builder.append(", src=");
+      builder.append(src);
+      builder.append(", dst=");
+      builder.append(dst);
+      builder.append(", timestamp=");
+      builder.append(timestamp);
+      builder.append(", options=");
+      builder.append(Arrays.toString(options));
+      builder.append(", opCode=");
+      builder.append(opCode);
+      builder.append(", txid=");
+      builder.append(txid);
+      builder.append("]");
+      return builder.toString();
+    }
   }

   static class ReassignLeaseOp extends FSEditLogOp {
@@ -1142,6 +1453,23 @@ public abstract class FSEditLogOp {
       this.path = FSImageSerialization.readString(in);
       this.newHolder = FSImageSerialization.readString(in);
     }
+
+    @Override
+    public String toString() {
+      StringBuilder builder = new StringBuilder();
+      builder.append("ReassignLeaseOp [leaseHolder=");
+      builder.append(leaseHolder);
+      builder.append(", path=");
+      builder.append(path);
+      builder.append(", newHolder=");
+      builder.append(newHolder);
+      builder.append(", opCode=");
+      builder.append(opCode);
+      builder.append(", txid=");
+      builder.append(txid);
+      builder.append("]");
+      return builder.toString();
+    }
   }

   static class GetDelegationTokenOp extends FSEditLogOp {
@@ -1185,6 +1513,21 @@ public abstract class FSEditLogOp {
         this.expiryTime = readLong(in);
       }
     }
+
+    @Override
+    public String toString() {
+      StringBuilder builder = new StringBuilder();
+      builder.append("GetDelegationTokenOp [token=");
+      builder.append(token);
+      builder.append(", expiryTime=");
+      builder.append(expiryTime);
+      builder.append(", opCode=");
+      builder.append(opCode);
+      builder.append(", txid=");
+      builder.append(txid);
+      builder.append("]");
+      return builder.toString();
+    }
   }

   static class RenewDelegationTokenOp extends FSEditLogOp {
@@ -1228,6 +1571,21 @@ public abstract class FSEditLogOp {
         this.expiryTime = readLong(in);
       }
     }
+
+    @Override
+    public String toString() {
+      StringBuilder builder = new StringBuilder();
+      builder.append("RenewDelegationTokenOp [token=");
+      builder.append(token);
+      builder.append(", expiryTime=");
+      builder.append(expiryTime);
+      builder.append(", opCode=");
+      builder.append(opCode);
+      builder.append(", txid=");
+      builder.append(txid);
+      builder.append("]");
+      return builder.toString();
+    }
   }

   static class CancelDelegationTokenOp extends FSEditLogOp {
@@ -1259,6 +1617,19 @@ public abstract class FSEditLogOp {
       this.token = new DelegationTokenIdentifier();
       this.token.readFields(in);
     }
+
+    @Override
+    public String toString() {
+      StringBuilder builder = new StringBuilder();
+      builder.append("CancelDelegationTokenOp [token=");
+      builder.append(token);
+      builder.append(", opCode=");
+      builder.append(opCode);
+      builder.append(", txid=");
+      builder.append(txid);
+      builder.append("]");
+      return builder.toString();
+    }
   }

   static class UpdateMasterKeyOp extends FSEditLogOp {
@@ -1289,6 +1660,19 @@ public abstract class FSEditLogOp {
       this.key = new DelegationKey();
       this.key.readFields(in);
     }
+
+    @Override
+    public String toString() {
+      StringBuilder builder = new StringBuilder();
+      builder.append("UpdateMasterKeyOp [key=");
+      builder.append(key);
+      builder.append(", opCode=");
+      builder.append(opCode);
+      builder.append(", txid=");
+      builder.append(txid);
+      builder.append("]");
+      return builder.toString();
+    }
   }

   static class LogSegmentOp extends FSEditLogOp {
@@ -1311,6 +1695,17 @@ public abstract class FSEditLogOp {
     void writeFields(DataOutputStream out) throws IOException {
       // no data stored
     }
+
+    @Override
+    public String toString() {
+      StringBuilder builder = new StringBuilder();
+      builder.append("LogSegmentOp [opCode=");
+      builder.append(opCode);
+      builder.append(", txid=");
+      builder.append(txid);
+      builder.append("]");
+      return builder.toString();
+    }
   }

   static class InvalidOp extends FSEditLogOp {
@@ -1331,6 +1726,17 @@ public abstract class FSEditLogOp {
         throws IOException {
       // nothing to read
     }
+
+    @Override
+    public String toString() {
+      StringBuilder builder = new StringBuilder();
+      builder.append("InvalidOp [opCode=");
+      builder.append(opCode);
+      builder.append(", txid=");
+      builder.append(txid);
+      builder.append("]");
+      return builder.toString();
+    }
   }

   static private short readShort(DataInputStream in) throws IOException {

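The toString() overrides added above all follow the same StringBuilder pattern: each field name and value, then opCode and txid, wrapped in brackets. As a minimal standalone sketch of that pattern and its output (ExampleOp is hypothetical and not part of this patch):

    // Sketch only: mirrors the StringBuilder-based toString() pattern used above.
    public class ExampleOp {
      private final String src = "/user/alice/file";
      private final long txid = 42L;

      @Override
      public String toString() {
        StringBuilder builder = new StringBuilder();
        builder.append("ExampleOp [src=");
        builder.append(src);
        builder.append(", txid=");
        builder.append(txid);
        builder.append("]");
        return builder.toString();
      }

      public static void main(String[] args) {
        // Prints: ExampleOp [src=/user/alice/file, txid=42]
        System.out.println(new ExampleOp());
      }
    }
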
@@ -1639,6 +1639,9 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
             "Unable to add file to namespace.");
       }
       leaseManager.addLease(newNode.getClientName(), src);
+
+      // record file record in log, record new generation stamp
+      getEditLog().logOpenFile(src, newNode);
       if (NameNode.stateChangeLog.isDebugEnabled()) {
         NameNode.stateChangeLog.debug("DIR* NameSystem.startFile: "
                                       +"add "+src+" to namespace for "+holder);
@@ -1684,11 +1687,11 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     dir.replaceNode(src, node, cons);
     leaseManager.addLease(cons.getClientName(), src);

+    LocatedBlock ret = blockManager.convertLastBlockToUnderConstruction(cons);
     if (writeToEditLog) {
       getEditLog().logOpenFile(src, cons);
     }
-    return blockManager.convertLastBlockToUnderConstruction(cons);
+    return ret;
   }

   /**

@@ -17,6 +17,10 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;

+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HANDLER_COUNT_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HANDLER_COUNT_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_HANDLER_COUNT_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_HANDLER_COUNT_KEY;
 import static org.apache.hadoop.hdfs.protocol.HdfsConstants.MAX_PATH_DEPTH;
 import static org.apache.hadoop.hdfs.protocol.HdfsConstants.MAX_PATH_LENGTH;

@@ -37,8 +41,6 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
-
 import org.apache.hadoop.ha.HAServiceProtocol;
 import org.apache.hadoop.ha.HealthCheckFailedException;
 import org.apache.hadoop.ha.ServiceFailedException;
@@ -50,44 +52,43 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HDFSPolicyProvider;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
-import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
 import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ClientNamenodeProtocol;
-import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.NamenodeProtocolService;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeProtocolService;
 import org.apache.hadoop.hdfs.protocol.proto.GetUserMappingsProtocolProtos.GetUserMappingsProtocolService;
+import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.NamenodeProtocolService;
 import org.apache.hadoop.hdfs.protocol.proto.RefreshAuthorizationPolicyProtocolProtos.RefreshAuthorizationPolicyProtocolService;
 import org.apache.hadoop.hdfs.protocol.proto.RefreshUserMappingsProtocolProtos.RefreshUserMappingsProtocolService;
+import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB;
+import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB;
 import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolPB;
 import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolServerSideTranslatorPB;
 import org.apache.hadoop.hdfs.protocolPB.GetUserMappingsProtocolPB;
 import org.apache.hadoop.hdfs.protocolPB.GetUserMappingsProtocolServerSideTranslatorPB;
 import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolPB;
 import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolServerSideTranslatorPB;
-import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB;
-import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB;
 import org.apache.hadoop.hdfs.protocolPB.RefreshAuthorizationPolicyProtocolPB;
 import org.apache.hadoop.hdfs.protocolPB.RefreshAuthorizationPolicyProtocolServerSideTranslatorPB;
 import org.apache.hadoop.hdfs.protocolPB.RefreshUserMappingsProtocolPB;
 import org.apache.hadoop.hdfs.protocolPB.RefreshUserMappingsProtocolServerSideTranslatorPB;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
 import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
 import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
@@ -99,7 +100,6 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand;
 import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
-import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
@@ -112,7 +112,6 @@ import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
-import org.apache.hadoop.ipc.ProtocolSignature;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
 import org.apache.hadoop.ipc.Server;
@@ -120,14 +119,11 @@ import org.apache.hadoop.ipc.WritableRpcEngine;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.Groups;
-import org.apache.hadoop.security.RefreshUserMappingsProtocol;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.AuthorizationException;
 import org.apache.hadoop.security.authorize.ProxyUsers;
-import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
-import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
-import org.apache.hadoop.tools.GetUserMappingsProtocol;
+import org.apache.hadoop.security.token.Token;

 import com.google.protobuf.BlockingService;
@@ -299,36 +295,6 @@ class NameNodeRpcServer implements NamenodeProtocols {
   InetSocketAddress getRpcAddress() {
     return clientRpcAddress;
   }

-  @Override // VersionedProtocol
-  public ProtocolSignature getProtocolSignature(String protocol,
-      long clientVersion, int clientMethodsHash) throws IOException {
-    return ProtocolSignature.getProtocolSignature(
-        this, protocol, clientVersion, clientMethodsHash);
-  }
-
-  @Override
-  public long getProtocolVersion(String protocol,
-      long clientVersion) throws IOException {
-    if (protocol.equals(ClientProtocol.class.getName())) {
-      throw new IOException("Old Namenode Client protocol is not supported:" +
-          protocol + "Switch your clientside to " + ClientNamenodeProtocol.class);
-    } else if (protocol.equals(DatanodeProtocol.class.getName())){
-      return DatanodeProtocol.versionID;
-    } else if (protocol.equals(NamenodeProtocol.class.getName())){
-      return NamenodeProtocol.versionID;
-    } else if (protocol.equals(RefreshAuthorizationPolicyProtocol.class.getName())){
-      return RefreshAuthorizationPolicyProtocol.versionID;
-    } else if (protocol.equals(RefreshUserMappingsProtocol.class.getName())){
-      return RefreshUserMappingsProtocol.versionID;
-    } else if (protocol.equals(GetUserMappingsProtocol.class.getName())){
-      return GetUserMappingsProtocol.versionID;
-    } else if (protocol.equals(HAServiceProtocol.class.getName())) {
-      return HAServiceProtocol.versionID;
-    } else {
-      throw new IOException("Unknown protocol to name node: " + protocol);
-    }
-  }
-
   /////////////////////////////////////////////////////
   // NamenodeProtocol

@@ -25,7 +25,6 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.ipc.VersionedProtocol;
 import org.apache.hadoop.security.KerberosInfo;

 /**********************************************************************
@@ -40,7 +39,7 @@ import org.apache.hadoop.security.KerberosInfo;
     serverPrincipal = DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY,
     clientPrincipal = DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY)
 @InterfaceAudience.Private
-public interface DatanodeProtocol extends VersionedProtocol {
+public interface DatanodeProtocol {
   /**
    * This class is used by both the Namenode (client) and BackupNode (server)
    * to insulate from the protocol serialization.

@@ -35,7 +35,7 @@ import org.apache.hadoop.security.KerberosInfo;
     serverPrincipal = DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY,
     clientPrincipal = DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY)
 @InterfaceAudience.Private
-public interface InterDatanodeProtocol extends VersionedProtocol {
+public interface InterDatanodeProtocol {
   public static final Log LOG = LogFactory.getLog(InterDatanodeProtocol.class);

   /**

@@ -32,7 +32,7 @@ import org.apache.hadoop.security.KerberosInfo;
     serverPrincipal = DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY,
     clientPrincipal = DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY)
 @InterfaceAudience.Private
-public interface JournalProtocol extends VersionedProtocol {
+public interface JournalProtocol {
   /**
    *
    * This class is used by both the Namenode (client) and BackupNode (server)

@@ -36,7 +36,7 @@ import org.apache.hadoop.security.KerberosInfo;
     serverPrincipal = DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY,
     clientPrincipal = DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY)
 @InterfaceAudience.Private
-public interface NamenodeProtocol extends VersionedProtocol {
+public interface NamenodeProtocol {
   /**
    * Until version 6L, this class served as both
    * the client interface to the NN AND the RPC protocol used to

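With VersionedProtocol removed from these interfaces, and getProtocolVersion()/getProtocolSignature() dropped from NameNodeRpcServer above, callers no longer ask the server for a version number; they reach each protocol through the *ServerSideTranslatorPB classes now imported, which unwrap generated protobuf request messages and delegate to the real implementation. The following is a generic, self-contained illustration of that translator shape only; none of these names (GreeterProtocol, GreetRequestProto, GreeterServerSideTranslator) are HDFS APIs:

    // Generic sketch of the server-side translator pattern; all names are invented.
    interface GreeterProtocol {                  // plays the role of NamenodeProtocol
      String greet(String name);
    }

    class GreetRequestProto {                    // stand-in for a generated protobuf message
      final String name;
      GreetRequestProto(String name) { this.name = name; }
    }

    class GreeterServerSideTranslator {          // plays the role of a *ServerSideTranslatorPB
      private final GreeterProtocol impl;
      GreeterServerSideTranslator(GreeterProtocol impl) { this.impl = impl; }

      String greet(GreetRequestProto req) {
        // unwrap the request message, call the implementation, return the result
        return impl.greet(req.name);
      }
    }

    public class TranslatorSketch {
      public static void main(String[] args) {
        GreeterServerSideTranslator t =
            new GreeterServerSideTranslator(name -> "Hello, " + name);
        System.out.println(t.greet(new GreetRequestProto("HDFS")));  // Hello, HDFS
      }
    }
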
@@ -0,0 +1,176 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs;
+
+import static org.junit.Assert.assertEquals;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.EnumMap;
+import java.util.Random;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLog;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes;
+import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
+import org.apache.hadoop.hdfs.server.namenode.NNStorage;
+import org.apache.hadoop.hdfs.util.Holder;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.Test;
+
+/**
+ * Unit test to make sure that Append properly logs the right
+ * things to the edit log, such that files aren't lost or truncated
+ * on restart.
+ */
+public class TestFileAppendRestart {
+  private static final int BLOCK_SIZE = 4096;
+  private static final String HADOOP_23_BROKEN_APPEND_TGZ =
+      "image-with-buggy-append.tgz";
+
+  private void writeAndAppend(FileSystem fs, Path p,
+      int lengthForCreate, int lengthForAppend) throws IOException {
+    // Creating a file with 4096 blockSize to write multiple blocks
+    FSDataOutputStream stream = fs.create(
+        p, true, BLOCK_SIZE, (short) 1, BLOCK_SIZE);
+    try {
+      AppendTestUtil.write(stream, 0, lengthForCreate);
+      stream.close();
+
+      stream = fs.append(p);
+      AppendTestUtil.write(stream, lengthForCreate, lengthForAppend);
+      stream.close();
+    } finally {
+      IOUtils.closeStream(stream);
+    }
+
+    int totalLength = lengthForCreate + lengthForAppend;
+    assertEquals(totalLength, fs.getFileStatus(p).getLen());
+  }
+
+  /**
+   * Regression test for HDFS-2991. Creates and appends to files
+   * where blocks start/end on block boundaries.
+   */
+  @Test
+  public void testAppendRestart() throws Exception {
+    final Configuration conf = new HdfsConfiguration();
+    // Turn off persistent IPC, so that the DFSClient can survive NN restart
+    conf.setInt(
+        CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
+        0);
+    MiniDFSCluster cluster = null;
+
+    FSDataOutputStream stream = null;
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+      FileSystem fs = cluster.getFileSystem();
+      File editLog =
+          new File(FSImageTestUtil.getNameNodeCurrentDirs(cluster, 0).get(0),
+              NNStorage.getInProgressEditsFileName(1));
+      EnumMap<FSEditLogOpCodes, Holder<Integer>> counts;
+
+      Path p1 = new Path("/block-boundaries");
+      writeAndAppend(fs, p1, BLOCK_SIZE, BLOCK_SIZE);
+
+      counts = FSImageTestUtil.countEditLogOpTypes(editLog);
+      // OP_ADD to create file
+      // OP_ADD for first block
+      // OP_CLOSE to close file
+      // OP_ADD to reopen file
+      // OP_ADD for second block
+      // OP_CLOSE to close file
+      assertEquals(4, (int)counts.get(FSEditLogOpCodes.OP_ADD).held);
+      assertEquals(2, (int)counts.get(FSEditLogOpCodes.OP_CLOSE).held);
+
+      Path p2 = new Path("/not-block-boundaries");
+      writeAndAppend(fs, p2, BLOCK_SIZE/2, BLOCK_SIZE);
+      counts = FSImageTestUtil.countEditLogOpTypes(editLog);
+      // OP_ADD to create file
+      // OP_ADD for first block
+      // OP_CLOSE to close file
+      // OP_ADD to re-establish the lease
+      // OP_ADD from the updatePipeline call (increments genstamp of last block)
+      // OP_ADD at the start of the second block
+      // OP_CLOSE to close file
+      // Total: 5 OP_ADDs and 2 OP_CLOSEs in addition to the ones above
+      assertEquals(9, (int)counts.get(FSEditLogOpCodes.OP_ADD).held);
+      assertEquals(4, (int)counts.get(FSEditLogOpCodes.OP_CLOSE).held);
+
+      cluster.restartNameNode();
+
+      AppendTestUtil.check(fs, p1, 2*BLOCK_SIZE);
+      AppendTestUtil.check(fs, p2, 3*BLOCK_SIZE/2);
+    } finally {
+      IOUtils.closeStream(stream);
+      if (cluster != null) { cluster.shutdown(); }
+    }
+  }
+
+  /**
+   * Earlier versions of HDFS had a bug (HDFS-2991) which caused
+   * append(), when called exactly at a block boundary,
+   * to not log an OP_ADD. This ensures that we can read from
+   * such buggy versions correctly, by loading an image created
+   * using a namesystem image created with 0.23.1-rc2 exhibiting
+   * the issue.
+   */
+  @Test
+  public void testLoadLogsFromBuggyEarlierVersions() throws IOException {
+    final Configuration conf = new HdfsConfiguration();
+
+    String tarFile = System.getProperty("test.cache.data", "build/test/cache")
+      + "/" + HADOOP_23_BROKEN_APPEND_TGZ;
+    String testDir = System.getProperty("test.build.data", "build/test/data");
+    File dfsDir = new File(testDir, "image-with-buggy-append");
+    if (dfsDir.exists() && !FileUtil.fullyDelete(dfsDir)) {
+      throw new IOException("Could not delete dfs directory '" + dfsDir + "'");
+    }
+    FileUtil.unTar(new File(tarFile), new File(testDir));
+
+    File nameDir = new File(dfsDir, "name");
+    GenericTestUtils.assertExists(nameDir);
+
+    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameDir.getAbsolutePath());
+
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
+        .format(false)
+        .manageDataDfsDirs(false)
+        .manageNameDfsDirs(false)
+        .numDataNodes(0)
+        .waitSafeMode(false)
+        .startupOption(StartupOption.UPGRADE)
+        .build();
+    try {
+      FileSystem fs = cluster.getFileSystem();
+      Path testPath = new Path("/tmp/io_data/test_io_0");
+      assertEquals(2*1024*1024, fs.getFileStatus(testPath).getLen());
+    } finally {
+      cluster.shutdown();
+    }
+  }
+}

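A note on the assertions in testAppendRestart above: both files are written while the same in-progress edit log is open, so the counts are cumulative. The block-boundary file accounts for 4 OP_ADDs and 2 OP_CLOSEs; the non-boundary file then adds the 5 OP_ADDs and 2 OP_CLOSEs listed in its comments, giving the asserted totals of 4 + 5 = 9 and 2 + 2 = 4.
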
@@ -18,39 +18,31 @@

 package org.apache.hadoop.hdfs.security;

-import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION;
-import static org.mockito.Matchers.anyInt;
-import static org.mockito.Matchers.anyLong;
-import static org.mockito.Matchers.anyString;
 import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.when;

 import java.net.InetSocketAddress;
 import java.security.PrivilegedExceptionAction;

-import org.apache.commons.logging.*;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;

 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.Text;

-import org.apache.hadoop.ipc.Client;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.ipc.ProtocolSignature;
-import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.ipc.Client;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ipc.Server;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.SaslInputStream;
 import org.apache.hadoop.security.SaslRpcClient;
 import org.apache.hadoop.security.SaslRpcServer;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
 import org.apache.log4j.Level;
 import org.junit.Test;

@@ -80,12 +72,6 @@ public class TestClientProtocolWithDelegationToken {
   public void testDelegationTokenRpc() throws Exception {
     ClientProtocol mockNN = mock(ClientProtocol.class);
     FSNamesystem mockNameSys = mock(FSNamesystem.class);
-    when(mockNN.getProtocolVersion(anyString(), anyLong())).thenReturn(
-        ClientProtocol.versionID);
-    doReturn(ProtocolSignature.getProtocolSignature(
-        mockNN, ClientProtocol.class.getName(),
-        ClientProtocol.versionID, 0))
-        .when(mockNN).getProtocolSignature(anyString(), anyLong(), anyInt());
-
     DelegationTokenSecretManager sm = new DelegationTokenSecretManager(
         DFSConfigKeys.DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_DEFAULT,

@@ -23,13 +23,8 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.anyInt;
-import static org.mockito.Matchers.anyLong;
-import static org.mockito.Matchers.anyString;
 import static org.mockito.Mockito.doAnswer;
-import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;

 import java.io.ByteArrayInputStream;
 import java.io.DataInputStream;
@@ -51,12 +46,12 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ClientDatanodeProtocolService;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto;
@@ -65,7 +60,6 @@ import org.apache.hadoop.hdfs.protocolPB.PBHelper;
 import org.apache.hadoop.io.TestWritable;
 import org.apache.hadoop.ipc.Client;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
-import org.apache.hadoop.ipc.ProtocolSignature;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.net.NetUtils;
@@ -222,13 +216,6 @@ public class TestBlockToken {
   private Server createMockDatanode(BlockTokenSecretManager sm,
       Token<BlockTokenIdentifier> token) throws IOException, ServiceException {
     ClientDatanodeProtocolPB mockDN = mock(ClientDatanodeProtocolPB.class);
-    when(mockDN.getProtocolVersion(anyString(), anyLong())).thenReturn(
-        RPC.getProtocolVersion(ClientDatanodeProtocolPB.class));
-    doReturn(
-        ProtocolSignature.getProtocolSignature(mockDN,
-            ClientDatanodeProtocolPB.class.getName(),
-            RPC.getProtocolVersion(ClientDatanodeProtocolPB.class), 0)).when(
-        mockDN).getProtocolSignature(anyString(), anyLong(), anyInt());

     BlockTokenIdentifier id = sm.createIdentifier();
     id.readFields(new DataInputStream(new ByteArrayInputStream(token

@@ -26,6 +26,7 @@ import java.net.URI;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
+import java.util.EnumMap;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -44,6 +45,7 @@ import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile;
 import org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
+import org.apache.hadoop.hdfs.util.Holder;
 import org.apache.hadoop.hdfs.util.MD5FileUtils;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.conf.Configuration;
@@ -195,6 +197,7 @@ public abstract class FSImageTestUtil {
     return editLog;
   }

+
   /**
    * Create an aborted in-progress log in the given directory, containing
    * only a specified number of "mkdirs" operations.
@@ -216,6 +219,35 @@
     editLog.abortCurrentLogSegment();
   }

+  /**
+   * @param editLog a path of an edit log file
+   * @return the count of each type of operation in the log file
+   * @throws Exception if there is an error reading it
+   */
+  public static EnumMap<FSEditLogOpCodes,Holder<Integer>> countEditLogOpTypes(
+      File editLog) throws Exception {
+    EnumMap<FSEditLogOpCodes, Holder<Integer>> opCounts =
+        new EnumMap<FSEditLogOpCodes, Holder<Integer>>(FSEditLogOpCodes.class);
+
+    EditLogInputStream elis = new EditLogFileInputStream(editLog);
+    try {
+      FSEditLogOp op;
+      while ((op = elis.readOp()) != null) {
+        Holder<Integer> i = opCounts.get(op.opCode);
+        if (i == null) {
+          i = new Holder<Integer>(0);
+          opCounts.put(op.opCode, i);
+        }
+        i.held++;
+      }
+    } finally {
+      IOUtils.closeStream(elis);
+    }
+
+    return opCounts;
+  }
+
+
   /**
    * Assert that all of the given directories have the same newest filename
    * for fsimage that they hold the same data.

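A hedged usage sketch of the countEditLogOpTypes() helper added above. Only the signature and the public Holder.held field visible in the hunk are assumed; the edits-file path below is made up and must point at a real edit log for the program to report anything useful:

    // Usage sketch only; the path is hypothetical.
    import java.io.File;
    import java.util.EnumMap;

    import org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes;
    import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
    import org.apache.hadoop.hdfs.util.Holder;

    public class CountOpsExample {
      public static void main(String[] args) throws Exception {
        File editLog = new File("/tmp/name/current/edits_inprogress_0000000000000000001");
        EnumMap<FSEditLogOpCodes, Holder<Integer>> counts =
            FSImageTestUtil.countEditLogOpTypes(editLog);
        Holder<Integer> adds = counts.get(FSEditLogOpCodes.OP_ADD);
        // Holder is a simple mutable box; held carries the count.
        System.out.println("OP_ADD ops: " + (adds == null ? 0 : adds.held));
      }
    }
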
@@ -50,6 +50,7 @@ import org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage;
+import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.log4j.Level;
@@ -818,6 +819,40 @@ public class TestEditLog extends TestCase {
       log.close();
     }
   }

+  /**
+   * Regression test for HDFS-1112/HDFS-3020. Ensures that, even if
+   * logSync isn't called periodically, the edit log will sync itself.
+   */
+  public void testAutoSync() throws Exception {
+    File logDir = new File(TEST_DIR, "testAutoSync");
+    logDir.mkdirs();
+    FSEditLog log = FSImageTestUtil.createStandaloneEditLog(logDir);
+
+    String oneKB = StringUtils.byteToHexString(
+        new byte[500]);
+
+    try {
+      log.openForWrite();
+      NameNodeMetrics mockMetrics = Mockito.mock(NameNodeMetrics.class);
+      log.setMetricsForTests(mockMetrics);
+
+      for (int i = 0; i < 400; i++) {
+        log.logDelete(oneKB, 1L);
+      }
+      // After ~400KB, we're still within the 512KB buffer size
+      Mockito.verify(mockMetrics, Mockito.times(0)).addSync(Mockito.anyLong());
+
+      // After ~400KB more, we should have done an automatic sync
+      for (int i = 0; i < 400; i++) {
+        log.logDelete(oneKB, 1L);
+      }
+      Mockito.verify(mockMetrics, Mockito.times(1)).addSync(Mockito.anyLong());
+
+    } finally {
+      log.close();
+    }
+  }
+
   /**
    * Tests the getEditLogManifest function using mock storage for a number

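The arithmetic behind the comments in testAutoSync: byteToHexString(new byte[500]) produces a 1,000-character path, so each logDelete() adds roughly 1 KB of edit data. 400 such ops stay under the 512 KB buffer mentioned in the comment, so no sync is expected; the next 400 push well past it, so exactly one automatic sync should be recorded.
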
@@ -90,15 +90,17 @@ public class TestFSEditLogLoader {
     }
     rwf.close();

-    String expectedErrorMessage = "^Error replaying edit log at offset \\d+\n";
-    expectedErrorMessage += "Recent opcode offsets: (\\d+\\s*){4}$";
+    StringBuilder bld = new StringBuilder();
+    bld.append("^Error replaying edit log at offset \\d+");
+    bld.append(" on transaction ID \\d+\n");
+    bld.append("Recent opcode offsets: (\\d+\\s*){4}$");
     try {
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES)
           .format(false).build();
       fail("should not be able to start");
     } catch (IOException e) {
       assertTrue("error message contains opcodes message",
-          e.getMessage().matches(expectedErrorMessage));
+          e.getMessage().matches(bld.toString()));
     }
   }

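For reference, the updated pattern built above is meant to accept messages of the following shape; the offsets and transaction ID in this self-contained check are invented:

    public class ErrorMessagePatternExample {
      public static void main(String[] args) {
        String msg = "Error replaying edit log at offset 1234 on transaction ID 7\n"
            + "Recent opcode offsets: 8 24 40 56";
        String pattern = "^Error replaying edit log at offset \\d+ on transaction ID \\d+\n"
            + "Recent opcode offsets: (\\d+\\s*){4}$";
        System.out.println(msg.matches(pattern));  // prints: true
      }
    }
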
@@ -0,0 +1,97 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
+
+import junit.framework.Assert;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.security.TestUGIWithSecurityOn;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
+import org.junit.Assume;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestSecureNameNode {
+  final static private int NUM_OF_DATANODES = 0;
+
+  @Before
+  public void testKdcRunning() {
+    // Tests are skipped if KDC is not running
+    Assume.assumeTrue(TestUGIWithSecurityOn.isKdcRunning());
+  }
+
+  @Test
+  public void testName() throws IOException, InterruptedException {
+    MiniDFSCluster cluster = null;
+    try {
+      String keyTabDir = System.getProperty("kdc.resource.dir") + "/keytabs";
+      String nn1KeytabPath = keyTabDir + "/nn1.keytab";
+      String user1KeyTabPath = keyTabDir + "/user1.keytab";
+      Configuration conf = new HdfsConfiguration();
+      conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
+          "kerberos");
+      conf.set(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY,
+          "nn1/localhost@EXAMPLE.COM");
+      conf.set(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY, nn1KeytabPath);
+
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_OF_DATANODES)
+          .build();
+      final MiniDFSCluster clusterRef = cluster;
+      cluster.waitActive();
+      FileSystem fsForCurrentUser = cluster.getFileSystem();
+      fsForCurrentUser.mkdirs(new Path("/tmp"));
+      fsForCurrentUser.setPermission(new Path("/tmp"), new FsPermission(
+          (short) 511));
+
+      UserGroupInformation ugi = UserGroupInformation
+          .loginUserFromKeytabAndReturnUGI("user1@EXAMPLE.COM", user1KeyTabPath);
+      FileSystem fs = ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
+        @Override
+        public FileSystem run() throws Exception {
+          return clusterRef.getFileSystem();
+        }
+      });
+      try {
+        Path p = new Path("/users");
+        fs.mkdirs(p);
+        Assert.fail("user1 must not be allowed to write in /");
+      } catch (IOException expected) {
+      }
+
+      Path p = new Path("/tmp/alpha");
+      fs.mkdirs(p);
+      Assert.assertNotNull(fs.listStatus(p));
+      Assert.assertEquals(AuthenticationMethod.KERBEROS,
+          ugi.getAuthenticationMethod());
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+}
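The new test above relies on one pattern worth calling out: log a principal in from a keytab, then run the file-system calls inside doAs() so they carry that principal's Kerberos credentials. The fragment below isolates just that pattern as a minimal sketch; the principal name, keytab path, and directory are illustrative placeholders, not values mandated by the patch, and a working KDC plus a kerberized cluster are assumed.

import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;

public class KerberosClientSketch {
  public static void main(String[] args) throws Exception {
    final Configuration conf = new Configuration();
    // Assumed principal and keytab; substitute values from your own KDC setup.
    UserGroupInformation ugi = UserGroupInformation
        .loginUserFromKeytabAndReturnUGI("user1@EXAMPLE.COM", "/path/to/user1.keytab");
    // Every FileSystem call made inside run() is performed as the logged-in principal.
    FileSystem fs = ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
      @Override
      public FileSystem run() throws Exception {
        return FileSystem.get(conf);
      }
    });
    fs.mkdirs(new Path("/tmp/alpha"));
  }
}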
Binary file not shown.
@@ -14,15 +14,24 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
 
 [libdefaults]
-default_realm = APACHE.ORG
-udp_preference_limit = 1
-extra_addresses = 127.0.0.1
+default_realm = EXAMPLE.COM
+allow_weak_crypto = true
+default_tkt_enctypes = des-cbc-md5 des-cbc-crc des3-cbc-sha1
+default_tgs_enctypes = des-cbc-md5 des-cbc-crc des3-cbc-sha1
 
 [realms]
-APACHE.ORG = {
-admin_server = localhost:88
-kdc = localhost:88
-}
+EXAMPLE.COM = {
+kdc = localhost:60088
+}
 
 [domain_realm]
-localhost = APACHE.ORG
+.example.com = EXAMPLE.COM
+example.com = EXAMPLE.COM
+
+[login]
+krb4_convert = true
+krb4_get_tickets = false
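For a JVM to pick up the rewritten test realm above (EXAMPLE.COM, KDC on localhost:60088), the Kerberos login modules have to be pointed at this file. A minimal sketch, assuming the config has been written somewhere on disk; the path is a placeholder, and the patch itself does not set this property here:

public class Krb5ConfSketch {
  public static void main(String[] args) {
    // Standard JDK system property naming the krb5 configuration file to read.
    System.setProperty("java.security.krb5.conf", "/path/to/krb5.conf");
    System.out.println(System.getProperty("java.security.krb5.conf"));
  }
}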
@@ -110,6 +110,11 @@ Release 0.23.3 - UNRELEASED
     MAPREDUCE-2942. TestNMAuditLogger.testNMAuditLoggerWithIP failing (Thomas
     Graves via mahadev)
 
+    MAPREDUCE-3933. Failures because MALLOC_ARENA_MAX is not set (ahmed via tucu)
+
+    MAPREDUCE-3728. ShuffleHandler can't access results when configured in a
+    secure mode (ahmed via tucu)
+
 Release 0.23.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -197,9 +202,33 @@ Release 0.23.2 - UNRELEASED
     MAPREDUCE-3922. Fixed build to not compile 32bit container-executor binary
     by default on all platforms. (Hitesh Shah via vinodkv)
 
-    MAPREDUCE-3790 Broken pipe on streaming job can lead to truncated output for
+    MAPREDUCE-3790. Broken pipe on streaming job can lead to truncated output for
     a successful job (Jason Lowe via bobby)
 
+    MAPREDUCE-3816. capacity scheduler web ui bar graphs for used capacity wrong
+    (tgraves via bobby)
+
+    MAPREDUCE-3930. Fixed an NPE while accessing the AM page/webservice for a
+    task attempt without an assigned container. (Robert Joseph Evans via
+    sseth)
+
+    MAPREDUCE-3931. Changed PB implementation of LocalResource to take locks
+    so that race conditions don't fail tasks by inadvertantly changing the
+    timestamps. (Siddarth Seth via vinodkv)
+
+    MAPREDUCE-3687. If AM dies before it returns new tracking URL, proxy
+    redirects to http://N/A/ and doesn't return error code (Ravi Prakash via
+    bobby)
+
+    MAPREDUCE-3920. Revise yarn default port number selection
+    (Dave Thompson via tgraves)
+
+    MAPREDUCE-3903. Add support for mapreduce admin users. (Thomas Graves via
+    sseth)
+
+    MAPREDUCE-3706. Fix circular redirect error in job-attempts page. (bobby
+    via acmurthy)
+
 Release 0.23.1 - 2012-02-17
 
   INCOMPATIBLE CHANGES
@@ -141,7 +141,7 @@ public class LocalContainerAllocator extends RMCommunicator
       nodeId.setPort(1234);
       container.setNodeId(nodeId);
       container.setContainerToken(null);
-      container.setNodeHttpAddress("localhost:9999");
+      container.setNodeHttpAddress("localhost:8042");
       // send the container-assigned event to task attempt
 
       if (event.getAttemptID().getTaskId().getTaskType() == TaskType.MAP) {
@@ -104,7 +104,7 @@ public class MRApp extends MRAppMaster {
 
   public static String NM_HOST = "localhost";
   public static int NM_PORT = 1234;
-  public static int NM_HTTP_PORT = 9999;
+  public static int NM_HTTP_PORT = 8042;
 
   private static final RecordFactory recordFactory =
     RecordFactoryProvider.getRecordFactory(null);
@@ -155,7 +155,7 @@ public class MRAppBenchmark {
           nodeId.setPort(1234);
           container.setNodeId(nodeId);
           container.setContainerToken(null);
-          container.setNodeHttpAddress("localhost:9999");
+          container.setNodeHttpAddress("localhost:8042");
           getContext().getEventHandler()
               .handle(
               new TaskAttemptContainerAssignedEvent(event
@@ -98,7 +98,7 @@ public class MockJobs extends MockApps {
 
   public static final String NM_HOST = "localhost";
   public static final int NM_PORT = 1234;
-  public static final int NM_HTTP_PORT = 9999;
+  public static final int NM_HTTP_PORT = 8042;
 
   static final int DT = 1000000; // ms
 
@@ -284,7 +284,7 @@ public class MockJobs extends MockApps {
 
       @Override
       public String getNodeHttpAddress() {
-        return "localhost:9999";
+        return "localhost:8042";
       }
 
       @Override
@@ -20,6 +20,8 @@ package org.apache.hadoop.mapred;
 import java.util.HashMap;
 import java.util.Map;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.mapreduce.JobACL;
@@ -31,9 +33,12 @@ import org.apache.hadoop.security.authorize.AccessControlList;
 @InterfaceAudience.Private
 public class JobACLsManager {
 
+  static final Log LOG = LogFactory.getLog(JobACLsManager.class);
   Configuration conf;
+  private final AccessControlList adminAcl;
 
   public JobACLsManager(Configuration conf) {
+    adminAcl = new AccessControlList(conf.get(MRConfig.MR_ADMINS, " "));
     this.conf = conf;
   }
 
@@ -71,6 +76,18 @@ public class JobACLsManager {
     return acls;
   }
 
+  /**
+   * Is the calling user an admin for the mapreduce cluster
+   * i.e. member of mapreduce.cluster.administrators
+   * @return true, if user is an admin
+   */
+  boolean isMRAdmin(UserGroupInformation callerUGI) {
+    if (adminAcl.isUserAllowed(callerUGI)) {
+      return true;
+    }
+    return false;
+  }
+
   /**
    * If authorization is enabled, checks whether the user (in the callerUGI)
    * is authorized to perform the operation specified by 'jobOperation' on
@@ -89,13 +106,18 @@ public class JobACLsManager {
   public boolean checkAccess(UserGroupInformation callerUGI,
       JobACL jobOperation, String jobOwner, AccessControlList jobACL) {
 
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("checkAccess job acls, jobOwner: " + jobOwner + " jobacl: "
+          + jobOperation.toString() + " user: " + callerUGI.getShortUserName());
+    }
     String user = callerUGI.getShortUserName();
     if (!areACLsEnabled()) {
       return true;
    }
 
     // Allow Job-owner for any operation on the job
-    if (user.equals(jobOwner)
+    if (isMRAdmin(callerUGI)
+        || user.equals(jobOwner)
         || jobACL.isUserAllowed(callerUGI)) {
       return true;
     }
 
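The net effect of the JobACLsManager hunks above is one extra branch at the front of the access check: a member of mapreduce.cluster.administrators passes before the owner and per-job ACL tests are consulted, and everyone passes when ACLs are disabled. The following is a standalone sketch of that precedence only, not the Hadoop class itself; it uses plain string sets where the real code delegates to AccessControlList.

import java.util.Set;

public class AccessCheckSketch {
  static boolean checkAccess(String caller, Set<String> admins,
      String jobOwner, Set<String> jobAcl, boolean aclsEnabled) {
    if (!aclsEnabled) {
      return true;                      // ACLs off: everyone passes
    }
    return admins.contains(caller)      // cluster admin (new in this patch)
        || caller.equals(jobOwner)      // job owner
        || jobAcl.contains(caller);     // user listed in the job ACL
  }

  public static void main(String[] args) {
    // Admin wins even when the per-job ACL is empty.
    System.out.println(checkAccess("testuser2", Set.of("testuser2"),
        "testuser", Set.of(), true));
  }
}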
@@ -0,0 +1,142 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.mapred;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.JobACL;
+import org.apache.hadoop.mapreduce.MRConfig;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authorize.AccessControlList;
+import org.junit.Test;
+
+/**
+ * Test the job acls manager
+ */
+public class TestJobAclsManager {
+
+  @Test
+  public void testClusterAdmins() {
+    Map<JobACL, AccessControlList> tmpJobACLs = new HashMap<JobACL, AccessControlList>();
+    Configuration conf = new Configuration();
+    String jobOwner = "testuser";
+    conf.set(JobACL.VIEW_JOB.getAclName(), jobOwner);
+    conf.set(JobACL.MODIFY_JOB.getAclName(), jobOwner);
+    conf.setBoolean(MRConfig.MR_ACLS_ENABLED, true);
+    String clusterAdmin = "testuser2";
+    conf.set(MRConfig.MR_ADMINS, clusterAdmin);
+
+    JobACLsManager aclsManager = new JobACLsManager(conf);
+    tmpJobACLs = aclsManager.constructJobACLs(conf);
+    final Map<JobACL, AccessControlList> jobACLs = tmpJobACLs;
+
+    UserGroupInformation callerUGI = UserGroupInformation.createUserForTesting(
+        clusterAdmin, new String[] {});
+
+    // cluster admin should have access
+    boolean val = aclsManager.checkAccess(callerUGI, JobACL.VIEW_JOB, jobOwner,
+        jobACLs.get(JobACL.VIEW_JOB));
+    assertTrue("cluster admin should have view access", val);
+    val = aclsManager.checkAccess(callerUGI, JobACL.MODIFY_JOB, jobOwner,
+        jobACLs.get(JobACL.MODIFY_JOB));
+    assertTrue("cluster admin should have modify access", val);
+  }
+
+  @Test
+  public void testClusterNoAdmins() {
+    Map<JobACL, AccessControlList> tmpJobACLs = new HashMap<JobACL, AccessControlList>();
+    Configuration conf = new Configuration();
+    String jobOwner = "testuser";
+    conf.set(JobACL.VIEW_JOB.getAclName(), "");
+    conf.setBoolean(MRConfig.MR_ACLS_ENABLED, true);
+    String noAdminUser = "testuser2";
+
+    JobACLsManager aclsManager = new JobACLsManager(conf);
+    tmpJobACLs = aclsManager.constructJobACLs(conf);
+    final Map<JobACL, AccessControlList> jobACLs = tmpJobACLs;
+
+    UserGroupInformation callerUGI = UserGroupInformation.createUserForTesting(
+        noAdminUser, new String[] {});
+    // random user should not have access
+    boolean val = aclsManager.checkAccess(callerUGI, JobACL.VIEW_JOB, jobOwner,
+        jobACLs.get(JobACL.VIEW_JOB));
+    assertFalse("random user should not have view access", val);
+    val = aclsManager.checkAccess(callerUGI, JobACL.MODIFY_JOB, jobOwner,
+        jobACLs.get(JobACL.MODIFY_JOB));
+    assertFalse("random user should not have modify access", val);
+
+    callerUGI = UserGroupInformation.createUserForTesting(jobOwner,
+        new String[] {});
+    // Owner should have access
+    val = aclsManager.checkAccess(callerUGI, JobACL.VIEW_JOB, jobOwner,
+        jobACLs.get(JobACL.VIEW_JOB));
+    assertTrue("owner should have view access", val);
+    val = aclsManager.checkAccess(callerUGI, JobACL.MODIFY_JOB, jobOwner,
+        jobACLs.get(JobACL.MODIFY_JOB));
+    assertTrue("owner should have modify access", val);
+  }
+
+  @Test
+  public void testAclsOff() {
+    Map<JobACL, AccessControlList> tmpJobACLs = new HashMap<JobACL, AccessControlList>();
+    Configuration conf = new Configuration();
+    String jobOwner = "testuser";
+    conf.set(JobACL.VIEW_JOB.getAclName(), jobOwner);
+    conf.setBoolean(MRConfig.MR_ACLS_ENABLED, false);
+    String noAdminUser = "testuser2";
+
+    JobACLsManager aclsManager = new JobACLsManager(conf);
+    tmpJobACLs = aclsManager.constructJobACLs(conf);
+    final Map<JobACL, AccessControlList> jobACLs = tmpJobACLs;
+
+    UserGroupInformation callerUGI = UserGroupInformation.createUserForTesting(
+        noAdminUser, new String[] {});
+    // acls off so anyone should have access
+    boolean val = aclsManager.checkAccess(callerUGI, JobACL.VIEW_JOB, jobOwner,
+        jobACLs.get(JobACL.VIEW_JOB));
+    assertTrue("acls off so anyone should have access", val);
+  }
+
+  @Test
+  public void testGroups() {
+    Map<JobACL, AccessControlList> tmpJobACLs = new HashMap<JobACL, AccessControlList>();
+    Configuration conf = new Configuration();
+    String jobOwner = "testuser";
+    conf.set(JobACL.VIEW_JOB.getAclName(), jobOwner);
+    conf.setBoolean(MRConfig.MR_ACLS_ENABLED, true);
+    String user = "testuser2";
+    String adminGroup = "adminGroup";
+    conf.set(MRConfig.MR_ADMINS, " " + adminGroup);
+
+    JobACLsManager aclsManager = new JobACLsManager(conf);
+    tmpJobACLs = aclsManager.constructJobACLs(conf);
+    final Map<JobACL, AccessControlList> jobACLs = tmpJobACLs;
+
+    UserGroupInformation callerUGI = UserGroupInformation.createUserForTesting(
+        user, new String[] {adminGroup});
+    // acls off so anyone should have access
+    boolean val = aclsManager.checkAccess(callerUGI, JobACL.VIEW_JOB, jobOwner,
+        jobACLs.get(JobACL.VIEW_JOB));
+    assertTrue("user in admin group should have access", val);
+  }
+}
@@ -54,7 +54,7 @@ public class TestMaster {
     }
 
     // Change master address to a valid value
-    conf.set(MRConfig.MASTER_ADDRESS, "bar.com:9999");
+    conf.set(MRConfig.MASTER_ADDRESS, "bar.com:8042");
     masterHostname = Master.getMasterAddress(conf).getHostName();
     assertEquals(masterHostname, "bar.com");
 
@@ -192,7 +192,6 @@ public class HistoryClientService extends AbstractService {
         throw RPCUtil.getRemoteException("Unknown job " + jobID);
       }
       JobACL operation = JobACL.VIEW_JOB;
-      //TODO disable check access for now.
       checkAccess(job, operation);
       return job;
     }
@@ -324,9 +323,7 @@ public class HistoryClientService extends AbstractService {
 
     private void checkAccess(Job job, JobACL jobOperation)
         throws YarnRemoteException {
-      if (!UserGroupInformation.isSecurityEnabled()) {
-        return;
-      }
+
       UserGroupInformation callerUGI;
       try {
         callerUGI = UserGroupInformation.getCurrentUser();
@@ -92,7 +92,7 @@ public class TestYarnClientProtocolProvider extends TestCase {
     rmDTToken.setIdentifier(ByteBuffer.wrap(new byte[2]));
     rmDTToken.setKind("Testclusterkind");
     rmDTToken.setPassword(ByteBuffer.wrap("testcluster".getBytes()));
-    rmDTToken.setService("0.0.0.0:8040");
+    rmDTToken.setService("0.0.0.0:8032");
     getDTResponse.setRMDelegationToken(rmDTToken);
     ClientRMProtocol cRMProtocol = mock(ClientRMProtocol.class);
     when(cRMProtocol.getDelegationToken(any(
@@ -32,14 +32,14 @@ import org.apache.hadoop.yarn.util.ProtoUtils;
 
 
 
-public class LocalResourcePBImpl extends ProtoBase<LocalResourceProto> implements LocalResource {
+public class LocalResourcePBImpl extends ProtoBase<LocalResourceProto>
+    implements LocalResource {
   LocalResourceProto proto = LocalResourceProto.getDefaultInstance();
   LocalResourceProto.Builder builder = null;
   boolean viaProto = false;
 
   private URL url = null;
 
 
   public LocalResourcePBImpl() {
     builder = LocalResourceProto.newBuilder();
   }
@@ -48,60 +48,55 @@ public class LocalResourcePBImpl extends ProtoBase<LocalResourceProto> implement
     this.proto = proto;
     viaProto = true;
   }
 
-  public LocalResourceProto getProto() {
-    mergeLocalToProto();
+  public synchronized LocalResourceProto getProto() {
+    mergeLocalToBuilder();
     proto = viaProto ? proto : builder.build();
     viaProto = true;
     return proto;
   }
 
-  private void mergeLocalToBuilder() {
-    if (this.url != null) {
+  private synchronized void mergeLocalToBuilder() {
+    LocalResourceProtoOrBuilder l = viaProto ? proto : builder;
+    if (this.url != null
+        && !(l.getResource().equals(((URLPBImpl) url).getProto()))) {
+      maybeInitBuilder();
+      l = builder;
       builder.setResource(convertToProtoFormat(this.url));
     }
   }
 
-  private void mergeLocalToProto() {
-    if (viaProto)
-      maybeInitBuilder();
-    mergeLocalToBuilder();
-    proto = builder.build();
-    viaProto = true;
-  }
-
-  private void maybeInitBuilder() {
+  private synchronized void maybeInitBuilder() {
     if (viaProto || builder == null) {
       builder = LocalResourceProto.newBuilder(proto);
     }
     viaProto = false;
   }
 
 
   @Override
-  public long getSize() {
+  public synchronized long getSize() {
     LocalResourceProtoOrBuilder p = viaProto ? proto : builder;
     return (p.getSize());
   }
 
   @Override
-  public void setSize(long size) {
+  public synchronized void setSize(long size) {
     maybeInitBuilder();
     builder.setSize((size));
   }
   @Override
-  public long getTimestamp() {
+  public synchronized long getTimestamp() {
     LocalResourceProtoOrBuilder p = viaProto ? proto : builder;
     return (p.getTimestamp());
   }
 
   @Override
-  public void setTimestamp(long timestamp) {
+  public synchronized void setTimestamp(long timestamp) {
     maybeInitBuilder();
     builder.setTimestamp((timestamp));
   }
   @Override
-  public LocalResourceType getType() {
+  public synchronized LocalResourceType getType() {
     LocalResourceProtoOrBuilder p = viaProto ? proto : builder;
     if (!p.hasType()) {
       return null;
@@ -110,7 +105,7 @@ public class LocalResourcePBImpl extends ProtoBase<LocalResourceProto> implement
   }
 
   @Override
-  public void setType(LocalResourceType type) {
+  public synchronized void setType(LocalResourceType type) {
     maybeInitBuilder();
     if (type == null) {
       builder.clearType();
@@ -119,7 +114,7 @@ public class LocalResourcePBImpl extends ProtoBase<LocalResourceProto> implement
     builder.setType(convertToProtoFormat(type));
   }
   @Override
-  public URL getResource() {
+  public synchronized URL getResource() {
     LocalResourceProtoOrBuilder p = viaProto ? proto : builder;
     if (this.url != null) {
       return this.url;
@@ -132,14 +127,14 @@ public class LocalResourcePBImpl extends ProtoBase<LocalResourceProto> implement
   }
 
   @Override
-  public void setResource(URL resource) {
+  public synchronized void setResource(URL resource) {
     maybeInitBuilder();
     if (resource == null)
       builder.clearResource();
     this.url = resource;
   }
   @Override
-  public LocalResourceVisibility getVisibility() {
+  public synchronized LocalResourceVisibility getVisibility() {
     LocalResourceProtoOrBuilder p = viaProto ? proto : builder;
     if (!p.hasVisibility()) {
       return null;
@@ -148,7 +143,7 @@ public class LocalResourcePBImpl extends ProtoBase<LocalResourceProto> implement
   }
 
   @Override
-  public void setVisibility(LocalResourceVisibility visibility) {
+  public synchronized void setVisibility(LocalResourceVisibility visibility) {
     maybeInitBuilder();
     if (visibility == null) {
       builder.clearVisibility();
@@ -180,7 +175,4 @@ public class LocalResourcePBImpl extends ProtoBase<LocalResourceProto> implement
   private LocalResourceVisibility convertFromProtoFormat(LocalResourceVisibilityProto e) {
     return ProtoUtils.convertFromProtoFormat(e);
   }
-
-
-
 }
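The LocalResourcePBImpl hunks above apply one idiom throughout: every accessor becomes synchronized, reads go through a viaProto flag that picks either the frozen proto or the mutable builder, and writes call maybeInitBuilder() first so the builder is re-seeded from the current proto. The skeleton below restates that copy-on-write pattern with plain fields so the locking shape is easier to see; RecordProto and RecordPBImpl are hypothetical stand-ins, not YARN or protobuf types.

// Minimal stand-in for a generated protobuf message and its builder.
class RecordProto {
  final long size;
  RecordProto(long size) { this.size = size; }
  static class Builder {
    long size;
    Builder(RecordProto p) { this.size = p.size; }
    RecordProto build() { return new RecordProto(size); }
  }
}

class RecordPBImpl {
  private RecordProto proto = new RecordProto(0);
  private RecordProto.Builder builder = new RecordProto.Builder(proto);
  private boolean viaProto = false;

  // Freeze any pending builder edits into an immutable proto.
  public synchronized RecordProto getProto() {
    proto = viaProto ? proto : builder.build();
    viaProto = true;
    return proto;
  }

  // Reads consult whichever side is currently authoritative.
  public synchronized long getSize() {
    return viaProto ? proto.size : builder.size;
  }

  // Writes re-seed the builder from the frozen proto before mutating it.
  public synchronized void setSize(long size) {
    maybeInitBuilder();
    builder.size = size;
  }

  private synchronized void maybeInitBuilder() {
    if (viaProto || builder == null) {
      builder = new RecordProto.Builder(proto);
    }
    viaProto = false;
  }
}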
@@ -104,7 +104,6 @@
         <configuration>
           <environmentVariables>
             <JAVA_HOME>${java.home}</JAVA_HOME>
-            <MALLOC_ARENA_MAX>4</MALLOC_ARENA_MAX>
           </environmentVariables>
         </configuration>
       </plugin>
@@ -88,7 +88,7 @@ public class YarnConfiguration extends Configuration {
   /** The address of the applications manager interface in the RM.*/
   public static final String RM_ADDRESS =
     RM_PREFIX + "address";
-  public static final int DEFAULT_RM_PORT = 8040;
+  public static final int DEFAULT_RM_PORT = 8032;
   public static final String DEFAULT_RM_ADDRESS =
     "0.0.0.0:" + DEFAULT_RM_PORT;
 
@@ -123,7 +123,7 @@ public class YarnConfiguration extends Configuration {
 
   public static final String RM_RESOURCE_TRACKER_ADDRESS =
     RM_PREFIX + "resource-tracker.address";
-  public static final int DEFAULT_RM_RESOURCE_TRACKER_PORT = 8025;
+  public static final int DEFAULT_RM_RESOURCE_TRACKER_PORT = 8031;
   public static final String DEFAULT_RM_RESOURCE_TRACKER_ADDRESS =
     "0.0.0.0:" + DEFAULT_RM_RESOURCE_TRACKER_PORT;
 
@@ -140,7 +140,7 @@ public class YarnConfiguration extends Configuration {
   /** Are acls enabled.*/
   public static final String YARN_ACL_ENABLE =
     YARN_PREFIX + "acl.enable";
-  public static final boolean DEFAULT_YARN_ACL_ENABLE = true;
+  public static final boolean DEFAULT_YARN_ACL_ENABLE = false;
 
   /** ACL of who can be admin of YARN cluster.*/
   public static final String YARN_ADMIN_ACL =
@@ -153,7 +153,7 @@ public class YarnConfiguration extends Configuration {
   /** The address of the RM admin interface.*/
   public static final String RM_ADMIN_ADDRESS =
     RM_PREFIX + "admin.address";
-  public static final int DEFAULT_RM_ADMIN_PORT = 8141;
+  public static final int DEFAULT_RM_ADMIN_PORT = 8033;
   public static final String DEFAULT_RM_ADMIN_ADDRESS = "0.0.0.0:" +
     DEFAULT_RM_ADMIN_PORT;
 
@@ -285,7 +285,7 @@ public class YarnConfiguration extends Configuration {
   /** Address where the localizer IPC is.*/
   public static final String NM_LOCALIZER_ADDRESS =
     NM_PREFIX + "localizer.address";
-  public static final int DEFAULT_NM_LOCALIZER_PORT = 4344;
+  public static final int DEFAULT_NM_LOCALIZER_PORT = 8040;
   public static final String DEFAULT_NM_LOCALIZER_ADDRESS = "0.0.0.0:" +
     DEFAULT_NM_LOCALIZER_PORT;
 
@@ -366,7 +366,7 @@ public class YarnConfiguration extends Configuration {
 
   /** NM Webapp address.**/
   public static final String NM_WEBAPP_ADDRESS = NM_PREFIX + "webapp.address";
-  public static final int DEFAULT_NM_WEBAPP_PORT = 9999;
+  public static final int DEFAULT_NM_WEBAPP_PORT = 8042;
   public static final String DEFAULT_NM_WEBAPP_ADDRESS = "0.0.0.0:" +
     DEFAULT_NM_WEBAPP_PORT;
 
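The YarnConfiguration hunks above renumber the daemon defaults into a contiguous range (RM 8032, resource tracker 8031, RM admin 8033, localizer 8040, NM webapp 8042) and flip ACL enforcement off by default; the yarn-default.xml, test, and mock changes elsewhere in this commit track the same values. Reading an effective address still goes through the usual key/default pair, as in the brief sketch below, which assumes only that the YARN jars from this branch are on the classpath.

import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class YarnDefaultsSketch {
  public static void main(String[] args) {
    YarnConfiguration conf = new YarnConfiguration();
    // Falls back to DEFAULT_RM_ADDRESS ("0.0.0.0:8032") unless yarn-site.xml overrides it.
    String rmAddress = conf.get(YarnConfiguration.RM_ADDRESS,
        YarnConfiguration.DEFAULT_RM_ADDRESS);
    // DEFAULT_YARN_ACL_ENABLE is false after this patch.
    boolean aclsEnabled = conf.getBoolean(YarnConfiguration.YARN_ACL_ENABLE,
        YarnConfiguration.DEFAULT_YARN_ACL_ENABLE);
    System.out.println(rmAddress + " acls=" + aclsEnabled);
  }
}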
@@ -142,7 +142,7 @@ public class ConverterUtils {
   }
 
   public static String toString(ContainerId cId) {
-    return cId.toString();
+    return cId == null ? null : cId.toString();
   }
 
   public static NodeId toNodeId(String nodeIdStr) {
@@ -61,7 +61,7 @@
   <property>
     <description>The address of the applications manager interface in the RM.</description>
     <name>yarn.resourcemanager.address</name>
-    <value>0.0.0.0:8040</value>
+    <value>0.0.0.0:8032</value>
   </property>
 
   <property>
@@ -101,7 +101,7 @@
 
   <property>
     <name>yarn.resourcemanager.resource-tracker.address</name>
-    <value>0.0.0.0:8025</value>
+    <value>0.0.0.0:8031</value>
   </property>
 
   <property>
@@ -119,7 +119,7 @@
   <property>
     <description>The address of the RM admin interface.</description>
     <name>yarn.resourcemanager.admin.address</name>
-    <value>0.0.0.0:8141</value>
+    <value>0.0.0.0:8033</value>
   </property>
 
   <property>
@@ -274,7 +274,7 @@
   <property>
     <description>Address where the localizer IPC is.</description>
     <name>yarn.nodemanager.localizer.address</name>
-    <value>0.0.0.0:4344</value>
+    <value>0.0.0.0:8040</value>
   </property>
 
   <property>
@@ -355,7 +355,7 @@
   <property>
     <description>NM Webapp address.</description>
     <name>yarn.nodemanager.webapp.address</name>
-    <value>0.0.0.0:9999</value>
+    <value>0.0.0.0:8042</value>
   </property>
 
   <property>
@@ -27,10 +27,10 @@ import org.junit.Test;
 public class TestNodeId {
   @Test
   public void testNodeId() {
-    NodeId nodeId1 = createNodeId("10.18.52.124", 45454);
-    NodeId nodeId2 = createNodeId("10.18.52.125", 45452);
-    NodeId nodeId3 = createNodeId("10.18.52.124", 45454);
-    NodeId nodeId4 = createNodeId("10.18.52.124", 45453);
+    NodeId nodeId1 = createNodeId("10.18.52.124", 8041);
+    NodeId nodeId2 = createNodeId("10.18.52.125", 8038);
+    NodeId nodeId3 = createNodeId("10.18.52.124", 8041);
+    NodeId nodeId4 = createNodeId("10.18.52.124", 8039);
 
     Assert.assertTrue(nodeId1.equals(nodeId3));
     Assert.assertFalse(nodeId1.equals(nodeId2));
@@ -44,7 +44,7 @@ public class TestNodeId {
     Assert.assertFalse(nodeId1.hashCode() == nodeId2.hashCode());
     Assert.assertFalse(nodeId3.hashCode() == nodeId4.hashCode());
 
-    Assert.assertEquals("10.18.52.124:45454", nodeId1.toString());
+    Assert.assertEquals("10.18.52.124:8041", nodeId1.toString());
   }
 
   private NodeId createNodeId(String host, int port) {
@@ -22,6 +22,7 @@ import static org.junit.Assert.*;
 import java.net.URISyntaxException;
 
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.URL;
 import org.junit.Test;
 
@@ -35,4 +36,17 @@ public class TestConverterUtils {
     assertEquals(expectedPath, actualPath);
   }
 
+  @Test
+  public void testContainerId() throws URISyntaxException {
+    ContainerId id = BuilderUtils.newContainerId(0, 0, 0, 0);
+    String cid = ConverterUtils.toString(id);
+    assertEquals("container_0_0000_00_000000", cid);
+    ContainerId gen = ConverterUtils.toContainerId(cid);
+    assertEquals(gen, id);
+  }
+
+  @Test
+  public void testContainerIdNull() throws URISyntaxException {
+    assertNull(ConverterUtils.toString((ContainerId)null));
+  }
 }
@@ -371,8 +371,6 @@ public class ContainerLocalizer {
       Path appFileCacheDir = new Path(appBase, FILECACHE);
       appsFileCacheDirs[i] = appFileCacheDir.toString();
       lfs.mkdir(appFileCacheDir, null, false);
-      // $x/usercache/$user/appcache/$appId/output
-      lfs.mkdir(new Path(appBase, OUTPUTDIR), null, false);
     }
     conf.setStrings(String.format(APPCACHE_CTXT_FMT, appId), appsFileCacheDirs);
     conf.setStrings(String.format(USERCACHE_CTXT_FMT, appId), usersFileCacheDirs);
@@ -140,7 +140,7 @@ public class TestDefaultContainerExecutor {
 //  final String appId = "app_RM_0";
 //  final Path logDir = new Path(basedir, "logs");
 //  final Path nmLocal = new Path(basedir, "nmPrivate/" + user + "/" + appId);
-//  final InetSocketAddress nmAddr = new InetSocketAddress("foobar", 4344);
+//  final InetSocketAddress nmAddr = new InetSocketAddress("foobar", 8040);
 //  System.out.println("NMLOCAL: " + nmLocal);
 //  Random r = new Random();
 //
@@ -80,7 +80,7 @@ public class TestPBLocalizerRPC {
 
   @Test
   public void testLocalizerRPC() throws Exception {
-    InetSocketAddress locAddr = new InetSocketAddress("0.0.0.0", 4344);
+    InetSocketAddress locAddr = new InetSocketAddress("0.0.0.0", 8040);
     LocalizerService server = new LocalizerService(locAddr);
     try {
       server.start();
@@ -89,7 +89,7 @@ public class TestContainerLocalizer {
     final String user = "yak";
     final String appId = "app_RM_0";
     final String cId = "container_0";
-    final InetSocketAddress nmAddr = new InetSocketAddress("foobar", 4344);
+    final InetSocketAddress nmAddr = new InetSocketAddress("foobar", 8040);
     final List<Path> localDirs = new ArrayList<Path>();
     for (int i = 0; i < 4; ++i) {
       localDirs.add(lfs.makeQualified(new Path(basedir, i + "")));
@@ -177,9 +177,6 @@ public class TestContainerLocalizer {
       // $x/usercache/$user/appcache/$appId/filecache
       Path appcache = new Path(appDir, ContainerLocalizer.FILECACHE);
       verify(spylfs).mkdir(eq(appcache), isA(FsPermission.class), eq(false));
-      // $x/usercache/$user/appcache/$appId/output
-      Path appOutput = new Path(appDir, ContainerLocalizer.OUTPUTDIR);
-      verify(spylfs).mkdir(eq(appOutput), isA(FsPermission.class), eq(false));
     }
 
     // verify tokens read at expected location
@@ -87,7 +87,7 @@ public class TestNMWebServices extends JerseyTest {
     protected void configureServlets() {
       nmContext = new NodeManager.NMContext();
       nmContext.getNodeId().setHost("testhost.foo.com");
-      nmContext.getNodeId().setPort(9999);
+      nmContext.getNodeId().setPort(8042);
       resourceView = new ResourceView() {
         @Override
         public long getVmemAllocatedForContainers() {
@@ -330,7 +330,7 @@ public class TestNMWebServices extends JerseyTest {
       String hadoopVersion, String resourceManagerVersionBuiltOn,
       String resourceManagerBuildVersion, String resourceManagerVersion) {
 
-    WebServicesTestUtils.checkStringMatch("id", "testhost.foo.com:9999", id);
+    WebServicesTestUtils.checkStringMatch("id", "testhost.foo.com:8042", id);
     WebServicesTestUtils.checkStringMatch("healthReport", "Healthy",
         healthReport);
     assertEquals("totalVmemAllocatedContainersMB incorrect", 15872,
@@ -93,7 +93,7 @@ public class TestNMWebServicesContainers extends JerseyTest {
     protected void configureServlets() {
       nmContext = new NodeManager.NMContext();
      nmContext.getNodeId().setHost("testhost.foo.com");
-      nmContext.getNodeId().setPort(9999);
+      nmContext.getNodeId().setPort(8042);
      resourceView = new ResourceView() {
         @Override
         public long getVmemAllocatedForContainers() {
@@ -91,10 +91,16 @@ extends org.apache.hadoop.yarn.server.resourcemanager.scheduler.Queue {
   public float getAbsoluteMaximumCapacity();
 
   /**
-   * Get the currently utilized capacity of the queue
-   * relative to it's parent queue.
-   * @return the currently utilized capacity of the queue
-   * relative to it's parent queue
+   * Get the current absolute used capacity of the queue
+   * relative to the entire cluster.
+   * @return queue absolute used capacity
+   */
+  public float getAbsoluteUsedCapacity();
+
+  /**
+   * Get the current used capacity of the queue
+   * and it's children (if any).
+   * @return queue used capacity
    */
   public float getUsedCapacity();
 
@@ -104,6 +110,12 @@ extends org.apache.hadoop.yarn.server.resourcemanager.scheduler.Queue {
   */
   public void setUsedCapacity(float usedCapacity);
 
+  /**
+   * Set absolute used capacity of the queue.
+   * @param absUsedCapacity absolute used capacity of the queue
+   */
+  public void setAbsoluteUsedCapacity(float absUsedCapacity);
+
   /**
    * Get the currently utilized resources in the cluster
    * by the queue and children (if any).
@@ -111,21 +123,6 @@ extends org.apache.hadoop.yarn.server.resourcemanager.scheduler.Queue {
    */
   public Resource getUsedResources();
 
-  /**
-   * Get the current <em>utilization</em> of the queue
-   * and it's children (if any).
-   * Utilization is defined as the ratio of
-   * <em>used-capacity over configured-capacity</em> of the queue.
-   * @return queue utilization
-   */
-  public float getUtilization();
-
-  /**
-   * Get the current <em>utilization</em> of the queue.
-   * @param utilization queue utilization
-   */
-  public void setUtilization(float utilization);
-
   /**
    * Get the current run-state of the queue
    * @return current run-state
@@ -23,21 +23,25 @@ import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources;
 
 class CSQueueUtils {
 
+  final static float EPSILON = 0.0001f;
+
   public static void checkMaxCapacity(String queueName,
       float capacity, float maximumCapacity) {
-    if (maximumCapacity < 0.0f || maximumCapacity > 1.0f ||
-        maximumCapacity < capacity) {
+    if (maximumCapacity < 0.0f || maximumCapacity > 1.0f) {
       throw new IllegalArgumentException(
          "Illegal value of maximumCapacity " + maximumCapacity +
          " used in call to setMaxCapacity for queue " + queueName);
     }
-    if (maximumCapacity < capacity) {
-      throw new IllegalArgumentException(
-          "Illegal call to setMaxCapacity. " +
-          "Queue '" + queueName + "' has " +
-          "capacity (" + capacity + ") greater than " +
-          "maximumCapacity (" + maximumCapacity + ")" );
-    }
+  }
+
+  public static void checkAbsoluteCapacities(String queueName,
+      float absCapacity, float absMaxCapacity) {
+    if (absMaxCapacity < (absCapacity - EPSILON)) {
+      throw new IllegalArgumentException("Illegal call to setMaxCapacity. "
+          + "Queue '" + queueName + "' has " + "an absolute capacity (" + absCapacity
+          + ") greater than " + "its absolute maximumCapacity (" + absMaxCapacity
+          + ")");
+    }
   }
 
   public static float computeAbsoluteMaximumCapacity(
@@ -75,18 +79,16 @@ class CSQueueUtils {
     final int usedMemory = childQueue.getUsedResources().getMemory();
 
     float queueLimit = 0.0f;
-    float utilization = 0.0f;
+    float absoluteUsedCapacity = 0.0f;
     float usedCapacity = 0.0f;
     if (clusterMemory > 0) {
       queueLimit = clusterMemory * childQueue.getAbsoluteCapacity();
-      final float parentAbsoluteCapacity =
-          (parentQueue == null) ? 1.0f : parentQueue.getAbsoluteCapacity();
-      utilization = (usedMemory / queueLimit);
-      usedCapacity = (usedMemory / (clusterMemory * parentAbsoluteCapacity));
+      absoluteUsedCapacity = ((float)usedMemory / (float)clusterMemory);
+      usedCapacity = (usedMemory / queueLimit);
     }
 
-    childQueue.setUtilization(utilization);
     childQueue.setUsedCapacity(usedCapacity);
+    childQueue.setAbsoluteUsedCapacity(absoluteUsedCapacity);
 
     int available =
       Math.max((roundUp(minimumAllocation, (int)queueLimit) - usedMemory), 0);
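The CSQueueUtils bookkeeping above derives two related ratios from the same sample: absoluteUsedCapacity = usedMemory / clusterMemory (the queue's share of the whole cluster) and usedCapacity = usedMemory / (clusterMemory * absoluteCapacity) (how full the queue's own guaranteed slice is). A small worked example with made-up numbers; the memory figures are arbitrary and not prescribed by the patch.

public class CapacityRatiosSketch {
  public static void main(String[] args) {
    int clusterMemory = 100 * 1024;          // 100 GB cluster, in MB
    int usedMemory = 10 * 1024;              // 10 GB currently used by this queue
    float absoluteCapacity = 0.25f;          // queue is guaranteed 25% of the cluster

    float queueLimit = clusterMemory * absoluteCapacity;                     // 25 GB
    float absoluteUsedCapacity = (float) usedMemory / (float) clusterMemory; // 0.10
    float usedCapacity = usedMemory / queueLimit;                            // 0.40

    System.out.println("absoluteUsedCapacity=" + absoluteUsedCapacity
        + " usedCapacity=" + usedCapacity);
  }
}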
@@ -91,9 +91,9 @@ implements ResourceScheduler, CapacitySchedulerContext {
   static final Comparator<CSQueue> queueComparator = new Comparator<CSQueue>() {
     @Override
     public int compare(CSQueue q1, CSQueue q2) {
-      if (q1.getUtilization() < q2.getUtilization()) {
+      if (q1.getUsedCapacity() < q2.getUsedCapacity()) {
         return -1;
-      } else if (q1.getUtilization() > q2.getUtilization()) {
+      } else if (q1.getUsedCapacity() > q2.getUsedCapacity()) {
         return 1;
       }
 
@ -80,6 +80,7 @@ public class LeafQueue implements CSQueue {
|
||||||
private float absoluteCapacity;
|
private float absoluteCapacity;
|
||||||
private float maximumCapacity;
|
private float maximumCapacity;
|
||||||
private float absoluteMaxCapacity;
|
private float absoluteMaxCapacity;
|
||||||
|
private float absoluteUsedCapacity = 0.0f;
|
||||||
private int userLimit;
|
private int userLimit;
|
||||||
private float userLimitFactor;
|
private float userLimitFactor;
|
||||||
|
|
||||||
|
@ -91,7 +92,6 @@ public class LeafQueue implements CSQueue {
|
||||||
private int maxActiveApplicationsPerUser;
|
private int maxActiveApplicationsPerUser;
|
||||||
|
|
||||||
private Resource usedResources = Resources.createResource(0);
|
private Resource usedResources = Resources.createResource(0);
|
||||||
private float utilization = 0.0f;
|
|
||||||
private float usedCapacity = 0.0f;
|
private float usedCapacity = 0.0f;
|
||||||
private volatile int numContainers;
|
private volatile int numContainers;
|
||||||
|
|
||||||
|
@ -210,9 +210,11 @@ public class LeafQueue implements CSQueue {
|
||||||
{
|
{
|
||||||
// Sanity check
|
// Sanity check
|
||||||
CSQueueUtils.checkMaxCapacity(getQueueName(), capacity, maximumCapacity);
|
CSQueueUtils.checkMaxCapacity(getQueueName(), capacity, maximumCapacity);
|
||||||
|
float absCapacity = parent.getAbsoluteCapacity() * capacity;
|
||||||
|
CSQueueUtils.checkAbsoluteCapacities(getQueueName(), absCapacity, absoluteMaxCapacity);
|
||||||
|
|
||||||
this.capacity = capacity;
|
this.capacity = capacity;
|
||||||
this.absoluteCapacity = parent.getAbsoluteCapacity() * capacity;
|
this.absoluteCapacity = absCapacity;
|
||||||
|
|
||||||
this.maximumCapacity = maximumCapacity;
|
this.maximumCapacity = maximumCapacity;
|
||||||
this.absoluteMaxCapacity = absoluteMaxCapacity;
|
this.absoluteMaxCapacity = absoluteMaxCapacity;
|
||||||
|
@ -274,12 +276,11 @@ public class LeafQueue implements CSQueue {
|
||||||
"(int)(maxActiveApplications * (userLimit / 100.0f) * " +
|
"(int)(maxActiveApplications * (userLimit / 100.0f) * " +
|
||||||
"userLimitFactor)," +
|
"userLimitFactor)," +
|
||||||
"1) ]" + "\n" +
|
"1) ]" + "\n" +
|
||||||
"utilization = " + utilization +
|
|
||||||
" [= usedResourcesMemory / " +
|
|
||||||
"(clusterResourceMemory * absoluteCapacity)]" + "\n" +
|
|
||||||
"usedCapacity = " + usedCapacity +
|
"usedCapacity = " + usedCapacity +
|
||||||
" [= usedResourcesMemory / " +
|
" [= usedResourcesMemory / " +
|
||||||
"(clusterResourceMemory * parent.absoluteCapacity)]" + "\n" +
|
"(clusterResourceMemory * absoluteCapacity)]" + "\n" +
|
||||||
|
"absoluteUsedCapacity = " + absoluteUsedCapacity +
|
||||||
|
" [= usedResourcesMemory / clusterResourceMemory]" + "\n" +
|
||||||
"maxAMResourcePercent = " + maxAMResourcePercent +
|
"maxAMResourcePercent = " + maxAMResourcePercent +
|
||||||
" [= configuredMaximumAMResourcePercent ]" + "\n" +
|
" [= configuredMaximumAMResourcePercent ]" + "\n" +
|
||||||
"minimumAllocationFactor = " + minimumAllocationFactor +
|
"minimumAllocationFactor = " + minimumAllocationFactor +
|
||||||
|
@ -313,6 +314,11 @@ public class LeafQueue implements CSQueue {
|
||||||
return absoluteMaxCapacity;
|
return absoluteMaxCapacity;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public synchronized float getAbsoluteUsedCapacity() {
|
||||||
|
return absoluteUsedCapacity;
|
||||||
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public CSQueue getParent() {
|
public CSQueue getParent() {
|
||||||
return parent;
|
return parent;
|
||||||
|
@ -383,24 +389,21 @@ public class LeafQueue implements CSQueue {
|
||||||
return usedResources;
|
return usedResources;
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
|
||||||
public synchronized float getUtilization() {
|
|
||||||
return utilization;
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public List<CSQueue> getChildQueues() {
|
public List<CSQueue> getChildQueues() {
|
||||||
return null;
|
return null;
|
||||||
}
|
}
|
||||||
|
|
||||||
public synchronized void setUtilization(float utilization) {
|
@Override
|
||||||
this.utilization = utilization;
|
|
||||||
}
|
|
||||||
|
|
||||||
public synchronized void setUsedCapacity(float usedCapacity) {
|
public synchronized void setUsedCapacity(float usedCapacity) {
|
||||||
this.usedCapacity = usedCapacity;
|
this.usedCapacity = usedCapacity;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public synchronized void setAbsoluteUsedCapacity(float absUsedCapacity) {
|
||||||
|
this.absoluteUsedCapacity = absUsedCapacity;
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Set maximum capacity - used only for testing.
|
* Set maximum capacity - used only for testing.
|
||||||
* @param maximumCapacity new max capacity
|
* @param maximumCapacity new max capacity
|
||||||
|
@ -408,10 +411,11 @@ public class LeafQueue implements CSQueue {
|
||||||
synchronized void setMaxCapacity(float maximumCapacity) {
|
synchronized void setMaxCapacity(float maximumCapacity) {
|
||||||
// Sanity check
|
// Sanity check
|
||||||
CSQueueUtils.checkMaxCapacity(getQueueName(), capacity, maximumCapacity);
|
CSQueueUtils.checkMaxCapacity(getQueueName(), capacity, maximumCapacity);
|
||||||
|
float absMaxCapacity = CSQueueUtils.computeAbsoluteMaximumCapacity(maximumCapacity, parent);
|
||||||
|
CSQueueUtils.checkAbsoluteCapacities(getQueueName(), absoluteCapacity, absMaxCapacity);
|
||||||
|
|
||||||
this.maximumCapacity = maximumCapacity;
|
this.maximumCapacity = maximumCapacity;
|
||||||
this.absoluteMaxCapacity =
|
this.absoluteMaxCapacity = absMaxCapacity;
|
||||||
CSQueueUtils.computeAbsoluteMaximumCapacity(maximumCapacity, parent);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@@ -516,7 +520,7 @@ public class LeafQueue implements CSQueue {
         "absoluteCapacity=" + absoluteCapacity + ", " +
         "usedResources=" + usedResources.getMemory() + "MB, " +
         "usedCapacity=" + getUsedCapacity() + ", " +
-        "utilization=" + getUtilization() + ", " +
+        "absoluteUsedCapacity=" + getAbsoluteUsedCapacity() + ", " +
         "numApps=" + getNumApplications() + ", " +
         "numContainers=" + getNumContainers();
   }
@@ -1228,7 +1232,8 @@ public class LeafQueue implements CSQueue {
         " container=" + container +
         " containerId=" + container.getId() +
         " queue=" + this +
-        " util=" + getUtilization() +
+        " usedCapacity=" + getUsedCapacity() +
+        " absoluteUsedCapacity=" + getAbsoluteUsedCapacity() +
         " used=" + usedResources +
         " cluster=" + clusterResource);

@@ -1241,7 +1246,8 @@ public class LeafQueue implements CSQueue {
         " application=" + application.getApplicationId() +
         " resource=" + request.getCapability() +
         " queue=" + this.toString() +
-        " util=" + getUtilization() +
+        " usedCapacity=" + getUsedCapacity() +
+        " absoluteUsedCapacity=" + getAbsoluteUsedCapacity() +
         " used=" + usedResources +
         " cluster=" + clusterResource);

@@ -1307,7 +1313,8 @@ public class LeafQueue implements CSQueue {
         " container=" + container +
         " resource=" + container.getResource() +
         " queue=" + this +
-        " util=" + getUtilization() +
+        " usedCapacity=" + getUsedCapacity() +
+        " absoluteUsedCapacity=" + getAbsoluteUsedCapacity() +
         " used=" + usedResources +
         " cluster=" + clusterResource);
   }

@@ -67,9 +67,9 @@ public class ParentQueue implements CSQueue {
   private float maximumCapacity;
   private float absoluteCapacity;
   private float absoluteMaxCapacity;
+  private float absoluteUsedCapacity = 0.0f;

   private float usedCapacity = 0.0f;
-  private float utilization = 0.0f;

   private final Set<CSQueue> childQueues;
   private final Comparator<CSQueue> queueComparator;
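Both LeafQueue and ParentQueue now mark getAbsoluteUsedCapacity()/setAbsoluteUsedCapacity() with @Override, which implies the CSQueue interface gains matching accessors in a part of the commit not shown in this truncated diff. A minimal sketch of the assumed interface additions; the method names follow the implementations above, but the comments and grouping are mine.

    // Assumed shape of the CSQueue additions implied by the @Override annotations
    // in the hunks above; the interface change itself is not visible in this diff.
    interface CSQueueUsageSketch {
      // used resources as a fraction of the queue's own guaranteed capacity
      float getUsedCapacity();
      void setUsedCapacity(float usedCapacity);

      // used resources as a fraction of the entire cluster
      float getAbsoluteUsedCapacity();
      void setAbsoluteUsedCapacity(float absUsedCapacity);
    }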
@@ -158,9 +158,11 @@ public class ParentQueue implements CSQueue {
   ) {
     // Sanity check
     CSQueueUtils.checkMaxCapacity(getQueueName(), capacity, maximumCapacity);
+    CSQueueUtils.checkAbsoluteCapacities(getQueueName(), absoluteCapacity, absoluteMaxCapacity);
+
     this.capacity = capacity;
     this.absoluteCapacity = absoluteCapacity;

     this.maximumCapacity = maximumCapacity;
     this.absoluteMaxCapacity = absoluteMaxCapacity;

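The constructor path above adds CSQueueUtils.checkAbsoluteCapacities() next to the existing max-capacity check. Below is an illustrative guess at the invariant it enforces; this is not the real CSQueueUtils code, and the exception text is invented.

    public class AbsoluteCapacityCheckSketch {
      // Assumed invariant: a queue's guaranteed absolute capacity must not exceed
      // its absolute maximum capacity.
      static void checkAbsoluteCapacities(String queueName, float absCapacity, float absMaxCapacity) {
        if (absCapacity > absMaxCapacity) {
          throw new IllegalArgumentException(
              "Illegal capacities for queue " + queueName +
              ": absoluteCapacity=" + absCapacity +
              " > absoluteMaxCapacity=" + absMaxCapacity);
        }
      }
    }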
@@ -243,6 +245,11 @@ public class ParentQueue implements CSQueue {
     return absoluteMaxCapacity;
   }

+  @Override
+  public synchronized float getAbsoluteUsedCapacity() {
+    return absoluteUsedCapacity;
+  }
+
   @Override
   public float getMaximumCapacity() {
     return maximumCapacity;
@@ -264,11 +271,6 @@ public class ParentQueue implements CSQueue {
     return usedResources;
   }

-  @Override
-  public synchronized float getUtilization() {
-    return utilization;
-  }
-
   @Override
   public synchronized List<CSQueue> getChildQueues() {
     return new ArrayList<CSQueue>(childQueues);
@@ -351,7 +353,6 @@ public class ParentQueue implements CSQueue {
         "absoluteCapacity=" + absoluteCapacity + ", " +
         "usedResources=" + usedResources.getMemory() + "MB, " +
         "usedCapacity=" + getUsedCapacity() + ", " +
-        "utilization=" + getUtilization() + ", " +
         "numApps=" + getNumApplications() + ", " +
         "numContainers=" + getNumContainers();
   }
@@ -490,12 +491,14 @@ public class ParentQueue implements CSQueue {
         " #applications: " + getNumApplications());
   }

+  @Override
   public synchronized void setUsedCapacity(float usedCapacity) {
     this.usedCapacity = usedCapacity;
   }

-  public synchronized void setUtilization(float utilization) {
-    this.utilization = utilization;
+  @Override
+  public synchronized void setAbsoluteUsedCapacity(float absUsedCapacity) {
+    this.absoluteUsedCapacity = absUsedCapacity;
   }

   /**
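The pair of setters above is presumably driven together whenever a queue's resource usage changes. A hypothetical caller, not part of this patch, that mirrors that assumption; UsageSink is only a local stand-in for the relevant CSQueue methods.

    public class QueueUsageUpdateSketch {
      // Minimal stand-in for the two CSQueue setters touched by this patch.
      interface UsageSink {
        void setUsedCapacity(float usedCapacity);
        void setAbsoluteUsedCapacity(float absUsedCapacity);
      }

      // Hypothetical caller: whenever a queue's used resources change, recompute
      // both views of usage and push them through the new setters.
      static void updateUsage(UsageSink queue, int usedMB, int clusterMB, float absoluteCapacity) {
        float absUsed = (float) usedMB / clusterMB;
        queue.setAbsoluteUsedCapacity(absUsed);
        queue.setUsedCapacity(absoluteCapacity > 0 ? absUsed / absoluteCapacity : 0.0f);
      }
    }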
@@ -505,10 +508,11 @@ public class ParentQueue implements CSQueue {
   synchronized void setMaxCapacity(float maximumCapacity) {
     // Sanity check
     CSQueueUtils.checkMaxCapacity(getQueueName(), capacity, maximumCapacity);
+    float absMaxCapacity = CSQueueUtils.computeAbsoluteMaximumCapacity(maximumCapacity, parent);
+    CSQueueUtils.checkAbsoluteCapacities(getQueueName(), absoluteCapacity, absMaxCapacity);

     this.maximumCapacity = maximumCapacity;
-    this.absoluteMaxCapacity =
-      CSQueueUtils.computeAbsoluteMaximumCapacity(maximumCapacity, parent);
+    this.absoluteMaxCapacity = absMaxCapacity;
   }

   @Override
@@ -545,7 +549,8 @@ public class ParentQueue implements CSQueue {

     LOG.info("assignedContainer" +
         " queue=" + getQueueName() +
-        " util=" + getUtilization() +
+        " usedCapacity=" + getUsedCapacity() +
+        " absoluteUsedCapacity=" + getAbsoluteUsedCapacity() +
         " used=" + usedResources +
         " cluster=" + clusterResource);

@@ -556,7 +561,8 @@ public class ParentQueue implements CSQueue {
     if (LOG.isDebugEnabled()) {
       LOG.debug("ParentQ=" + getQueueName()
         + " assignedSoFarInThisIteration=" + assignment.getResource()
-        + " utilization=" + getUtilization());
+        + " usedCapacity=" + getUsedCapacity()
+        + " absoluteUsedCapacity=" + getAbsoluteUsedCapacity());
     }

     // Do not assign more than one container if this isn't the root queue
@@ -639,7 +645,7 @@ public class ParentQueue implements CSQueue {
   String getChildQueuesToPrint() {
     StringBuilder sb = new StringBuilder();
     for (CSQueue q : childQueues) {
-      sb.append(q.getQueuePath() + "(" + q.getUtilization() + "), ");
+      sb.append(q.getQueuePath() + "(" + q.getUsedCapacity() + "), ");
     }
     return sb.toString();
   }
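After this change the child-queue summary prints each child's used capacity instead of its utilization. A self-contained illustration of the resulting string format, with made-up queue paths and values.

    import java.util.LinkedHashMap;
    import java.util.Map;

    public class ChildQueuePrintSketch {
      public static void main(String[] args) {
        // Hypothetical children of the root queue and their used capacities.
        Map<String, Float> children = new LinkedHashMap<String, Float>();
        children.put("root.a", 0.5f);
        children.put("root.b", 0.0f);
        StringBuilder sb = new StringBuilder();
        for (Map.Entry<String, Float> e : children.entrySet()) {
          sb.append(e.getKey() + "(" + e.getValue() + "), ");
        }
        System.out.println(sb); // prints: root.a(0.5), root.b(0.0), 
      }
    }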
@@ -663,7 +669,8 @@ public class ParentQueue implements CSQueue {

     LOG.info("completedContainer" +
         " queue=" + getQueueName() +
-        " util=" + getUtilization() +
+        " usedCapacity=" + getUsedCapacity() +
+        " absoluteUsedCapacity=" + getAbsoluteUsedCapacity() +
         " used=" + usedResources +
         " cluster=" + clusterResource);
   }

@@ -67,12 +67,9 @@ class CapacitySchedulerPage extends RmView {
     protected void render(Block html) {
       ResponseInfo ri = info("\'" + lqinfo.getQueuePath().substring(5) + "\' Queue Status").
           _("Queue State:", lqinfo.getQueueState()).
-          _("Capacity:", percent(lqinfo.getCapacity() / 100)).
-          _("Max Capacity:", percent(lqinfo.getMaxCapacity() / 100)).
           _("Used Capacity:", percent(lqinfo.getUsedCapacity() / 100)).
           _("Absolute Capacity:", percent(lqinfo.getAbsoluteCapacity() / 100)).
           _("Absolute Max Capacity:", percent(lqinfo.getAbsoluteMaxCapacity() / 100)).
-          _("Utilization:", percent(lqinfo.getUtilization() / 100)).
           _("Used Resources:", lqinfo.getUsedResources().toString()).
           _("Num Active Applications:", Integer.toString(lqinfo.getNumActiveApplications())).
           _("Num Pending Applications:", Integer.toString(lqinfo.getNumPendingApplications())).
@@ -81,8 +78,10 @@ class CapacitySchedulerPage extends RmView {
           _("Max Applications Per User:", Integer.toString(lqinfo.getMaxApplicationsPerUser())).
           _("Max Active Applications:", Integer.toString(lqinfo.getMaxActiveApplications())).
           _("Max Active Applications Per User:", Integer.toString(lqinfo.getMaxActiveApplicationsPerUser())).
-          _("User Limit:", Integer.toString(lqinfo.getUserLimit()) + "%").
-          _("User Limit Factor:", String.format("%.1f", lqinfo.getUserLimitFactor()));
+          _("Configured Capacity:", percent(lqinfo.getCapacity() / 100)).
+          _("Configured Max Capacity:", percent(lqinfo.getMaxCapacity() / 100)).
+          _("Configured Minimum User Limit Percent:", Integer.toString(lqinfo.getUserLimit()) + "%").
+          _("Configured User Limit Factor:", String.format("%.1f", lqinfo.getUserLimitFactor()));

       html._(InfoBlock.class);

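Throughout the page the queue-info values are divided by 100 before being handed to percent(), which suggests the info beans report 0-100 numbers while percent() expects a 0-1 fraction. A small sketch of that assumption; percentSketch() is a stand-in, not the page's real helper.

    public class PercentScalingSketch {
      // Assumed behaviour of the page's percent() helper: format a 0-1 fraction
      // as a percentage string.
      static String percentSketch(float fraction) {
        return String.format("%.1f%%", fraction * 100);
      }

      public static void main(String[] args) {
        float usedCapacityFromBean = 37.5f;                              // hypothetical 0-100 value
        System.out.println(percentSketch(usedCapacityFromBean / 100));   // prints 37.5%
      }
    }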
@@ -103,20 +102,20 @@ class CapacitySchedulerPage extends RmView {
       ArrayList<CapacitySchedulerQueueInfo> subQueues =
           (csqinfo.qinfo == null) ? csqinfo.csinfo.getSubQueues()
               : csqinfo.qinfo.getSubQueues();
-      UL<Hamlet> ul = html.ul();
+      UL<Hamlet> ul = html.ul("#pq");
       for (CapacitySchedulerQueueInfo info : subQueues) {
         float used = info.getUsedCapacity() / 100;
-        float set = info.getCapacity() / 100;
-        float max = info.getMaxCapacity() / 100;
+        float absCap = info.getAbsoluteCapacity() / 100;
+        float absMaxCap = info.getAbsoluteMaxCapacity() / 100;
+        float absUsedCap = info.getAbsoluteUsedCapacity() / 100;
         LI<UL<Hamlet>> li = ul.
           li().
-            a(_Q).$style(width(max * Q_MAX_WIDTH)).
-              $title(join("capacity:", percent(set), " used:", percent(used),
-                  " max capacity:", percent(max))).
-              span().$style(join(Q_GIVEN, ";font-size:1px;", width(set/max))).
+            a(_Q).$style(width(absMaxCap * Q_MAX_WIDTH)).
+              $title(join("Absolute Capacity:", percent(absCap))).
+              span().$style(join(Q_GIVEN, ";font-size:1px;", width(absCap/absMaxCap))).
                 _('.')._().
-              span().$style(join(width(used*set/max),
-                  ";font-size:1px;left:0%;", used > 1 ? Q_OVER : Q_UNDER)).
+              span().$style(join(width(absUsedCap/absMaxCap),
+                  ";font-size:1px;left:0%;", absUsedCap > absCap ? Q_OVER : Q_UNDER)).
                 _('.')._().
              span(".q", info.getQueuePath().substring(5))._().
              span().$class("qstats").$style(left(Q_STATS_POS)).
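With the rewritten block above, each queue bar is sized and filled from absolute (cluster-wide) figures: the bar length tracks absMaxCap, the "given" span tracks absCap/absMaxCap, the used span tracks absUsedCap/absMaxCap, and the over-capacity style now triggers when absUsedCap exceeds absCap. A worked example with hypothetical values; the Q_MAX_WIDTH value is assumed, not taken from the page.

    public class QueueBarMathSketch {
      public static void main(String[] args) {
        float Q_MAX_WIDTH = 600f;  // assumed width budget for the widest possible bar
        // Hypothetical queue: 30% of the cluster guaranteed, 60% maximum, 45% in use.
        float absCap = 0.30f, absMaxCap = 0.60f, absUsedCap = 0.45f;

        float barWidth   = absMaxCap * Q_MAX_WIDTH;  // 360: bar length scales with absolute max
        float givenShare = absCap / absMaxCap;       // 0.50: guaranteed portion of the bar
        float usedShare  = absUsedCap / absMaxCap;   // 0.75: filled portion of the bar
        boolean overCapacity = absUsedCap > absCap;  // true: rendered with the Q_OVER style

        System.out.printf("bar=%.0f given=%.2f used=%.2f over=%b%n",
            barWidth, givenShare, usedShare, overCapacity);
      }
    }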
@@ -180,7 +179,6 @@ class CapacitySchedulerPage extends RmView {
           _().
         li().
           a(_Q).$style(width(Q_MAX_WIDTH)).
-            $title(join("used:", percent(used))).
             span().$style(join(width(used), ";left:0%;",
                 used > 1 ? Q_OVER : Q_UNDER))._(".")._().
             span(".q", "root")._().
@@ -211,8 +209,7 @@ class CapacitySchedulerPage extends RmView {
       _("$(function() {",
         " $('#cs a span').addClass('ui-corner-all').css('position', 'absolute');",
         " $('#cs').bind('loaded.jstree', function (e, data) {",
-        " data.inst.open_all();",
-        " data.inst.close_node('#lq', true);",
+        " data.inst.open_node('#pq', true);",
         " }).",
         " jstree({",
         " core: { animation: 188, html_titles: true },",
Some files were not shown because too many files have changed in this diff.