diff --git a/.gitignore b/.gitignore index 13b29ff20a3..db58f6af6a5 100644 --- a/.gitignore +++ b/.gitignore @@ -9,6 +9,7 @@ .project .settings target +build hadoop-common-project/hadoop-kms/downloads/ hadoop-hdfs-project/hadoop-hdfs/downloads hadoop-hdfs-project/hadoop-hdfs-httpfs/downloads diff --git a/hadoop-common-project/hadoop-common/LICENSE.txt b/LICENSE.txt similarity index 100% rename from hadoop-common-project/hadoop-common/LICENSE.txt rename to LICENSE.txt diff --git a/hadoop-common-project/hadoop-common/NOTICE.txt b/NOTICE.txt similarity index 100% rename from hadoop-common-project/hadoop-common/NOTICE.txt rename to NOTICE.txt diff --git a/hadoop-common-project/hadoop-common/README.txt b/README.txt similarity index 100% rename from hadoop-common-project/hadoop-common/README.txt rename to README.txt diff --git a/dev-support/create-release.sh b/dev-support/create-release.sh index 3976a789ac4..447fbdd6cc6 100755 --- a/dev-support/create-release.sh +++ b/dev-support/create-release.sh @@ -70,8 +70,10 @@ fi ARTIFACTS_DIR="target/artifacts" -# Create staging dir for release artifacts +# mvn clean for sanity +run ${MVN} clean +# Create staging dir for release artifacts run mkdir -p ${ARTIFACTS_DIR} # Create RAT report @@ -80,10 +82,17 @@ run ${MVN} apache-rat:check # Create SRC and BIN tarballs for release, # Using 'install' goal instead of 'package' so artifacts are available # in the Maven local cache for the site generation -run ${MVN} install -Pdist,docs,src,native -DskipTests -Dtar +run ${MVN} install -Pdist,src,native -DskipTests -Dtar # Create site for release run ${MVN} site site:stage -Pdist -Psrc +run mkdir -p target/staging/hadoop-project/hadoop-project-dist/hadoop-yarn +run mkdir -p target/staging/hadoop-project/hadoop-project-dist/hadoop-mapreduce +run cp ./hadoop-common-project/hadoop-common/src/main/docs/releasenotes.html target/staging/hadoop-project/hadoop-project-dist/hadoop-common/ +run cp ./hadoop-common-project/hadoop-common/CHANGES.txt target/staging/hadoop-project/hadoop-project-dist/hadoop-common/ +run cp ./hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt target/staging/hadoop-project/hadoop-project-dist/hadoop-hdfs/ +run cp ./hadoop-yarn-project/CHANGES.txt target/staging/hadoop-project/hadoop-project-dist/hadoop-yarn/ +run cp ./hadoop-mapreduce-project/CHANGES.txt target/staging/hadoop-project/hadoop-project-dist/hadoop-mapreduce/ run mv target/staging/hadoop-project target/r${HADOOP_VERSION}/ run cd target/ run tar czf hadoop-site-${HADOOP_VERSION}.tar.gz r${HADOOP_VERSION}/* @@ -94,14 +103,19 @@ find . -name rat.txt | xargs -I% cat % > ${ARTIFACTS_DIR}/hadoop-${HADOOP_VERSIO # Stage CHANGES.txt files run cp ./hadoop-common-project/hadoop-common/CHANGES.txt ${ARTIFACTS_DIR}/CHANGES-COMMON-${HADOOP_VERSION}${RC_LABEL}.txt -run cp ./hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt ${ARTIFACTS_DIR}/CHANGES-HDFS--${HADOOP_VERSION}${RC_LABEL}.txt +run cp ./hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt ${ARTIFACTS_DIR}/CHANGES-HDFS-${HADOOP_VERSION}${RC_LABEL}.txt run cp ./hadoop-mapreduce-project/CHANGES.txt ${ARTIFACTS_DIR}/CHANGES-MAPREDUCE-${HADOOP_VERSION}${RC_LABEL}.txt run cp ./hadoop-yarn-project/CHANGES.txt ${ARTIFACTS_DIR}/CHANGES-YARN-${HADOOP_VERSION}${RC_LABEL}.txt -# Stage BIN tarball +# Prepare and stage BIN tarball +run cd hadoop-dist/target/ +run tar -xzf hadoop-${HADOOP_VERSION}.tar.gz +run cp -r ../../target/r${HADOOP_VERSION}/* hadoop-${HADOOP_VERSION}/share/doc/hadoop/ +run tar -czf hadoop-${HADOOP_VERSION}.tar.gz hadoop-${HADOOP_VERSION} +run cd ../..
run mv hadoop-dist/target/hadoop-${HADOOP_VERSION}.tar.gz ${ARTIFACTS_DIR}/hadoop-${HADOOP_VERSION}${RC_LABEL}.tar.gz -# State SRC tarball +# Stage SRC tarball run mv hadoop-dist/target/hadoop-${HADOOP_VERSION}-src.tar.gz ${ARTIFACTS_DIR}/hadoop-${HADOOP_VERSION}${RC_LABEL}-src.tar.gz # Stage SITE tarball diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh index ed671a64ebb..cbeb81987e7 100755 --- a/dev-support/test-patch.sh +++ b/dev-support/test-patch.sh @@ -16,7 +16,7 @@ ulimit -n 1024 ### Setup some variables. -### SVN_REVISION and BUILD_URL are set by Hudson if it is run by patch process +### BUILD_URL is set by Hudson if it is run by patch process ### Read variables from properties file bindir=$(dirname $0) @@ -36,7 +36,7 @@ BUILD_NATIVE=true PS=${PS:-ps} AWK=${AWK:-awk} WGET=${WGET:-wget} -SVN=${SVN:-svn} +GIT=${GIT:-git} GREP=${GREP:-grep} PATCH=${PATCH:-patch} DIFF=${DIFF:-diff} @@ -59,13 +59,13 @@ printUsage() { echo "--mvn-cmd= The 'mvn' command to use (default \$MAVEN_HOME/bin/mvn, or 'mvn')" echo "--ps-cmd= The 'ps' command to use (default 'ps')" echo "--awk-cmd= The 'awk' command to use (default 'awk')" - echo "--svn-cmd= The 'svn' command to use (default 'svn')" + echo "--git-cmd= The 'git' command to use (default 'git')" echo "--grep-cmd= The 'grep' command to use (default 'grep')" echo "--patch-cmd= The 'patch' command to use (default 'patch')" echo "--diff-cmd= The 'diff' command to use (default 'diff')" echo "--findbugs-home= Findbugs home directory (default FINDBUGS_HOME environment variable)" echo "--forrest-home= Forrest home directory (default FORREST_HOME environment variable)" - echo "--dirty-workspace Allow the local SVN workspace to have uncommitted changes" + echo "--dirty-workspace Allow the local git workspace to have uncommitted changes" echo "--run-tests Run all tests below the base directory" echo "--build-native= If true, then build native components (default 'true')" echo @@ -107,8 +107,8 @@ parseArgs() { --wget-cmd=*) WGET=${i#*=} ;; - --svn-cmd=*) - SVN=${i#*=} + --git-cmd=*) + GIT=${i#*=} ;; --grep-cmd=*) GREP=${i#*=} @@ -197,7 +197,7 @@ checkout () { echo "" ### When run by a developer, if the workspace contains modifications, do not continue ### unless the --dirty-workspace option was set - status=`$SVN stat --ignore-externals | sed -e '/^X[ ]*/D'` + status=`$GIT status --porcelain` if [[ $JENKINS == "false" ]] ; then if [[ "$status" != "" && -z $DIRTY_WORKSPACE ]] ; then echo "ERROR: can't run in a workspace that contains the following modifications" @@ -207,10 +207,12 @@ checkout () { echo else cd $BASEDIR - $SVN revert -R . - rm -rf `$SVN status --no-ignore` - $SVN update + $GIT reset --hard + $GIT clean -xdf + $GIT checkout trunk + $GIT pull --rebase fi + GIT_REVISION=`git rev-parse --verify --short HEAD` return $? } @@ -229,10 +231,10 @@ downloadPatch () { echo "$defect patch is being downloaded at `date` from" echo "$patchURL" $WGET -q -O $PATCH_DIR/patch $patchURL - VERSION=${SVN_REVISION}_${defect}_PATCH-${patchNum} + VERSION=${GIT_REVISION}_${defect}_PATCH-${patchNum} JIRA_COMMENT="Here are the results of testing the latest attachment $patchURL - against trunk revision ${SVN_REVISION}." + against trunk revision ${GIT_REVISION}." 
### Copy in any supporting files needed by this process cp -r $SUPPORT_DIR/lib/* ./lib diff --git a/hadoop-assemblies/src/main/resources/assemblies/hadoop-src.xml b/hadoop-assemblies/src/main/resources/assemblies/hadoop-src.xml index fd03bfd68da..41c4fb60721 100644 --- a/hadoop-assemblies/src/main/resources/assemblies/hadoop-src.xml +++ b/hadoop-assemblies/src/main/resources/assemblies/hadoop-src.xml @@ -23,6 +23,14 @@ true + + . + + LICENSE.txt + README.txt + NOTICE.txt + + . true diff --git a/hadoop-common-project/hadoop-auth/pom.xml b/hadoop-common-project/hadoop-auth/pom.xml index 2ff51d6ffee..564518c540b 100644 --- a/hadoop-common-project/hadoop-auth/pom.xml +++ b/hadoop-common-project/hadoop-auth/pom.xml @@ -61,6 +61,16 @@ org.mortbay.jetty jetty test + + + org.apache.tomcat.embed + tomcat-embed-core + test + + + org.apache.tomcat.embed + tomcat-embed-logging-juli + test javax.servlet diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java index 316cd60a256..9330444c46e 100644 --- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java +++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java @@ -519,9 +519,7 @@ public class AuthenticationFilter implements Filter { StringBuilder sb = new StringBuilder(AuthenticatedURL.AUTH_COOKIE) .append("="); if (token != null && token.length() > 0) { - sb.append("\"") - .append(token) - .append("\""); + sb.append(token); } sb.append("; Version=1"); diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/AuthenticatorTestCase.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/AuthenticatorTestCase.java index 4e4ecc483eb..8f35e13e66a 100644 --- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/AuthenticatorTestCase.java +++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/AuthenticatorTestCase.java @@ -13,7 +13,22 @@ */ package org.apache.hadoop.security.authentication.client; +import org.apache.catalina.deploy.FilterDef; +import org.apache.catalina.deploy.FilterMap; +import org.apache.catalina.startup.Tomcat; import org.apache.hadoop.security.authentication.server.AuthenticationFilter; +import org.apache.http.HttpResponse; +import org.apache.http.auth.AuthScope; +import org.apache.http.auth.Credentials; +import org.apache.http.client.HttpClient; +import org.apache.http.client.methods.HttpGet; +import org.apache.http.client.methods.HttpPost; +import org.apache.http.client.methods.HttpUriRequest; +import org.apache.http.client.params.AuthPolicy; +import org.apache.http.entity.InputStreamEntity; +import org.apache.http.impl.auth.SPNegoSchemeFactory; +import org.apache.http.impl.client.SystemDefaultHttpClient; +import org.apache.http.util.EntityUtils; import org.mortbay.jetty.Server; import org.mortbay.jetty.servlet.Context; import org.mortbay.jetty.servlet.FilterHolder; @@ -24,16 +39,19 @@ import javax.servlet.ServletException; import javax.servlet.http.HttpServlet; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; +import java.io.BufferedReader; +import
java.io.ByteArrayInputStream; +import java.io.File; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.io.OutputStreamWriter; -import java.io.BufferedReader; import java.io.InputStreamReader; import java.io.Writer; import java.net.HttpURLConnection; import java.net.ServerSocket; import java.net.URL; +import java.security.Principal; import java.util.Properties; import org.junit.Assert; @@ -41,10 +59,18 @@ public class AuthenticatorTestCase { private Server server; private String host = null; private int port = -1; + private boolean useTomcat = false; + private Tomcat tomcat = null; Context context; private static Properties authenticatorConfig; + public AuthenticatorTestCase() {} + + public AuthenticatorTestCase(boolean useTomcat) { + this.useTomcat = useTomcat; + } + protected static void setAuthenticationHandlerConfig(Properties config) { authenticatorConfig = config; } @@ -80,7 +106,19 @@ public class AuthenticatorTestCase { } } + protected int getLocalPort() throws Exception { + ServerSocket ss = new ServerSocket(0); + int ret = ss.getLocalPort(); + ss.close(); + return ret; + } + protected void start() throws Exception { + if (useTomcat) startTomcat(); + else startJetty(); + } + + protected void startJetty() throws Exception { server = new Server(0); context = new Context(); context.setContextPath("/foo"); @@ -88,16 +126,42 @@ public class AuthenticatorTestCase { context.addFilter(new FilterHolder(TestFilter.class), "/*", 0); context.addServlet(new ServletHolder(TestServlet.class), "/bar"); host = "localhost"; - ServerSocket ss = new ServerSocket(0); - port = ss.getLocalPort(); - ss.close(); + port = getLocalPort(); server.getConnectors()[0].setHost(host); server.getConnectors()[0].setPort(port); server.start(); System.out.println("Running embedded servlet container at: http://" + host + ":" + port); } + protected void startTomcat() throws Exception { + tomcat = new Tomcat(); + File base = new File(System.getProperty("java.io.tmpdir")); + org.apache.catalina.Context ctx = + tomcat.addContext("/foo",base.getAbsolutePath()); + FilterDef fd = new FilterDef(); + fd.setFilterClass(TestFilter.class.getName()); + fd.setFilterName("TestFilter"); + FilterMap fm = new FilterMap(); + fm.setFilterName("TestFilter"); + fm.addURLPattern("/*"); + fm.addServletName("/bar"); + ctx.addFilterDef(fd); + ctx.addFilterMap(fm); + tomcat.addServlet(ctx, "/bar", TestServlet.class.getName()); + ctx.addServletMapping("/bar", "/bar"); + host = "localhost"; + port = getLocalPort(); + tomcat.setHostname(host); + tomcat.setPort(port); + tomcat.start(); + } + protected void stop() throws Exception { + if (useTomcat) stopTomcat(); + else stopJetty(); + } + + protected void stopJetty() throws Exception { try { server.stop(); } catch (Exception e) { @@ -109,6 +173,18 @@ public class AuthenticatorTestCase { } } + protected void stopTomcat() throws Exception { + try { + tomcat.stop(); + } catch (Exception e) { + } + + try { + tomcat.destroy(); + } catch (Exception e) { + } + } + protected String getBaseURL() { return "http://" + host + ":" + port + "/foo/bar"; } @@ -165,4 +241,57 @@ public class AuthenticatorTestCase { } } + private SystemDefaultHttpClient getHttpClient() { + final SystemDefaultHttpClient httpClient = new SystemDefaultHttpClient(); + httpClient.getAuthSchemes().register(AuthPolicy.SPNEGO, new SPNegoSchemeFactory(true)); + Credentials use_jaas_creds = new Credentials() { + public String getPassword() { + return null; + } + + public Principal getUserPrincipal() 
{ + return null; + } + }; + + httpClient.getCredentialsProvider().setCredentials( + AuthScope.ANY, use_jaas_creds); + return httpClient; + } + + private void doHttpClientRequest(HttpClient httpClient, HttpUriRequest request) throws Exception { + HttpResponse response = null; + try { + response = httpClient.execute(request); + final int httpStatus = response.getStatusLine().getStatusCode(); + Assert.assertEquals(HttpURLConnection.HTTP_OK, httpStatus); + } finally { + if (response != null) EntityUtils.consumeQuietly(response.getEntity()); + } + } + + protected void _testAuthenticationHttpClient(Authenticator authenticator, boolean doPost) throws Exception { + start(); + try { + SystemDefaultHttpClient httpClient = getHttpClient(); + doHttpClientRequest(httpClient, new HttpGet(getBaseURL())); + + // Always do a GET before POST to trigger the SPNego negotiation + if (doPost) { + HttpPost post = new HttpPost(getBaseURL()); + byte [] postBytes = POST.getBytes(); + ByteArrayInputStream bis = new ByteArrayInputStream(postBytes); + InputStreamEntity entity = new InputStreamEntity(bis, postBytes.length); + + // Important that the entity is not repeatable -- this means if + // we have to renegotiate (e.g. b/c the cookie wasn't handled properly) + // the test will fail. + Assert.assertFalse(entity.isRepeatable()); + post.setEntity(entity); + doHttpClientRequest(httpClient, post); + } + } finally { + stop(); + } + } } diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestKerberosAuthenticator.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestKerberosAuthenticator.java index 53d23c467a4..6c49d15f09a 100644 --- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestKerberosAuthenticator.java +++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestKerberosAuthenticator.java @@ -20,16 +20,36 @@ import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHand import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler; import org.junit.Assert; import org.junit.Before; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; +import org.junit.runner.RunWith; import org.junit.Test; import java.io.File; import java.net.HttpURLConnection; import java.net.URL; +import java.util.Arrays; +import java.util.Collection; import java.util.Properties; import java.util.concurrent.Callable; +@RunWith(Parameterized.class) public class TestKerberosAuthenticator extends KerberosSecurityTestcase { + private boolean useTomcat = false; + + public TestKerberosAuthenticator(boolean useTomcat) { + this.useTomcat = useTomcat; + } + + @Parameterized.Parameters + public static Collection booleans() { + return Arrays.asList(new Object[][] { + { false }, + { true } + }); + } + @Before public void setup() throws Exception { // create keytab @@ -53,7 +73,7 @@ public class TestKerberosAuthenticator extends KerberosSecurityTestcase { @Test(timeout=60000) public void testFallbacktoPseudoAuthenticator() throws Exception { - AuthenticatorTestCase auth = new AuthenticatorTestCase(); + AuthenticatorTestCase auth = new AuthenticatorTestCase(useTomcat); Properties props = new Properties(); props.setProperty(AuthenticationFilter.AUTH_TYPE, "simple"); props.setProperty(PseudoAuthenticationHandler.ANONYMOUS_ALLOWED, "false"); @@ -63,7 +83,7 @@ public class 
TestKerberosAuthenticator extends KerberosSecurityTestcase { @Test(timeout=60000) public void testFallbacktoPseudoAuthenticatorAnonymous() throws Exception { - AuthenticatorTestCase auth = new AuthenticatorTestCase(); + AuthenticatorTestCase auth = new AuthenticatorTestCase(useTomcat); Properties props = new Properties(); props.setProperty(AuthenticationFilter.AUTH_TYPE, "simple"); props.setProperty(PseudoAuthenticationHandler.ANONYMOUS_ALLOWED, "true"); @@ -73,7 +93,7 @@ public class TestKerberosAuthenticator extends KerberosSecurityTestcase { @Test(timeout=60000) public void testNotAuthenticated() throws Exception { - AuthenticatorTestCase auth = new AuthenticatorTestCase(); + AuthenticatorTestCase auth = new AuthenticatorTestCase(useTomcat); AuthenticatorTestCase.setAuthenticationHandlerConfig(getAuthenticationHandlerConfiguration()); auth.start(); try { @@ -89,7 +109,7 @@ public class TestKerberosAuthenticator extends KerberosSecurityTestcase { @Test(timeout=60000) public void testAuthentication() throws Exception { - final AuthenticatorTestCase auth = new AuthenticatorTestCase(); + final AuthenticatorTestCase auth = new AuthenticatorTestCase(useTomcat); AuthenticatorTestCase.setAuthenticationHandlerConfig( getAuthenticationHandlerConfiguration()); KerberosTestUtils.doAsClient(new Callable() { @@ -103,7 +123,7 @@ public class TestKerberosAuthenticator extends KerberosSecurityTestcase { @Test(timeout=60000) public void testAuthenticationPost() throws Exception { - final AuthenticatorTestCase auth = new AuthenticatorTestCase(); + final AuthenticatorTestCase auth = new AuthenticatorTestCase(useTomcat); AuthenticatorTestCase.setAuthenticationHandlerConfig( getAuthenticationHandlerConfiguration()); KerberosTestUtils.doAsClient(new Callable() { @@ -114,4 +134,32 @@ public class TestKerberosAuthenticator extends KerberosSecurityTestcase { } }); } + + @Test(timeout=60000) + public void testAuthenticationHttpClient() throws Exception { + final AuthenticatorTestCase auth = new AuthenticatorTestCase(useTomcat); + AuthenticatorTestCase.setAuthenticationHandlerConfig( + getAuthenticationHandlerConfiguration()); + KerberosTestUtils.doAsClient(new Callable() { + @Override + public Void call() throws Exception { + auth._testAuthenticationHttpClient(new KerberosAuthenticator(), false); + return null; + } + }); + } + + @Test(timeout=60000) + public void testAuthenticationHttpClientPost() throws Exception { + final AuthenticatorTestCase auth = new AuthenticatorTestCase(useTomcat); + AuthenticatorTestCase.setAuthenticationHandlerConfig( + getAuthenticationHandlerConfiguration()); + KerberosTestUtils.doAsClient(new Callable() { + @Override + public Void call() throws Exception { + auth._testAuthenticationHttpClient(new KerberosAuthenticator(), true); + return null; + } + }); + } } diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 45e38d392f9..0b9cfdcdecb 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -125,6 +125,12 @@ Trunk (Unreleased) HADOOP-10485. Remove dead classes in hadoop-streaming. (wheat9) + HADOOP-11013. CLASSPATH handling should be consolidated, debuggable (aw) + + HADOOP-11041. VersionInfo specifies subversion (Tsuyoshi OZAWA via aw) + + HADOOP-10373 create tools/hadoop-amazon for aws/EMR support (stevel) + BUG FIXES HADOOP-9451. Fault single-layer config if node group topology is enabled. @@ -232,9 +238,6 @@ Trunk (Unreleased) HADOOP-8813. 
Add InterfaceAudience and InterfaceStability annotations to RPC Server and Client classes. (Brandon Li via suresh) - HADOOP-8815. RandomDatum needs to override hashCode(). - (Brandon Li via suresh) - HADOOP-8436. NPE In getLocalPathForWrite ( path, conf ) when the required context item is not configured (Brahma Reddy Battula via harsh) @@ -323,62 +326,16 @@ Trunk (Unreleased) HADOOP-10996. Stop violence in the *_HOME (aw) + HADOOP-10748. HttpServer2 should not load JspServlet. (wheat9) + + HADOOP-11033. shell scripts ignore JAVA_HOME on OS X. (aw) + OPTIMIZATIONS HADOOP-7761. Improve the performance of raw comparisons. (todd) HADOOP-8589. ViewFs tests fail when tests and home dirs are nested (sanjay Radia) - BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS - - HADOOP-10734. Implement high-performance secure random number sources. - (Yi Liu via Colin Patrick McCabe) - - HADOOP-10603. Crypto input and output streams implementing Hadoop stream - interfaces. (Yi Liu and Charles Lamb) - - HADOOP-10628. Javadoc and few code style improvement for Crypto - input and output streams. (Yi Liu via clamb) - - HADOOP-10632. Minor improvements to Crypto input and output streams. - (Yi Liu) - - HADOOP-10635. Add a method to CryptoCodec to generate SRNs for IV. (Yi Liu) - - HADOOP-10653. Add a new constructor for CryptoInputStream that - receives current position of wrapped stream. (Yi Liu) - - HADOOP-10662. NullPointerException in CryptoInputStream while wrapped - stream is not ByteBufferReadable. Add tests using normal stream. (Yi Liu) - - HADOOP-10713. Refactor CryptoCodec#generateSecureRandom to take a byte[]. - (wang via yliu) - - HADOOP-10693. Implementation of AES-CTR CryptoCodec using JNI to OpenSSL. - (Yi Liu via cmccabe) - - HADOOP-10803. Update OpensslCipher#getInstance to accept CipherSuite#name - format. (Yi Liu) - - HADOOP-10735. Fall back AesCtrCryptoCodec implementation from OpenSSL to - JCE if non native support. (Yi Liu) - - HADOOP-10870. Failed to load OpenSSL cipher error logs on systems with old - openssl versions (cmccabe) - - HADOOP-10853. Refactor get instance of CryptoCodec and support create via - algorithm/mode/padding. (Yi Liu) - - HADOOP-10919. Copy command should preserve raw.* namespace - extended attributes. (clamb) - - HDFS-6873. Constants in CommandWithDestination should be static. (clamb) - - HADOOP-10871. incorrect prototype in OpensslSecureRandom.c (cmccabe) - - HADOOP-10886. CryptoCodec#getCodecclasses throws NPE when configurations not - loaded. (umamahesh) - Release 2.6.0 - UNRELEASED INCOMPATIBLE CHANGES @@ -516,6 +473,29 @@ Release 2.6.0 - UNRELEASED HADOOP-10998. Fix bash tab completion code to work (Jim Hester via aw) + HADOOP-10880. Move HTTP delegation tokens out of URL querystring to + a header. (tucu) + + HADOOP-11005. Fix HTTP content type for ReconfigurationServlet. + (Lei Xu via wang) + + HADOOP-10814. Update Tomcat version used by HttpFS and KMS to latest + 6.x version. (rkanter via tucu) + + HADOOP-10994. KeyProviderCryptoExtension should use CryptoCodec for + generation/decryption of keys. (tucu) + + HADOOP-11021. Configurable replication factor in the hadoop archive + command. (Zhe Zhang via wang) + + HADOOP-11030. Define a variable jackson.version instead of using constant + at multiple places. (Juan Yu via kasha) + + HADOOP-10990. Add missed NFSv3 request and response classes (brandonli) + + HADOOP-10863. KMS should have a blacklist for decrypting EEKs. + (asuresh via tucu) + OPTIMIZATIONS HADOOP-10838. 
Byte array native checksumming. (James Thomas via todd) @@ -568,6 +548,8 @@ Release 2.6.0 - UNRELEASED schedules incoming calls and multiplexes outgoing calls. (Chris Li via Arpit Agarwal) + HADOOP-10833. Remove unused cache in UserProvider. (Benoy Antony) + BUG FIXES HADOOP-10781. Unportable getgrouplist() usage breaks FreeBSD (Dmitry @@ -699,6 +681,68 @@ Release 2.6.0 - UNRELEASED HADOOP-10989. Work around buggy getgrouplist() implementations on Linux that return 0 on failure. (cnauroth) + HADOOP-8815. RandomDatum needs to override hashCode(). + (Brandon Li via suresh) + + BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS + + HADOOP-10734. Implement high-performance secure random number sources. + (Yi Liu via Colin Patrick McCabe) + + HADOOP-10603. Crypto input and output streams implementing Hadoop stream + interfaces. (Yi Liu and Charles Lamb) + + HADOOP-10628. Javadoc and few code style improvement for Crypto + input and output streams. (Yi Liu via clamb) + + HADOOP-10632. Minor improvements to Crypto input and output streams. + (Yi Liu) + + HADOOP-10635. Add a method to CryptoCodec to generate SRNs for IV. (Yi Liu) + + HADOOP-10653. Add a new constructor for CryptoInputStream that + receives current position of wrapped stream. (Yi Liu) + + HADOOP-10662. NullPointerException in CryptoInputStream while wrapped + stream is not ByteBufferReadable. Add tests using normal stream. (Yi Liu) + + HADOOP-10713. Refactor CryptoCodec#generateSecureRandom to take a byte[]. + (wang via yliu) + + HADOOP-10693. Implementation of AES-CTR CryptoCodec using JNI to OpenSSL. + (Yi Liu via cmccabe) + + HADOOP-10803. Update OpensslCipher#getInstance to accept CipherSuite#name + format. (Yi Liu) + + HADOOP-10735. Fall back AesCtrCryptoCodec implementation from OpenSSL to + JCE if non native support. (Yi Liu) + + HADOOP-10870. Failed to load OpenSSL cipher error logs on systems with old + openssl versions (cmccabe) + + HADOOP-10853. Refactor get instance of CryptoCodec and support create via + algorithm/mode/padding. (Yi Liu) + + HADOOP-10919. Copy command should preserve raw.* namespace + extended attributes. (clamb) + + HDFS-6873. Constants in CommandWithDestination should be static. (clamb) + + HADOOP-10871. incorrect prototype in OpensslSecureRandom.c (cmccabe) + + HADOOP-10886. CryptoCodec#getCodecclasses throws NPE when configurations not + loaded. (umamahesh) + -- + + HADOOP-10911. hadoop.auth cookie after HADOOP-10710 still not proper + according to RFC2109. (gchanan via tucu) + + HADOOP-11036. Add build directory to .gitignore (Tsuyoshi OZAWA via aw) + + HADOOP-11012. hadoop fs -text of zero-length file causes EOFException + (Eric Payne via jlowe) + Release 2.5.1 - UNRELEASED INCOMPATIBLE CHANGES @@ -706,11 +750,16 @@ Release 2.5.1 - UNRELEASED NEW FEATURES IMPROVEMENTS + + HADOOP-10956. Fix create-release script to include docs and necessary txt + files. (kasha) OPTIMIZATIONS BUG FIXES + HADOOP-11001. Fix test-patch to work with the git repo. 
(kasha) + Release 2.5.0 - 2014-08-11 INCOMPATIBLE CHANGES diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml index 09f1c5a2d32..ae495be0e65 100644 --- a/hadoop-common-project/hadoop-common/pom.xml +++ b/hadoop-common-project/hadoop-common/pom.xml @@ -224,6 +224,10 @@ compile + + org.htrace + htrace-core + org.apache.zookeeper zookeeper diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop b/hadoop-common-project/hadoop-common/src/main/bin/hadoop index 24c4d18e829..64c67587dc6 100755 --- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop +++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop @@ -114,6 +114,7 @@ case ${COMMAND} in ;; archive) CLASS=org.apache.hadoop.tools.HadoopArchives + hadoop_debug "Injecting TOOL_PATH into CLASSPATH" hadoop_add_classpath "${TOOL_PATH}" ;; checknative) @@ -136,10 +137,12 @@ case ${COMMAND} in ;; distch) CLASS=org.apache.hadoop.tools.DistCh + hadoop_debug "Injecting TOOL_PATH into CLASSPATH" hadoop_add_classpath "${TOOL_PATH}" ;; distcp) CLASS=org.apache.hadoop.tools.DistCp + hadoop_debug "Injecting TOOL_PATH into CLASSPATH" hadoop_add_classpath "${TOOL_PATH}" ;; fs) @@ -168,11 +171,11 @@ case ${COMMAND} in esac # Always respect HADOOP_OPTS and HADOOP_CLIENT_OPTS +hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS" HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}" hadoop_add_param HADOOP_OPTS Xmx "${JAVA_HEAP_MAX}" hadoop_finalize -export CLASSPATH hadoop_java_exec "${COMMAND}" "${CLASS}" "$@" diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh index 0cf8bcfc78e..40494b3ba4e 100644 --- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh +++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh @@ -129,6 +129,11 @@ while [[ -z "${_hadoop_common_done}" ]]; do hadoop_exit_with_usage 1 fi ;; + --debug) + shift + # shellcheck disable=SC2034 + HADOOP_SHELL_SCRIPT_DEBUG=true + ;; --help|-help|-h|help|--h|--\?|-\?|\?) hadoop_exit_with_usage 0 ;; diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh index 800e024485e..d430188cbf0 100644 --- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh +++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh @@ -21,6 +21,13 @@ function hadoop_error echo "$*" 1>&2 } +function hadoop_debug +{ + if [[ -n "${HADOOP_SHELL_SCRIPT_DEBUG}" ]]; then + echo "DEBUG: $*" 1>&2 + fi +} + function hadoop_bootstrap_init { # NOTE: This function is not user replaceable. 
@@ -62,6 +69,7 @@ function hadoop_bootstrap_init # defaults export HADOOP_OPTS=${HADOOP_OPTS:-"-Djava.net.preferIPv4Stack=true"} + hadoop_debug "Initial HADOOP_OPTS=${HADOOP_OPTS}" } function hadoop_find_confdir @@ -80,6 +88,8 @@ function hadoop_find_confdir conf_dir="etc/hadoop" fi export HADOOP_CONF_DIR="${HADOOP_CONF_DIR:-${HADOOP_PREFIX}/${conf_dir}}" + + hadoop_debug "HADOOP_CONF_DIR=${HADOOP_CONF_DIR}" } function hadoop_exec_hadoopenv @@ -105,6 +115,7 @@ function hadoop_basic_init # CLASSPATH initially contains $HADOOP_CONF_DIR CLASSPATH="${HADOOP_CONF_DIR}" + hadoop_debug "Initial CLASSPATH=${HADOOP_CONF_DIR}" if [[ -z "${HADOOP_COMMON_HOME}" ]] && [[ -d "${HADOOP_PREFIX}/${HADOOP_COMMON_DIR}" ]]; then @@ -116,19 +127,19 @@ function hadoop_basic_init # define HADOOP_HDFS_HOME if [[ -z "${HADOOP_HDFS_HOME}" ]] && - [[ -d "${HADOOP_PREFIX}/${HDFS_DIR}" ]]; then + [[ -d "${HADOOP_PREFIX}/${HDFS_DIR}" ]]; then export HADOOP_HDFS_HOME="${HADOOP_PREFIX}" fi # define HADOOP_YARN_HOME if [[ -z "${HADOOP_YARN_HOME}" ]] && - [[ -d "${HADOOP_PREFIX}/${YARN_DIR}" ]]; then + [[ -d "${HADOOP_PREFIX}/${YARN_DIR}" ]]; then export HADOOP_YARN_HOME="${HADOOP_PREFIX}" fi # define HADOOP_MAPRED_HOME if [[ -z "${HADOOP_MAPRED_HOME}" ]] && - [[ -d "${HADOOP_PREFIX}/${MAPRED_DIR}" ]]; then + [[ -d "${HADOOP_PREFIX}/${MAPRED_DIR}" ]]; then export HADOOP_MAPRED_HOME="${HADOOP_PREFIX}" fi @@ -274,6 +285,9 @@ function hadoop_add_param if [[ ! ${!1} =~ $2 ]] ; then # shellcheck disable=SC2086 eval $1="'${!1} $3'" + hadoop_debug "$1 accepted $3" + else + hadoop_debug "$1 declined $3" fi } @@ -283,8 +297,8 @@ function hadoop_add_classpath # $1 = directory, file, wildcard, whatever to add # $2 = before or after, which determines where in the # classpath this object should go. default is after - # return 0 = success - # return 1 = failure (duplicate, doesn't exist, whatever) + # return 0 = success (added or duplicate) + # return 1 = failure (doesn't exist, whatever) # However, with classpath (& JLP), we can do dedupe # along with some sanity checking (e.g., missing directories) @@ -295,23 +309,29 @@ function hadoop_add_classpath if [[ $1 =~ ^.*\*$ ]]; then local mp=$(dirname "$1") if [[ ! -d "${mp}" ]]; then + hadoop_debug "Rejected CLASSPATH: $1 (not a dir)" return 1 fi # no wildcard in the middle, so check existence # (doesn't matter *what* it is) elif [[ ! $1 =~ ^.*\*.*$ ]] && [[ ! 
-e "$1" ]]; then + hadoop_debug "Rejected CLASSPATH: $1 (does not exist)" return 1 fi - if [[ -z "${CLASSPATH}" ]]; then CLASSPATH=$1 + hadoop_debug "Initial CLASSPATH=$1" elif [[ ":${CLASSPATH}:" != *":$1:"* ]]; then if [[ "$2" = "before" ]]; then CLASSPATH="$1:${CLASSPATH}" + hadoop_debug "Prepend CLASSPATH: $1" else CLASSPATH+=:$1 + hadoop_debug "Append CLASSPATH: $1" fi + else + hadoop_debug "Dupe CLASSPATH: $1" fi return 0 } @@ -331,14 +351,20 @@ function hadoop_add_colonpath if [[ -z "${!1}" ]]; then # shellcheck disable=SC2086 eval $1="'$2'" + hadoop_debug "Initial colonpath($1): $2" elif [[ "$3" = "before" ]]; then # shellcheck disable=SC2086 eval $1="'$2:${!1}'" + hadoop_debug "Prepend colonpath($1): $2" else # shellcheck disable=SC2086 eval $1+="'$2'" + hadoop_debug "Append colonpath($1): $2" fi + return 0 fi + hadoop_debug "Rejected colonpath($1): $2" + return 1 } function hadoop_add_javalibpath @@ -397,6 +423,7 @@ function hadoop_add_to_classpath_hdfs function hadoop_add_to_classpath_yarn { + local i # # get all of the yarn jars+config in the path # @@ -459,7 +486,7 @@ function hadoop_add_to_classpath_userpath local i local j let c=0 - + if [[ -n "${HADOOP_CLASSPATH}" ]]; then # I wonder if Java runs on VMS. for i in $(echo "${HADOOP_CLASSPATH}" | tr : '\n'); do @@ -490,10 +517,12 @@ function hadoop_os_tricks # examples for OS X and Linux. Vendors, replace this with your special sauce. case ${HADOOP_OS_TYPE} in Darwin) - if [[ -x /usr/libexec/java_home ]]; then - export JAVA_HOME="$(/usr/libexec/java_home)" - else - export JAVA_HOME=/Library/Java/Home + if [[ -z "${JAVA_HOME}" ]]; then + if [[ -x /usr/libexec/java_home ]]; then + export JAVA_HOME="$(/usr/libexec/java_home)" + else + export JAVA_HOME=/Library/Java/Home + fi fi ;; Linux) @@ -715,6 +744,11 @@ function hadoop_java_exec local command=$1 local class=$2 shift 2 + + hadoop_debug "Final CLASSPATH: ${CLASSPATH}" + hadoop_debug "Final HADOOP_OPTS: ${HADOOP_OPTS}" + + export CLASSPATH #shellcheck disable=SC2086 exec "${JAVA}" "-Dproc_${command}" ${HADOOP_OPTS} "${class}" "$@" } @@ -727,6 +761,11 @@ function hadoop_start_daemon local command=$1 local class=$2 shift 2 + + hadoop_debug "Final CLASSPATH: ${CLASSPATH}" + hadoop_debug "Final HADOOP_OPTS: ${HADOOP_OPTS}" + + export CLASSPATH #shellcheck disable=SC2086 exec "${JAVA}" "-Dproc_${command}" ${HADOOP_OPTS} "${class}" "$@" } @@ -807,6 +846,9 @@ function hadoop_start_secure_daemon # note that shellcheck will throw a # bogus for-our-use-case 2086 here. # it doesn't properly support multi-line situations + + hadoop_debug "Final CLASSPATH: ${CLASSPATH}" + hadoop_debug "Final HADOOP_OPTS: ${HADOOP_OPTS}" exec "${jsvc}" \ "-Dproc_${daemonname}" \ diff --git a/hadoop-common-project/hadoop-common/src/main/bin/rcc b/hadoop-common-project/hadoop-common/src/main/bin/rcc index dc6158a8ea4..74253539fbe 100755 --- a/hadoop-common-project/hadoop-common/src/main/bin/rcc +++ b/hadoop-common-project/hadoop-common/src/main/bin/rcc @@ -23,6 +23,7 @@ this="$bin/$script" DEFAULT_LIBEXEC_DIR="$bin"/../libexec HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} +# shellcheck disable=SC2034 HADOOP_NEW_CONFIG=true . 
"$HADOOP_LIBEXEC_DIR/hadoop-config.sh" @@ -33,10 +34,10 @@ fi CLASS='org.apache.hadoop.record.compiler.generated.Rcc' # Always respect HADOOP_OPTS and HADOOP_CLIENT_OPTS -HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS" +hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS" +HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}" -hadoop_add_param HADOOP_OPTS Xmx "$JAVA_HEAP_MAX" +hadoop_add_param HADOOP_OPTS Xmx "${JAVA_HEAP_MAX}" hadoop_finalize -export CLASSPATH hadoop_java_exec rcc "${CLASS}" "$@" diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationServlet.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationServlet.java index 3fa162b87af..eb1fb6b7d58 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationServlet.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationServlet.java @@ -200,6 +200,7 @@ public class ReconfigurationServlet extends HttpServlet { protected void doGet(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException { LOG.info("GET"); + resp.setContentType("text/html"); PrintWriter out = resp.getWriter(); Reconfigurable reconf = getReconfigurable(req); @@ -214,6 +215,7 @@ public class ReconfigurationServlet extends HttpServlet { protected void doPost(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException { LOG.info("POST"); + resp.setContentType("text/html"); PrintWriter out = resp.getWriter(); Reconfigurable reconf = getReconfigurable(req); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoCodec.java index 9de7f95200f..9bd1846552d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoCodec.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoCodec.java @@ -24,6 +24,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.util.PerformanceAdvisory; import org.apache.hadoop.util.ReflectionUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -48,7 +49,7 @@ public abstract class CryptoCodec implements Configurable { * * @param conf * the configuration - * @param CipherSuite + * @param cipherSuite * algorithm/mode/padding * @return CryptoCodec the codec object. Null value will be returned if no * crypto codec classes with cipher suite configured. 
@@ -66,15 +67,18 @@ public abstract class CryptoCodec implements Configurable { CryptoCodec c = ReflectionUtils.newInstance(klass, conf); if (c.getCipherSuite().getName().equals(cipherSuite.getName())) { if (codec == null) { - LOG.debug("Using crypto codec {}.", klass.getName()); + PerformanceAdvisory.LOG.debug("Using crypto codec {}.", + klass.getName()); codec = c; } } else { - LOG.warn("Crypto codec {} doesn't meet the cipher suite {}.", + PerformanceAdvisory.LOG.debug( + "Crypto codec {} doesn't meet the cipher suite {}.", klass.getName(), cipherSuite.getName()); } } catch (Exception e) { - LOG.warn("Crypto codec {} is not available.", klass.getName()); + PerformanceAdvisory.LOG.debug("Crypto codec {} is not available.", + klass.getName()); } } @@ -108,7 +112,8 @@ public abstract class CryptoCodec implements Configurable { cipherSuite.getConfigSuffix(); String codecString = conf.get(configName); if (codecString == null) { - LOG.warn("No crypto codec classes with cipher suite configured."); + PerformanceAdvisory.LOG.debug( + "No crypto codec classes with cipher suite configured."); return null; } for (String c : Splitter.on(',').trimResults().omitEmptyStrings(). @@ -117,9 +122,9 @@ public abstract class CryptoCodec implements Configurable { Class cls = conf.getClassByName(c); result.add(cls.asSubclass(CryptoCodec.class)); } catch (ClassCastException e) { - LOG.warn("Class " + c + " is not a CryptoCodec."); + PerformanceAdvisory.LOG.debug("Class {} is not a CryptoCodec.", c); } catch (ClassNotFoundException e) { - LOG.warn("Crypto codec " + c + " not found."); + PerformanceAdvisory.LOG.debug("Crypto codec {} not found.", c); } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslCipher.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslCipher.java index 264652b202a..2eb16ee4747 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslCipher.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslCipher.java @@ -32,6 +32,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.util.NativeCodeLoader; import com.google.common.base.Preconditions; +import org.apache.hadoop.util.PerformanceAdvisory; /** * OpenSSL cipher using JNI. 
@@ -82,6 +83,7 @@ public final class OpensslCipher { String loadingFailure = null; try { if (!NativeCodeLoader.buildSupportsOpenssl()) { + PerformanceAdvisory.LOG.debug("Build does not support openssl"); loadingFailure = "build does not support openssl."; } else { initIDs(); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/JavaKeyStoreProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/JavaKeyStoreProvider.java index 250315177a2..30583eb576c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/JavaKeyStoreProvider.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/JavaKeyStoreProvider.java @@ -108,6 +108,7 @@ public class JavaKeyStoreProvider extends KeyProvider { private final Map cache = new HashMap(); private JavaKeyStoreProvider(URI uri, Configuration conf) throws IOException { + super(conf); this.uri = uri; path = ProviderUtils.unnestUri(uri); fs = path.getFileSystem(conf); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java index a34ae10a71a..36ccbada0bc 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java @@ -56,6 +56,8 @@ public abstract class KeyProvider { "hadoop.security.key.default.bitlength"; public static final int DEFAULT_BITLENGTH = 128; + private final Configuration conf; + /** * The combination of both the key version name and the key material. */ @@ -353,6 +355,24 @@ public abstract class KeyProvider { } } + /** + * Constructor. + * + * @param conf configuration for the provider + */ + public KeyProvider(Configuration conf) { + this.conf = new Configuration(conf); + } + + /** + * Return the provider configuration. + * + * @return the provider configuration + */ + public Configuration getConf() { + return conf; + } + /** * A helper function to create an options object. 
* @param conf the configuration to use diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java index 026f285f4c4..e2fb5cb3b8e 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java @@ -19,6 +19,7 @@ package org.apache.hadoop.crypto.key; import java.io.IOException; +import java.nio.ByteBuffer; import java.security.GeneralSecurityException; import java.security.SecureRandom; @@ -29,6 +30,9 @@ import javax.crypto.spec.SecretKeySpec; import com.google.common.base.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.crypto.CryptoCodec; +import org.apache.hadoop.crypto.Decryptor; +import org.apache.hadoop.crypto.Encryptor; /** * A KeyProvider with Cryptographic Extensions specifically for generating @@ -239,18 +243,25 @@ public class KeyProviderCryptoExtension extends Preconditions.checkNotNull(encryptionKey, "No KeyVersion exists for key '%s' ", encryptionKeyName); // Generate random bytes for new key and IV - Cipher cipher = Cipher.getInstance("AES/CTR/NoPadding"); + + CryptoCodec cc = CryptoCodec.getInstance(keyProvider.getConf()); final byte[] newKey = new byte[encryptionKey.getMaterial().length]; - RANDOM.get().nextBytes(newKey); - final byte[] iv = new byte[cipher.getBlockSize()]; - RANDOM.get().nextBytes(iv); + cc.generateSecureRandom(newKey); + final byte[] iv = new byte[cc.getCipherSuite().getAlgorithmBlockSize()]; + cc.generateSecureRandom(iv); // Encryption key IV is derived from new key's IV final byte[] encryptionIV = EncryptedKeyVersion.deriveIV(iv); - // Encrypt the new key - cipher.init(Cipher.ENCRYPT_MODE, - new SecretKeySpec(encryptionKey.getMaterial(), "AES"), - new IvParameterSpec(encryptionIV)); - final byte[] encryptedKey = cipher.doFinal(newKey); + Encryptor encryptor = cc.createEncryptor(); + encryptor.init(encryptionKey.getMaterial(), encryptionIV); + int keyLen = newKey.length; + ByteBuffer bbIn = ByteBuffer.allocateDirect(keyLen); + ByteBuffer bbOut = ByteBuffer.allocateDirect(keyLen); + bbIn.put(newKey); + bbIn.flip(); + encryptor.encrypt(bbIn, bbOut); + bbOut.flip(); + byte[] encryptedKey = new byte[keyLen]; + bbOut.get(encryptedKey); return new EncryptedKeyVersion(encryptionKeyName, encryptionKey.getVersionName(), iv, new KeyVersion(encryptionKey.getName(), EEK, encryptedKey)); @@ -274,19 +285,25 @@ public class KeyProviderCryptoExtension extends KeyProviderCryptoExtension.EEK, encryptedKeyVersion.getEncryptedKeyVersion().getVersionName() ); - final byte[] encryptionKeyMaterial = encryptionKey.getMaterial(); + // Encryption key IV is determined from encrypted key's IV final byte[] encryptionIV = EncryptedKeyVersion.deriveIV(encryptedKeyVersion.getEncryptedKeyIv()); - // Init the cipher with encryption key parameters - Cipher cipher = Cipher.getInstance("AES/CTR/NoPadding"); - cipher.init(Cipher.DECRYPT_MODE, - new SecretKeySpec(encryptionKeyMaterial, "AES"), - new IvParameterSpec(encryptionIV)); - // Decrypt the encrypted key + + CryptoCodec cc = CryptoCodec.getInstance(keyProvider.getConf()); + Decryptor decryptor = cc.createDecryptor(); + decryptor.init(encryptionKey.getMaterial(), encryptionIV); final KeyVersion encryptedKV = 
encryptedKeyVersion.getEncryptedKeyVersion(); - final byte[] decryptedKey = cipher.doFinal(encryptedKV.getMaterial()); + int keyLen = encryptedKV.getMaterial().length; + ByteBuffer bbIn = ByteBuffer.allocateDirect(keyLen); + ByteBuffer bbOut = ByteBuffer.allocateDirect(keyLen); + bbIn.put(encryptedKV.getMaterial()); + bbIn.flip(); + decryptor.decrypt(bbIn, bbOut); + bbOut.flip(); + byte[] decryptedKey = new byte[keyLen]; + bbOut.get(decryptedKey); return new KeyVersion(encryptionKey.getName(), EK, decryptedKey); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderExtension.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderExtension.java index ba048b5a3e9..ec4c3b745ea 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderExtension.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderExtension.java @@ -40,6 +40,7 @@ public abstract class KeyProviderExtension private E extension; public KeyProviderExtension(KeyProvider keyProvider, E extensions) { + super(keyProvider.getConf()); this.keyProvider = keyProvider; this.extension = extensions; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/UserProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/UserProvider.java index e09b3f8d432..bf8f2fed063 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/UserProvider.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/UserProvider.java @@ -44,7 +44,8 @@ public class UserProvider extends KeyProvider { private final Credentials credentials; private final Map cache = new HashMap(); - private UserProvider() throws IOException { + private UserProvider(Configuration conf) throws IOException { + super(conf); user = UserGroupInformation.getCurrentUser(); credentials = user.getCredentials(); } @@ -145,7 +146,7 @@ public class UserProvider extends KeyProvider { public KeyProvider createProvider(URI providerName, Configuration conf) throws IOException { if (SCHEME_NAME.equals(providerName.getScheme())) { - return new UserProvider(); + return new UserProvider(conf); } return null; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java index bce1eb5dd3d..dc9e6cb96f5 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java @@ -283,6 +283,7 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension, } public KMSClientProvider(URI uri, Configuration conf) throws IOException { + super(conf); Path path = ProviderUtils.unnestUri(uri); URL url = path.toUri().toURL(); kmsUrl = createServiceURL(url); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/random/OpensslSecureRandom.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/random/OpensslSecureRandom.java index b1fa9883373..6c53a0a2179 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/random/OpensslSecureRandom.java +++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/random/OpensslSecureRandom.java @@ -25,6 +25,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.util.NativeCodeLoader; import com.google.common.base.Preconditions; +import org.apache.hadoop.util.PerformanceAdvisory; /** * OpenSSL secure random using JNI. @@ -67,6 +68,8 @@ public class OpensslSecureRandom extends Random { public OpensslSecureRandom() { if (!nativeEnabled) { + PerformanceAdvisory.LOG.debug("Build does not support openssl, " + + "falling back to Java SecureRandom."); fallback = new java.security.SecureRandom(); } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java index 511ca7f7549..c8d1b69ddaf 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java @@ -381,7 +381,8 @@ public abstract class ChecksumFileSystem extends FilterFileSystem { long blockSize, Progressable progress) throws IOException { - super(DataChecksum.newCrc32(), fs.getBytesPerSum(), 4); + super(DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, + fs.getBytesPerSum())); int bytesPerSum = fs.getBytesPerSum(); this.datas = fs.getRawFileSystem().create(file, overwrite, bufferSize, replication, blockSize, progress); @@ -405,10 +406,11 @@ public abstract class ChecksumFileSystem extends FilterFileSystem { } @Override - protected void writeChunk(byte[] b, int offset, int len, byte[] checksum) + protected void writeChunk(byte[] b, int offset, int len, byte[] checksum, + int ckoff, int cklen) throws IOException { datas.write(b, offset, len); - sums.write(checksum); + sums.write(checksum, ckoff, cklen); } @Override diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java index 4be3b291190..ab5cd13e0c3 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java @@ -337,7 +337,8 @@ public abstract class ChecksumFs extends FilterFs { final short replication, final long blockSize, final Progressable progress, final ChecksumOpt checksumOpt, final boolean createParent) throws IOException { - super(DataChecksum.newCrc32(), fs.getBytesPerSum(), 4); + super(DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, + fs.getBytesPerSum())); // checksumOpt is passed down to the raw fs. Unless it implements // checksum impelemts internally, checksumOpt will be ignored. 
@@ -370,10 +371,11 @@ public abstract class ChecksumFs extends FilterFs { } @Override - protected void writeChunk(byte[] b, int offset, int len, byte[] checksum) + protected void writeChunk(byte[] b, int offset, int len, byte[] checksum, + int ckoff, int cklen) throws IOException { datas.write(b, offset, len); - sums.write(checksum); + sums.write(checksum, ckoff, cklen); } @Override diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java index 49c919af196..19cbb6f9354 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java @@ -18,13 +18,14 @@ package org.apache.hadoop.fs; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.util.DataChecksum; + import java.io.IOException; import java.io.OutputStream; import java.util.zip.Checksum; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; - /** * This is a generic output stream for generating checksums for * data before it is written to the underlying stream @@ -33,7 +34,7 @@ import org.apache.hadoop.classification.InterfaceStability; @InterfaceStability.Unstable abstract public class FSOutputSummer extends OutputStream { // data checksum - private Checksum sum; + private final DataChecksum sum; // internal buffer for storing data before it is checksumed private byte buf[]; // internal buffer for storing checksum @@ -41,18 +42,24 @@ abstract public class FSOutputSummer extends OutputStream { // The number of valid bytes in the buffer. private int count; - protected FSOutputSummer(Checksum sum, int maxChunkSize, int checksumSize) { + // We want this value to be a multiple of 3 because the native code checksums + // 3 chunks simultaneously. The chosen value of 9 strikes a balance between + // limiting the number of JNI calls and flushing to the underlying stream + // relatively frequently. 
+ private static final int BUFFER_NUM_CHUNKS = 9; + + protected FSOutputSummer(DataChecksum sum) { this.sum = sum; - this.buf = new byte[maxChunkSize]; - this.checksum = new byte[checksumSize]; + this.buf = new byte[sum.getBytesPerChecksum() * BUFFER_NUM_CHUNKS]; + this.checksum = new byte[sum.getChecksumSize() * BUFFER_NUM_CHUNKS]; this.count = 0; } /* write the data chunk in b staring at offset with - * a length of len, and its checksum + * a length of len > 0, and its checksum */ - protected abstract void writeChunk(byte[] b, int offset, int len, byte[] checksum) - throws IOException; + protected abstract void writeChunk(byte[] b, int bOffset, int bLen, + byte[] checksum, int checksumOffset, int checksumLen) throws IOException; /** * Check if the implementing OutputStream is closed and should no longer @@ -66,7 +73,6 @@ abstract public class FSOutputSummer extends OutputStream { /** Write one byte */ @Override public synchronized void write(int b) throws IOException { - sum.update(b); buf[count++] = (byte)b; if(count == buf.length) { flushBuffer(); @@ -111,18 +117,17 @@ abstract public class FSOutputSummer extends OutputStream { */ private int write1(byte b[], int off, int len) throws IOException { if(count==0 && len>=buf.length) { - // local buffer is empty and user data has one chunk - // checksum and output data + // local buffer is empty and user buffer size >= local buffer size, so + // simply checksum the user buffer and send it directly to the underlying + // stream final int length = buf.length; - sum.update(b, off, length); - writeChecksumChunk(b, off, length, false); + writeChecksumChunks(b, off, length); return length; } // copy user data to local buffer int bytesToCopy = buf.length-count; bytesToCopy = (len params = ImmutableMap. builder() + .put("acceptRanges", "true") + .put("dirAllowed", "false") + .put("gzip", "true") + .put("useFileMappedBuffer", "true") + .build(); + holder.setInitParameters(params); + ctx.setWelcomeFiles(new String[] {"index.html"}); + ctx.addServlet(holder, "/"); ctx.setDisplayName(name); ctx.setContextPath("/"); ctx.setWar(appDir + "/" + name); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java index fafa29543e0..53d31d6fb96 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java @@ -37,6 +37,7 @@ import org.apache.hadoop.fs.HardLink; import org.apache.hadoop.io.SecureIOUtils.AlreadyExistsException; import org.apache.hadoop.util.NativeCodeLoader; import org.apache.hadoop.util.Shell; +import org.apache.hadoop.util.PerformanceAdvisory; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -196,7 +197,7 @@ public class NativeIO { // This can happen if the user has an older version of libhadoop.so // installed - in this case we can continue without native IO // after warning - LOG.error("Unable to initialize NativeIO libraries", t); + PerformanceAdvisory.LOG.debug("Unable to initialize NativeIO libraries", t); } } } @@ -574,7 +575,7 @@ public class NativeIO { // This can happen if the user has an older version of libhadoop.so // installed - in this case we can continue without native IO // after warning - LOG.error("Unable to initialize NativeIO libraries", t); + PerformanceAdvisory.LOG.debug("Unable to initialize NativeIO 
libraries", t); } } } @@ -593,7 +594,7 @@ public class NativeIO { // This can happen if the user has an older version of libhadoop.so // installed - in this case we can continue without native IO // after warning - LOG.error("Unable to initialize NativeIO libraries", t); + PerformanceAdvisory.LOG.debug("Unable to initialize NativeIO libraries", t); } } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java index 158445f8367..2f482c290ed 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java @@ -88,6 +88,7 @@ import org.apache.hadoop.util.ProtoUtil; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; +import org.htrace.Trace; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; @@ -694,6 +695,9 @@ public class Client { if (LOG.isDebugEnabled()) { LOG.debug("Connecting to "+server); } + if (Trace.isTracing()) { + Trace.addTimelineAnnotation("IPC client connecting to " + server); + } short numRetries = 0; Random rand = null; while (true) { @@ -758,6 +762,10 @@ public class Client { // update last activity time touch(); + if (Trace.isTracing()) { + Trace.addTimelineAnnotation("IPC client connected to " + server); + } + // start the receiver thread after the socket connection has been set // up start(); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java index 64615d22f85..0ccdb71d0ee 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java @@ -48,6 +48,9 @@ import org.apache.hadoop.security.token.SecretManager; import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.util.ProtoUtil; import org.apache.hadoop.util.Time; +import org.htrace.Sampler; +import org.htrace.Trace; +import org.htrace.TraceScope; import com.google.common.annotations.VisibleForTesting; import com.google.protobuf.BlockingService; @@ -191,6 +194,16 @@ public class ProtobufRpcEngine implements RpcEngine { + method.getName() + "]"); } + TraceScope traceScope = null; + // if Tracing is on then start a new span for this rpc. + // guard it in the if statement to make sure there isn't + // any extra string manipulation. + if (Trace.isTracing()) { + traceScope = Trace.startSpan( + method.getDeclaringClass().getCanonicalName() + + "." 
+ method.getName()); + } + RequestHeaderProto rpcRequestHeader = constructRpcRequestHeader(method); if (LOG.isTraceEnabled()) { @@ -212,8 +225,13 @@ public class ProtobufRpcEngine implements RpcEngine { remoteId + ": " + method.getName() + " {" + e + "}"); } - + if (Trace.isTracing()) { + traceScope.getSpan().addTimelineAnnotation( + "Call got exception: " + e.getMessage()); + } throw new ServiceException(e); + } finally { + if (traceScope != null) traceScope.close(); } if (LOG.isDebugEnabled()) { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java index 24dd0c21b82..021e03537b4 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java @@ -79,6 +79,7 @@ import org.apache.hadoop.conf.Configuration.IntegerRanges; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.io.DataOutputBuffer; +import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.Writable; import org.apache.hadoop.io.WritableUtils; import org.apache.hadoop.ipc.ProtobufRpcEngine.RpcResponseMessageWrapper; @@ -115,6 +116,10 @@ import org.apache.hadoop.util.ProtoUtil; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; +import org.htrace.Span; +import org.htrace.Trace; +import org.htrace.TraceInfo; +import org.htrace.TraceScope; import com.google.common.annotations.VisibleForTesting; import com.google.protobuf.ByteString; @@ -506,6 +511,7 @@ public abstract class Server { private ByteBuffer rpcResponse; // the response for this call private final RPC.RpcKind rpcKind; private final byte[] clientId; + private final Span traceSpan; // the tracing span on the server side public Call(int id, int retryCount, Writable param, Connection connection) { @@ -515,6 +521,11 @@ public abstract class Server { public Call(int id, int retryCount, Writable param, Connection connection, RPC.RpcKind kind, byte[] clientId) { + this(id, retryCount, param, connection, kind, clientId, null); + } + + public Call(int id, int retryCount, Writable param, Connection connection, + RPC.RpcKind kind, byte[] clientId, Span span) { this.callId = id; this.retryCount = retryCount; this.rpcRequest = param; @@ -523,6 +534,7 @@ public abstract class Server { this.rpcResponse = null; this.rpcKind = kind; this.clientId = clientId; + this.traceSpan = span; } @Override @@ -1921,9 +1933,18 @@ public abstract class Server { RpcErrorCodeProto.FATAL_DESERIALIZING_REQUEST, err); } + Span traceSpan = null; + if (header.hasTraceInfo()) { + // If the incoming RPC included tracing info, always continue the trace + TraceInfo parentSpan = new TraceInfo(header.getTraceInfo().getTraceId(), + header.getTraceInfo().getParentId()); + traceSpan = Trace.startSpan(rpcRequest.toString(), parentSpan).detach(); + } + Call call = new Call(header.getCallId(), header.getRetryCount(), - rpcRequest, this, ProtoUtil.convert(header.getRpcKind()), header - .getClientId().toByteArray()); + rpcRequest, this, ProtoUtil.convert(header.getRpcKind()), + header.getClientId().toByteArray(), traceSpan); + callQueue.put(call); // queue the call; maybe blocked here incRpcCount(); // Increment the rpc count } @@ -2067,6 +2088,7 @@ public abstract class Server { ByteArrayOutputStream buf = new 
ByteArrayOutputStream(INITIAL_RESP_BUF_SIZE); while (running) { + TraceScope traceScope = null; try { final Call call = callQueue.take(); // pop the queue; maybe blocked here if (LOG.isDebugEnabled()) { @@ -2083,6 +2105,10 @@ public abstract class Server { Writable value = null; CurCall.set(call); + if (call.traceSpan != null) { + traceScope = Trace.continueSpan(call.traceSpan); + } + try { // Make the call as the user via Subject.doAs, thus associating // the call with the Subject @@ -2156,9 +2182,22 @@ public abstract class Server { } catch (InterruptedException e) { if (running) { // unexpected -- log it LOG.info(Thread.currentThread().getName() + " unexpectedly interrupted", e); + if (Trace.isTracing()) { + traceScope.getSpan().addTimelineAnnotation("unexpectedly interrupted: " + + StringUtils.stringifyException(e)); + } } } catch (Exception e) { LOG.info(Thread.currentThread().getName() + " caught an exception", e); + if (Trace.isTracing()) { + traceScope.getSpan().addTimelineAnnotation("Exception: " + + StringUtils.stringifyException(e)); + } + } finally { + if (traceScope != null) { + traceScope.close(); + } + IOUtils.cleanup(LOG, traceScope); } } LOG.debug(Thread.currentThread().getName() + ": exiting"); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java index 04ab4dc2699..4b2dfe0de10 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java @@ -41,6 +41,8 @@ import org.apache.hadoop.util.Time; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.*; +import org.htrace.Trace; +import org.htrace.TraceScope; /** An RpcEngine implementation for Writable data. */ @InterfaceStability.Evolving @@ -227,9 +229,19 @@ public class WritableRpcEngine implements RpcEngine { if (LOG.isDebugEnabled()) { startTime = Time.now(); } - - ObjectWritable value = (ObjectWritable) - client.call(RPC.RpcKind.RPC_WRITABLE, new Invocation(method, args), remoteId); + TraceScope traceScope = null; + if (Trace.isTracing()) { + traceScope = Trace.startSpan( + method.getDeclaringClass().getCanonicalName() + + "." 
+ method.getName()); + } + ObjectWritable value; + try { + value = (ObjectWritable) + client.call(RPC.RpcKind.RPC_WRITABLE, new Invocation(method, args), remoteId); + } finally { + if (traceScope != null) traceScope.close(); + } if (LOG.isDebugEnabled()) { long callTime = Time.now() - startTime; LOG.debug("Call: " + method.getName() + " " + callTime); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsMappingWithFallback.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsMappingWithFallback.java index 908ca1468d1..40333fcc5df 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsMappingWithFallback.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsMappingWithFallback.java @@ -24,6 +24,7 @@ import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.util.NativeCodeLoader; +import org.apache.hadoop.util.PerformanceAdvisory; public class JniBasedUnixGroupsMappingWithFallback implements GroupMappingServiceProvider { @@ -37,7 +38,7 @@ public class JniBasedUnixGroupsMappingWithFallback implements if (NativeCodeLoader.isNativeCodeLoaded()) { this.impl = new JniBasedUnixGroupsMapping(); } else { - LOG.debug("Falling back to shell based"); + PerformanceAdvisory.LOG.debug("Falling back to shell based"); this.impl = new ShellBasedUnixGroupsMapping(); } if (LOG.isDebugEnabled()){ diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/UserProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/UserProvider.java index 99d6d0060d8..262cbadd71a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/UserProvider.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/UserProvider.java @@ -21,9 +21,7 @@ package org.apache.hadoop.security.alias; import java.io.IOException; import java.net.URI; import java.util.ArrayList; -import java.util.HashMap; import java.util.List; -import java.util.Map; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; @@ -41,8 +39,6 @@ public class UserProvider extends CredentialProvider { public static final String SCHEME_NAME = "user"; private final UserGroupInformation user; private final Credentials credentials; - private final Map cache = new HashMap(); private UserProvider() throws IOException { user = UserGroupInformation.getCurrentUser(); @@ -86,7 +82,6 @@ public class UserProvider extends CredentialProvider { throw new IOException("Credential " + name + " does not exist in " + this); } - cache.remove(name); } @Override diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/AccessControlList.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/AccessControlList.java index f78602ab040..d250df10b2e 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/AccessControlList.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/AccessControlList.java @@ -221,7 +221,13 @@ public class AccessControlList implements Writable { return groups; } - public boolean isUserAllowed(UserGroupInformation ugi) { + /** + * Checks 
if a user represented by the provided {@link UserGroupInformation} + * is a member of the Access Control List + * @param ugi UserGroupInformation to check if contained in the ACL + * @return true if ugi is member of the list + */ + public final boolean isUserInList(UserGroupInformation ugi) { if (allAllowed || users.contains(ugi.getShortUserName())) { return true; } else { @@ -234,6 +240,10 @@ public class AccessControlList implements Writable { return false; } + public boolean isUserAllowed(UserGroupInformation ugi) { + return isUserInList(ugi); + } + /** * Returns descriptive way of users and groups that are part of this ACL. * Use {@link #getAclString()} to get the exact String that can be given to diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticatedURL.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticatedURL.java index d955ada8571..5aeb1772c81 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticatedURL.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticatedURL.java @@ -125,6 +125,8 @@ public class DelegationTokenAuthenticatedURL extends AuthenticatedURL { } } + private boolean useQueryStringforDelegationToken = false; + /** * Creates an DelegationTokenAuthenticatedURL. *

@@ -170,6 +172,34 @@ public class DelegationTokenAuthenticatedURL extends AuthenticatedURL { super(obtainDelegationTokenAuthenticator(authenticator), connConfigurator); } + /** + * Sets if delegation token should be transmitted in the URL query string. + * By default it is transmitted using the + * {@link DelegationTokenAuthenticator#DELEGATION_TOKEN_HEADER} HTTP header. + *

+ * This method is provided to enable WebHDFS backwards compatibility. + * + * @param useQueryString TRUE if the token is transmitted in the + * URL query string, FALSE if the delegation token is transmitted + * using the {@link DelegationTokenAuthenticator#DELEGATION_TOKEN_HEADER} HTTP + * header. + */ + @Deprecated + protected void setUseQueryStringForDelegationToken(boolean useQueryString) { + useQueryStringforDelegationToken = useQueryString; + } + + /** + * Returns if delegation token is transmitted as a HTTP header. + * + * @return TRUE if the token is transmitted in the URL query + * string, FALSE if the delegation token is transmitted using the + * {@link DelegationTokenAuthenticator#DELEGATION_TOKEN_HEADER} HTTP header. + */ + public boolean useQueryStringForDelegationToken() { + return useQueryStringforDelegationToken; + } + /** * Returns an authenticated {@link HttpURLConnection}, it uses a Delegation * Token only if the given auth token is an instance of {@link Token} and @@ -235,23 +265,41 @@ public class DelegationTokenAuthenticatedURL extends AuthenticatedURL { * @throws IOException if an IO error occurred. * @throws AuthenticationException if an authentication exception occurred. */ + @SuppressWarnings("unchecked") public HttpURLConnection openConnection(URL url, Token token, String doAs) throws IOException, AuthenticationException { Preconditions.checkNotNull(url, "url"); Preconditions.checkNotNull(token, "token"); Map extraParams = new HashMap(); - - // delegation token - Credentials creds = UserGroupInformation.getCurrentUser().getCredentials(); - if (!creds.getAllTokens().isEmpty()) { - InetSocketAddress serviceAddr = new InetSocketAddress(url.getHost(), - url.getPort()); - Text service = SecurityUtil.buildTokenService(serviceAddr); - org.apache.hadoop.security.token.Token dt = - creds.getToken(service); - if (dt != null) { - extraParams.put(KerberosDelegationTokenAuthenticator.DELEGATION_PARAM, - dt.encodeToUrlString()); + org.apache.hadoop.security.token.Token dToken + = null; + // if we have valid auth token, it takes precedence over a delegation token + // and we don't even look for one. + if (!token.isSet()) { + // delegation token + Credentials creds = UserGroupInformation.getCurrentUser(). + getCredentials(); + if (!creds.getAllTokens().isEmpty()) { + InetSocketAddress serviceAddr = new InetSocketAddress(url.getHost(), + url.getPort()); + Text service = SecurityUtil.buildTokenService(serviceAddr); + dToken = creds.getToken(service); + if (dToken != null) { + if (useQueryStringForDelegationToken()) { + // delegation token will go in the query string, injecting it + extraParams.put( + KerberosDelegationTokenAuthenticator.DELEGATION_PARAM, + dToken.encodeToUrlString()); + } else { + // delegation token will go as request header, setting it in the + // auth-token to ensure no authentication handshake is triggered + // (if we have a delegation token, we are authenticated) + // the delegation token header is injected in the connection request + // at the end of this method. 
+ token.delegationToken = (org.apache.hadoop.security.token.Token + ) dToken; + } + } } } @@ -261,7 +309,14 @@ public class DelegationTokenAuthenticatedURL extends AuthenticatedURL { } url = augmentURL(url, extraParams); - return super.openConnection(url, token); + HttpURLConnection conn = super.openConnection(url, token); + if (!token.isSet() && !useQueryStringForDelegationToken() && dToken != null) { + // injecting the delegation token header in the connection request + conn.setRequestProperty( + DelegationTokenAuthenticator.DELEGATION_TOKEN_HEADER, + dToken.encodeToUrlString()); + } + return conn; } /** diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java index 670ec551a09..e4d942491fe 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java @@ -331,8 +331,7 @@ public abstract class DelegationTokenAuthenticationHandler HttpServletResponse response) throws IOException, AuthenticationException { AuthenticationToken token; - String delegationParam = ServletUtils.getParameter(request, - KerberosDelegationTokenAuthenticator.DELEGATION_PARAM); + String delegationParam = getDelegationToken(request); if (delegationParam != null) { try { Token dt = @@ -356,4 +355,15 @@ public abstract class DelegationTokenAuthenticationHandler return token; } + private String getDelegationToken(HttpServletRequest request) + throws IOException { + String dToken = request.getHeader( + DelegationTokenAuthenticator.DELEGATION_TOKEN_HEADER); + if (dToken == null) { + dToken = ServletUtils.getParameter(request, + KerberosDelegationTokenAuthenticator.DELEGATION_PARAM); + } + return dToken; + } + } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java index ec192dab8ca..18df56ccf3f 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java @@ -56,6 +56,9 @@ public abstract class DelegationTokenAuthenticator implements Authenticator { public static final String OP_PARAM = "op"; + public static final String DELEGATION_TOKEN_HEADER = + "X-Hadoop-Delegation-Token"; + public static final String DELEGATION_PARAM = "delegation"; public static final String TOKEN_PARAM = "token"; public static final String RENEWER_PARAM = "renewer"; @@ -101,15 +104,23 @@ public abstract class DelegationTokenAuthenticator implements Authenticator { authenticator.setConnectionConfigurator(configurator); } - private boolean hasDelegationToken(URL url) { - String queryStr = url.getQuery(); - return (queryStr != null) && queryStr.contains(DELEGATION_PARAM + "="); + private boolean hasDelegationToken(URL url, AuthenticatedURL.Token token) { + boolean hasDt = false; + if (token instanceof DelegationTokenAuthenticatedURL.Token) { + hasDt 
= ((DelegationTokenAuthenticatedURL.Token) token). + getDelegationToken() != null; + } + if (!hasDt) { + String queryStr = url.getQuery(); + hasDt = (queryStr != null) && queryStr.contains(DELEGATION_PARAM + "="); + } + return hasDt; } @Override public void authenticate(URL url, AuthenticatedURL.Token token) throws IOException, AuthenticationException { - if (!hasDelegationToken(url)) { + if (!hasDelegationToken(url, token)) { authenticator.authenticate(url, token); } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/SpanReceiverHost.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/SpanReceiverHost.java new file mode 100644 index 00000000000..b8c7b311ffa --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/SpanReceiverHost.java @@ -0,0 +1,153 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.tracing; + +import java.io.IOException; +import java.util.Collection; +import java.util.HashSet; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hadoop.util.ShutdownHookManager; +import org.htrace.HTraceConfiguration; +import org.htrace.SpanReceiver; +import org.htrace.Trace; + +/** + * This class provides functions for reading the names of SpanReceivers from + * the Hadoop configuration, adding those SpanReceivers to the Tracer, + * and closing those SpanReceivers when appropriate. + * This class does nothing If no SpanReceiver is configured. 
+ */ +@InterfaceAudience.Private +public class SpanReceiverHost { + public static final String SPAN_RECEIVERS_CONF_KEY = "hadoop.trace.spanreceiver.classes"; + private static final Log LOG = LogFactory.getLog(SpanReceiverHost.class); + private Collection receivers = new HashSet(); + private boolean closed = false; + + private static enum SingletonHolder { + INSTANCE; + Object lock = new Object(); + SpanReceiverHost host = null; + } + + public static SpanReceiverHost getInstance(Configuration conf) { + if (SingletonHolder.INSTANCE.host != null) { + return SingletonHolder.INSTANCE.host; + } + synchronized (SingletonHolder.INSTANCE.lock) { + if (SingletonHolder.INSTANCE.host != null) { + return SingletonHolder.INSTANCE.host; + } + SpanReceiverHost host = new SpanReceiverHost(); + host.loadSpanReceivers(conf); + SingletonHolder.INSTANCE.host = host; + ShutdownHookManager.get().addShutdownHook(new Runnable() { + public void run() { + SingletonHolder.INSTANCE.host.closeReceivers(); + } + }, 0); + return SingletonHolder.INSTANCE.host; + } + } + + /** + * Reads the names of classes specified in the + * "hadoop.trace.spanreceiver.classes" property and instantiates and registers + * them with the Tracer as SpanReceiver's. + * + * The nullary constructor is called during construction, but if the classes + * specified implement the Configurable interface, setConfiguration() will be + * called on them. This allows SpanReceivers to use values from the Hadoop + * configuration. + */ + public void loadSpanReceivers(Configuration conf) { + Class implClass = null; + String[] receiverNames = conf.getTrimmedStrings(SPAN_RECEIVERS_CONF_KEY); + if (receiverNames == null || receiverNames.length == 0) { + return; + } + for (String className : receiverNames) { + className = className.trim(); + try { + implClass = Class.forName(className); + receivers.add(loadInstance(implClass, conf)); + LOG.info("SpanReceiver " + className + " was loaded successfully."); + } catch (ClassNotFoundException e) { + LOG.warn("Class " + className + " cannot be found.", e); + } catch (IOException e) { + LOG.warn("Load SpanReceiver " + className + " failed.", e); + } + } + for (SpanReceiver rcvr : receivers) { + Trace.addReceiver(rcvr); + } + } + + private SpanReceiver loadInstance(Class implClass, Configuration conf) + throws IOException { + SpanReceiver impl; + try { + Object o = ReflectionUtils.newInstance(implClass, conf); + impl = (SpanReceiver)o; + impl.configure(wrapHadoopConf(conf)); + } catch (SecurityException e) { + throw new IOException(e); + } catch (IllegalArgumentException e) { + throw new IOException(e); + } catch (RuntimeException e) { + throw new IOException(e); + } + + return impl; + } + + private static HTraceConfiguration wrapHadoopConf(final Configuration conf) { + return new HTraceConfiguration() { + public static final String HTRACE_CONF_PREFIX = "hadoop."; + + @Override + public String get(String key) { + return conf.get(HTRACE_CONF_PREFIX + key); + } + + @Override + public String get(String key, String defaultValue) { + return conf.get(HTRACE_CONF_PREFIX + key, defaultValue); + } + }; + } + + /** + * Calls close() on all SpanReceivers created by this SpanReceiverHost. 
+ */ + public synchronized void closeReceivers() { + if (closed) return; + closed = true; + for (SpanReceiver rcvr : receivers) { + try { + rcvr.close(); + } catch (IOException e) { + LOG.warn("Unable to close SpanReceiver correctly: " + e.getMessage(), e); + } + } + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DataChecksum.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DataChecksum.java index 1636af68a36..9f0ee35711c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DataChecksum.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DataChecksum.java @@ -339,6 +339,7 @@ public class DataChecksum implements Checksum { byte[] data, int dataOff, int dataLen, byte[] checksums, int checksumsOff, String fileName, long basePos) throws ChecksumException { + if (type.size == 0) return; if (NativeCrc32.isAvailable()) { NativeCrc32.verifyChunkedSumsByteArray(bytesPerChecksum, type.id, @@ -421,6 +422,7 @@ public class DataChecksum implements Checksum { public void calculateChunkedSums( byte[] data, int dataOffset, int dataLength, byte[] sums, int sumsOffset) { + if (type.size == 0) return; if (NativeCrc32.isAvailable()) { NativeCrc32.calculateChunkedSumsByteArray(bytesPerChecksum, type.id, diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeCrc32.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeCrc32.java index 2f21ae1a03d..0807d2cbde2 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeCrc32.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeCrc32.java @@ -42,7 +42,7 @@ class NativeCrc32 { * modified. * * @param bytesPerSum the chunk size (eg 512 bytes) - * @param checksumType the DataChecksum type constant + * @param checksumType the DataChecksum type constant (NULL is not supported) * @param sums the DirectByteBuffer pointing at the beginning of the * stored checksums * @param data the DirectByteBuffer pointing at the beginning of the diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PerformanceAdvisory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PerformanceAdvisory.java new file mode 100644 index 00000000000..3304ebb4731 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PerformanceAdvisory.java @@ -0,0 +1,25 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ +package org.apache.hadoop.util; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class PerformanceAdvisory { + public static final Logger LOG = + LoggerFactory.getLogger(PerformanceAdvisory.class); +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ProtoUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ProtoUtil.java index 79f8692842d..36b5ff11bc8 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ProtoUtil.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ProtoUtil.java @@ -27,6 +27,8 @@ import org.apache.hadoop.ipc.protobuf.IpcConnectionContextProtos.UserInformation import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.*; import org.apache.hadoop.security.SaslRpcServer.AuthMethod; import org.apache.hadoop.security.UserGroupInformation; +import org.htrace.Span; +import org.htrace.Trace; import com.google.protobuf.ByteString; @@ -165,6 +167,15 @@ public abstract class ProtoUtil { RpcRequestHeaderProto.Builder result = RpcRequestHeaderProto.newBuilder(); result.setRpcKind(convert(rpcKind)).setRpcOp(operation).setCallId(callId) .setRetryCount(retryCount).setClientId(ByteString.copyFrom(uuid)); + + // Add tracing info if we are currently tracing. + if (Trace.isTracing()) { + Span s = Trace.currentSpan(); + result.setTraceInfo(RPCTraceInfoProto.newBuilder() + .setParentId(s.getSpanId()) + .setTraceId(s.getTraceId()).build()); + } + return result.build(); } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionInfo.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionInfo.java index 9296d54912e..1d96d996fa3 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionInfo.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionInfo.java @@ -170,7 +170,8 @@ public class VersionInfo { public static void main(String[] args) { LOG.debug("version: "+ getVersion()); System.out.println("Hadoop " + getVersion()); - System.out.println("Subversion " + getUrl() + " -r " + getRevision()); + System.out.println("Source code repository " + getUrl() + " -r " + + getRevision()); System.out.println("Compiled by " + getUser() + " on " + getDate()); System.out.println("Compiled with protoc " + getProtocVersion()); System.out.println("From source with checksum " + getSrcChecksum()); diff --git a/hadoop-common-project/hadoop-common/src/main/native/README b/hadoop-common-project/hadoop-common/src/main/native/README index 8c5af78f0f2..3ad449537c0 100644 --- a/hadoop-common-project/hadoop-common/src/main/native/README +++ b/hadoop-common-project/hadoop-common/src/main/native/README @@ -1,10 +1,11 @@ Package: libhadoop -Authors: Arun C Murthy MOTIVATION -The libhadoop package contains the native code for any of hadoop (http://hadoop.apache.org/core). +The libhadoop package contains the native code for Apache Hadoop (http://hadoop.apache.org/). IMPROVEMENTS -Any suggestions for improvements or patched should be sent to core-dev@hadoop.apache.org. Please go through http://wiki.apache.org/hadoop/HowToContribute for more information on how to contribute. +Any suggestions for improvements or patched should be sent to common-dev@hadoop.apache.org. + +Please see http://wiki.apache.org/hadoop/HowToContribute for more information on how to contribute. 
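The htrace integration added by this patch hangs together as follows: on the client side, the ProtoUtil change above copies the current span's trace id and span id into the new RPCTraceInfoProto field of the RPC request header, and on the server side the Server changes above reconstruct a TraceInfo from that header, start a detached span, and let the handler thread continue it for the duration of the call. The sketch below condenses that round trip into one place. It is illustrative only: the class and method names (RpcTracePropagationSketch, buildTraceInfo, handleCall) are not part of the patch, and it assumes the protobuf classes generated from the RpcHeader.proto change that follows, plus htrace 3.x (org.htrace), are on the classpath.

// Condensed, hypothetical sketch of the trace propagation wired up by this
// patch; not part of the patch itself.
import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RPCTraceInfoProto;
import org.htrace.Sampler;
import org.htrace.Span;
import org.htrace.Trace;
import org.htrace.TraceInfo;
import org.htrace.TraceScope;

public class RpcTracePropagationSketch {

  // Client side: if a span is active, copy its ids into the request header,
  // mirroring the ProtoUtil change shown earlier in this patch.
  static RPCTraceInfoProto buildTraceInfo() {
    if (!Trace.isTracing()) {
      return null; // nothing to propagate
    }
    Span current = Trace.currentSpan();
    return RPCTraceInfoProto.newBuilder()
        .setTraceId(current.getTraceId())
        .setParentId(current.getSpanId())
        .build();
  }

  // Server side: continue the trace described by the incoming header,
  // mirroring the Server changes shown earlier in this patch.
  static void handleCall(RPCTraceInfoProto traceInfo, String description) {
    Span span = null;
    if (traceInfo != null) {
      TraceInfo parent =
          new TraceInfo(traceInfo.getTraceId(), traceInfo.getParentId());
      // detach() so the span can be parked on the queued Call and picked up
      // later by a handler thread, as Server.Call does in this patch.
      span = Trace.startSpan(description, parent).detach();
    }
    TraceScope scope = (span != null) ? Trace.continueSpan(span) : null;
    try {
      // ... process the call ...
    } finally {
      if (scope != null) {
        scope.close();
      }
    }
  }

  public static void main(String[] args) {
    // Start a root span so buildTraceInfo() has something to propagate.
    TraceScope root = Trace.startSpan("demo", Sampler.ALWAYS);
    try {
      handleCall(buildTraceInfo(), "demo call");
    } finally {
      root.close();
    }
  }
}

The Client.java and ProtobufRpcEngine.java changes above add the corresponding timeline annotations and span lifecycle around the actual socket connect and method invocation.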
diff --git a/hadoop-common-project/hadoop-common/src/main/proto/RpcHeader.proto b/hadoop-common-project/hadoop-common/src/main/proto/RpcHeader.proto
index e8c4adac367..c8791508b5a 100644
--- a/hadoop-common-project/hadoop-common/src/main/proto/RpcHeader.proto
+++ b/hadoop-common-project/hadoop-common/src/main/proto/RpcHeader.proto
@@ -53,6 +53,18 @@ enum RpcKindProto {
+/**
+ * Used to pass through the information necessary to continue
+ * a trace after an RPC is made. All we need is the traceid
+ * (so we know the overarching trace this message is a part of), and
+ * the id of the current span when this message was sent, so we know
+ * what span caused the new span we will create when this message is received.
+ */
+message RPCTraceInfoProto {
+  optional int64 traceId = 1;
+  optional int64 parentId = 2;
+}
+
 message RpcRequestHeaderProto { // the header for the RpcRequest
   enum OperationProto {
     RPC_FINAL_PACKET        = 0; // The final RPC Packet
@@ -67,6 +79,7 @@ message RpcRequestHeaderProto { // the header for the RpcRequest
   // clientId + callId uniquely identifies a request
   // retry count, 1 means this is the first retry
   optional sint32 retryCount = 5 [default = -1];
+  optional RPCTraceInfoProto traceInfo = 6; // tracing info
 }
diff --git a/hadoop-common-project/hadoop-common/src/site/apt/Tracing.apt.vm b/hadoop-common-project/hadoop-common/src/site/apt/Tracing.apt.vm
new file mode 100644
index 00000000000..f777dd23c16
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/site/apt/Tracing.apt.vm
@@ -0,0 +1,169 @@
+~~ Licensed under the Apache License, Version 2.0 (the "License");
+~~ you may not use this file except in compliance with the License.
+~~ You may obtain a copy of the License at
+~~
+~~ http://www.apache.org/licenses/LICENSE-2.0
+~~
+~~ Unless required by applicable law or agreed to in writing, software
+~~ distributed under the License is distributed on an "AS IS" BASIS,
+~~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+~~ See the License for the specific language governing permissions and
+~~ limitations under the License. See accompanying LICENSE file.
+
+  ---
+  Hadoop Distributed File System-${project.version} - Enabling Dapper-like Tracing
+  ---
+  ---
+  ${maven.build.timestamp}
+
+Enabling Dapper-like Tracing in HDFS
+
+%{toc|section=1|fromDepth=0}
+
+* {Dapper-like Tracing in HDFS}
+
+** HTrace
+
+  {{{https://issues.apache.org/jira/browse/HDFS-5274}HDFS-5274}}
+  added support for tracing requests through HDFS,
+  using the open source tracing library, {{{https://github.com/cloudera/htrace}HTrace}}.
+  Setting up tracing is quite simple; however, it requires some very minor changes to your client code.
+
+** SpanReceivers
+
+  The tracing system works by collecting information in structs called 'Spans'.
+  It is up to you to choose how you want to receive this information
+  by implementing the SpanReceiver interface, which defines one method:
+
++----
+public void receiveSpan(Span span);
++----
+
+  Configure what SpanReceivers you'd like to use
+  by putting a comma-separated list of the fully-qualified class names of
+  classes implementing SpanReceiver
+  in the <<<core-site.xml>>> property <<<hadoop.trace.spanreceiver.classes>>>.
+
++----
+  <property>
+    <name>hadoop.trace.spanreceiver.classes</name>
+    <value>org.htrace.impl.LocalFileSpanReceiver</value>
+  </property>
+  <property>
+    <name>hadoop.local-file-span-receiver.path</name>
+    <value>/var/log/hadoop/htrace.out</value>
+  </property>
++----
+
+** Setting up ZipkinSpanReceiver
+
+  Instead of implementing SpanReceiver by yourself,
+  you can use <<<ZipkinSpanReceiver>>> which uses
+  {{{https://github.com/twitter/zipkin}Zipkin}}
+  for collecting and displaying tracing data.
+
+  In order to use <<<ZipkinSpanReceiver>>>,
+  you need to download and set up {{{https://github.com/twitter/zipkin}Zipkin}} first.
+
+  You also need to add the jar of <<<htrace-zipkin>>> to the classpath of Hadoop on each node.
+  Here is an example setup procedure.
+
++----
+  $ git clone https://github.com/cloudera/htrace
+  $ cd htrace/htrace-zipkin
+  $ mvn compile assembly:single
+  $ cp target/htrace-zipkin-*-jar-with-dependencies.jar $HADOOP_HOME/share/hadoop/hdfs/lib/
++----
+
+  The sample configuration for <<<ZipkinSpanReceiver>>> is shown below.
+  By adding these to <<<core-site.xml>>> of NameNode and DataNodes,
+  <<<ZipkinSpanReceiver>>> is initialized on startup.
+  You also need this configuration on the client node in addition to the servers.
+
++----
+  <property>
+    <name>hadoop.trace.spanreceiver.classes</name>
+    <value>org.htrace.impl.ZipkinSpanReceiver</value>
+  </property>
+  <property>
+    <name>hadoop.zipkin.collector-hostname</name>
+    <value>192.168.1.2</value>
+  </property>
+  <property>
+    <name>hadoop.zipkin.collector-port</name>
+    <value>9410</value>
+  </property>
++----
+
+** Turning on tracing by HTrace API
+
+  In order to turn on Dapper-like tracing,
+  you will need to wrap the traced logic with <<tracing span>> as shown below.
+  When there are running tracing spans,
+  the tracing information is propagated to servers along with RPC requests.
+
+  In addition, you need to initialize <<<SpanReceiverHost>>> once per process.
+
++----
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.tracing.SpanReceiverHost;
+import org.htrace.Sampler;
+import org.htrace.Trace;
+import org.htrace.TraceScope;
+
+...
+
+    SpanReceiverHost.getInstance(new HdfsConfiguration());
+
+...
+
+    TraceScope ts = Trace.startSpan("Gets", Sampler.ALWAYS);
+    try {
+      ... // traced logic
+    } finally {
+      if (ts != null) ts.close();
+    }
++----
+
+** Sample code for tracing
+
+  The <<<TracingFsShell.java>>> shown below is a wrapper of FsShell
+  which starts a tracing span before invoking an HDFS shell command.
+
++----
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FsShell;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.tracing.SpanReceiverHost;
+import org.apache.hadoop.util.ToolRunner;
+import org.htrace.Sampler;
+import org.htrace.Trace;
+import org.htrace.TraceScope;
+
+public class TracingFsShell {
+  public static void main(String argv[]) throws Exception {
+    Configuration conf = new Configuration();
+    FsShell shell = new FsShell();
+    conf.setQuietMode(false);
+    shell.setConf(conf);
+    int res = 0;
+    SpanReceiverHost.getInstance(new HdfsConfiguration());
+    TraceScope ts = null;
+    try {
+      ts = Trace.startSpan("FsShell", Sampler.ALWAYS);
+      res = ToolRunner.run(shell, argv);
+    } finally {
+      shell.close();
+      if (ts != null) ts.close();
+    }
+    System.exit(res);
+  }
+}
++----
+
+  You can compile and execute this code as shown below.
+
++----
+$ javac -cp `hadoop classpath` TracingFsShell.java
+$ HADOOP_CLASSPATH=.
hdfs TracingFsShell -put sample.txt /tmp/ ++---- diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestCachingKeyProvider.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestCachingKeyProvider.java index 2eff6991c3d..b8d29a6d029 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestCachingKeyProvider.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestCachingKeyProvider.java @@ -19,6 +19,7 @@ package org.apache.hadoop.crypto.key; import java.util.Date; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.crypto.key.kms.KMSClientProvider; import org.junit.Assert; import org.junit.Test; @@ -32,6 +33,7 @@ public class TestCachingKeyProvider { KeyProvider mockProv = Mockito.mock(KeyProvider.class); Mockito.when(mockProv.getCurrentKey(Mockito.eq("k1"))).thenReturn(mockKey); Mockito.when(mockProv.getCurrentKey(Mockito.eq("k2"))).thenReturn(null); + Mockito.when(mockProv.getConf()).thenReturn(new Configuration()); KeyProvider cache = new CachingKeyProvider(mockProv, 100, 100); // asserting caching @@ -58,6 +60,7 @@ public class TestCachingKeyProvider { Mockito.when(mockProv.getKeyVersion(Mockito.eq("k1@0"))) .thenReturn(mockKey); Mockito.when(mockProv.getKeyVersion(Mockito.eq("k2@0"))).thenReturn(null); + Mockito.when(mockProv.getConf()).thenReturn(new Configuration()); KeyProvider cache = new CachingKeyProvider(mockProv, 100, 100); // asserting caching @@ -88,6 +91,7 @@ public class TestCachingKeyProvider { KeyProvider mockProv = Mockito.mock(KeyProvider.class); Mockito.when(mockProv.getMetadata(Mockito.eq("k1"))).thenReturn(mockMeta); Mockito.when(mockProv.getMetadata(Mockito.eq("k2"))).thenReturn(null); + Mockito.when(mockProv.getConf()).thenReturn(new Configuration()); KeyProvider cache = new CachingKeyProvider(mockProv, 100, 100); // asserting caching @@ -112,6 +116,7 @@ public class TestCachingKeyProvider { KeyProvider.KeyVersion mockKey = Mockito.mock(KeyProvider.KeyVersion.class); KeyProvider mockProv = Mockito.mock(KeyProvider.class); Mockito.when(mockProv.getCurrentKey(Mockito.eq("k1"))).thenReturn(mockKey); + Mockito.when(mockProv.getConf()).thenReturn(new Configuration()); KeyProvider cache = new CachingKeyProvider(mockProv, 100, 100); Assert.assertEquals(mockKey, cache.getCurrentKey("k1")); Mockito.verify(mockProv, Mockito.times(1)).getCurrentKey(Mockito.eq("k1")); @@ -134,6 +139,7 @@ public class TestCachingKeyProvider { .thenReturn(mockKey); Mockito.when(mockProv.getMetadata(Mockito.eq("k1"))).thenReturn( new KMSClientProvider.KMSMetadata("c", 0, "l", null, new Date(), 1)); + Mockito.when(mockProv.getConf()).thenReturn(new Configuration()); KeyProvider cache = new CachingKeyProvider(mockProv, 100, 100); Assert.assertEquals(mockKey, cache.getCurrentKey("k1")); Mockito.verify(mockProv, Mockito.times(1)).getCurrentKey(Mockito.eq("k1")); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProvider.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProvider.java index 892cec82ff6..c3335a37aa0 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProvider.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProvider.java @@ -159,6 +159,10 @@ public class TestKeyProvider { private int size; private byte[] material; + public 
MyKeyProvider(Configuration conf) { + super(conf); + } + @Override public KeyVersion getKeyVersion(String versionName) throws IOException { @@ -216,7 +220,7 @@ public class TestKeyProvider { @Test public void testMaterialGeneration() throws Exception { - MyKeyProvider kp = new MyKeyProvider(); + MyKeyProvider kp = new MyKeyProvider(new Configuration()); KeyProvider.Options options = new KeyProvider.Options(new Configuration()); options.setCipher(CIPHER); options.setBitLength(128); @@ -225,10 +229,19 @@ public class TestKeyProvider { Assert.assertEquals(CIPHER, kp.algorithm); Assert.assertNotNull(kp.material); - kp = new MyKeyProvider(); + kp = new MyKeyProvider(new Configuration()); kp.rollNewVersion("hello"); Assert.assertEquals(128, kp.size); Assert.assertEquals(CIPHER, kp.algorithm); Assert.assertNotNull(kp.material); } + + @Test + public void testConfiguration() throws Exception { + Configuration conf = new Configuration(false); + conf.set("a", "A"); + MyKeyProvider kp = new MyKeyProvider(conf); + Assert.assertEquals("A", kp.getConf().get("a")); + } + } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProviderDelegationTokenExtension.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProviderDelegationTokenExtension.java index 52dedf00512..df5d3e88846 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProviderDelegationTokenExtension.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProviderDelegationTokenExtension.java @@ -29,13 +29,18 @@ import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.token.Token; import org.junit.Assert; import org.junit.Test; +import org.mockito.Mockito; public class TestKeyProviderDelegationTokenExtension { public static abstract class MockKeyProvider extends KeyProvider implements DelegationTokenExtension { + + public MockKeyProvider() { + super(new Configuration(false)); + } } - + @Test public void testCreateExtension() throws Exception { Configuration conf = new Configuration(); @@ -50,9 +55,11 @@ public class TestKeyProviderDelegationTokenExtension { Assert.assertNull(kpDTE1.addDelegationTokens("user", credentials)); MockKeyProvider mock = mock(MockKeyProvider.class); + Mockito.when(mock.getConf()).thenReturn(new Configuration()); when(mock.addDelegationTokens("renewer", credentials)).thenReturn( - new Token[] { new Token(null, null, new Text("kind"), new Text( - "service")) }); + new Token[]{new Token(null, null, new Text("kind"), new Text( + "service"))} + ); KeyProviderDelegationTokenExtension kpDTE2 = KeyProviderDelegationTokenExtension .createKeyProviderDelegationTokenExtension(mock); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestTextCommand.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestTextCommand.java index 0c8a6acf4a9..70a2f037b39 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestTextCommand.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestTextCommand.java @@ -42,29 +42,14 @@ public class TestTextCommand { System.getProperty("test.build.data", "build/test/data/") + "/testText"; private static final String AVRO_FILENAME = new Path(TEST_ROOT_DIR, "weather.avro").toUri().getPath(); + private static final String TEXT_FILENAME = + new Path(TEST_ROOT_DIR, 
"testtextfile.txt").toUri().getPath(); /** * Tests whether binary Avro data files are displayed correctly. */ @Test (timeout = 30000) public void testDisplayForAvroFiles() throws Exception { - // Create a small Avro data file on the local file system. - createAvroFile(generateWeatherAvroBinaryData()); - - // Prepare and call the Text command's protected getInputStream method - // using reflection. - Configuration conf = new Configuration(); - URI localPath = new URI(AVRO_FILENAME); - PathData pathData = new PathData(localPath, conf); - Display.Text text = new Display.Text(); - text.setConf(conf); - Method method = text.getClass().getDeclaredMethod( - "getInputStream", PathData.class); - method.setAccessible(true); - InputStream stream = (InputStream) method.invoke(text, pathData); - String output = inputStreamToString(stream); - - // Check the output. String expectedOutput = "{\"station\":\"011990-99999\",\"time\":-619524000000,\"temp\":0}" + System.getProperty("line.separator") + @@ -77,18 +62,72 @@ public class TestTextCommand { "{\"station\":\"012650-99999\",\"time\":-655509600000,\"temp\":78}" + System.getProperty("line.separator"); + String output = readUsingTextCommand(AVRO_FILENAME, + generateWeatherAvroBinaryData()); assertEquals(expectedOutput, output); } + /** + * Tests that a zero-length file is displayed correctly. + */ + @Test (timeout = 30000) + public void testEmptyTextFil() throws Exception { + byte[] emptyContents = { }; + String output = readUsingTextCommand(TEXT_FILENAME, emptyContents); + assertTrue("".equals(output)); + } + + /** + * Tests that a one-byte file is displayed correctly. + */ + @Test (timeout = 30000) + public void testOneByteTextFil() throws Exception { + byte[] oneByteContents = { 'x' }; + String output = readUsingTextCommand(TEXT_FILENAME, oneByteContents); + assertTrue(new String(oneByteContents).equals(output)); + } + + /** + * Tests that a one-byte file is displayed correctly. + */ + @Test (timeout = 30000) + public void testTwoByteTextFil() throws Exception { + byte[] twoByteContents = { 'x', 'y' }; + String output = readUsingTextCommand(TEXT_FILENAME, twoByteContents); + assertTrue(new String(twoByteContents).equals(output)); + } + + // Create a file on the local file system and read it using + // the Display.Text class. + private String readUsingTextCommand(String fileName, byte[] fileContents) + throws Exception { + createFile(fileName, fileContents); + + // Prepare and call the Text command's protected getInputStream method + // using reflection. 
+ Configuration conf = new Configuration(); + URI localPath = new URI(fileName); + PathData pathData = new PathData(localPath, conf); + Display.Text text = new Display.Text() { + @Override + public InputStream getInputStream(PathData item) throws IOException { + return super.getInputStream(item); + } + }; + text.setConf(conf); + InputStream stream = (InputStream) text.getInputStream(pathData); + return inputStreamToString(stream); + } + private String inputStreamToString(InputStream stream) throws IOException { StringWriter writer = new StringWriter(); IOUtils.copy(stream, writer); return writer.toString(); } - private void createAvroFile(byte[] contents) throws IOException { + private void createFile(String fileName, byte[] contents) throws IOException { (new File(TEST_ROOT_DIR)).mkdir(); - File file = new File(AVRO_FILENAME); + File file = new File(fileName); file.createNewFile(); FileOutputStream stream = new FileOutputStream(file); stream.write(contents); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestDelegationTokenAuthenticationHandlerWithMocks.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestDelegationTokenAuthenticationHandlerWithMocks.java index c9d255dc5aa..7880fa1368b 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestDelegationTokenAuthenticationHandlerWithMocks.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestDelegationTokenAuthenticationHandlerWithMocks.java @@ -284,11 +284,13 @@ public class TestDelegationTokenAuthenticationHandlerWithMocks { @Test public void testAuthenticate() throws Exception { - testValidDelegationToken(); - testInvalidDelegationToken(); + testValidDelegationTokenQueryString(); + testValidDelegationTokenHeader(); + testInvalidDelegationTokenQueryString(); + testInvalidDelegationTokenHeader(); } - private void testValidDelegationToken() throws Exception { + private void testValidDelegationTokenQueryString() throws Exception { HttpServletRequest request = Mockito.mock(HttpServletRequest.class); HttpServletResponse response = Mockito.mock(HttpServletResponse.class); Token dToken = @@ -307,7 +309,26 @@ public class TestDelegationTokenAuthenticationHandlerWithMocks { Assert.assertTrue(token.isExpired()); } - private void testInvalidDelegationToken() throws Exception { + private void testValidDelegationTokenHeader() throws Exception { + HttpServletRequest request = Mockito.mock(HttpServletRequest.class); + HttpServletResponse response = Mockito.mock(HttpServletResponse.class); + Token dToken = + handler.getTokenManager().createToken( + UserGroupInformation.getCurrentUser(), "user"); + Mockito.when(request.getHeader(Mockito.eq( + DelegationTokenAuthenticator.DELEGATION_TOKEN_HEADER))).thenReturn( + dToken.encodeToUrlString()); + + AuthenticationToken token = handler.authenticate(request, response); + Assert.assertEquals(UserGroupInformation.getCurrentUser(). 
+ getShortUserName(), token.getUserName()); + Assert.assertEquals(0, token.getExpires()); + Assert.assertEquals(handler.getType(), + token.getType()); + Assert.assertTrue(token.isExpired()); + } + + private void testInvalidDelegationTokenQueryString() throws Exception { HttpServletRequest request = Mockito.mock(HttpServletRequest.class); HttpServletResponse response = Mockito.mock(HttpServletResponse.class); Mockito.when(request.getQueryString()).thenReturn( @@ -323,4 +344,21 @@ public class TestDelegationTokenAuthenticationHandlerWithMocks { } } + private void testInvalidDelegationTokenHeader() throws Exception { + HttpServletRequest request = Mockito.mock(HttpServletRequest.class); + HttpServletResponse response = Mockito.mock(HttpServletResponse.class); + Mockito.when(request.getHeader(Mockito.eq( + DelegationTokenAuthenticator.DELEGATION_TOKEN_HEADER))).thenReturn( + "invalid"); + + try { + handler.authenticate(request, response); + Assert.fail(); + } catch (AuthenticationException ex) { + //NOP + } catch (Exception ex) { + Assert.fail(); + } + } + } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestWebDelegationToken.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestWebDelegationToken.java index 1b452f18241..118abff2a56 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestWebDelegationToken.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestWebDelegationToken.java @@ -149,6 +149,15 @@ public class TestWebDelegationToken { throws ServletException, IOException { resp.setStatus(HttpServletResponse.SC_OK); resp.getWriter().write("ping"); + if (req.getHeader(DelegationTokenAuthenticator.DELEGATION_TOKEN_HEADER) + != null) { + resp.setHeader("UsingHeader", "true"); + } + if (req.getQueryString() != null && + req.getQueryString().contains( + DelegationTokenAuthenticator.DELEGATION_PARAM + "=")) { + resp.setHeader("UsingQueryString", "true"); + } } @Override @@ -314,7 +323,20 @@ public class TestWebDelegationToken { } @Test - public void testDelegationTokenAuthenticatorCalls() throws Exception { + public void testDelegationTokenAuthenticatorCallsWithHeader() + throws Exception { + testDelegationTokenAuthenticatorCalls(false); + } + + @Test + public void testDelegationTokenAuthenticatorCallsWithQueryString() + throws Exception { + testDelegationTokenAuthenticatorCalls(true); + } + + + private void testDelegationTokenAuthenticatorCalls(final boolean useQS) + throws Exception { final Server jetty = createJettyServer(); Context context = new Context(); context.setContextPath("/foo"); @@ -324,14 +346,15 @@ public class TestWebDelegationToken { try { jetty.start(); - URL nonAuthURL = new URL(getJettyURL() + "/foo/bar"); + final URL nonAuthURL = new URL(getJettyURL() + "/foo/bar"); URL authURL = new URL(getJettyURL() + "/foo/bar?authenticated=foo"); URL authURL2 = new URL(getJettyURL() + "/foo/bar?authenticated=bar"); DelegationTokenAuthenticatedURL.Token token = new DelegationTokenAuthenticatedURL.Token(); - DelegationTokenAuthenticatedURL aUrl = + final DelegationTokenAuthenticatedURL aUrl = new DelegationTokenAuthenticatedURL(); + aUrl.setUseQueryStringForDelegationToken(useQS); try { aUrl.getDelegationToken(nonAuthURL, token, FOO_USER); @@ -379,6 +402,27 @@ public class TestWebDelegationToken { Assert.assertTrue(ex.getMessage().contains("401")); } + 
aUrl.getDelegationToken(authURL, token, "foo"); + + UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); + ugi.addToken(token.getDelegationToken()); + ugi.doAs(new PrivilegedExceptionAction() { + @Override + public Void run() throws Exception { + HttpURLConnection conn = aUrl.openConnection(nonAuthURL, new DelegationTokenAuthenticatedURL.Token()); + Assert.assertEquals(HttpServletResponse.SC_OK, conn.getResponseCode()); + if (useQS) { + Assert.assertNull(conn.getHeaderField("UsingHeader")); + Assert.assertNotNull(conn.getHeaderField("UsingQueryString")); + } else { + Assert.assertNotNull(conn.getHeaderField("UsingHeader")); + Assert.assertNull(conn.getHeaderField("UsingQueryString")); + } + return null; + } + }); + + } finally { jetty.stop(); } diff --git a/hadoop-common-project/hadoop-kms/pom.xml b/hadoop-common-project/hadoop-kms/pom.xml index b65e67a5bbc..b1ca3077b30 100644 --- a/hadoop-common-project/hadoop-kms/pom.xml +++ b/hadoop-common-project/hadoop-kms/pom.xml @@ -34,7 +34,6 @@ Apache Hadoop KMS - 6.0.36 ${project.build.directory}/${project.artifactId}-${project.version}/share/hadoop/kms/tomcat diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java index faec70a7554..43b07fec63d 100644 --- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java +++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java @@ -26,10 +26,10 @@ import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersi import org.apache.hadoop.crypto.key.kms.KMSRESTConstants; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.security.authorize.AuthorizationException; import org.apache.hadoop.crypto.key.kms.KMSClientProvider; import org.apache.hadoop.security.token.delegation.web.HttpUserGroupInformation; + import javax.ws.rs.Consumes; import javax.ws.rs.DELETE; import javax.ws.rs.DefaultValue; @@ -73,29 +73,14 @@ public class KMS { kmsAudit= KMSWebApp.getKMSAudit(); } - - private static final String UNAUTHORIZED_MSG_WITH_KEY = - "User:%s not allowed to do '%s' on '%s'"; - - private static final String UNAUTHORIZED_MSG_WITHOUT_KEY = - "User:%s not allowed to do '%s'"; - private void assertAccess(KMSACLs.Type aclType, UserGroupInformation ugi, KMSOp operation) throws AccessControlException { - assertAccess(aclType, ugi, operation, null); + KMSWebApp.getACLs().assertAccess(aclType, ugi, operation, null); } - - private void assertAccess(KMSACLs.Type aclType, - UserGroupInformation ugi, KMSOp operation, String key) - throws AccessControlException { - if (!KMSWebApp.getACLs().hasAccess(aclType, ugi)) { - KMSWebApp.getUnauthorizedCallsMeter().mark(); - kmsAudit.unauthorized(ugi, operation, key); - throw new AuthorizationException(String.format( - (key != null) ? 
UNAUTHORIZED_MSG_WITH_KEY - : UNAUTHORIZED_MSG_WITHOUT_KEY, - ugi.getShortUserName(), operation, key)); - } + + private void assertAccess(KMSACLs.Type aclType, UserGroupInformation ugi, + KMSOp operation, String key) throws AccessControlException { + KMSWebApp.getACLs().assertAccess(aclType, ugi, operation, key); } private static KeyProvider.KeyVersion removeKeyMaterial( diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java index a6c5bf4c2a5..8a10bb2be92 100644 --- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java +++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java @@ -19,8 +19,11 @@ package org.apache.hadoop.crypto.key.kms.server; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.crypto.key.kms.server.KMS.KMSOp; +import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.AccessControlList; +import org.apache.hadoop.security.authorize.AuthorizationException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -39,14 +42,23 @@ import java.util.concurrent.TimeUnit; public class KMSACLs implements Runnable { private static final Logger LOG = LoggerFactory.getLogger(KMSACLs.class); + private static final String UNAUTHORIZED_MSG_WITH_KEY = + "User:%s not allowed to do '%s' on '%s'"; + + private static final String UNAUTHORIZED_MSG_WITHOUT_KEY = + "User:%s not allowed to do '%s'"; public enum Type { CREATE, DELETE, ROLLOVER, GET, GET_KEYS, GET_METADATA, SET_KEY_MATERIAL, GENERATE_EEK, DECRYPT_EEK; - public String getConfigKey() { + public String getAclConfigKey() { return KMSConfiguration.CONFIG_PREFIX + "acl." + this.toString(); } + + public String getBlacklistConfigKey() { + return KMSConfiguration.CONFIG_PREFIX + "blacklist." + this.toString(); + } } public static final String ACL_DEFAULT = AccessControlList.WILDCARD_ACL_VALUE; @@ -54,6 +66,7 @@ public class KMSACLs implements Runnable { public static final int RELOADER_SLEEP_MILLIS = 1000; private volatile Map acls; + private volatile Map blacklistedAcls; private ScheduledExecutorService executorService; private long lastReload; @@ -70,12 +83,20 @@ public class KMSACLs implements Runnable { private void setACLs(Configuration conf) { Map tempAcls = new HashMap(); + Map tempBlacklist = new HashMap(); for (Type aclType : Type.values()) { - String aclStr = conf.get(aclType.getConfigKey(), ACL_DEFAULT); + String aclStr = conf.get(aclType.getAclConfigKey(), ACL_DEFAULT); tempAcls.put(aclType, new AccessControlList(aclStr)); + String blacklistStr = conf.get(aclType.getBlacklistConfigKey()); + if (blacklistStr != null) { + // Only add if blacklist is present + tempBlacklist.put(aclType, new AccessControlList(blacklistStr)); + LOG.info("'{}' Blacklist '{}'", aclType, blacklistStr); + } LOG.info("'{}' ACL '{}'", aclType, aclStr); } acls = tempAcls; + blacklistedAcls = tempBlacklist; } @Override @@ -109,12 +130,38 @@ public class KMSACLs implements Runnable { lastReload = System.currentTimeMillis(); Configuration conf = KMSConfiguration.getACLsConf(); // triggering the resource loading. 
- conf.get(Type.CREATE.getConfigKey()); + conf.get(Type.CREATE.getAclConfigKey()); return conf; } + /** + * First check if the user is in the ACL for the KMS operation; if so, + * return true only if the user is not present in any configured blacklist + * for the operation. + * @param type KMS Operation + * @param ugi UserGroupInformation of user + * @return true if the user has access + */ public boolean hasAccess(Type type, UserGroupInformation ugi) { - return acls.get(type).isUserAllowed(ugi); + boolean access = acls.get(type).isUserAllowed(ugi); + if (access) { + AccessControlList blacklist = blacklistedAcls.get(type); + access = (blacklist == null) || !blacklist.isUserInList(ugi); + } + return access; + } + + public void assertAccess(KMSACLs.Type aclType, + UserGroupInformation ugi, KMSOp operation, String key) + throws AccessControlException { + if (!KMSWebApp.getACLs().hasAccess(aclType, ugi)) { + KMSWebApp.getUnauthorizedCallsMeter().mark(); + KMSWebApp.getKMSAudit().unauthorized(ugi, operation, key); + throw new AuthorizationException(String.format( + (key != null) ? UNAUTHORIZED_MSG_WITH_KEY + : UNAUTHORIZED_MSG_WITHOUT_KEY, + ugi.getShortUserName(), operation, key)); + } } } diff --git a/hadoop-common-project/hadoop-kms/src/site/apt/index.apt.vm b/hadoop-common-project/hadoop-kms/src/site/apt/index.apt.vm index e0cbd780fd5..e947c9b3982 100644 --- a/hadoop-common-project/hadoop-kms/src/site/apt/index.apt.vm +++ b/hadoop-common-project/hadoop-kms/src/site/apt/index.apt.vm @@ -274,8 +274,13 @@ $ keytool -genkey -alias tomcat -keyalg RSA KMS ACLs configuration are defined in the KMS <<>> configuration file. This file is hot-reloaded when it changes. - KMS supports a fine grained access control via a set ACL - configuration properties: + KMS supports both fine-grained access control and a blacklist for KMS + operations via a set of ACL configuration properties. + + A user accessing KMS is first checked for inclusion in the Access Control + List for the requested operation and then checked for exclusion in the + blacklist for the operation before access is granted. + +---+ @@ -288,6 +293,16 @@ $ keytool -genkey -alias tomcat -keyalg RSA + + hadoop.kms.blacklist.CREATE + hdfs,foo + + Blacklist for create-key operations. + If the user is in the Blacklist, the key material is not returned + as part of the response. + + + hadoop.kms.acl.DELETE * @@ -296,6 +311,14 @@ $ keytool -genkey -alias tomcat -keyalg RSA + + hadoop.kms.blacklist.DELETE + hdfs,foo + + Blacklist for delete-key operations. + + + hadoop.kms.acl.ROLLOVER * @@ -306,6 +329,14 @@ $ keytool -genkey -alias tomcat -keyalg RSA + + hadoop.kms.blacklist.ROLLOVER + hdfs,foo + + Blacklist for rollover-key operations. + + + hadoop.kms.acl.GET * @@ -314,6 +345,14 @@ $ keytool -genkey -alias tomcat -keyalg RSA + + hadoop.kms.blacklist.GET + hdfs,foo + + Blacklist for get-key-version and get-current-key operations. + + + hadoop.kms.acl.GET_KEYS * @@ -322,6 +361,14 @@ $ keytool -genkey -alias tomcat -keyalg RSA + + hadoop.kms.blacklist.GET_KEYS + hdfs,foo + + Blacklist for get-keys operation. + + + hadoop.kms.acl.GET_METADATA * @@ -330,6 +377,14 @@ $ keytool -genkey -alias tomcat -keyalg RSA + + hadoop.kms.blacklist.GET_METADATA + hdfs,foo + + Blacklist for get-key-metadata and get-keys-metadata operations.
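(Editorial aside before the remaining blacklist properties: the ACL-then-blacklist evaluation described above can be sketched against the KMSACLs API introduced in this change. The sketch lives in the KMS server package so it can use the same package-level constructor as TestKMSACLs; the ACL and blacklist values are made up for illustration.)

package org.apache.hadoop.crypto.key.kms.server;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;

public class KmsBlacklistSketch {
  public static void main(String[] args) {
    // Hypothetical configuration: "client" and "hdfs" pass the ACL,
    // but "hdfs" is additionally blacklisted for DECRYPT_EEK.
    Configuration conf = new Configuration(false);
    conf.set(KMSACLs.Type.DECRYPT_EEK.getAclConfigKey(), "client,hdfs");
    conf.set(KMSACLs.Type.DECRYPT_EEK.getBlacklistConfigKey(), "hdfs");

    KMSACLs acls = new KMSACLs(conf);
    UserGroupInformation client = UserGroupInformation.createRemoteUser("client");
    UserGroupInformation hdfs = UserGroupInformation.createRemoteUser("hdfs");

    // In the ACL and not blacklisted -> access granted.
    System.out.println(acls.hasAccess(KMSACLs.Type.DECRYPT_EEK, client)); // true
    // In the ACL but blacklisted -> access denied.
    System.out.println(acls.hasAccess(KMSACLs.Type.DECRYPT_EEK, hdfs));   // false
  }
}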
+ + + hadoop.kms.acl.SET_KEY_MATERIAL * @@ -339,6 +394,15 @@ $ keytool -genkey -alias tomcat -keyalg RSA + + hadoop.kms.blacklist.SET_KEY_MATERIAL + hdfs,foo + + Complementary blacklist for the CREATE and ROLLOVER operations; users listed + here are not allowed to provide the key material when creating or rolling a key. + + + hadoop.kms.acl.GENERATE_EEK * @@ -348,6 +412,15 @@ $ keytool -genkey -alias tomcat -keyalg RSA + + hadoop.kms.blacklist.GENERATE_EEK + hdfs,foo + + Blacklist for generateEncryptedKey + CryptoExtension operations + + + hadoop.kms.acl.DECRYPT_EEK * @@ -357,6 +430,17 @@ $ keytool -genkey -alias tomcat -keyalg RSA + + + hadoop.kms.blacklist.DECRYPT_EEK + hdfs,foo + + Blacklist for decrypt EncryptedKey + CryptoExtension operations + + + + +---+ ** KMS Delegation Token Configuration diff --git a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java index be0a229b8d7..52f6354cea4 100644 --- a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java +++ b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java @@ -268,6 +268,8 @@ public class TestKMS { List principals = new ArrayList(); principals.add("HTTP/localhost"); principals.add("client"); + principals.add("hdfs"); + principals.add("otheradmin"); principals.add("client/host"); principals.add("client1"); for (KMSACLs.Type type : KMSACLs.Type.values()) { @@ -621,12 +623,12 @@ public class TestKMS { conf.set("hadoop.kms.authentication.kerberos.name.rules", "DEFAULT"); for (KMSACLs.Type type : KMSACLs.Type.values()) { - conf.set(type.getConfigKey(), type.toString()); + conf.set(type.getAclConfigKey(), type.toString()); } - conf.set(KMSACLs.Type.CREATE.getConfigKey(), + conf.set(KMSACLs.Type.CREATE.getAclConfigKey(), KMSACLs.Type.CREATE.toString() + ",SET_KEY_MATERIAL"); - conf.set(KMSACLs.Type.ROLLOVER.getConfigKey(), + conf.set(KMSACLs.Type.ROLLOVER.getAclConfigKey(), KMSACLs.Type.ROLLOVER.toString() + ",SET_KEY_MATERIAL"); writeConf(testDir, conf); @@ -884,7 +886,7 @@ public class TestKMS { // test ACL reloading Thread.sleep(10); // to ensure the ACLs file modifiedTime is newer - conf.set(KMSACLs.Type.CREATE.getConfigKey(), "foo"); + conf.set(KMSACLs.Type.CREATE.getAclConfigKey(), "foo"); writeConf(testDir, conf); Thread.sleep(1000); @@ -914,6 +916,92 @@ public class TestKMS { }); } + @Test + public void testKMSBlackList() throws Exception { + Configuration conf = new Configuration(); + conf.set("hadoop.security.authentication", "kerberos"); + UserGroupInformation.setConfiguration(conf); + File testDir = getTestDir(); + conf = createBaseKMSConf(testDir); + conf.set("hadoop.kms.authentication.type", "kerberos"); + conf.set("hadoop.kms.authentication.kerberos.keytab", + keytab.getAbsolutePath()); + conf.set("hadoop.kms.authentication.kerberos.principal", "HTTP/localhost"); + conf.set("hadoop.kms.authentication.kerberos.name.rules", "DEFAULT"); + for (KMSACLs.Type type : KMSACLs.Type.values()) { + conf.set(type.getAclConfigKey(), " "); + } + conf.set(KMSACLs.Type.CREATE.getAclConfigKey(), "client,hdfs,otheradmin"); + conf.set(KMSACLs.Type.GENERATE_EEK.getAclConfigKey(), "client,hdfs,otheradmin"); + conf.set(KMSACLs.Type.DECRYPT_EEK.getAclConfigKey(), "client,hdfs,otheradmin"); + conf.set(KMSACLs.Type.DECRYPT_EEK.getBlacklistConfigKey(), "hdfs,otheradmin"); + + writeConf(testDir, conf); + + runServer(null, null,
testDir, new KMSCallable() { + @Override + public Void call() throws Exception { + final Configuration conf = new Configuration(); + conf.setInt(KeyProvider.DEFAULT_BITLENGTH_NAME, 128); + final URI uri = createKMSUri(getKMSUrl()); + + doAs("client", new PrivilegedExceptionAction() { + @Override + public Void run() throws Exception { + try { + KMSClientProvider kp = new KMSClientProvider(uri, conf); + KeyProvider.KeyVersion kv = kp.createKey("ck0", + new KeyProvider.Options(conf)); + EncryptedKeyVersion eek = + kp.generateEncryptedKey("ck0"); + kp.decryptEncryptedKey(eek); + Assert.assertNull(kv.getMaterial()); + } catch (Exception ex) { + Assert.fail(ex.getMessage()); + } + return null; + } + }); + + doAs("hdfs", new PrivilegedExceptionAction() { + @Override + public Void run() throws Exception { + try { + KMSClientProvider kp = new KMSClientProvider(uri, conf); + KeyProvider.KeyVersion kv = kp.createKey("ck1", + new KeyProvider.Options(conf)); + EncryptedKeyVersion eek = + kp.generateEncryptedKey("ck1"); + kp.decryptEncryptedKey(eek); + Assert.fail("admin user must not be allowed to decrypt !!"); + } catch (Exception ex) { + } + return null; + } + }); + + doAs("otheradmin", new PrivilegedExceptionAction() { + @Override + public Void run() throws Exception { + try { + KMSClientProvider kp = new KMSClientProvider(uri, conf); + KeyProvider.KeyVersion kv = kp.createKey("ck2", + new KeyProvider.Options(conf)); + EncryptedKeyVersion eek = + kp.generateEncryptedKey("ck2"); + kp.decryptEncryptedKey(eek); + Assert.fail("admin user must not be allowed to decrypt !!"); + } catch (Exception ex) { + } + return null; + } + }); + + return null; + } + }); + } + @Test public void testServicePrincipalACLs() throws Exception { Configuration conf = new Configuration(); @@ -927,9 +1015,9 @@ public class TestKMS { conf.set("hadoop.kms.authentication.kerberos.principal", "HTTP/localhost"); conf.set("hadoop.kms.authentication.kerberos.name.rules", "DEFAULT"); for (KMSACLs.Type type : KMSACLs.Type.values()) { - conf.set(type.getConfigKey(), " "); + conf.set(type.getAclConfigKey(), " "); } - conf.set(KMSACLs.Type.CREATE.getConfigKey(), "client"); + conf.set(KMSACLs.Type.CREATE.getAclConfigKey(), "client"); writeConf(testDir, conf); diff --git a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSACLs.java b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSACLs.java index 7c0ad3bc9d1..abdf3c21d02 100644 --- a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSACLs.java +++ b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSACLs.java @@ -37,7 +37,7 @@ public class TestKMSACLs { public void testCustom() { Configuration conf = new Configuration(false); for (KMSACLs.Type type : KMSACLs.Type.values()) { - conf.set(type.getConfigKey(), type.toString() + " "); + conf.set(type.getAclConfigKey(), type.toString() + " "); } KMSACLs acls = new KMSACLs(conf); for (KMSACLs.Type type : KMSACLs.Type.values()) { diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3FileAttributes.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3FileAttributes.java index 9936b8d0ee7..47126d6a372 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3FileAttributes.java +++ 
b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3FileAttributes.java @@ -53,9 +53,19 @@ public class Nfs3FileAttributes { * For Hadoop, currently this field is always zero. */ public static class Specdata3 { - final static int specdata1 = 0; - final static int specdata2 = 0; + final int specdata1; + final int specdata2; + public Specdata3() { + specdata1 = 0; + specdata2 = 0; + } + + public Specdata3(int specdata1, int specdata2) { + this.specdata1 = specdata1; + this.specdata2 = specdata2; + } + public int getSpecdata1() { return specdata1; } diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/ACCESS3Request.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/ACCESS3Request.java index 2470108d97a..ea1ba86b9cf 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/ACCESS3Request.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/ACCESS3Request.java @@ -19,13 +19,24 @@ package org.apache.hadoop.nfs.nfs3.request; import java.io.IOException; +import org.apache.hadoop.nfs.nfs3.FileHandle; import org.apache.hadoop.oncrpc.XDR; /** * ACCESS3 Request */ public class ACCESS3Request extends RequestWithHandle { - public ACCESS3Request(XDR xdr) throws IOException { - super(xdr); + public static ACCESS3Request deserialize(XDR xdr) throws IOException { + FileHandle handle = readHandle(xdr); + return new ACCESS3Request(handle); + } + + public ACCESS3Request(FileHandle handle) { + super(handle); + } + + @Override + public void serialize(XDR xdr) { + handle.serialize(xdr); } } diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/COMMIT3Request.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/COMMIT3Request.java index 810c41bdd84..ba84d4298f1 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/COMMIT3Request.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/COMMIT3Request.java @@ -19,6 +19,7 @@ package org.apache.hadoop.nfs.nfs3.request; import java.io.IOException; +import org.apache.hadoop.nfs.nfs3.FileHandle; import org.apache.hadoop.oncrpc.XDR; /** @@ -28,10 +29,17 @@ public class COMMIT3Request extends RequestWithHandle { private final long offset; private final int count; - public COMMIT3Request(XDR xdr) throws IOException { - super(xdr); - offset = xdr.readHyper(); - count = xdr.readInt(); + public static COMMIT3Request deserialize(XDR xdr) throws IOException { + FileHandle handle = readHandle(xdr); + long offset = xdr.readHyper(); + int count = xdr.readInt(); + return new COMMIT3Request(handle, offset, count); + } + + public COMMIT3Request(FileHandle handle, long offset, int count) { + super(handle); + this.offset = offset; + this.count = count; } public long getOffset() { @@ -41,4 +49,11 @@ public class COMMIT3Request extends RequestWithHandle { public int getCount() { return this.count; } + + @Override + public void serialize(XDR xdr) { + handle.serialize(xdr); + xdr.writeLongAsHyper(offset); + xdr.writeInt(count); + } } \ No newline at end of file diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/CREATE3Request.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/CREATE3Request.java index b444c99ca79..473d5276463 100644 --- 
a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/CREATE3Request.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/CREATE3Request.java @@ -29,8 +29,8 @@ import org.apache.hadoop.oncrpc.XDR; public class CREATE3Request extends RequestWithHandle { private final String name; private final int mode; - private SetAttr3 objAttr = null; - private long verf; + private final SetAttr3 objAttr; + private long verf = 0; public CREATE3Request(FileHandle handle, String name, int mode, SetAttr3 objAttr, long verf) { @@ -41,12 +41,12 @@ public class CREATE3Request extends RequestWithHandle { this.verf = verf; } - public CREATE3Request(XDR xdr) throws IOException { - super(xdr); - name = xdr.readString(); - mode = xdr.readInt(); - - objAttr = new SetAttr3(); + public static CREATE3Request deserialize(XDR xdr) throws IOException { + FileHandle handle = readHandle(xdr); + String name = xdr.readString(); + int mode = xdr.readInt(); + SetAttr3 objAttr = new SetAttr3(); + long verf = 0; if ((mode == Nfs3Constant.CREATE_UNCHECKED) || (mode == Nfs3Constant.CREATE_GUARDED)) { objAttr.deserialize(xdr); @@ -55,6 +55,7 @@ public class CREATE3Request extends RequestWithHandle { } else { throw new IOException("Wrong create mode:" + mode); } + return new CREATE3Request(handle, name, mode, objAttr, verf); } public String getName() { @@ -81,4 +82,5 @@ public class CREATE3Request extends RequestWithHandle { xdr.writeInt(mode); objAttr.serialize(xdr); } + } \ No newline at end of file diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/FSINFO3Request.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/FSINFO3Request.java index 26b65be6173..92c8ed87860 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/FSINFO3Request.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/FSINFO3Request.java @@ -19,13 +19,24 @@ package org.apache.hadoop.nfs.nfs3.request; import java.io.IOException; +import org.apache.hadoop.nfs.nfs3.FileHandle; import org.apache.hadoop.oncrpc.XDR; /** * FSINFO3 Request */ public class FSINFO3Request extends RequestWithHandle { - public FSINFO3Request(XDR xdr) throws IOException { - super(xdr); + public static FSINFO3Request deserialize(XDR xdr) throws IOException { + FileHandle handle = readHandle(xdr); + return new FSINFO3Request(handle); + } + + public FSINFO3Request(FileHandle handle) { + super(handle); + } + + @Override + public void serialize(XDR xdr) { + handle.serialize(xdr); } } \ No newline at end of file diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/FSSTAT3Request.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/FSSTAT3Request.java index 90bec155432..c6c620df4d2 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/FSSTAT3Request.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/FSSTAT3Request.java @@ -19,13 +19,24 @@ package org.apache.hadoop.nfs.nfs3.request; import java.io.IOException; +import org.apache.hadoop.nfs.nfs3.FileHandle; import org.apache.hadoop.oncrpc.XDR; /** * FSSTAT3 Request */ public class FSSTAT3Request extends RequestWithHandle { - public FSSTAT3Request(XDR xdr) throws IOException { - super(xdr); + public static FSSTAT3Request deserialize(XDR xdr) throws IOException { + 
FileHandle handle = readHandle(xdr); + return new FSSTAT3Request(handle); + } + + public FSSTAT3Request(FileHandle handle) { + super(handle); + } + + @Override + public void serialize(XDR xdr) { + handle.serialize(xdr); } } \ No newline at end of file diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/GETATTR3Request.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/GETATTR3Request.java index e1d69d1f570..b06b4b1b2b3 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/GETATTR3Request.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/GETATTR3Request.java @@ -19,13 +19,24 @@ package org.apache.hadoop.nfs.nfs3.request; import java.io.IOException; +import org.apache.hadoop.nfs.nfs3.FileHandle; import org.apache.hadoop.oncrpc.XDR; /** * GETATTR3 Request */ public class GETATTR3Request extends RequestWithHandle { - public GETATTR3Request(XDR xdr) throws IOException { - super(xdr); + public static GETATTR3Request deserialize(XDR xdr) throws IOException { + FileHandle handle = readHandle(xdr); + return new GETATTR3Request(handle); + } + + public GETATTR3Request(FileHandle handle) { + super(handle); + } + + @Override + public void serialize(XDR xdr) { + handle.serialize(xdr); } } \ No newline at end of file diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/LINK3Request.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/LINK3Request.java new file mode 100644 index 00000000000..2e959f59f90 --- /dev/null +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/LINK3Request.java @@ -0,0 +1,61 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.nfs.nfs3.request; + +import java.io.IOException; + +import org.apache.hadoop.nfs.nfs3.FileHandle; +import org.apache.hadoop.oncrpc.XDR; + +/** + * LINK3 Request + */ +public class LINK3Request extends RequestWithHandle { + private final FileHandle fromDirHandle; + private final String fromName; + + public LINK3Request(FileHandle handle, FileHandle fromDirHandle, + String fromName) { + super(handle); + this.fromDirHandle = fromDirHandle; + this.fromName = fromName; + } + + public static LINK3Request deserialize(XDR xdr) throws IOException { + FileHandle handle = readHandle(xdr); + FileHandle fromDirHandle = readHandle(xdr); + String fromName = xdr.readString(); + return new LINK3Request(handle, fromDirHandle, fromName); + } + + public FileHandle getFromDirHandle() { + return fromDirHandle; + } + + public String getFromName() { + return fromName; + } + + @Override + public void serialize(XDR xdr) { + handle.serialize(xdr); + fromDirHandle.serialize(xdr); + xdr.writeInt(fromName.length()); + xdr.writeFixedOpaque(fromName.getBytes(), fromName.length()); + } +} diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/LOOKUP3Request.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/LOOKUP3Request.java index e461ec32648..4661821a68b 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/LOOKUP3Request.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/LOOKUP3Request.java @@ -35,9 +35,10 @@ public class LOOKUP3Request extends RequestWithHandle { this.name = name; } - public LOOKUP3Request(XDR xdr) throws IOException { - super(xdr); - name = xdr.readString(); + public static LOOKUP3Request deserialize(XDR xdr) throws IOException { + FileHandle handle = readHandle(xdr); + String name = xdr.readString(); + return new LOOKUP3Request(handle, name); } public String getName() { @@ -51,7 +52,7 @@ public class LOOKUP3Request extends RequestWithHandle { @Override @VisibleForTesting public void serialize(XDR xdr) { - super.serialize(xdr); + handle.serialize(xdr); xdr.writeInt(name.getBytes().length); xdr.writeFixedOpaque(name.getBytes()); } diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/MKDIR3Request.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/MKDIR3Request.java index 170de8cf56d..b3ef828a7ec 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/MKDIR3Request.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/MKDIR3Request.java @@ -19,6 +19,7 @@ package org.apache.hadoop.nfs.nfs3.request; import java.io.IOException; +import org.apache.hadoop.nfs.nfs3.FileHandle; import org.apache.hadoop.oncrpc.XDR; /** @@ -28,13 +29,20 @@ public class MKDIR3Request extends RequestWithHandle { private final String name; private final SetAttr3 objAttr; - public MKDIR3Request(XDR xdr) throws IOException { - super(xdr); - name = xdr.readString(); - objAttr = new SetAttr3(); + public static MKDIR3Request deserialize(XDR xdr) throws IOException { + FileHandle handle = readHandle(xdr); + String name = xdr.readString(); + SetAttr3 objAttr = new SetAttr3(); objAttr.deserialize(xdr); + return new MKDIR3Request(handle, name, objAttr); } - + + public MKDIR3Request(FileHandle handle, String name, SetAttr3 objAttr) { + super(handle); + this.name = name; + this.objAttr = objAttr; 
+ } + public String getName() { return name; } @@ -42,4 +50,12 @@ public class MKDIR3Request extends RequestWithHandle { public SetAttr3 getObjAttr() { return objAttr; } + + @Override + public void serialize(XDR xdr) { + handle.serialize(xdr); + xdr.writeInt(name.getBytes().length); + xdr.writeFixedOpaque(name.getBytes()); + objAttr.serialize(xdr); + } } \ No newline at end of file diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/MKNOD3Request.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/MKNOD3Request.java new file mode 100644 index 00000000000..4a13f879ea3 --- /dev/null +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/MKNOD3Request.java @@ -0,0 +1,89 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.nfs.nfs3.request; + +import java.io.IOException; + +import org.apache.hadoop.nfs.NfsFileType; +import org.apache.hadoop.nfs.nfs3.FileHandle; +import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes.Specdata3; +import org.apache.hadoop.oncrpc.XDR; + +/** + * MKNOD3 Request + */ +public class MKNOD3Request extends RequestWithHandle { + private final String name; + private int type; + private SetAttr3 objAttr = null; + private Specdata3 spec = null; + + public MKNOD3Request(FileHandle handle, String name, int type, + SetAttr3 objAttr, Specdata3 spec) { + super(handle); + this.name = name; + this.type = type; + this.objAttr = objAttr; + this.spec = spec; + } + + public static MKNOD3Request deserialize(XDR xdr) throws IOException { + FileHandle handle = readHandle(xdr); + String name = xdr.readString(); + int type = xdr.readInt(); + SetAttr3 objAttr = new SetAttr3(); + Specdata3 spec = null; + if (type == NfsFileType.NFSCHR.toValue() + || type == NfsFileType.NFSBLK.toValue()) { + objAttr.deserialize(xdr); + spec = new Specdata3(xdr.readInt(), xdr.readInt()); + } else if (type == NfsFileType.NFSSOCK.toValue() + || type == NfsFileType.NFSFIFO.toValue()) { + objAttr.deserialize(xdr); + } + return new MKNOD3Request(handle, name, type, objAttr, spec); + } + + public String getName() { + return name; + } + + public int getType() { + return type; + } + + public SetAttr3 getObjAttr() { + return objAttr; + } + + public Specdata3 getSpec() { + return spec; + } + + @Override + public void serialize(XDR xdr) { + handle.serialize(xdr); + xdr.writeInt(name.length()); + xdr.writeFixedOpaque(name.getBytes(), name.length()); + objAttr.serialize(xdr); + if (spec != null) { + xdr.writeInt(spec.getSpecdata1()); + xdr.writeInt(spec.getSpecdata2()); + } + } +} diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/NFS3Request.java 
b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/NFS3Request.java new file mode 100644 index 00000000000..cffa215f313 --- /dev/null +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/NFS3Request.java @@ -0,0 +1,45 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.nfs.nfs3.request; + +import java.io.IOException; + +import org.apache.hadoop.nfs.nfs3.FileHandle; +import org.apache.hadoop.oncrpc.XDR; + +/** + * An NFS request that uses {@link FileHandle} to identify a file. + */ +public abstract class NFS3Request { + + /** + * Deserialize a handle from an XDR object + */ + static FileHandle readHandle(XDR xdr) throws IOException { + FileHandle handle = new FileHandle(); + if (!handle.deserialize(xdr)) { + throw new IOException("can't deserialize file handle"); + } + return handle; + } + + /** + * Subclass should implement. Usually handle is the first to be serialized + */ + public abstract void serialize(XDR xdr); +} diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/PATHCONF3Request.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/PATHCONF3Request.java index d5142646878..bff80384ca7 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/PATHCONF3Request.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/PATHCONF3Request.java @@ -19,13 +19,24 @@ package org.apache.hadoop.nfs.nfs3.request; import java.io.IOException; +import org.apache.hadoop.nfs.nfs3.FileHandle; import org.apache.hadoop.oncrpc.XDR; /** * PATHCONF3 Request */ public class PATHCONF3Request extends RequestWithHandle { - public PATHCONF3Request(XDR xdr) throws IOException { - super(xdr); + public static PATHCONF3Request deserialize(XDR xdr) throws IOException { + FileHandle handle = readHandle(xdr); + return new PATHCONF3Request(handle); + } + + public PATHCONF3Request(FileHandle handle) { + super(handle); + } + + @Override + public void serialize(XDR xdr) { + handle.serialize(xdr); } } diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/READ3Request.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/READ3Request.java index 6d95f5e9f84..5898ec588ff 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/READ3Request.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/READ3Request.java @@ -31,10 +31,11 @@ public class READ3Request extends RequestWithHandle { private final long offset; private final int count; - public READ3Request(XDR xdr) throws IOException 
{ - super(xdr); - offset = xdr.readHyper(); - count = xdr.readInt(); + public static READ3Request deserialize(XDR xdr) throws IOException { + FileHandle handle = readHandle(xdr); + long offset = xdr.readHyper(); + int count = xdr.readInt(); + return new READ3Request(handle, offset, count); } @VisibleForTesting diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/READDIR3Request.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/READDIR3Request.java index c9835b9a321..79245c1ced6 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/READDIR3Request.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/READDIR3Request.java @@ -19,6 +19,7 @@ package org.apache.hadoop.nfs.nfs3.request; import java.io.IOException; +import org.apache.hadoop.nfs.nfs3.FileHandle; import org.apache.hadoop.oncrpc.XDR; /** @@ -29,13 +30,22 @@ public class READDIR3Request extends RequestWithHandle { private final long cookieVerf; private final int count; - public READDIR3Request(XDR xdr) throws IOException { - super(xdr); - cookie = xdr.readHyper(); - cookieVerf = xdr.readHyper(); - count = xdr.readInt(); + public static READDIR3Request deserialize(XDR xdr) throws IOException { + FileHandle handle = readHandle(xdr); + long cookie = xdr.readHyper(); + long cookieVerf = xdr.readHyper(); + int count = xdr.readInt(); + return new READDIR3Request(handle, cookie, cookieVerf, count); } - + + public READDIR3Request(FileHandle handle, long cookie, long cookieVerf, + int count) { + super(handle); + this.cookie = cookie; + this.cookieVerf = cookieVerf; + this.count = count; + } + public long getCookie() { return this.cookie; } @@ -47,4 +57,12 @@ public class READDIR3Request extends RequestWithHandle { public long getCount() { return this.count; } + + @Override + public void serialize(XDR xdr) { + handle.serialize(xdr); + xdr.writeLongAsHyper(cookie); + xdr.writeLongAsHyper(cookieVerf); + xdr.writeInt(count); + } } \ No newline at end of file diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/READDIRPLUS3Request.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/READDIRPLUS3Request.java index 2994fe996ba..c1e43652e85 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/READDIRPLUS3Request.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/READDIRPLUS3Request.java @@ -19,6 +19,7 @@ package org.apache.hadoop.nfs.nfs3.request; import java.io.IOException; +import org.apache.hadoop.nfs.nfs3.FileHandle; import org.apache.hadoop.oncrpc.XDR; /** @@ -30,14 +31,25 @@ public class READDIRPLUS3Request extends RequestWithHandle { private final int dirCount; private final int maxCount; - public READDIRPLUS3Request(XDR xdr) throws IOException { - super(xdr); - cookie = xdr.readHyper(); - cookieVerf = xdr.readHyper(); - dirCount = xdr.readInt(); - maxCount = xdr.readInt(); + public static READDIRPLUS3Request deserialize(XDR xdr) throws IOException { + FileHandle handle = readHandle(xdr); + long cookie = xdr.readHyper(); + long cookieVerf = xdr.readHyper(); + int dirCount = xdr.readInt(); + int maxCount = xdr.readInt(); + return new READDIRPLUS3Request(handle, cookie, cookieVerf, dirCount, + maxCount); } + public READDIRPLUS3Request(FileHandle handle, long cookie, long cookieVerf, + int dirCount, int maxCount) { + 
super(handle); + this.cookie = cookie; + this.cookieVerf = cookieVerf; + this.dirCount = dirCount; + this.maxCount = maxCount; + } + public long getCookie() { return this.cookie; } @@ -53,4 +65,13 @@ public class READDIRPLUS3Request extends RequestWithHandle { public int getMaxCount() { return maxCount; } + + @Override + public void serialize(XDR xdr) { + handle.serialize(xdr); + xdr.writeLongAsHyper(cookie); + xdr.writeLongAsHyper(cookieVerf); + xdr.writeInt(dirCount); + xdr.writeInt(maxCount); + } } \ No newline at end of file diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/READLINK3Request.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/READLINK3Request.java index 3b0e8a4dbe2..15fe8f0feed 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/READLINK3Request.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/READLINK3Request.java @@ -19,6 +19,7 @@ package org.apache.hadoop.nfs.nfs3.request; import java.io.IOException; +import org.apache.hadoop.nfs.nfs3.FileHandle; import org.apache.hadoop.oncrpc.XDR; /** @@ -26,7 +27,17 @@ import org.apache.hadoop.oncrpc.XDR; */ public class READLINK3Request extends RequestWithHandle { - public READLINK3Request(XDR xdr) throws IOException { - super(xdr); + public static READLINK3Request deserialize(XDR xdr) throws IOException { + FileHandle handle = readHandle(xdr); + return new READLINK3Request(handle); + } + + public READLINK3Request(FileHandle handle) { + super(handle); + } + + @Override + public void serialize(XDR xdr) { + handle.serialize(xdr); } } \ No newline at end of file diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/REMOVE3Request.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/REMOVE3Request.java index 901d80332e0..ffd47b0e5dc 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/REMOVE3Request.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/REMOVE3Request.java @@ -19,6 +19,7 @@ package org.apache.hadoop.nfs.nfs3.request; import java.io.IOException; +import org.apache.hadoop.nfs.nfs3.FileHandle; import org.apache.hadoop.oncrpc.XDR; /** @@ -27,12 +28,25 @@ import org.apache.hadoop.oncrpc.XDR; public class REMOVE3Request extends RequestWithHandle { private final String name; - public REMOVE3Request(XDR xdr) throws IOException { - super(xdr); - name = xdr.readString(); + public static REMOVE3Request deserialize(XDR xdr) throws IOException { + FileHandle handle = readHandle(xdr); + String name = xdr.readString(); + return new REMOVE3Request(handle, name); } + public REMOVE3Request(FileHandle handle, String name) { + super(handle); + this.name = name; + } + public String getName() { return this.name; } + + @Override + public void serialize(XDR xdr) { + handle.serialize(xdr); + xdr.writeInt(name.getBytes().length); + xdr.writeFixedOpaque(name.getBytes()); + } } \ No newline at end of file diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/RENAME3Request.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/RENAME3Request.java index 6fdccffdea3..5144e8a4910 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/RENAME3Request.java +++ 
b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/RENAME3Request.java @@ -25,23 +25,26 @@ import org.apache.hadoop.oncrpc.XDR; /** * RENAME3 Request */ -public class RENAME3Request { +public class RENAME3Request extends NFS3Request { private final FileHandle fromDirHandle; private final String fromName; private final FileHandle toDirHandle; private final String toName; - public RENAME3Request(XDR xdr) throws IOException { - fromDirHandle = new FileHandle(); - if (!fromDirHandle.deserialize(xdr)) { - throw new IOException("can't deserialize file handle"); - } - fromName = xdr.readString(); - toDirHandle = new FileHandle(); - if (!toDirHandle.deserialize(xdr)) { - throw new IOException("can't deserialize file handle"); - } - toName = xdr.readString(); + public static RENAME3Request deserialize(XDR xdr) throws IOException { + FileHandle fromDirHandle = readHandle(xdr); + String fromName = xdr.readString(); + FileHandle toDirHandle = readHandle(xdr); + String toName = xdr.readString(); + return new RENAME3Request(fromDirHandle, fromName, toDirHandle, toName); + } + + public RENAME3Request(FileHandle fromDirHandle, String fromName, + FileHandle toDirHandle, String toName) { + this.fromDirHandle = fromDirHandle; + this.fromName = fromName; + this.toDirHandle = toDirHandle; + this.toName = toName; } public FileHandle getFromDirHandle() { @@ -59,4 +62,14 @@ public class RENAME3Request { public String getToName() { return toName; } + + @Override + public void serialize(XDR xdr) { + fromDirHandle.serialize(xdr); + xdr.writeInt(fromName.getBytes().length); + xdr.writeFixedOpaque(fromName.getBytes()); + toDirHandle.serialize(xdr); + xdr.writeInt(toName.getBytes().length); + xdr.writeFixedOpaque(toName.getBytes()); + } } \ No newline at end of file diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/RMDIR3Request.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/RMDIR3Request.java index 8fd5b7026b3..e9977fa5488 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/RMDIR3Request.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/RMDIR3Request.java @@ -19,6 +19,7 @@ package org.apache.hadoop.nfs.nfs3.request; import java.io.IOException; +import org.apache.hadoop.nfs.nfs3.FileHandle; import org.apache.hadoop.oncrpc.XDR; /** @@ -27,12 +28,25 @@ import org.apache.hadoop.oncrpc.XDR; public class RMDIR3Request extends RequestWithHandle { private final String name; - public RMDIR3Request(XDR xdr) throws IOException { - super(xdr); - name = xdr.readString(); + public static RMDIR3Request deserialize(XDR xdr) throws IOException { + FileHandle handle = readHandle(xdr); + String name = xdr.readString(); + return new RMDIR3Request(handle, name); } + public RMDIR3Request(FileHandle handle, String name) { + super(handle); + this.name = name; + } + public String getName() { return this.name; } + + @Override + public void serialize(XDR xdr) { + handle.serialize(xdr); + xdr.writeInt(name.getBytes().length); + xdr.writeFixedOpaque(name.getBytes()); + } } \ No newline at end of file diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/RequestWithHandle.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/RequestWithHandle.java index a3b19a12be3..9f9539c1d4e 100644 --- 
a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/RequestWithHandle.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/RequestWithHandle.java @@ -17,33 +17,19 @@ */ package org.apache.hadoop.nfs.nfs3.request; -import java.io.IOException; - import org.apache.hadoop.nfs.nfs3.FileHandle; -import org.apache.hadoop.oncrpc.XDR; /** * An NFS request that uses {@link FileHandle} to identify a file. */ -public class RequestWithHandle { +public abstract class RequestWithHandle extends NFS3Request { protected final FileHandle handle; RequestWithHandle(FileHandle handle) { this.handle = handle; } - - RequestWithHandle(XDR xdr) throws IOException { - handle = new FileHandle(); - if (!handle.deserialize(xdr)) { - throw new IOException("can't deserialize file handle"); - } - } public FileHandle getHandle() { return this.handle; } - - public void serialize(XDR xdr) { - handle.serialize(xdr); - } } diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/SETATTR3Request.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/SETATTR3Request.java index 05e8c0380b2..c5f668cf9a0 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/SETATTR3Request.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/SETATTR3Request.java @@ -20,6 +20,7 @@ package org.apache.hadoop.nfs.nfs3.request; import java.io.IOException; import org.apache.hadoop.nfs.NfsTime; +import org.apache.hadoop.nfs.nfs3.FileHandle; import org.apache.hadoop.oncrpc.XDR; /** @@ -38,16 +39,26 @@ public class SETATTR3Request extends RequestWithHandle { private final boolean check; private final NfsTime ctime; - public SETATTR3Request(XDR xdr) throws IOException { - super(xdr); - attr = new SetAttr3(); + public static SETATTR3Request deserialize(XDR xdr) throws IOException { + FileHandle handle = readHandle(xdr); + SetAttr3 attr = new SetAttr3(); attr.deserialize(xdr); - check = xdr.readBoolean(); + boolean check = xdr.readBoolean(); + NfsTime ctime; if (check) { ctime = NfsTime.deserialize(xdr); } else { ctime = null; } + return new SETATTR3Request(handle, attr, check, ctime); + } + + public SETATTR3Request(FileHandle handle, SetAttr3 attr, boolean check, + NfsTime ctime) { + super(handle); + this.attr = attr; + this.check = check; + this.ctime = ctime; } public SetAttr3 getAttr() { @@ -61,4 +72,14 @@ public class SETATTR3Request extends RequestWithHandle { public NfsTime getCtime() { return ctime; } + + @Override + public void serialize(XDR xdr) { + handle.serialize(xdr); + attr.serialize(xdr); + xdr.writeBoolean(check); + if (check) { + ctime.serialize(xdr); + } + } } \ No newline at end of file diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/SYMLINK3Request.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/SYMLINK3Request.java index 6e74d1aa61b..288079449dc 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/SYMLINK3Request.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/SYMLINK3Request.java @@ -19,6 +19,7 @@ package org.apache.hadoop.nfs.nfs3.request; import java.io.IOException; +import org.apache.hadoop.nfs.nfs3.FileHandle; import org.apache.hadoop.oncrpc.XDR; /** @@ -29,14 +30,23 @@ public class SYMLINK3Request extends RequestWithHandle { private final 
SetAttr3 symAttr; private final String symData; // It contains the target - public SYMLINK3Request(XDR xdr) throws IOException { - super(xdr); - name = xdr.readString(); - symAttr = new SetAttr3(); + public static SYMLINK3Request deserialize(XDR xdr) throws IOException { + FileHandle handle = readHandle(xdr); + String name = xdr.readString(); + SetAttr3 symAttr = new SetAttr3(); symAttr.deserialize(xdr); - symData = xdr.readString(); + String symData = xdr.readString(); + return new SYMLINK3Request(handle, name, symAttr, symData); } + public SYMLINK3Request(FileHandle handle, String name, SetAttr3 symAttr, + String symData) { + super(handle); + this.name = name; + this.symAttr = symAttr; + this.symData = symData; + } + public String getName() { return name; } @@ -48,4 +58,14 @@ public class SYMLINK3Request extends RequestWithHandle { public String getSymData() { return symData; } + + @Override + public void serialize(XDR xdr) { + handle.serialize(xdr); + xdr.writeInt(name.getBytes().length); + xdr.writeFixedOpaque(name.getBytes()); + symAttr.serialize(xdr); + xdr.writeInt(symData.getBytes().length); + xdr.writeFixedOpaque(symData.getBytes()); + } } \ No newline at end of file diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/SetAttr3.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/SetAttr3.java index 373425f5aa9..e8e637c44cd 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/SetAttr3.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/SetAttr3.java @@ -52,6 +52,15 @@ public class SetAttr3 { size = 0; updateFields = EnumSet.noneOf(SetAttrField.class); } + + public SetAttr3(int mode, int uid, int gid, long size, NfsTime atime, + NfsTime mtime, EnumSet updateFields) { + this.mode = mode; + this.uid = uid; + this.gid = gid; + this.size = size; + this.updateFields = updateFields; + } public int getMode() { return mode; diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/WRITE3Request.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/WRITE3Request.java index 8a1ff8a1d5a..d85dcbbd78c 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/WRITE3Request.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/WRITE3Request.java @@ -33,12 +33,13 @@ public class WRITE3Request extends RequestWithHandle { private final WriteStableHow stableHow; private final ByteBuffer data; - public WRITE3Request(XDR xdr) throws IOException { - super(xdr); - offset = xdr.readHyper(); - count = xdr.readInt(); - stableHow = WriteStableHow.fromValue(xdr.readInt()); - data = ByteBuffer.wrap(xdr.readFixedOpaque(xdr.readInt())); + public static WRITE3Request deserialize(XDR xdr) throws IOException { + FileHandle handle = readHandle(xdr); + long offset = xdr.readHyper(); + int count = xdr.readInt(); + WriteStableHow stableHow = WriteStableHow.fromValue(xdr.readInt()); + ByteBuffer data = ByteBuffer.wrap(xdr.readFixedOpaque(xdr.readInt())); + return new WRITE3Request(handle, offset, count, stableHow, data); } public WRITE3Request(FileHandle handle, final long offset, final int count, diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/LINK3Response.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/LINK3Response.java new 
file mode 100644 index 00000000000..834ee3ccd3d --- /dev/null +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/LINK3Response.java @@ -0,0 +1,54 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.nfs.nfs3.response; + +import org.apache.hadoop.oncrpc.XDR; +import org.apache.hadoop.oncrpc.security.Verifier; + +public class LINK3Response extends NFS3Response { + private final WccData fromDirWcc; + private final WccData linkDirWcc; + + public LINK3Response(int status) { + this(status, new WccData(null, null), new WccData(null, null)); + } + + public LINK3Response(int status, WccData fromDirWcc, + WccData linkDirWcc) { + super(status); + this.fromDirWcc = fromDirWcc; + this.linkDirWcc = linkDirWcc; + } + + public WccData getFromDirWcc() { + return fromDirWcc; + } + + public WccData getLinkDirWcc() { + return linkDirWcc; + } + + @Override + public XDR writeHeaderAndResponse(XDR out, int xid, Verifier verifier) { + super.writeHeaderAndResponse(out, xid, verifier); + fromDirWcc.serialize(out); + linkDirWcc.serialize(out); + + return out; + } +} diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/MKNOD3Response.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/MKNOD3Response.java new file mode 100644 index 00000000000..292094ebb3e --- /dev/null +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/MKNOD3Response.java @@ -0,0 +1,68 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.nfs.nfs3.response; + +import org.apache.hadoop.nfs.nfs3.FileHandle; +import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes; +import org.apache.hadoop.nfs.nfs3.Nfs3Status; +import org.apache.hadoop.oncrpc.XDR; +import org.apache.hadoop.oncrpc.security.Verifier; + +public class MKNOD3Response extends NFS3Response { + private final FileHandle objFileHandle; + private final Nfs3FileAttributes objPostOpAttr; + private final WccData dirWcc; + + public MKNOD3Response(int status) { + this(status, null, null, new WccData(null, null)); + } + + public MKNOD3Response(int status, FileHandle handle, + Nfs3FileAttributes attrs, WccData dirWcc) { + super(status); + this.objFileHandle = handle; + this.objPostOpAttr = attrs; + this.dirWcc = dirWcc; + } + + public FileHandle getObjFileHandle() { + return objFileHandle; + } + + public Nfs3FileAttributes getObjPostOpAttr() { + return objPostOpAttr; + } + + public WccData getDirWcc() { + return dirWcc; + } + + @Override + public XDR writeHeaderAndResponse(XDR out, int xid, Verifier verifier) { + super.writeHeaderAndResponse(out, xid, verifier); + if (this.getStatus() == Nfs3Status.NFS3_OK) { + out.writeBoolean(true); + objFileHandle.serialize(out); + out.writeBoolean(true); + objPostOpAttr.serialize(out); + } + dirWcc.serialize(out); + + return out; + } +} diff --git a/hadoop-dist/pom.xml b/hadoop-dist/pom.xml index 149f4048b00..679ae1dcd29 100644 --- a/hadoop-dist/pom.xml +++ b/hadoop-dist/pom.xml @@ -114,6 +114,9 @@ run rm -rf hadoop-${project.version} run mkdir hadoop-${project.version} run cd hadoop-${project.version} + run cp $ROOT/LICENSE.txt . + run cp $ROOT/NOTICE.txt . + run cp $ROOT/README.txt . run cp -r $ROOT/hadoop-common-project/hadoop-common/target/hadoop-common-${project.version}/* . run cp -r $ROOT/hadoop-common-project/hadoop-nfs/target/hadoop-nfs-${project.version}/* . run cp -r $ROOT/hadoop-hdfs-project/hadoop-hdfs/target/hadoop-hdfs-${project.version}/* . diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml index 8701bb0ad6f..24fa87b8b57 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml @@ -34,7 +34,6 @@ Apache Hadoop HttpFS - 6.0.36 REPO NOT AVAIL REPO NOT AVAIL REVISION NOT AVAIL diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java index cf44af56758..ede65c62837 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java @@ -421,7 +421,7 @@ class OpenFileCtx { if (existantWriteCtx != null) { if (!existantWriteCtx.getReplied()) { if (LOG.isDebugEnabled()) { - LOG.debug("Repeated write request which hasn't be served: xid=" + LOG.debug("Repeated write request which hasn't been served: xid=" + xid + ", drop it."); } } else { @@ -579,7 +579,7 @@ class OpenFileCtx { * writing, and there is no other threads writing (i.e., asyncStatus is * false), start the writing and set asyncStatus to true. * - * @return True if the new write is sequencial and we can start writing + * @return True if the new write is sequential and we can start writing * (including the case that there is already a thread writing). 
*/ private synchronized boolean checkAndStartWrite( @@ -898,7 +898,7 @@ class OpenFileCtx { long offset = nextOffset.get(); if (range.getMin() > offset) { if (LOG.isDebugEnabled()) { - LOG.debug("The next sequencial write has not arrived yet"); + LOG.debug("The next sequential write has not arrived yet"); } processCommits(nextOffset.get()); // handle race this.asyncStatus = false; diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java index 0c7aebeebf9..0d591d63963 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java @@ -268,7 +268,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { GETATTR3Request request = null; try { - request = new GETATTR3Request(xdr); + request = GETATTR3Request.deserialize(xdr); } catch (IOException e) { LOG.error("Invalid GETATTR request"); response.setStatus(Nfs3Status.NFS3ERR_INVAL); @@ -360,7 +360,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { SETATTR3Request request = null; try { - request = new SETATTR3Request(xdr); + request = SETATTR3Request.deserialize(xdr); } catch (IOException e) { LOG.error("Invalid SETATTR request"); response.setStatus(Nfs3Status.NFS3ERR_INVAL); @@ -445,7 +445,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { LOOKUP3Request request = null; try { - request = new LOOKUP3Request(xdr); + request = LOOKUP3Request.deserialize(xdr); } catch (IOException e) { LOG.error("Invalid LOOKUP request"); return new LOOKUP3Response(Nfs3Status.NFS3ERR_INVAL); @@ -513,7 +513,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { ACCESS3Request request = null; try { - request = new ACCESS3Request(xdr); + request = ACCESS3Request.deserialize(xdr); } catch (IOException e) { LOG.error("Invalid ACCESS request"); return new ACCESS3Response(Nfs3Status.NFS3ERR_INVAL); @@ -581,7 +581,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { READLINK3Request request = null; try { - request = new READLINK3Request(xdr); + request = READLINK3Request.deserialize(xdr); } catch (IOException e) { LOG.error("Invalid READLINK request"); return new READLINK3Response(Nfs3Status.NFS3ERR_INVAL); @@ -655,7 +655,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { READ3Request request = null; try { - request = new READ3Request(xdr); + request = READ3Request.deserialize(xdr); } catch (IOException e) { LOG.error("Invalid READ request"); return new READ3Response(Nfs3Status.NFS3ERR_INVAL); @@ -788,7 +788,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { WRITE3Request request = null; try { - request = new WRITE3Request(xdr); + request = WRITE3Request.deserialize(xdr); } catch (IOException e) { LOG.error("Invalid WRITE request"); return new WRITE3Response(Nfs3Status.NFS3ERR_INVAL); @@ -870,7 +870,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { CREATE3Request request = null; try { - request = new CREATE3Request(xdr); + request = CREATE3Request.deserialize(xdr); } catch (IOException e) { LOG.error("Invalid CREATE request"); return new CREATE3Response(Nfs3Status.NFS3ERR_INVAL); @@ -1003,7 +1003,7 @@ public class RpcProgramNfs3 extends RpcProgram 
implements Nfs3Interface { MKDIR3Request request = null; try { - request = new MKDIR3Request(xdr); + request = MKDIR3Request.deserialize(xdr); } catch (IOException e) { LOG.error("Invalid MKDIR request"); return new MKDIR3Response(Nfs3Status.NFS3ERR_INVAL); @@ -1099,7 +1099,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { REMOVE3Request request = null; try { - request = new REMOVE3Request(xdr); + request = REMOVE3Request.deserialize(xdr); } catch (IOException e) { LOG.error("Invalid REMOVE request"); return new REMOVE3Response(Nfs3Status.NFS3ERR_INVAL); @@ -1179,7 +1179,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { RMDIR3Request request = null; try { - request = new RMDIR3Request(xdr); + request = RMDIR3Request.deserialize(xdr); } catch (IOException e) { LOG.error("Invalid RMDIR request"); return new RMDIR3Response(Nfs3Status.NFS3ERR_INVAL); @@ -1264,7 +1264,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { RENAME3Request request = null; try { - request = new RENAME3Request(xdr); + request = RENAME3Request.deserialize(xdr); } catch (IOException e) { LOG.error("Invalid RENAME request"); return new RENAME3Response(Nfs3Status.NFS3ERR_INVAL); @@ -1360,7 +1360,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { SYMLINK3Request request = null; try { - request = new SYMLINK3Request(xdr); + request = SYMLINK3Request.deserialize(xdr); } catch (IOException e) { LOG.error("Invalid SYMLINK request"); response.setStatus(Nfs3Status.NFS3ERR_INVAL); @@ -1423,7 +1423,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { throw io; } // This happens when startAfter was just deleted - LOG.info("Cookie cound't be found: " + new String(startAfter) + LOG.info("Cookie couldn't be found: " + new String(startAfter) + ", do listing from beginning"); dlisting = dfsClient .listPaths(dirFileIdPath, HdfsFileStatus.EMPTY_NAME); @@ -1453,7 +1453,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { READDIR3Request request = null; try { - request = new READDIR3Request(xdr); + request = READDIR3Request.deserialize(xdr); } catch (IOException e) { LOG.error("Invalid READDIR request"); return new READDIR3Response(Nfs3Status.NFS3ERR_INVAL); @@ -1611,7 +1611,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { READDIRPLUS3Request request = null; try { - request = new READDIRPLUS3Request(xdr); + request = READDIRPLUS3Request.deserialize(xdr); } catch (IOException e) { LOG.error("Invalid READDIRPLUS request"); return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_INVAL); @@ -1788,7 +1788,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { FSSTAT3Request request = null; try { - request = new FSSTAT3Request(xdr); + request = FSSTAT3Request.deserialize(xdr); } catch (IOException e) { LOG.error("Invalid FSSTAT request"); return new FSSTAT3Response(Nfs3Status.NFS3ERR_INVAL); @@ -1862,7 +1862,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { FSINFO3Request request = null; try { - request = new FSINFO3Request(xdr); + request = FSINFO3Request.deserialize(xdr); } catch (IOException e) { LOG.error("Invalid FSINFO request"); return new FSINFO3Response(Nfs3Status.NFS3ERR_INVAL); @@ -1926,7 +1926,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { PATHCONF3Request request = null; try { - request = new PATHCONF3Request(xdr); + request = PATHCONF3Request.deserialize(xdr); } 
catch (IOException e) { LOG.error("Invalid PATHCONF request"); return new PATHCONF3Response(Nfs3Status.NFS3ERR_INVAL); @@ -1977,7 +1977,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { COMMIT3Request request = null; try { - request = new COMMIT3Request(xdr); + request = COMMIT3Request.deserialize(xdr); } catch (IOException e) { LOG.error("Invalid COMMIT request"); response.setStatus(Nfs3Status.NFS3ERR_INVAL); diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestRpcProgramNfs3.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestRpcProgramNfs3.java index 3fc0d991883..05b976da8be 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestRpcProgramNfs3.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestRpcProgramNfs3.java @@ -17,12 +17,71 @@ */ package org.apache.hadoop.hdfs.nfs.nfs3; -import static org.junit.Assert.assertTrue; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; import java.io.IOException; import java.net.InetSocketAddress; import java.nio.ByteBuffer; +import java.util.EnumSet; + +import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.DFSTestUtil; +import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys; +import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration; +import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; +import org.apache.hadoop.hdfs.server.namenode.NameNode; +import org.apache.hadoop.nfs.nfs3.FileHandle; +import org.apache.hadoop.nfs.nfs3.Nfs3Constant; +import org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow; +import org.apache.hadoop.nfs.nfs3.Nfs3Status; +import org.apache.hadoop.nfs.nfs3.request.ACCESS3Request; +import org.apache.hadoop.nfs.nfs3.request.COMMIT3Request; +import org.apache.hadoop.nfs.nfs3.request.CREATE3Request; +import org.apache.hadoop.nfs.nfs3.request.FSINFO3Request; +import org.apache.hadoop.nfs.nfs3.request.FSSTAT3Request; +import org.apache.hadoop.nfs.nfs3.request.GETATTR3Request; +import org.apache.hadoop.nfs.nfs3.request.LOOKUP3Request; +import org.apache.hadoop.nfs.nfs3.request.MKDIR3Request; +import org.apache.hadoop.nfs.nfs3.request.PATHCONF3Request; +import org.apache.hadoop.nfs.nfs3.request.READ3Request; +import org.apache.hadoop.nfs.nfs3.request.READDIR3Request; +import org.apache.hadoop.nfs.nfs3.request.READDIRPLUS3Request; +import org.apache.hadoop.nfs.nfs3.request.READLINK3Request; +import org.apache.hadoop.nfs.nfs3.request.REMOVE3Request; +import org.apache.hadoop.nfs.nfs3.request.RENAME3Request; +import org.apache.hadoop.nfs.nfs3.request.RMDIR3Request; +import org.apache.hadoop.nfs.nfs3.request.SETATTR3Request; +import org.apache.hadoop.nfs.nfs3.request.SYMLINK3Request; +import org.apache.hadoop.nfs.nfs3.request.SetAttr3; +import org.apache.hadoop.nfs.nfs3.request.SetAttr3.SetAttrField; +import org.apache.hadoop.nfs.nfs3.request.WRITE3Request; +import org.apache.hadoop.nfs.nfs3.response.ACCESS3Response; +import org.apache.hadoop.nfs.nfs3.response.COMMIT3Response; +import org.apache.hadoop.nfs.nfs3.response.CREATE3Response; +import org.apache.hadoop.nfs.nfs3.response.FSINFO3Response; +import org.apache.hadoop.nfs.nfs3.response.FSSTAT3Response; +import org.apache.hadoop.nfs.nfs3.response.GETATTR3Response; +import 
org.apache.hadoop.nfs.nfs3.response.LOOKUP3Response; +import org.apache.hadoop.nfs.nfs3.response.MKDIR3Response; +import org.apache.hadoop.nfs.nfs3.response.PATHCONF3Response; +import org.apache.hadoop.nfs.nfs3.response.READ3Response; +import org.apache.hadoop.nfs.nfs3.response.READDIR3Response; +import org.apache.hadoop.nfs.nfs3.response.READDIRPLUS3Response; +import org.apache.hadoop.nfs.nfs3.response.READLINK3Response; +import org.apache.hadoop.nfs.nfs3.response.REMOVE3Response; +import org.apache.hadoop.nfs.nfs3.response.RENAME3Response; +import org.apache.hadoop.nfs.nfs3.response.RMDIR3Response; +import org.apache.hadoop.nfs.nfs3.response.SETATTR3Response; +import org.apache.hadoop.nfs.nfs3.response.SYMLINK3Response; +import org.apache.hadoop.nfs.nfs3.response.WRITE3Response; +import org.apache.hadoop.oncrpc.XDR; +import org.apache.hadoop.oncrpc.security.SecurityHandler; +import org.apache.hadoop.security.authorize.DefaultImpersonationProvider; +import org.apache.hadoop.security.authorize.ProxyUsers; import org.jboss.netty.channel.Channel; import org.junit.AfterClass; import org.junit.Assert; @@ -31,46 +90,6 @@ import org.junit.BeforeClass; import org.junit.Test; import org.mockito.Mockito; -import org.apache.hadoop.fs.CommonConfigurationKeys; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hdfs.DFSTestUtil; -import org.apache.hadoop.hdfs.DistributedFileSystem; -import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration; -import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys; -import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; -import org.apache.hadoop.hdfs.server.namenode.NameNode; -import org.apache.hadoop.nfs.nfs3.FileHandle; -import org.apache.hadoop.nfs.nfs3.Nfs3Constant; -import org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow; -import org.apache.hadoop.nfs.nfs3.Nfs3Status; -import org.apache.hadoop.nfs.nfs3.request.LOOKUP3Request; -import org.apache.hadoop.nfs.nfs3.request.READ3Request; -import org.apache.hadoop.nfs.nfs3.request.WRITE3Request; -import org.apache.hadoop.nfs.nfs3.response.ACCESS3Response; -import org.apache.hadoop.nfs.nfs3.response.COMMIT3Response; -import org.apache.hadoop.nfs.nfs3.response.CREATE3Response; -import org.apache.hadoop.nfs.nfs3.response.FSSTAT3Response; -import org.apache.hadoop.nfs.nfs3.response.FSINFO3Response; -import org.apache.hadoop.nfs.nfs3.response.GETATTR3Response; -import org.apache.hadoop.nfs.nfs3.response.LOOKUP3Response; -import org.apache.hadoop.nfs.nfs3.response.PATHCONF3Response; -import org.apache.hadoop.nfs.nfs3.response.READ3Response; -import org.apache.hadoop.nfs.nfs3.response.REMOVE3Response; -import org.apache.hadoop.nfs.nfs3.response.RMDIR3Response; -import org.apache.hadoop.nfs.nfs3.response.RENAME3Response; -import org.apache.hadoop.nfs.nfs3.response.READDIR3Response; -import org.apache.hadoop.nfs.nfs3.response.READDIRPLUS3Response; -import org.apache.hadoop.nfs.nfs3.response.READLINK3Response; -import org.apache.hadoop.nfs.nfs3.response.SETATTR3Response; -import org.apache.hadoop.nfs.nfs3.response.SYMLINK3Response; -import org.apache.hadoop.nfs.nfs3.response.WRITE3Response; -import org.apache.hadoop.nfs.nfs3.request.SetAttr3; -import org.apache.hadoop.oncrpc.XDR; -import org.apache.hadoop.oncrpc.security.SecurityHandler; -import org.apache.hadoop.security.authorize.DefaultImpersonationProvider; -import org.apache.hadoop.security.authorize.ProxyUsers; - /** * Tests for {@link RpcProgramNfs3} @@ -143,8 +162,9 @@ public class TestRpcProgramNfs3 { long dirId = 
status.getFileId(); FileHandle handle = new FileHandle(dirId); XDR xdr_req = new XDR(); - handle.serialize(xdr_req); - + GETATTR3Request req = new GETATTR3Request(handle); + req.serialize(xdr_req); + // Attempt by an unpriviledged user should fail. GETATTR3Response response1 = nfsd.getattr(xdr_req.asReadOnlyWrap(), securityHandlerUnpriviledged, @@ -165,13 +185,12 @@ public class TestRpcProgramNfs3 { long dirId = status.getFileId(); XDR xdr_req = new XDR(); FileHandle handle = new FileHandle(dirId); - handle.serialize(xdr_req); - xdr_req.writeString("bar"); - SetAttr3 symAttr = new SetAttr3(); - symAttr.serialize(xdr_req); - xdr_req.writeBoolean(false); + SetAttr3 symAttr = new SetAttr3(0, 1, 0, 0, null, null, + EnumSet.of(SetAttrField.UID)); + SETATTR3Request req = new SETATTR3Request(handle, symAttr, false, null); + req.serialize(xdr_req); - // Attempt by an unpriviledged user should fail. + // Attempt by an unprivileged user should fail. SETATTR3Response response1 = nfsd.setattr(xdr_req.asReadOnlyWrap(), securityHandlerUnpriviledged, new InetSocketAddress("localhost", 1234)); @@ -214,7 +233,8 @@ public class TestRpcProgramNfs3 { long dirId = status.getFileId(); FileHandle handle = new FileHandle(dirId); XDR xdr_req = new XDR(); - handle.serialize(xdr_req); + ACCESS3Request req = new ACCESS3Request(handle); + req.serialize(xdr_req); // Attempt by an unpriviledged user should fail. ACCESS3Response response1 = nfsd.access(xdr_req.asReadOnlyWrap(), @@ -237,12 +257,10 @@ public class TestRpcProgramNfs3 { long dirId = status.getFileId(); XDR xdr_req = new XDR(); FileHandle handle = new FileHandle(dirId); - handle.serialize(xdr_req); - xdr_req.writeString("fubar"); - SetAttr3 symAttr = new SetAttr3(); - symAttr.serialize(xdr_req); - xdr_req.writeString("bar"); - + SYMLINK3Request req = new SYMLINK3Request(handle, "fubar", new SetAttr3(), + "bar"); + req.serialize(xdr_req); + SYMLINK3Response response = nfsd.symlink(xdr_req.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234)); assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK, @@ -251,7 +269,8 @@ public class TestRpcProgramNfs3 { // Now perform readlink operations. FileHandle handle2 = response.getObjFileHandle(); XDR xdr_req2 = new XDR(); - handle2.serialize(xdr_req2); + READLINK3Request req2 = new READLINK3Request(handle2); + req2.serialize(xdr_req2); // Attempt by an unpriviledged user should fail. READLINK3Response response1 = nfsd.readlink(xdr_req2.asReadOnlyWrap(), @@ -327,12 +346,10 @@ public class TestRpcProgramNfs3 { long dirId = status.getFileId(); XDR xdr_req = new XDR(); FileHandle handle = new FileHandle(dirId); - handle.serialize(xdr_req); - xdr_req.writeString("fubar"); - xdr_req.writeInt(Nfs3Constant.CREATE_UNCHECKED); - SetAttr3 symAttr = new SetAttr3(); - symAttr.serialize(xdr_req); - + CREATE3Request req = new CREATE3Request(handle, "fubar", + Nfs3Constant.CREATE_UNCHECKED, new SetAttr3(), 0); + req.serialize(xdr_req); + // Attempt by an unpriviledged user should fail. 
CREATE3Response response1 = nfsd.create(xdr_req.asReadOnlyWrap(), securityHandlerUnpriviledged, @@ -348,26 +365,27 @@ public class TestRpcProgramNfs3 { } @Test(timeout = 60000) - public void testMkdir() throws Exception { + public void testMkdir() throws Exception {//FixME HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir); long dirId = status.getFileId(); XDR xdr_req = new XDR(); FileHandle handle = new FileHandle(dirId); - handle.serialize(xdr_req); - xdr_req.writeString("fubar"); - SetAttr3 symAttr = new SetAttr3(); - symAttr.serialize(xdr_req); - xdr_req.writeString("bar"); - - // Attempt to remove by an unpriviledged user should fail. - SYMLINK3Response response1 = nfsd.symlink(xdr_req.asReadOnlyWrap(), + MKDIR3Request req = new MKDIR3Request(handle, "fubar1", new SetAttr3()); + req.serialize(xdr_req); + + // Attempt to mkdir by an unprivileged user should fail. + MKDIR3Response response1 = nfsd.mkdir(xdr_req.asReadOnlyWrap(), securityHandlerUnpriviledged, new InetSocketAddress("localhost", 1234)); assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES, response1.getStatus()); - // Attempt to remove by a priviledged user should pass. - SYMLINK3Response response2 = nfsd.symlink(xdr_req.asReadOnlyWrap(), + XDR xdr_req2 = new XDR(); + MKDIR3Request req2 = new MKDIR3Request(handle, "fubar2", new SetAttr3()); + req2.serialize(xdr_req2); + + // Attempt to mkdir by a privileged user should pass. + MKDIR3Response response2 = nfsd.mkdir(xdr_req2.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234)); assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK, response2.getStatus()); @@ -379,20 +397,18 @@ public class TestRpcProgramNfs3 { long dirId = status.getFileId(); XDR xdr_req = new XDR(); FileHandle handle = new FileHandle(dirId); - handle.serialize(xdr_req); - xdr_req.writeString("fubar"); - SetAttr3 symAttr = new SetAttr3(); - symAttr.serialize(xdr_req); - xdr_req.writeString("bar"); + SYMLINK3Request req = new SYMLINK3Request(handle, "fubar", new SetAttr3(), + "bar"); + req.serialize(xdr_req); - // Attempt by an unpriviledged user should fail. + // Attempt by an unprivileged user should fail. SYMLINK3Response response1 = nfsd.symlink(xdr_req.asReadOnlyWrap(), securityHandlerUnpriviledged, new InetSocketAddress("localhost", 1234)); assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES, response1.getStatus()); - // Attempt by a priviledged user should pass. + // Attempt by a privileged user should pass. SYMLINK3Response response2 = nfsd.symlink(xdr_req.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234)); assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK, @@ -405,8 +421,8 @@ public class TestRpcProgramNfs3 { long dirId = status.getFileId(); XDR xdr_req = new XDR(); FileHandle handle = new FileHandle(dirId); - handle.serialize(xdr_req); - xdr_req.writeString("bar"); + REMOVE3Request req = new REMOVE3Request(handle, "bar"); + req.serialize(xdr_req); // Attempt by an unpriviledged user should fail. REMOVE3Response response1 = nfsd.remove(xdr_req.asReadOnlyWrap(), @@ -428,17 +444,17 @@ public class TestRpcProgramNfs3 { long dirId = status.getFileId(); XDR xdr_req = new XDR(); FileHandle handle = new FileHandle(dirId); - handle.serialize(xdr_req); - xdr_req.writeString("foo"); + RMDIR3Request req = new RMDIR3Request(handle, "foo"); + req.serialize(xdr_req); - // Attempt by an unpriviledged user should fail. + // Attempt by an unprivileged user should fail. 
RMDIR3Response response1 = nfsd.rmdir(xdr_req.asReadOnlyWrap(), securityHandlerUnpriviledged, new InetSocketAddress("localhost", 1234)); assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES, response1.getStatus()); - // Attempt by a priviledged user should pass. + // Attempt by a privileged user should pass. RMDIR3Response response2 = nfsd.rmdir(xdr_req.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234)); assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK, @@ -451,19 +467,17 @@ public class TestRpcProgramNfs3 { long dirId = status.getFileId(); XDR xdr_req = new XDR(); FileHandle handle = new FileHandle(dirId); - handle.serialize(xdr_req); - xdr_req.writeString("bar"); - handle.serialize(xdr_req); - xdr_req.writeString("fubar"); - - // Attempt by an unpriviledged user should fail. + RENAME3Request req = new RENAME3Request(handle, "bar", handle, "fubar"); + req.serialize(xdr_req); + + // Attempt by an unprivileged user should fail. RENAME3Response response1 = nfsd.rename(xdr_req.asReadOnlyWrap(), securityHandlerUnpriviledged, new InetSocketAddress("localhost", 1234)); assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES, response1.getStatus()); - // Attempt by a priviledged user should pass. + // Attempt by a privileged user should pass. RENAME3Response response2 = nfsd.rename(xdr_req.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234)); assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK, @@ -476,10 +490,8 @@ public class TestRpcProgramNfs3 { long dirId = status.getFileId(); FileHandle handle = new FileHandle(dirId); XDR xdr_req = new XDR(); - handle.serialize(xdr_req); - xdr_req.writeLongAsHyper(0); - xdr_req.writeLongAsHyper(0); - xdr_req.writeInt(100); + READDIR3Request req = new READDIR3Request(handle, 0, 0, 100); + req.serialize(xdr_req); // Attempt by an unpriviledged user should fail. READDIR3Response response1 = nfsd.readdir(xdr_req.asReadOnlyWrap(), @@ -501,20 +513,17 @@ public class TestRpcProgramNfs3 { long dirId = status.getFileId(); FileHandle handle = new FileHandle(dirId); XDR xdr_req = new XDR(); - handle.serialize(xdr_req); - xdr_req.writeLongAsHyper(0); - xdr_req.writeLongAsHyper(0); - xdr_req.writeInt(3); - xdr_req.writeInt(2); - - // Attempt by an unpriviledged user should fail. + READDIRPLUS3Request req = new READDIRPLUS3Request(handle, 0, 0, 3, 2); + req.serialize(xdr_req); + + // Attempt by an unprivileged user should fail. READDIRPLUS3Response response1 = nfsd.readdirplus(xdr_req.asReadOnlyWrap(), securityHandlerUnpriviledged, new InetSocketAddress("localhost", 1234)); assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES, response1.getStatus()); - // Attempt by a priviledged user should pass. + // Attempt by a privileged user should pass. READDIRPLUS3Response response2 = nfsd.readdirplus(xdr_req.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234)); assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK, @@ -527,8 +536,9 @@ public class TestRpcProgramNfs3 { long dirId = status.getFileId(); FileHandle handle = new FileHandle(dirId); XDR xdr_req = new XDR(); - handle.serialize(xdr_req); - + FSSTAT3Request req = new FSSTAT3Request(handle); + req.serialize(xdr_req); + // Attempt by an unpriviledged user should fail. 
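The test changes above all follow the same pattern introduced by HDFS-6892: build a typed request object, call its new serialize() method into an XDR buffer, and pass the read-only wrap to the handler, which unpacks it with the static deserialize() factory shown earlier in RpcProgramNfs3. A minimal round trip outside any test fixture, as a sketch (the file id value is an arbitrary placeholder):

    import java.io.IOException;

    import org.apache.hadoop.nfs.nfs3.FileHandle;
    import org.apache.hadoop.nfs.nfs3.request.GETATTR3Request;
    import org.apache.hadoop.oncrpc.XDR;

    public class Getattr3RoundTrip {
      public static void main(String[] args) throws IOException {
        FileHandle handle = new FileHandle(16385L);    // placeholder file id
        XDR xdr = new XDR();
        // Client/test side: package the request into XDR.
        new GETATTR3Request(handle).serialize(xdr);
        // Server side (see RpcProgramNfs3#getattr above): unpack the same bytes.
        GETATTR3Request decoded =
            GETATTR3Request.deserialize(xdr.asReadOnlyWrap());
      }
    }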
FSSTAT3Response response1 = nfsd.fsstat(xdr_req.asReadOnlyWrap(), securityHandlerUnpriviledged, @@ -549,8 +559,9 @@ public class TestRpcProgramNfs3 { long dirId = status.getFileId(); FileHandle handle = new FileHandle(dirId); XDR xdr_req = new XDR(); - handle.serialize(xdr_req); - + FSINFO3Request req = new FSINFO3Request(handle); + req.serialize(xdr_req); + // Attempt by an unpriviledged user should fail. FSINFO3Response response1 = nfsd.fsinfo(xdr_req.asReadOnlyWrap(), securityHandlerUnpriviledged, @@ -571,8 +582,9 @@ public class TestRpcProgramNfs3 { long dirId = status.getFileId(); FileHandle handle = new FileHandle(dirId); XDR xdr_req = new XDR(); - handle.serialize(xdr_req); - + PATHCONF3Request req = new PATHCONF3Request(handle); + req.serialize(xdr_req); + // Attempt by an unpriviledged user should fail. PATHCONF3Response response1 = nfsd.pathconf(xdr_req.asReadOnlyWrap(), securityHandlerUnpriviledged, @@ -593,9 +605,8 @@ public class TestRpcProgramNfs3 { long dirId = status.getFileId(); FileHandle handle = new FileHandle(dirId); XDR xdr_req = new XDR(); - handle.serialize(xdr_req); - xdr_req.writeLongAsHyper(0); - xdr_req.writeInt(5); + COMMIT3Request req = new COMMIT3Request(handle, 0, 5); + req.serialize(xdr_req); Channel ch = Mockito.mock(Channel.class); diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 8444eb71c97..70c8346941e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -278,99 +278,6 @@ Trunk (Unreleased) HDFS-6657. Remove link to 'Legacy UI' in trunk's Namenode UI. (Vinayakumar B via wheat 9) - BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS - - HDFS-6387. HDFS CLI admin tool for creating & deleting an - encryption zone. (clamb) - - HDFS-6386. HDFS Encryption Zones (clamb) - - HDFS-6388. HDFS integration with KeyProvider. (clamb) - - HDFS-6473. Protocol and API for Encryption Zones (clamb) - - HDFS-6392. Wire crypto streams for encrypted files in - DFSClient. (clamb and yliu) - - HDFS-6476. Print out the KeyProvider after finding KP successfully on - startup. (Juan Yu via wang) - - HDFS-6391. Get the Key/IV from the NameNode for encrypted files in - DFSClient. (Charles Lamb and wang) - - HDFS-6389. Rename restrictions for encryption zones. (clamb) - - HDFS-6605. Client server negotiation of cipher suite. (wang) - - HDFS-6625. Remove the Delete Encryption Zone function (clamb) - - HDFS-6516. List of Encryption Zones should be based on inodes (clamb) - - HDFS-6629. Not able to create symlinks after HDFS-6516 (umamaheswararao) - - HDFS-6635. Refactor encryption zone functionality into new - EncryptionZoneManager class. (wang) - - HDFS-6474. Namenode needs to get the actual keys and iv from the - KeyProvider. (wang) - - HDFS-6619. Clean up encryption-related tests. (wang) - - HDFS-6405. Test Crypto streams in HDFS. (yliu via wang) - - HDFS-6490. Fix the keyid format for generated keys in - FSNamesystem.createEncryptionZone (clamb) - - HDFS-6716. Update usage of KeyProviderCryptoExtension APIs on NameNode. - (wang) - - HDFS-6718. Remove EncryptionZoneManager lock. (wang) - - HDFS-6720. Remove KeyProvider in EncryptionZoneManager. (wang) - - HDFS-6738. Remove unnecessary getEncryptionZoneForPath call in - EZManager#createEncryptionZone. (clamb) - - HDFS-6724. Decrypt EDEK before creating - CryptoInputStream/CryptoOutputStream. (wang) - - HDFS-6509. Create a special /.reserved/raw directory for raw access to - encrypted data. 
(clamb via wang) - - HDFS-6771. Require specification of an encryption key when creating - an encryption zone. (wang) - - HDFS-6730. Create a .RAW extended attribute namespace. (clamb) - - HDFS-6692. Add more HDFS encryption tests. (wang) - - HDFS-6780. Batch the encryption zones listing API. (wang) - - HDFS-6394. HDFS encryption documentation. (wang) - - HDFS-6834. Improve the configuration guidance in DFSClient when there - are no Codec classes found in configs. (umamahesh) - - HDFS-6546. Add non-superuser capability to get the encryption zone - for a specific path. (clamb) - - HDFS-6733. Creating encryption zone results in NPE when - KeyProvider is null. (clamb) - - HDFS-6785. Should not be able to create encryption zone using path - to a non-directory file. (clamb) - - HDFS-6807. Fix TestReservedRawPaths. (clamb) - - HDFS-6814. Mistakenly dfs.namenode.list.encryption.zones.num.responses configured - as boolean. (umamahesh) - - HDFS-6817. Fix findbugs and other warnings. (yliu) - - HDFS-6839. Fix TestCLI to expect new output. (clamb) - - HDFS-6905. fs-encryption merge triggered release audit failures. (clamb via tucu) - HDFS-6694. TestPipelinesFailover.testPipelineRecoveryStress tests fail intermittently with various symptoms - debugging patch. (Yongjun Zhang via Arpit Agarwal) @@ -537,10 +444,30 @@ Release 2.6.0 - UNRELEASED HDFS-6899. Allow changing MiniDFSCluster volumes per DN and capacity per volume. (Arpit Agarwal) + HDFS-4486. Add log category for long-running DFSClient notices (Zhe Zhang + via Colin Patrick McCabe) + + HDFS-6879. Adding tracing to Hadoop RPC (Masatake Iwasaki via Colin Patrick + McCabe) + + HDFS-6774. Make FsDataset and DataStore support removing volumes. (Lei Xu + via atm) + + HDFS-6634. inotify in HDFS. (James Thomas via wang) + + HDFS-4257. The ReplaceDatanodeOnFailure policies could have a forgiving + option (szetszwo via cmccabe) + OPTIMIZATIONS HDFS-6690. Deduplicate xattr names in memory. (wang) + HDFS-6773. MiniDFSCluster should skip edit log fsync by default (Stephen + Chu via Colin Patrick McCabe) + + HDFS-6865. Byte array native checksumming on client side + (James Thomas via todd) + BUG FIXES HDFS-6823. dfs.web.authentication.kerberos.principal shows up in logs for @@ -668,6 +595,117 @@ Release 2.6.0 - UNRELEASED HDFS-6908. Incorrect snapshot directory diff generated by snapshot deletion. (Juan Yu and jing9 via jing9) + HDFS-6892. Add XDR packaging method for each NFS request (brandonli) + + HDFS-6938. Cleanup javac warnings in FSNamesystem (Charles Lamb via wheat9) + + HDFS-6902. FileWriter should be closed in finally block in + BlockReceiver#receiveBlock() (Tsuyoshi OZAWA via Colin Patrick McCabe) + + HDFS-6800. Support Datanode layout changes with rolling upgrade. + (James Thomas via Arpit Agarwal) + + HDFS-6972. TestRefreshUserMappings.testRefreshSuperUserGroupsConfiguration + doesn't decode url correctly. (Yongjun Zhang via wang) + + HDFS-6942. Fix typos in log messages. (Ray Chiang via wheat9) + + BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS + + HDFS-6387. HDFS CLI admin tool for creating & deleting an + encryption zone. (clamb) + + HDFS-6386. HDFS Encryption Zones (clamb) + + HDFS-6388. HDFS integration with KeyProvider. (clamb) + + HDFS-6473. Protocol and API for Encryption Zones (clamb) + + HDFS-6392. Wire crypto streams for encrypted files in + DFSClient. (clamb and yliu) + + HDFS-6476. Print out the KeyProvider after finding KP successfully on + startup. (Juan Yu via wang) + + HDFS-6391. 
Get the Key/IV from the NameNode for encrypted files in + DFSClient. (Charles Lamb and wang) + + HDFS-6389. Rename restrictions for encryption zones. (clamb) + + HDFS-6605. Client server negotiation of cipher suite. (wang) + + HDFS-6625. Remove the Delete Encryption Zone function (clamb) + + HDFS-6516. List of Encryption Zones should be based on inodes (clamb) + + HDFS-6629. Not able to create symlinks after HDFS-6516 (umamaheswararao) + + HDFS-6635. Refactor encryption zone functionality into new + EncryptionZoneManager class. (wang) + + HDFS-6474. Namenode needs to get the actual keys and iv from the + KeyProvider. (wang) + + HDFS-6619. Clean up encryption-related tests. (wang) + + HDFS-6405. Test Crypto streams in HDFS. (yliu via wang) + + HDFS-6490. Fix the keyid format for generated keys in + FSNamesystem.createEncryptionZone (clamb) + + HDFS-6716. Update usage of KeyProviderCryptoExtension APIs on NameNode. + (wang) + + HDFS-6718. Remove EncryptionZoneManager lock. (wang) + + HDFS-6720. Remove KeyProvider in EncryptionZoneManager. (wang) + + HDFS-6738. Remove unnecessary getEncryptionZoneForPath call in + EZManager#createEncryptionZone. (clamb) + + HDFS-6724. Decrypt EDEK before creating + CryptoInputStream/CryptoOutputStream. (wang) + + HDFS-6509. Create a special /.reserved/raw directory for raw access to + encrypted data. (clamb via wang) + + HDFS-6771. Require specification of an encryption key when creating + an encryption zone. (wang) + + HDFS-6730. Create a .RAW extended attribute namespace. (clamb) + + HDFS-6692. Add more HDFS encryption tests. (wang) + + HDFS-6780. Batch the encryption zones listing API. (wang) + + HDFS-6394. HDFS encryption documentation. (wang) + + HDFS-6834. Improve the configuration guidance in DFSClient when there + are no Codec classes found in configs. (umamahesh) + + HDFS-6546. Add non-superuser capability to get the encryption zone + for a specific path. (clamb) + + HDFS-6733. Creating encryption zone results in NPE when + KeyProvider is null. (clamb) + + HDFS-6785. Should not be able to create encryption zone using path + to a non-directory file. (clamb) + + HDFS-6807. Fix TestReservedRawPaths. (clamb) + + HDFS-6814. Mistakenly dfs.namenode.list.encryption.zones.num.responses configured + as boolean. (umamahesh) + + HDFS-6817. Fix findbugs and other warnings. (yliu) + + HDFS-6839. Fix TestCLI to expect new output. (clamb) + + HDFS-6954. With crypto, no native lib systems are too verbose. (clamb via wang) + + HDFS-2975. Rename with overwrite flag true can make NameNode to stuck in safemode + on NN (crash + restart). (Yi Liu via umamahesh) + Release 2.5.1 - UNRELEASED INCOMPATIBLE CHANGES @@ -680,6 +718,9 @@ Release 2.5.1 - UNRELEASED BUG FIXES + HADOOP-10957. The globber will sometimes erroneously return a permission + denied exception when there is a non-terminal wildcard (cmccabe) + Release 2.5.0 - 2014-08-11 INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/LICENSE.txt b/hadoop-hdfs-project/hadoop-hdfs/LICENSE.txt deleted file mode 100644 index 966012349ba..00000000000 --- a/hadoop-hdfs-project/hadoop-hdfs/LICENSE.txt +++ /dev/null @@ -1,271 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. 
- - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
- - -APACHE HADOOP SUBCOMPONENTS: - -The Apache Hadoop project contains subcomponents with separate copyright -notices and license terms. Your use of the source code for the these -subcomponents is subject to the terms and conditions of the following -licenses. - -For the org.apache.hadoop.util.bloom.* classes: - -/** - * - * Copyright (c) 2005, European Commission project OneLab under contract - * 034819 (http://www.one-lab.org) - * All rights reserved. - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the distribution. - * - Neither the name of the University Catholique de Louvain - UCL - * nor the names of its contributors may be used to endorse or - * promote products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS - * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE - * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, - * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN - * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -For src/main/native/util/tree.h: - -/*- - * Copyright 2002 Niels Provos - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/NOTICE.txt b/hadoop-hdfs-project/hadoop-hdfs/NOTICE.txt deleted file mode 100644 index 62fc5816c99..00000000000 --- a/hadoop-hdfs-project/hadoop-hdfs/NOTICE.txt +++ /dev/null @@ -1,2 +0,0 @@ -This product includes software developed by The Apache Software -Foundation (http://www.apache.org/). diff --git a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml index 29702d4bace..bbfb9e9599d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml @@ -106,6 +106,15 @@ + + + + + + diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml index 9b026f2bdb5..2c4ddf64379 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml @@ -181,6 +181,10 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> xercesImpl compile + + org.htrace + htrace-core + @@ -305,6 +309,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> fsimage.proto hdfs.proto encryption.proto + inotify.proto ${project.build.directory}/generated-sources/java diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperEditLogInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperEditLogInputStream.java index bd3ccd435c0..e2098ddee19 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperEditLogInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperEditLogInputStream.java @@ -168,6 +168,11 @@ class BookKeeperEditLogInputStream extends EditLogInputStream { reader.setMaxOpSize(maxOpSize); } + @Override + public boolean isLocalLog() { + return false; + } + /** * Input stream implementation which can be used by * FSEditLogOp.Reader diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs index 77f1582db27..6872a0eb1a4 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs @@ -80,6 +80,7 @@ shift case ${COMMAND} in balancer) CLASS=org.apache.hadoop.hdfs.server.balancer.Balancer + hadoop_debug "Appending HADOOP_BALANCER_OPTS onto HADOOP_OPTS" HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_BALANCER_OPTS}" ;; cacheadmin) @@ -105,19 +106,24 @@ case ${COMMAND} in HADOOP_SECURE_PID_DIR="${HADOOP_SECURE_PID_DIR:-$HADOOP_SECURE_DN_PID_DIR}" HADOOP_SECURE_LOG_DIR="${HADOOP_SECURE_LOG_DIR:-$HADOOP_SECURE_DN_LOG_DIR}" - HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_DN_SECURE_EXTRA_OPTS} ${HADOOP_DATANODE_OPTS}" + hadoop_debug "Appending HADOOP_DATANODE_OPTS onto HADOOP_OPTS" + hadoop_debug "Appending HADOOP_DN_SECURE_EXTRA_OPTS onto HADOOP_OPTS" + HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_DATANODE_OPTS} ${HADOOP_DN_SECURE_EXTRA_OPTS}" CLASS="org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter" else + hadoop_debug "Appending HADOOP_DATANODE_OPTS onto HADOOP_OPTS" HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_DATANODE_OPTS}" CLASS='org.apache.hadoop.hdfs.server.datanode.DataNode' fi ;; dfs) CLASS=org.apache.hadoop.fs.FsShell + hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS" HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}" ;; dfsadmin) 
CLASS=org.apache.hadoop.hdfs.tools.DFSAdmin + hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS" HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}" ;; fetchdt) @@ -125,6 +131,7 @@ case ${COMMAND} in ;; fsck) CLASS=org.apache.hadoop.hdfs.tools.DFSck + hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS" HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}" ;; getconf) @@ -135,12 +142,15 @@ case ${COMMAND} in ;; haadmin) CLASS=org.apache.hadoop.hdfs.tools.DFSHAAdmin - CLASSPATH="${CLASSPATH}:${TOOL_PATH}" + hadoop_debug "Injecting TOOL_PATH into CLASSPATH" + hadoop_add_classpath "${TOOL_PATH}" + hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS" HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}" ;; journalnode) daemon="true" CLASS='org.apache.hadoop.hdfs.qjournal.server.JournalNode' + hadoop_debug "Appending HADOOP_JOURNALNODE_OPTS onto HADOOP_OPTS" HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_JOURNALNODE_OPTS}" ;; jmxget) @@ -152,6 +162,7 @@ case ${COMMAND} in namenode) daemon="true" CLASS='org.apache.hadoop.hdfs.server.namenode.NameNode' + hadoop_debug "Appending HADOOP_NAMENODE_OPTS onto HADOOP_OPTS" HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_NAMENODE_OPTS}" ;; nfs3) @@ -164,9 +175,12 @@ case ${COMMAND} in HADOOP_SECURE_PID_DIR="${HADOOP_SECURE_PID_DIR:-$HADOOP_SECURE_NFS3_PID_DIR}" HADOOP_SECURE_LOG_DIR="${HADOOP_SECURE_LOG_DIR:-$HADOOP_SECURE_NFS3_LOG_DIR}" - HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_NFS3_SECURE_EXTRA_OPTS} ${HADOOP_NFS3_OPTS}" + hadoop_debug "Appending HADOOP_NFS3_OPTS onto HADOOP_OPTS" + hadoop_debug "Appending HADOOP_NFS3_SECURE_EXTRA_OPTS onto HADOOP_OPTS" + HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_NFS3_OPTS} ${HADOOP_NFS3_SECURE_EXTRA_OPTS}" CLASS=org.apache.hadoop.hdfs.nfs.nfs3.PrivilegedNfsGatewayStarter else + hadoop_debug "Appending HADOOP_NFS3_OPTS onto HADOOP_OPTS" HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_NFS3_OPTS}" CLASS=org.apache.hadoop.hdfs.nfs.nfs3.Nfs3 fi @@ -183,11 +197,13 @@ case ${COMMAND} in portmap) daemon="true" CLASS=org.apache.hadoop.portmap.Portmap + hadoop_debug "Appending HADOOP_PORTMAP_OPTS onto HADOOP_OPTS" HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_PORTMAP_OPTS}" ;; secondarynamenode) daemon="true" CLASS='org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode' + hadoop_debug "Appending HADOOP_SECONDARYNAMENODE_OPTS onto HADOOP_OPTS" HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_SECONDARYNAMENODE_OPTS}" ;; snapshotDiff) @@ -196,6 +212,7 @@ case ${COMMAND} in zkfc) daemon="true" CLASS='org.apache.hadoop.hdfs.tools.DFSZKFailoverController' + hadoop_debug "Appending HADOOP_ZKFC_OPTS onto HADOOP_OPTS" HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_ZKFC_OPTS}" ;; -*) @@ -236,8 +253,6 @@ fi hadoop_add_param HADOOP_OPTS Xmx "${JAVA_HEAP_MAX}" hadoop_finalize -export CLASSPATH - if [[ -n "${daemon}" ]]; then if [[ -n "${secure_service}" ]]; then hadoop_secure_daemon_handler \ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java index d27bd6ef0d2..3fb442b94a5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java @@ -54,6 +54,7 @@ import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.SecretManager.InvalidToken; import org.apache.hadoop.security.token.Token; +import 
org.apache.hadoop.util.PerformanceAdvisory; import org.apache.hadoop.util.Time; import com.google.common.annotations.VisibleForTesting; @@ -343,10 +344,9 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator { return null; } if (clientContext.getDisableLegacyBlockReaderLocal()) { - if (LOG.isTraceEnabled()) { - LOG.trace(this + ": can't construct BlockReaderLocalLegacy because " + - "disableLegacyBlockReaderLocal is set."); - } + PerformanceAdvisory.LOG.debug(this + ": can't construct " + + "BlockReaderLocalLegacy because " + + "disableLegacyBlockReaderLocal is set."); return null; } IOException ioe = null; @@ -385,10 +385,8 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator { getPathInfo(inetSocketAddress, conf); } if (!pathInfo.getPathState().getUsableForShortCircuit()) { - if (LOG.isTraceEnabled()) { - LOG.trace(this + ": " + pathInfo + " is not " + - "usable for short circuit; giving up on BlockReaderLocal."); - } + PerformanceAdvisory.LOG.debug(this + ": " + pathInfo + " is not " + + "usable for short circuit; giving up on BlockReaderLocal."); return null; } ShortCircuitCache cache = clientContext.getShortCircuitCache(); @@ -404,8 +402,9 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator { } if (info.getReplica() == null) { if (LOG.isTraceEnabled()) { - LOG.trace(this + ": failed to get ShortCircuitReplica. " + - "Cannot construct BlockReaderLocal via " + pathInfo.getPath()); + PerformanceAdvisory.LOG.debug(this + ": failed to get " + + "ShortCircuitReplica. Cannot construct " + + "BlockReaderLocal via " + pathInfo.getPath()); } return null; } @@ -580,11 +579,9 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator { getPathInfo(inetSocketAddress, conf); } if (!pathInfo.getPathState().getUsableForDataTransfer()) { - if (LOG.isTraceEnabled()) { - LOG.trace(this + ": not trying to create a remote block reader " + - "because the UNIX domain socket at " + pathInfo + - " is not usable."); - } + PerformanceAdvisory.LOG.debug(this + ": not trying to create a " + + "remote block reader because the UNIX domain socket at " + + pathInfo + " is not usable."); return null; } if (LOG.isTraceEnabled()) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java index bcc0f7223fc..23e51f2213b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java @@ -606,10 +606,12 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory, cipherSuites.add(codec.getCipherSuite()); } provider = DFSUtil.createKeyProviderCryptoExtension(conf); - if (provider == null) { - LOG.info("No KeyProvider found."); - } else { - LOG.info("Found KeyProvider: " + provider.toString()); + if (LOG.isDebugEnabled()) { + if (provider == null) { + LOG.debug("No KeyProvider found."); + } else { + LOG.debug("Found KeyProvider: " + provider.toString()); + } } int numResponseToDrop = conf.getInt( DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY, @@ -3009,6 +3011,15 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory, } } + public DFSInotifyEventInputStream getInotifyEventStream() throws IOException { + return new DFSInotifyEventInputStream(namenode); + } + + public DFSInotifyEventInputStream getInotifyEventStream(long lastReadTxid) + throws IOException { + 
return new DFSInotifyEventInputStream(namenode, lastReadTxid); + } + @Override // RemotePeerFactory public Peer newConnectedPeer(InetSocketAddress addr, Token blockToken, DatanodeID datanodeId) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index 3dd13a53df4..2f4237e86e8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -53,6 +53,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final boolean DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_ENABLE_DEFAULT = true; public static final String DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_POLICY_KEY = "dfs.client.block.write.replace-datanode-on-failure.policy"; public static final String DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_POLICY_DEFAULT = "DEFAULT"; + public static final String DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_BEST_EFFORT_KEY = "dfs.client.block.write.replace-datanode-on-failure.best-effort"; + public static final boolean DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_BEST_EFFORT_DEFAULT = false; public static final String DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY = "dfs.client.socketcache.capacity"; public static final int DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT = 16; public static final String DFS_CLIENT_USE_DN_HOSTNAME = "dfs.client.use.datanode.hostname"; @@ -688,4 +690,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final String DFS_DATANODE_BLOCK_ID_LAYOUT_UPGRADE_THREADS_KEY = "dfs.datanode.block.id.layout.upgrade.threads"; public static final int DFS_DATANODE_BLOCK_ID_LAYOUT_UPGRADE_THREADS = 12; + + public static final String DFS_NAMENODE_INOTIFY_MAX_EVENTS_PER_RPC_KEY = + "dfs.namenode.inotify.max.events.per.rpc"; + public static final int DFS_NAMENODE_INOTIFY_MAX_EVENTS_PER_RPC_DEFAULT = + 1000; + } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInotifyEventInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInotifyEventInputStream.java new file mode 100644 index 00000000000..73c5f55a43b --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInotifyEventInputStream.java @@ -0,0 +1,220 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hdfs; + +import com.google.common.collect.Iterators; +import com.google.common.util.concurrent.UncheckedExecutionException; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hdfs.inotify.Event; +import org.apache.hadoop.hdfs.inotify.EventsList; +import org.apache.hadoop.hdfs.inotify.MissingEventsException; +import org.apache.hadoop.hdfs.protocol.ClientProtocol; +import org.apache.hadoop.util.Time; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.Iterator; +import java.util.Random; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +/** + * Stream for reading inotify events. DFSInotifyEventInputStreams should not + * be shared among multiple threads. + */ +@InterfaceAudience.Public +@InterfaceStability.Unstable +public class DFSInotifyEventInputStream { + public static Logger LOG = LoggerFactory.getLogger(DFSInotifyEventInputStream + .class); + + private final ClientProtocol namenode; + private Iterator it; + private long lastReadTxid; + /** + * The most recent txid the NameNode told us it has sync'ed -- helps us + * determine how far behind we are in the edit stream. + */ + private long syncTxid; + /** + * Used to generate wait times in {@link DFSInotifyEventInputStream#take()}. + */ + private Random rng = new Random(); + + private static final int INITIAL_WAIT_MS = 10; + + DFSInotifyEventInputStream(ClientProtocol namenode) throws IOException { + this(namenode, namenode.getCurrentEditLogTxid()); // only consider new txn's + } + + DFSInotifyEventInputStream(ClientProtocol namenode, long lastReadTxid) + throws IOException { + this.namenode = namenode; + this.it = Iterators.emptyIterator(); + this.lastReadTxid = lastReadTxid; + } + + /** + * Returns the next event in the stream or null if no new events are currently + * available. + * + * @throws IOException because of network error or edit log + * corruption. Also possible if JournalNodes are unresponsive in the + * QJM setting (even one unresponsive JournalNode is enough in rare cases), + * so catching this exception and retrying at least a few times is + * recommended. + * @throws MissingEventsException if we cannot return the next event in the + * stream because the data for the event (and possibly some subsequent events) + * has been deleted (generally because this stream is a very large number of + * events behind the current state of the NameNode). It is safe to continue + * reading from the stream after this exception is thrown -- the next + * available event will be returned. 
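+ * <p>
+ * For example, a caller might drain the currently available events with a
+ * simple loop like the following (an illustrative sketch only; {@code stream}
+ * is assumed to be an already-created DFSInotifyEventInputStream, and the
+ * IOException from poll() is left to propagate to the caller):
+ * <pre>
+ * while (true) {
+ *   try {
+ *     Event event = stream.poll();
+ *     if (event == null) {
+ *       break; // nothing more to read right now
+ *     }
+ *     System.out.println("saw event of type " + event.getEventType());
+ *   } catch (MissingEventsException e) {
+ *     // some events were purged before they could be read; it is safe to
+ *     // keep reading from the same stream
+ *   }
+ * }
+ * </pre>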
+ */ + public Event poll() throws IOException, MissingEventsException { + // need to keep retrying until the NN sends us the latest committed txid + if (lastReadTxid == -1) { + LOG.debug("poll(): lastReadTxid is -1, reading current txid from NN"); + lastReadTxid = namenode.getCurrentEditLogTxid(); + return null; + } + if (!it.hasNext()) { + EventsList el = namenode.getEditsFromTxid(lastReadTxid + 1); + if (el.getLastTxid() != -1) { + // we only want to set syncTxid when we were actually able to read some + // edits on the NN -- otherwise it will seem like edits are being + // generated faster than we can read them when the problem is really + // that we are temporarily unable to read edits + syncTxid = el.getSyncTxid(); + it = el.getEvents().iterator(); + long formerLastReadTxid = lastReadTxid; + lastReadTxid = el.getLastTxid(); + if (el.getFirstTxid() != formerLastReadTxid + 1) { + throw new MissingEventsException(formerLastReadTxid + 1, + el.getFirstTxid()); + } + } else { + LOG.debug("poll(): read no edits from the NN when requesting edits " + + "after txid {}", lastReadTxid); + return null; + } + } + + if (it.hasNext()) { // can be empty if el.getLastTxid != -1 but none of the + // newly seen edit log ops actually got converted to events + return it.next(); + } else { + return null; + } + } + + /** + * Return an estimate of how many events behind the NameNode's current state + * this stream is. Clients should periodically call this method and check if + * its result is steadily increasing, which indicates that they are falling + * behind (i.e. events are being generated faster than the client is reading + * them). If a client falls too far behind, events may be deleted before the + * client can read them. + *

+ * A return value of -1 indicates that an estimate could not be produced, and + * should be ignored. The value returned by this method is really only useful + * when compared to previous or subsequent returned values. + */ + public long getEventsBehindEstimate() { + if (syncTxid == 0) { + return -1; + } else { + assert syncTxid >= lastReadTxid; + // this gives the difference between the last txid we have fetched to the + // client and syncTxid at the time we last fetched events from the + // NameNode + return syncTxid - lastReadTxid; + } + } + + /** + * Returns the next event in the stream, waiting up to the specified amount of + * time for a new event. Returns null if a new event is not available at the + * end of the specified amount of time. The time before the method returns may + * exceed the specified amount of time by up to the time required for an RPC + * to the NameNode. + * + * @param time number of units of the given TimeUnit to wait + * @param tu the desired TimeUnit + * @throws IOException see {@link DFSInotifyEventInputStream#poll()} + * @throws MissingEventsException + * see {@link DFSInotifyEventInputStream#poll()} + * @throws InterruptedException if the calling thread is interrupted + */ + public Event poll(long time, TimeUnit tu) throws IOException, + InterruptedException, MissingEventsException { + long initialTime = Time.monotonicNow(); + long totalWait = TimeUnit.MILLISECONDS.convert(time, tu); + long nextWait = INITIAL_WAIT_MS; + Event next = null; + while ((next = poll()) == null) { + long timeLeft = totalWait - (Time.monotonicNow() - initialTime); + if (timeLeft <= 0) { + LOG.debug("timed poll(): timed out"); + break; + } else if (timeLeft < nextWait * 2) { + nextWait = timeLeft; + } else { + nextWait *= 2; + } + LOG.debug("timed poll(): poll() returned null, sleeping for {} ms", + nextWait); + Thread.sleep(nextWait); + } + + return next; + } + + /** + * Returns the next event in the stream, waiting indefinitely if a new event + * is not immediately available. + * + * @throws IOException see {@link DFSInotifyEventInputStream#poll()} + * @throws MissingEventsException see + * {@link DFSInotifyEventInputStream#poll()} + * @throws InterruptedException if the calling thread is interrupted + */ + public Event take() throws IOException, InterruptedException, + MissingEventsException { + Event next = null; + int nextWaitMin = INITIAL_WAIT_MS; + while ((next = poll()) == null) { + // sleep for a random period between nextWaitMin and nextWaitMin * 2 + // to avoid stampedes at the NN if there are multiple clients + int sleepTime = nextWaitMin + rng.nextInt(nextWaitMin); + LOG.debug("take(): poll() returned null, sleeping for {} ms", sleepTime); + Thread.sleep(sleepTime); + // the maximum sleep is 2 minutes + nextWaitMin = Math.min(60000, nextWaitMin * 2); + } + + return next; + } +} \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java index 14977a25077..f3d66923b23 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java @@ -398,7 +398,7 @@ public class DFSOutputStream extends FSOutputSummer // one chunk that fills up the partial chunk. 
// computePacketChunkSize(0, freeInCksum); - resetChecksumChunk(freeInCksum); + setChecksumBufSize(freeInCksum); appendChunk = true; } else { // if the remaining space in the block is smaller than @@ -1178,7 +1178,17 @@ public class DFSOutputStream extends FSOutputSummer // Check if replace-datanode policy is satisfied. if (dfsClient.dtpReplaceDatanodeOnFailure.satisfy(blockReplication, nodes, isAppend, isHflushed)) { - addDatanode2ExistingPipeline(); + try { + addDatanode2ExistingPipeline(); + } catch(IOException ioe) { + if (!dfsClient.dtpReplaceDatanodeOnFailure.isBestEffort()) { + throw ioe; + } + DFSClient.LOG.warn("Failed to replace datanode." + + " Continue with the remaining datanodes since " + + DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_BEST_EFFORT_KEY + + " is set to true.", ioe); + } } // get a new generation stamp and an access token @@ -1563,7 +1573,7 @@ public class DFSOutputStream extends FSOutputSummer private DFSOutputStream(DFSClient dfsClient, String src, Progressable progress, HdfsFileStatus stat, DataChecksum checksum) throws IOException { - super(checksum, checksum.getBytesPerChecksum(), checksum.getChecksumSize()); + super(checksum); this.dfsClient = dfsClient; this.src = src; this.fileId = stat.getFileId(); @@ -1717,22 +1727,21 @@ public class DFSOutputStream extends FSOutputSummer // @see FSOutputSummer#writeChunk() @Override - protected synchronized void writeChunk(byte[] b, int offset, int len, byte[] checksum) - throws IOException { + protected synchronized void writeChunk(byte[] b, int offset, int len, + byte[] checksum, int ckoff, int cklen) throws IOException { dfsClient.checkOpen(); checkClosed(); - int cklen = checksum.length; int bytesPerChecksum = this.checksum.getBytesPerChecksum(); if (len > bytesPerChecksum) { throw new IOException("writeChunk() buffer size is " + len + " is larger than supported bytesPerChecksum " + bytesPerChecksum); } - if (checksum.length != this.checksum.getChecksumSize()) { + if (cklen != this.checksum.getChecksumSize()) { throw new IOException("writeChunk() checksum size is supposed to be " + this.checksum.getChecksumSize() + - " but found to be " + checksum.length); + " but found to be " + cklen); } if (currentPacket == null) { @@ -1748,7 +1757,7 @@ public class DFSOutputStream extends FSOutputSummer } } - currentPacket.writeChecksum(checksum, 0, cklen); + currentPacket.writeChecksum(checksum, ckoff, cklen); currentPacket.writeData(b, offset, len); currentPacket.numChunks++; bytesCurBlock += len; @@ -1772,7 +1781,7 @@ public class DFSOutputStream extends FSOutputSummer // crc chunks from now on. if (appendChunk && bytesCurBlock%bytesPerChecksum == 0) { appendChunk = false; - resetChecksumChunk(bytesPerChecksum); + resetChecksumBufSize(); } if (!appendChunk) { @@ -1853,20 +1862,13 @@ public class DFSOutputStream extends FSOutputSummer long lastBlockLength = -1L; boolean updateLength = syncFlags.contains(SyncFlag.UPDATE_LENGTH); synchronized (this) { - /* Record current blockOffset. This might be changed inside - * flushBuffer() where a partial checksum chunk might be flushed. - * After the flush, reset the bytesCurBlock back to its previous value, - * any partial checksum chunk will be sent now and in next packet. 
- */ - long saveOffset = bytesCurBlock; - Packet oldCurrentPacket = currentPacket; // flush checksum buffer, but keep checksum buffer intact - flushBuffer(true); + int numKept = flushBuffer(true, true); // bytesCurBlock potentially incremented if there was buffered data if (DFSClient.LOG.isDebugEnabled()) { DFSClient.LOG.debug( - "DFSClient flush() : saveOffset " + saveOffset + + "DFSClient flush() :" + " bytesCurBlock " + bytesCurBlock + " lastFlushOffset " + lastFlushOffset); } @@ -1883,14 +1885,6 @@ public class DFSOutputStream extends FSOutputSummer bytesCurBlock, currentSeqno++, this.checksum.getChecksumSize()); } } else { - // We already flushed up to this offset. - // This means that we haven't written anything since the last flush - // (or the beginning of the file). Hence, we should not have any - // packet queued prior to this call, since the last flush set - // currentPacket = null. - assert oldCurrentPacket == null : - "Empty flush should not occur with a currentPacket"; - if (isSync && bytesCurBlock > 0) { // Nothing to send right now, // and the block was partially written, @@ -1910,7 +1904,7 @@ public class DFSOutputStream extends FSOutputSummer // Restore state of stream. Record the last flush offset // of the last full chunk that was flushed. // - bytesCurBlock = saveOffset; + bytesCurBlock -= numKept; toWaitFor = lastQueuedSeqno; } // end synchronized diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java index e679974bff8..ac00f5c8cca 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java @@ -1966,4 +1966,13 @@ public class DistributedFileSystem extends FileSystem { } }.resolve(this, absF); } + + public DFSInotifyEventInputStream getInotifyEventStream() throws IOException { + return dfs.getInotifyEventStream(); + } + + public DFSInotifyEventInputStream getInotifyEventStream(long lastReadTxid) + throws IOException { + return dfs.getInotifyEventStream(lastReadTxid); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java index 1adfc1bfab0..fdc466a0658 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java @@ -29,6 +29,7 @@ import org.apache.hadoop.fs.CacheFlag; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.RemoteIterator; +import org.apache.hadoop.hdfs.DFSInotifyEventInputStream; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry; import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo; @@ -275,4 +276,53 @@ public class HdfsAdmin { throws IOException { return dfs.listEncryptionZones(); } + + /** + * Exposes a stream of namesystem events. Only events occurring after the + * stream is created are available. + * See {@link org.apache.hadoop.hdfs.DFSInotifyEventInputStream} + * for information on stream usage. + * See {@link org.apache.hadoop.hdfs.inotify.Event} + * for information on the available events. + *
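+ * <p>
+ * A minimal consumption loop might look like the following (an illustrative
+ * sketch only; {@code admin} is assumed to be an existing HdfsAdmin instance,
+ * and exception handling is omitted for brevity):
+ * <pre>
+ * DFSInotifyEventInputStream stream = admin.getInotifyEventStream();
+ * while (true) {
+ *   Event event = stream.take(); // blocks until the next event arrives
+ *   System.out.println("namespace change: " + event.getEventType());
+ * }
+ * </pre>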

+ * Inotify users may want to tune the following HDFS parameters to + * ensure that enough extra HDFS edits are saved to support inotify clients + * that fall behind the current state of the namespace while reading events. + * The default parameter values should generally be reasonable. If edits are + * deleted before their corresponding events can be read, clients will see a + * {@link org.apache.hadoop.hdfs.inotify.MissingEventsException} on + * {@link org.apache.hadoop.hdfs.DFSInotifyEventInputStream} method calls. + * + * It should generally be sufficient to tune these parameters: + * dfs.namenode.num.extra.edits.retained + * dfs.namenode.max.extra.edits.segments.retained + * + * Parameters that affect the number of created segments and the number of + * edits that are considered necessary (i.e. that do not count towards the + * dfs.namenode.num.extra.edits.retained quota): + * dfs.namenode.checkpoint.period + * dfs.namenode.checkpoint.txns + * dfs.namenode.num.checkpoints.retained + * dfs.ha.log-roll.period + *
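+ * <p>
+ * These properties are normally set in the NameNode's hdfs-site.xml; the
+ * programmatic equivalent would be something like the following (an
+ * illustrative sketch only; the values shown are not recommendations):
+ * <pre>
+ * Configuration conf = new Configuration();
+ * conf.setLong("dfs.namenode.num.extra.edits.retained", 10000000L);
+ * conf.setInt("dfs.namenode.max.extra.edits.segments.retained", 100);
+ * </pre>
+ *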

+ * It is recommended that local journaling be configured + * (dfs.namenode.edits.dir) for inotify (in addition to a shared journal) + * so that edit transfers from the shared journal can be avoided. + * + * @throws IOException If there was an error obtaining the stream. + */ + public DFSInotifyEventInputStream getInotifyEventStream() throws IOException { + return dfs.getInotifyEventStream(); + } + + /** + * A version of {@link HdfsAdmin#getInotifyEventStream()} meant for advanced + * users who are aware of HDFS edits up to lastReadTxid (e.g. because they + * have access to an FSImage inclusive of lastReadTxid) and only want to read + * events after this point. + */ + public DFSInotifyEventInputStream getInotifyEventStream(long lastReadTxid) + throws IOException { + return dfs.getInotifyEventStream(lastReadTxid); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/inotify/Event.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/inotify/Event.java new file mode 100644 index 00000000000..c7129ca324c --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/inotify/Event.java @@ -0,0 +1,452 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdfs.inotify; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.fs.XAttr; +import org.apache.hadoop.fs.permission.AclEntry; +import org.apache.hadoop.fs.permission.FsPermission; + +import java.util.List; + +/** + * Events sent by the inotify system. Note that no events are necessarily sent + * when a file is opened for read (although a MetadataUpdateEvent will be sent + * if the atime is updated). + */ +@InterfaceAudience.Public +@InterfaceStability.Unstable +public abstract class Event { + public static enum EventType { + CREATE, CLOSE, APPEND, RENAME, METADATA, UNLINK + } + + private EventType eventType; + + public EventType getEventType() { + return eventType; + } + + public Event(EventType eventType) { + this.eventType = eventType; + } + + /** + * Sent when a file is closed after append or create. + */ + public static class CloseEvent extends Event { + private String path; + private long fileSize; + private long timestamp; + + public CloseEvent(String path, long fileSize, long timestamp) { + super(EventType.CLOSE); + this.path = path; + this.fileSize = fileSize; + this.timestamp = timestamp; + } + + public String getPath() { + return path; + } + + /** + * The size of the closed file in bytes. May be -1 if the size is not + * available (e.g. in the case of a close generated by a concat operation). 
+ */ + public long getFileSize() { + return fileSize; + } + + /** + * The time when this event occurred, in milliseconds since the epoch. + */ + public long getTimestamp() { + return timestamp; + } + } + + /** + * Sent when a new file is created (including overwrite). + */ + public static class CreateEvent extends Event { + + public static enum INodeType { + FILE, DIRECTORY, SYMLINK; + } + + private INodeType iNodeType; + private String path; + private long ctime; + private int replication; + private String ownerName; + private String groupName; + private FsPermission perms; + private String symlinkTarget; + + public static class Builder { + private INodeType iNodeType; + private String path; + private long ctime; + private int replication; + private String ownerName; + private String groupName; + private FsPermission perms; + private String symlinkTarget; + + public Builder iNodeType(INodeType type) { + this.iNodeType = type; + return this; + } + + public Builder path(String path) { + this.path = path; + return this; + } + + public Builder ctime(long ctime) { + this.ctime = ctime; + return this; + } + + public Builder replication(int replication) { + this.replication = replication; + return this; + } + + public Builder ownerName(String ownerName) { + this.ownerName = ownerName; + return this; + } + + public Builder groupName(String groupName) { + this.groupName = groupName; + return this; + } + + public Builder perms(FsPermission perms) { + this.perms = perms; + return this; + } + + public Builder symlinkTarget(String symlinkTarget) { + this.symlinkTarget = symlinkTarget; + return this; + } + + public CreateEvent build() { + return new CreateEvent(this); + } + } + + private CreateEvent(Builder b) { + super(EventType.CREATE); + this.iNodeType = b.iNodeType; + this.path = b.path; + this.ctime = b.ctime; + this.replication = b.replication; + this.ownerName = b.ownerName; + this.groupName = b.groupName; + this.perms = b.perms; + this.symlinkTarget = b.symlinkTarget; + } + + public INodeType getiNodeType() { + return iNodeType; + } + + public String getPath() { + return path; + } + + /** + * Creation time of the file, directory, or symlink. + */ + public long getCtime() { + return ctime; + } + + /** + * Replication is zero if the CreateEvent iNodeType is directory or symlink. + */ + public int getReplication() { + return replication; + } + + public String getOwnerName() { + return ownerName; + } + + public String getGroupName() { + return groupName; + } + + public FsPermission getPerms() { + return perms; + } + + /** + * Symlink target is null if the CreateEvent iNodeType is not symlink. + */ + public String getSymlinkTarget() { + return symlinkTarget; + } + } + + /** + * Sent when there is an update to directory or file (none of the metadata + * tracked here applies to symlinks) that is not associated with another + * inotify event. The tracked metadata includes atime/mtime, replication, + * owner/group, permissions, ACLs, and XAttributes. Fields not relevant to the + * metadataType of the MetadataUpdateEvent will be null or will have their default + * values. 
+ */ + public static class MetadataUpdateEvent extends Event { + + public static enum MetadataType { + TIMES, REPLICATION, OWNER, PERMS, ACLS, XATTRS; + } + + private String path; + private MetadataType metadataType; + private long mtime; + private long atime; + private int replication; + private String ownerName; + private String groupName; + private FsPermission perms; + private List acls; + private List xAttrs; + private boolean xAttrsRemoved; + + public static class Builder { + private String path; + private MetadataType metadataType; + private long mtime; + private long atime; + private int replication; + private String ownerName; + private String groupName; + private FsPermission perms; + private List acls; + private List xAttrs; + private boolean xAttrsRemoved; + + public Builder path(String path) { + this.path = path; + return this; + } + + public Builder metadataType(MetadataType type) { + this.metadataType = type; + return this; + } + + public Builder mtime(long mtime) { + this.mtime = mtime; + return this; + } + + public Builder atime(long atime) { + this.atime = atime; + return this; + } + + public Builder replication(int replication) { + this.replication = replication; + return this; + } + + public Builder ownerName(String ownerName) { + this.ownerName = ownerName; + return this; + } + + public Builder groupName(String groupName) { + this.groupName = groupName; + return this; + } + + public Builder perms(FsPermission perms) { + this.perms = perms; + return this; + } + + public Builder acls(List acls) { + this.acls = acls; + return this; + } + + public Builder xAttrs(List xAttrs) { + this.xAttrs = xAttrs; + return this; + } + + public Builder xAttrsRemoved(boolean xAttrsRemoved) { + this.xAttrsRemoved = xAttrsRemoved; + return this; + } + + public MetadataUpdateEvent build() { + return new MetadataUpdateEvent(this); + } + } + + private MetadataUpdateEvent(Builder b) { + super(EventType.METADATA); + this.path = b.path; + this.metadataType = b.metadataType; + this.mtime = b.mtime; + this.atime = b.atime; + this.replication = b.replication; + this.ownerName = b.ownerName; + this.groupName = b.groupName; + this.perms = b.perms; + this.acls = b.acls; + this.xAttrs = b.xAttrs; + this.xAttrsRemoved = b.xAttrsRemoved; + } + + public String getPath() { + return path; + } + + public MetadataType getMetadataType() { + return metadataType; + } + + public long getMtime() { + return mtime; + } + + public long getAtime() { + return atime; + } + + public int getReplication() { + return replication; + } + + public String getOwnerName() { + return ownerName; + } + + public String getGroupName() { + return groupName; + } + + public FsPermission getPerms() { + return perms; + } + + /** + * The full set of ACLs currently associated with this file or directory. + * May be null if all ACLs were removed. + */ + public List getAcls() { + return acls; + } + + public List getxAttrs() { + return xAttrs; + } + + /** + * Whether the xAttrs returned by getxAttrs() were removed (as opposed to + * added). + */ + public boolean isxAttrsRemoved() { + return xAttrsRemoved; + } + + } + + /** + * Sent when a file, directory, or symlink is renamed. 
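+ * <p>
+ * Consumers typically check {@link Event#getEventType()} and then downcast,
+ * for example (an illustrative sketch only; {@code event} is assumed to have
+ * been read from a DFSInotifyEventInputStream):
+ * <pre>
+ * if (event.getEventType() == Event.EventType.RENAME) {
+ *   Event.RenameEvent rename = (Event.RenameEvent) event;
+ *   System.out.println(rename.getSrcPath() + " was renamed to "
+ *       + rename.getDstPath());
+ * }
+ * </pre>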
+ */ + public static class RenameEvent extends Event { + private String srcPath; + private String dstPath; + private long timestamp; + + public RenameEvent(String srcPath, String dstPath, long timestamp) { + super(EventType.RENAME); + this.srcPath = srcPath; + this.dstPath = dstPath; + this.timestamp = timestamp; + } + + public String getSrcPath() { + return srcPath; + } + + public String getDstPath() { + return dstPath; + } + + /** + * The time when this event occurred, in milliseconds since the epoch. + */ + public long getTimestamp() { + return timestamp; + } + } + + /** + * Sent when an existing file is opened for append. + */ + public static class AppendEvent extends Event { + private String path; + + public AppendEvent(String path) { + super(EventType.APPEND); + this.path = path; + } + + public String getPath() { + return path; + } + } + + /** + * Sent when a file, directory, or symlink is deleted. + */ + public static class UnlinkEvent extends Event { + private String path; + private long timestamp; + + public UnlinkEvent(String path, long timestamp) { + super(EventType.UNLINK); + this.path = path; + this.timestamp = timestamp; + } + + public String getPath() { + return path; + } + + /** + * The time when this event occurred, in milliseconds since the epoch. + */ + public long getTimestamp() { + return timestamp; + } + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/inotify/EventsList.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/inotify/EventsList.java new file mode 100644 index 00000000000..6d02d3c2980 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/inotify/EventsList.java @@ -0,0 +1,63 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdfs.inotify; + +import org.apache.hadoop.classification.InterfaceAudience; + +import java.util.List; + +/** + * Contains a set of events, the transaction ID in the edit log up to which we + * read to produce these events, and the first txid we observed when producing + * these events (the last of which is for the purpose of determining whether we + * have missed events due to edit deletion). Also contains the most recent txid + * that the NameNode has sync'ed, so the client can determine how far behind in + * the edit log it is. 
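+ * <p>
+ * On the client side this information surfaces through
+ * DFSInotifyEventInputStream#getEventsBehindEstimate(), which can be sampled
+ * periodically to detect a reader that is falling behind, for example (an
+ * illustrative sketch only; {@code stream} is assumed to exist):
+ * <pre>
+ * long lag = stream.getEventsBehindEstimate();
+ * if (lag != -1) {
+ *   System.out.println("reader is roughly " + lag + " events behind the NN");
+ * }
+ * </pre>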
+ */ +@InterfaceAudience.Private +public class EventsList { + private List events; + private long firstTxid; + private long lastTxid; + private long syncTxid; + + public EventsList(List events, long firstTxid, long lastTxid, + long syncTxid) { + this.events = events; + this.firstTxid = firstTxid; + this.lastTxid = lastTxid; + this.syncTxid = syncTxid; + } + + public List getEvents() { + return events; + } + + public long getFirstTxid() { + return firstTxid; + } + + public long getLastTxid() { + return lastTxid; + } + + public long getSyncTxid() { + return syncTxid; + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/inotify/MissingEventsException.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/inotify/MissingEventsException.java new file mode 100644 index 00000000000..e4b51c50c01 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/inotify/MissingEventsException.java @@ -0,0 +1,54 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdfs.inotify; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class MissingEventsException extends Exception { + private static final long serialVersionUID = 1L; + + private long expectedTxid; + private long actualTxid; + + public MissingEventsException() {} + + public MissingEventsException(long expectedTxid, long actualTxid) { + this.expectedTxid = expectedTxid; + this.actualTxid = actualTxid; + } + + public long getExpectedTxid() { + return expectedTxid; + } + + public long getActualTxid() { + return actualTxid; + } + + @Override + public String toString() { + return "We expected the next batch of events to start with transaction ID " + + expectedTxid + ", but it instead started with transaction ID " + + actualTxid + ". 
Most likely the intervening transactions were cleaned " + + "up as part of checkpointing."; + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java index 54c612c0add..23d44130200 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java @@ -43,10 +43,13 @@ import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.inotify.Event; +import org.apache.hadoop.hdfs.inotify.EventsList; import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction; import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp; import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException; import org.apache.hadoop.hdfs.server.namenode.SafeModeException; import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport; @@ -1386,4 +1389,19 @@ public interface ClientProtocol { */ @Idempotent public void checkAccess(String path, FsAction mode) throws IOException; + + /** + * Get the highest txid the NameNode knows has been written to the edit + * log, or -1 if the NameNode's edit log is not yet open for write. Used as + * the starting point for the inotify event stream. + */ + @Idempotent + public long getCurrentEditLogTxid() throws IOException; + + /** + * Get an ordered list of events corresponding to the edit log transactions + * from txid onwards. + */ + @Idempotent + public EventsList getEditsFromTxid(long txid) throws IOException; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/ReplaceDatanodeOnFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/ReplaceDatanodeOnFailure.java index 318455424aa..0f2c1abdf15 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/ReplaceDatanodeOnFailure.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/ReplaceDatanodeOnFailure.java @@ -29,26 +29,90 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo; */ @InterfaceAudience.Private @InterfaceStability.Evolving -public enum ReplaceDatanodeOnFailure { - /** The feature is disabled in the entire site. */ - DISABLE, - /** Never add a new datanode. */ - NEVER, - /** - * DEFAULT policy: - * Let r be the replication number. - * Let n be the number of existing datanodes. - * Add a new datanode only if r >= 3 and either - * (1) floor(r/2) >= n; or - * (2) r > n and the block is hflushed/appended. - */ - DEFAULT, - /** Always add a new datanode when an existing datanode is removed. */ - ALWAYS; +public class ReplaceDatanodeOnFailure { + /** The replacement policies */ + public enum Policy { + /** The feature is disabled in the entire site. */ + DISABLE(Condition.FALSE), + /** Never add a new datanode. 
*/ + NEVER(Condition.FALSE), + /** @see ReplaceDatanodeOnFailure.Condition#DEFAULT */ + DEFAULT(Condition.DEFAULT), + /** Always add a new datanode when an existing datanode is removed. */ + ALWAYS(Condition.TRUE); + + private final Condition condition; + + private Policy(Condition condition) { + this.condition = condition; + } + + Condition getCondition() { + return condition; + } + } + + /** Datanode replacement condition */ + private static interface Condition { + /** Return true unconditionally. */ + static final Condition TRUE = new Condition() { + @Override + public boolean satisfy(short replication, DatanodeInfo[] existings, + int nExistings, boolean isAppend, boolean isHflushed) { + return true; + } + }; + + /** Return false unconditionally. */ + static final Condition FALSE = new Condition() { + @Override + public boolean satisfy(short replication, DatanodeInfo[] existings, + int nExistings, boolean isAppend, boolean isHflushed) { + return false; + } + }; + + /** + * DEFAULT condition: + * Let r be the replication number. + * Let n be the number of existing datanodes. + * Add a new datanode only if r >= 3 and either + * (1) floor(r/2) >= n; or + * (2) r > n and the block is hflushed/appended. + */ + static final Condition DEFAULT = new Condition() { + @Override + public boolean satisfy(final short replication, + final DatanodeInfo[] existings, final int n, final boolean isAppend, + final boolean isHflushed) { + if (replication < 3) { + return false; + } else { + if (n <= (replication/2)) { + return true; + } else { + return isAppend || isHflushed; + } + } + } + }; + + /** Is the condition satisfied? */ + public boolean satisfy(short replication, DatanodeInfo[] existings, + int nExistings, boolean isAppend, boolean isHflushed); + } + + private final Policy policy; + private final boolean bestEffort; + + public ReplaceDatanodeOnFailure(Policy policy, boolean bestEffort) { + this.policy = policy; + this.bestEffort = bestEffort; + } /** Check if the feature is enabled. */ public void checkEnabled() { - if (this == DISABLE) { + if (policy == Policy.DISABLE) { throw new UnsupportedOperationException( "This feature is disabled. Please refer to " + DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_ENABLE_KEY @@ -56,7 +120,20 @@ public enum ReplaceDatanodeOnFailure { } } - /** Is the policy satisfied? */ + /** + * Best effort means that the client will try to replace the failed datanode + * (provided that the policy is satisfied); however, it will continue the + * write operation if the datanode replacement also fails. + * + * @return Suppose the datanode replacement fails. + * false: An exception should be thrown so that the write will fail. + * true : The write should be resumed with the remaining datanodes. + */ + public boolean isBestEffort() { + return bestEffort; + } + + /** Does it need a replacement according to the policy? */ public boolean satisfy( final short replication, final DatanodeInfo[] existings, final boolean isAppend, final boolean isHflushed) { @@ -64,40 +141,42 @@ public enum ReplaceDatanodeOnFailure { if (n == 0 || n >= replication) { //don't need to add datanode for any policy. 
return false; - } else if (this == DISABLE || this == NEVER) { - return false; - } else if (this == ALWAYS) { - return true; } else { - //DEFAULT - if (replication < 3) { - return false; - } else { - if (n <= (replication/2)) { - return true; - } else { - return isAppend || isHflushed; - } - } + return policy.getCondition().satisfy( + replication, existings, n, isAppend, isHflushed); } } + + @Override + public String toString() { + return policy.toString(); + } /** Get the setting from configuration. */ public static ReplaceDatanodeOnFailure get(final Configuration conf) { + final Policy policy = getPolicy(conf); + final boolean bestEffort = conf.getBoolean( + DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_BEST_EFFORT_KEY, + DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_BEST_EFFORT_DEFAULT); + + return new ReplaceDatanodeOnFailure(policy, bestEffort); + } + + private static Policy getPolicy(final Configuration conf) { final boolean enabled = conf.getBoolean( DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_ENABLE_KEY, DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_ENABLE_DEFAULT); if (!enabled) { - return DISABLE; + return Policy.DISABLE; } final String policy = conf.get( DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_POLICY_KEY, DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_POLICY_DEFAULT); - for(int i = 1; i < values().length; i++) { - final ReplaceDatanodeOnFailure rdof = values()[i]; - if (rdof.name().equalsIgnoreCase(policy)) { - return rdof; + for(int i = 1; i < Policy.values().length; i++) { + final Policy p = Policy.values()[i]; + if (p.name().equalsIgnoreCase(policy)) { + return p; } } throw new HadoopIllegalArgumentException("Illegal configuration value for " @@ -106,12 +185,16 @@ public enum ReplaceDatanodeOnFailure { } /** Write the setting to configuration. 
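+ * <p>
+ * For example (an illustrative sketch only; {@code conf} is assumed to be an
+ * existing Configuration):
+ * <pre>
+ * ReplaceDatanodeOnFailure.write(Policy.DEFAULT, true, conf);
+ * ReplaceDatanodeOnFailure setting = ReplaceDatanodeOnFailure.get(conf);
+ * boolean bestEffort = setting.isBestEffort(); // true
+ * </pre>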
*/ - public void write(final Configuration conf) { + public static void write(final Policy policy, + final boolean bestEffort, final Configuration conf) { conf.setBoolean( DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_ENABLE_KEY, - this != DISABLE); + policy != Policy.DISABLE); conf.set( DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_POLICY_KEY, - name()); + policy.name()); + conf.setBoolean( + DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_BEST_EFFORT_KEY, + bestEffort); } } \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java index 5d222e87727..03145764f27 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java @@ -91,12 +91,16 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlo import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto.Builder; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto; @@ -1424,4 +1428,25 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements } return VOID_SET_STORAGE_POLICY_RESPONSE; } + + public GetCurrentEditLogTxidResponseProto getCurrentEditLogTxid(RpcController controller, + GetCurrentEditLogTxidRequestProto req) throws ServiceException { + try { + return GetCurrentEditLogTxidResponseProto.newBuilder().setTxid( + server.getCurrentEditLogTxid()).build(); + } catch (IOException e) { + throw new ServiceException(e); + } + } + + @Override + public GetEditsFromTxidResponseProto getEditsFromTxid(RpcController controller, + GetEditsFromTxidRequestProto 
req) throws ServiceException { + try { + return PBHelper.convertEditsResponse(server.getEditsFromTxid( + req.getTxid())); + } catch (IOException e) { + throw new ServiceException(e); + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java index 63af6b049e7..4295ce75108 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java @@ -43,6 +43,7 @@ import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hdfs.inotify.EventsList; import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException; import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry; import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo; @@ -97,10 +98,12 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdd import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto; @@ -162,6 +165,7 @@ import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.RemoveXAttrRequestProto import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.SetXAttrRequestProto; import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp; import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException; import org.apache.hadoop.hdfs.server.namenode.SafeModeException; import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport; @@ -1446,4 +1450,25 @@ public class ClientNamenodeProtocolTranslatorPB implements throw ProtobufHelper.getRemoteException(e); } } + + public long getCurrentEditLogTxid() throws IOException { + GetCurrentEditLogTxidRequestProto req = GetCurrentEditLogTxidRequestProto + .getDefaultInstance(); + try { + return rpcProxy.getCurrentEditLogTxid(null, req).getTxid(); + } catch (ServiceException 
e) { + throw ProtobufHelper.getRemoteException(e); + } + } + + @Override + public EventsList getEditsFromTxid(long txid) throws IOException { + GetEditsFromTxidRequestProto req = GetEditsFromTxidRequestProto.newBuilder() + .setTxid(txid).build(); + try { + return PBHelper.convert(rpcProxy.getEditsFromTxid(null, req)); + } catch (ServiceException e) { + throw ProtobufHelper.getRemoteException(e); + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java index 4a882cd17fd..fe6093d0ea2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java @@ -47,6 +47,8 @@ import org.apache.hadoop.ha.proto.HAServiceProtocolProtos; import org.apache.hadoop.hdfs.BlockStoragePolicy; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.StorageType; +import org.apache.hadoop.hdfs.inotify.Event; +import org.apache.hadoop.hdfs.inotify.EventsList; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry; import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo; @@ -97,6 +99,7 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheP import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateFlagProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeActionProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto; @@ -159,6 +162,7 @@ import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto; +import org.apache.hadoop.hdfs.protocol.proto.InotifyProtos; import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto; import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsResponseProto; import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.ListXAttrsResponseProto; @@ -2360,6 +2364,247 @@ public class PBHelper { return new ShmId(shmId.getHi(), shmId.getLo()); } + private static Event.CreateEvent.INodeType createTypeConvert(InotifyProtos.INodeType + type) { + switch (type) { + case I_TYPE_DIRECTORY: + return Event.CreateEvent.INodeType.DIRECTORY; + case I_TYPE_FILE: + return Event.CreateEvent.INodeType.FILE; + case I_TYPE_SYMLINK: + return Event.CreateEvent.INodeType.SYMLINK; + default: + return null; + } + } + + private static InotifyProtos.MetadataUpdateType metadataUpdateTypeConvert( + Event.MetadataUpdateEvent.MetadataType type) { + switch (type) { + case TIMES: + return InotifyProtos.MetadataUpdateType.META_TYPE_TIMES; + case REPLICATION: + return InotifyProtos.MetadataUpdateType.META_TYPE_REPLICATION; + case OWNER: + return 
InotifyProtos.MetadataUpdateType.META_TYPE_OWNER; + case PERMS: + return InotifyProtos.MetadataUpdateType.META_TYPE_PERMS; + case ACLS: + return InotifyProtos.MetadataUpdateType.META_TYPE_ACLS; + case XATTRS: + return InotifyProtos.MetadataUpdateType.META_TYPE_XATTRS; + default: + return null; + } + } + + private static Event.MetadataUpdateEvent.MetadataType metadataUpdateTypeConvert( + InotifyProtos.MetadataUpdateType type) { + switch (type) { + case META_TYPE_TIMES: + return Event.MetadataUpdateEvent.MetadataType.TIMES; + case META_TYPE_REPLICATION: + return Event.MetadataUpdateEvent.MetadataType.REPLICATION; + case META_TYPE_OWNER: + return Event.MetadataUpdateEvent.MetadataType.OWNER; + case META_TYPE_PERMS: + return Event.MetadataUpdateEvent.MetadataType.PERMS; + case META_TYPE_ACLS: + return Event.MetadataUpdateEvent.MetadataType.ACLS; + case META_TYPE_XATTRS: + return Event.MetadataUpdateEvent.MetadataType.XATTRS; + default: + return null; + } + } + + private static InotifyProtos.INodeType createTypeConvert(Event.CreateEvent.INodeType + type) { + switch (type) { + case DIRECTORY: + return InotifyProtos.INodeType.I_TYPE_DIRECTORY; + case FILE: + return InotifyProtos.INodeType.I_TYPE_FILE; + case SYMLINK: + return InotifyProtos.INodeType.I_TYPE_SYMLINK; + default: + return null; + } + } + + public static EventsList convert(GetEditsFromTxidResponseProto resp) throws + IOException { + List events = Lists.newArrayList(); + for (InotifyProtos.EventProto p : resp.getEventsList().getEventsList()) { + switch(p.getType()) { + case EVENT_CLOSE: + InotifyProtos.CloseEventProto close = + InotifyProtos.CloseEventProto.parseFrom(p.getContents()); + events.add(new Event.CloseEvent(close.getPath(), close.getFileSize(), + close.getTimestamp())); + break; + case EVENT_CREATE: + InotifyProtos.CreateEventProto create = + InotifyProtos.CreateEventProto.parseFrom(p.getContents()); + events.add(new Event.CreateEvent.Builder() + .iNodeType(createTypeConvert(create.getType())) + .path(create.getPath()) + .ctime(create.getCtime()) + .ownerName(create.getOwnerName()) + .groupName(create.getGroupName()) + .perms(convert(create.getPerms())) + .replication(create.getReplication()) + .symlinkTarget(create.getSymlinkTarget().isEmpty() ? null : + create.getSymlinkTarget()).build()); + break; + case EVENT_METADATA: + InotifyProtos.MetadataUpdateEventProto meta = + InotifyProtos.MetadataUpdateEventProto.parseFrom(p.getContents()); + events.add(new Event.MetadataUpdateEvent.Builder() + .path(meta.getPath()) + .metadataType(metadataUpdateTypeConvert(meta.getType())) + .mtime(meta.getMtime()) + .atime(meta.getAtime()) + .replication(meta.getReplication()) + .ownerName( + meta.getOwnerName().isEmpty() ? null : meta.getOwnerName()) + .groupName( + meta.getGroupName().isEmpty() ? null : meta.getGroupName()) + .perms(meta.hasPerms() ? convert(meta.getPerms()) : null) + .acls(meta.getAclsList().isEmpty() ? null : convertAclEntry( + meta.getAclsList())) + .xAttrs(meta.getXAttrsList().isEmpty() ? 
null : convertXAttrs( + meta.getXAttrsList())) + .xAttrsRemoved(meta.getXAttrsRemoved()) + .build()); + break; + case EVENT_RENAME: + InotifyProtos.RenameEventProto rename = + InotifyProtos.RenameEventProto.parseFrom(p.getContents()); + events.add(new Event.RenameEvent(rename.getSrcPath(), rename.getDestPath(), + rename.getTimestamp())); + break; + case EVENT_APPEND: + InotifyProtos.AppendEventProto reopen = + InotifyProtos.AppendEventProto.parseFrom(p.getContents()); + events.add(new Event.AppendEvent(reopen.getPath())); + break; + case EVENT_UNLINK: + InotifyProtos.UnlinkEventProto unlink = + InotifyProtos.UnlinkEventProto.parseFrom(p.getContents()); + events.add(new Event.UnlinkEvent(unlink.getPath(), unlink.getTimestamp())); + break; + default: + throw new RuntimeException("Unexpected inotify event type: " + + p.getType()); + } + } + return new EventsList(events, resp.getEventsList().getFirstTxid(), + resp.getEventsList().getLastTxid(), resp.getEventsList().getSyncTxid()); + } + + public static GetEditsFromTxidResponseProto convertEditsResponse(EventsList el) { + InotifyProtos.EventsListProto.Builder builder = + InotifyProtos.EventsListProto.newBuilder(); + for (Event e : el.getEvents()) { + switch(e.getEventType()) { + case CLOSE: + Event.CloseEvent ce = (Event.CloseEvent) e; + builder.addEvents(InotifyProtos.EventProto.newBuilder() + .setType(InotifyProtos.EventType.EVENT_CLOSE) + .setContents( + InotifyProtos.CloseEventProto.newBuilder() + .setPath(ce.getPath()) + .setFileSize(ce.getFileSize()) + .setTimestamp(ce.getTimestamp()).build().toByteString() + ).build()); + break; + case CREATE: + Event.CreateEvent ce2 = (Event.CreateEvent) e; + builder.addEvents(InotifyProtos.EventProto.newBuilder() + .setType(InotifyProtos.EventType.EVENT_CREATE) + .setContents( + InotifyProtos.CreateEventProto.newBuilder() + .setType(createTypeConvert(ce2.getiNodeType())) + .setPath(ce2.getPath()) + .setCtime(ce2.getCtime()) + .setOwnerName(ce2.getOwnerName()) + .setGroupName(ce2.getGroupName()) + .setPerms(convert(ce2.getPerms())) + .setReplication(ce2.getReplication()) + .setSymlinkTarget(ce2.getSymlinkTarget() == null ? + "" : ce2.getSymlinkTarget()).build().toByteString() + ).build()); + break; + case METADATA: + Event.MetadataUpdateEvent me = (Event.MetadataUpdateEvent) e; + InotifyProtos.MetadataUpdateEventProto.Builder metaB = + InotifyProtos.MetadataUpdateEventProto.newBuilder() + .setPath(me.getPath()) + .setType(metadataUpdateTypeConvert(me.getMetadataType())) + .setMtime(me.getMtime()) + .setAtime(me.getAtime()) + .setReplication(me.getReplication()) + .setOwnerName(me.getOwnerName() == null ? "" : + me.getOwnerName()) + .setGroupName(me.getGroupName() == null ? "" : + me.getGroupName()) + .addAllAcls(me.getAcls() == null ? + Lists.newArrayList() : + convertAclEntryProto(me.getAcls())) + .addAllXAttrs(me.getxAttrs() == null ? 
+ Lists.newArrayList() : + convertXAttrProto(me.getxAttrs())) + .setXAttrsRemoved(me.isxAttrsRemoved()); + if (me.getPerms() != null) { + metaB.setPerms(convert(me.getPerms())); + } + builder.addEvents(InotifyProtos.EventProto.newBuilder() + .setType(InotifyProtos.EventType.EVENT_METADATA) + .setContents(metaB.build().toByteString()) + .build()); + break; + case RENAME: + Event.RenameEvent re = (Event.RenameEvent) e; + builder.addEvents(InotifyProtos.EventProto.newBuilder() + .setType(InotifyProtos.EventType.EVENT_RENAME) + .setContents( + InotifyProtos.RenameEventProto.newBuilder() + .setSrcPath(re.getSrcPath()) + .setDestPath(re.getDstPath()) + .setTimestamp(re.getTimestamp()).build().toByteString() + ).build()); + break; + case APPEND: + Event.AppendEvent re2 = (Event.AppendEvent) e; + builder.addEvents(InotifyProtos.EventProto.newBuilder() + .setType(InotifyProtos.EventType.EVENT_APPEND) + .setContents( + InotifyProtos.AppendEventProto.newBuilder() + .setPath(re2.getPath()).build().toByteString() + ).build()); + break; + case UNLINK: + Event.UnlinkEvent ue = (Event.UnlinkEvent) e; + builder.addEvents(InotifyProtos.EventProto.newBuilder() + .setType(InotifyProtos.EventType.EVENT_UNLINK) + .setContents( + InotifyProtos.UnlinkEventProto.newBuilder() + .setPath(ue.getPath()) + .setTimestamp(ue.getTimestamp()).build().toByteString() + ).build()); + break; + default: + throw new RuntimeException("Unexpected inotify event: " + e); + } + } + builder.setFirstTxid(el.getFirstTxid()); + builder.setLastTxid(el.getLastTxid()); + builder.setSyncTxid(el.getSyncTxid()); + return GetEditsFromTxidResponseProto.newBuilder().setEventsList( + builder.build()).build(); + } + public static HdfsProtos.CipherSuite convert(CipherSuite suite) { switch (suite) { case UNKNOWN: diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java index 0196c5b0229..e37869c7975 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java @@ -79,7 +79,17 @@ public class IPCLoggerChannel implements AsyncLogger { protected final InetSocketAddress addr; private QJournalProtocol proxy; - private final ListeningExecutorService executor; + /** + * Executes tasks submitted to it serially, on a single thread, in FIFO order + * (generally used for write tasks that should not be reordered). + */ + private final ListeningExecutorService singleThreadExecutor; + /** + * Executes tasks submitted to it in parallel with each other and with those + * submitted to singleThreadExecutor (generally used for read tasks that can + * be safely reordered and interleaved with writes). 
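The two comments above capture the split this patch makes in IPCLoggerChannel: journal writes go to a single-threaded, FIFO executor so they cannot be reordered, while reads such as getEditLogManifest go to a cached pool that may run in parallel with them. As a rough, self-contained sketch of that arrangement using Guava's listening executors (the class and method names below are illustrative only, not part of the patch):

import java.util.concurrent.Callable;
import java.util.concurrent.Executors;

import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import com.google.common.util.concurrent.ThreadFactoryBuilder;

public class DualExecutorSketch {
  // Writes are serialized: one daemon thread, strict FIFO order.
  private final ListeningExecutorService orderedWrites =
      MoreExecutors.listeningDecorator(Executors.newSingleThreadExecutor(
          new ThreadFactoryBuilder().setDaemon(true)
              .setNameFormat("ordered-write-%d").build()));

  // Reads may interleave with each other and with in-flight writes.
  private final ListeningExecutorService parallelReads =
      MoreExecutors.listeningDecorator(Executors.newCachedThreadPool(
          new ThreadFactoryBuilder().setDaemon(true)
              .setNameFormat("parallel-read-%d").build()));

  public <T> ListenableFuture<T> submitWrite(Callable<T> write) {
    return orderedWrites.submit(write);
  }

  public <T> ListenableFuture<T> submitRead(Callable<T> read) {
    return parallelReads.submit(read);
  }

  public void shutdown() {
    orderedWrites.shutdown();
    parallelReads.shutdown();
  }
}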
+ */ + private final ListeningExecutorService parallelExecutor; private long ipcSerial = 0; private long epoch = -1; private long committedTxId = HdfsConstants.INVALID_TXID; @@ -160,8 +170,10 @@ public class IPCLoggerChannel implements AsyncLogger { DFSConfigKeys.DFS_QJOURNAL_QUEUE_SIZE_LIMIT_KEY, DFSConfigKeys.DFS_QJOURNAL_QUEUE_SIZE_LIMIT_DEFAULT); - executor = MoreExecutors.listeningDecorator( - createExecutor()); + singleThreadExecutor = MoreExecutors.listeningDecorator( + createSingleThreadExecutor()); + parallelExecutor = MoreExecutors.listeningDecorator( + createParallelExecutor()); metrics = IPCLoggerChannelMetrics.create(this); } @@ -183,7 +195,8 @@ public class IPCLoggerChannel implements AsyncLogger { @Override public void close() { // No more tasks may be submitted after this point. - executor.shutdown(); + singleThreadExecutor.shutdown(); + parallelExecutor.shutdown(); if (proxy != null) { // TODO: this can hang for quite some time if the client // is currently in the middle of a call to a downed JN. @@ -230,15 +243,30 @@ public class IPCLoggerChannel implements AsyncLogger { * Separated out for easy overriding in tests. */ @VisibleForTesting - protected ExecutorService createExecutor() { + protected ExecutorService createSingleThreadExecutor() { return Executors.newSingleThreadExecutor( new ThreadFactoryBuilder() .setDaemon(true) - .setNameFormat("Logger channel to " + addr) + .setNameFormat("Logger channel (from single-thread executor) to " + + addr) .setUncaughtExceptionHandler( UncaughtExceptionHandlers.systemExit()) .build()); } + + /** + * Separated out for easy overriding in tests. + */ + @VisibleForTesting + protected ExecutorService createParallelExecutor() { + return Executors.newCachedThreadPool( + new ThreadFactoryBuilder() + .setDaemon(true) + .setNameFormat("Logger channel (from parallel executor) to " + addr) + .setUncaughtExceptionHandler( + UncaughtExceptionHandlers.systemExit()) + .build()); + } @Override public URL buildURLToFetchLogs(long segmentTxId) { @@ -286,7 +314,7 @@ public class IPCLoggerChannel implements AsyncLogger { @VisibleForTesting void waitForAllPendingCalls() throws InterruptedException { try { - executor.submit(new Runnable() { + singleThreadExecutor.submit(new Runnable() { @Override public void run() { } @@ -299,7 +327,7 @@ public class IPCLoggerChannel implements AsyncLogger { @Override public ListenableFuture isFormatted() { - return executor.submit(new Callable() { + return singleThreadExecutor.submit(new Callable() { @Override public Boolean call() throws IOException { return getProxy().isFormatted(journalId); @@ -309,7 +337,7 @@ public class IPCLoggerChannel implements AsyncLogger { @Override public ListenableFuture getJournalState() { - return executor.submit(new Callable() { + return singleThreadExecutor.submit(new Callable() { @Override public GetJournalStateResponseProto call() throws IOException { GetJournalStateResponseProto ret = @@ -323,7 +351,7 @@ public class IPCLoggerChannel implements AsyncLogger { @Override public ListenableFuture newEpoch( final long epoch) { - return executor.submit(new Callable() { + return singleThreadExecutor.submit(new Callable() { @Override public NewEpochResponseProto call() throws IOException { return getProxy().newEpoch(journalId, nsInfo, epoch); @@ -347,7 +375,7 @@ public class IPCLoggerChannel implements AsyncLogger { ListenableFuture ret = null; try { - ret = executor.submit(new Callable() { + ret = singleThreadExecutor.submit(new Callable() { @Override public Void call() throws IOException { 
throwIfOutOfSync(); @@ -464,7 +492,7 @@ public class IPCLoggerChannel implements AsyncLogger { @Override public ListenableFuture format(final NamespaceInfo nsInfo) { - return executor.submit(new Callable() { + return singleThreadExecutor.submit(new Callable() { @Override public Void call() throws Exception { getProxy().format(journalId, nsInfo); @@ -476,7 +504,7 @@ public class IPCLoggerChannel implements AsyncLogger { @Override public ListenableFuture startLogSegment(final long txid, final int layoutVersion) { - return executor.submit(new Callable() { + return singleThreadExecutor.submit(new Callable() { @Override public Void call() throws IOException { getProxy().startLogSegment(createReqInfo(), txid, layoutVersion); @@ -497,7 +525,7 @@ public class IPCLoggerChannel implements AsyncLogger { @Override public ListenableFuture finalizeLogSegment( final long startTxId, final long endTxId) { - return executor.submit(new Callable() { + return singleThreadExecutor.submit(new Callable() { @Override public Void call() throws IOException { throwIfOutOfSync(); @@ -510,7 +538,7 @@ public class IPCLoggerChannel implements AsyncLogger { @Override public ListenableFuture purgeLogsOlderThan(final long minTxIdToKeep) { - return executor.submit(new Callable() { + return singleThreadExecutor.submit(new Callable() { @Override public Void call() throws Exception { getProxy().purgeLogsOlderThan(createReqInfo(), minTxIdToKeep); @@ -522,7 +550,7 @@ public class IPCLoggerChannel implements AsyncLogger { @Override public ListenableFuture getEditLogManifest( final long fromTxnId, final boolean inProgressOk) { - return executor.submit(new Callable() { + return parallelExecutor.submit(new Callable() { @Override public RemoteEditLogManifest call() throws IOException { GetEditLogManifestResponseProto ret = getProxy().getEditLogManifest( @@ -538,7 +566,7 @@ public class IPCLoggerChannel implements AsyncLogger { @Override public ListenableFuture prepareRecovery( final long segmentTxId) { - return executor.submit(new Callable() { + return singleThreadExecutor.submit(new Callable() { @Override public PrepareRecoveryResponseProto call() throws IOException { if (!hasHttpServerEndPoint()) { @@ -556,7 +584,7 @@ public class IPCLoggerChannel implements AsyncLogger { @Override public ListenableFuture acceptRecovery( final SegmentStateProto log, final URL url) { - return executor.submit(new Callable() { + return singleThreadExecutor.submit(new Callable() { @Override public Void call() throws IOException { getProxy().acceptRecovery(createReqInfo(), log, url); @@ -567,7 +595,7 @@ public class IPCLoggerChannel implements AsyncLogger { @Override public ListenableFuture doPreUpgrade() { - return executor.submit(new Callable() { + return singleThreadExecutor.submit(new Callable() { @Override public Void call() throws IOException { getProxy().doPreUpgrade(journalId); @@ -578,7 +606,7 @@ public class IPCLoggerChannel implements AsyncLogger { @Override public ListenableFuture doUpgrade(final StorageInfo sInfo) { - return executor.submit(new Callable() { + return singleThreadExecutor.submit(new Callable() { @Override public Void call() throws IOException { getProxy().doUpgrade(journalId, sInfo); @@ -589,7 +617,7 @@ public class IPCLoggerChannel implements AsyncLogger { @Override public ListenableFuture doFinalize() { - return executor.submit(new Callable() { + return singleThreadExecutor.submit(new Callable() { @Override public Void call() throws IOException { getProxy().doFinalize(journalId); @@ -601,7 +629,7 @@ public class 
IPCLoggerChannel implements AsyncLogger { @Override public ListenableFuture canRollBack(final StorageInfo storage, final StorageInfo prevStorage, final int targetLayoutVersion) { - return executor.submit(new Callable() { + return singleThreadExecutor.submit(new Callable() { @Override public Boolean call() throws IOException { return getProxy().canRollBack(journalId, storage, prevStorage, @@ -612,7 +640,7 @@ public class IPCLoggerChannel implements AsyncLogger { @Override public ListenableFuture doRollback() { - return executor.submit(new Callable() { + return singleThreadExecutor.submit(new Callable() { @Override public Void call() throws IOException { getProxy().doRollback(journalId); @@ -623,7 +651,7 @@ public class IPCLoggerChannel implements AsyncLogger { @Override public ListenableFuture discardSegments(final long startTxId) { - return executor.submit(new Callable() { + return singleThreadExecutor.submit(new Callable() { @Override public Void call() throws IOException { getProxy().discardSegments(journalId, startTxId); @@ -634,7 +662,7 @@ public class IPCLoggerChannel implements AsyncLogger { @Override public ListenableFuture getJournalCTime() { - return executor.submit(new Callable() { + return singleThreadExecutor.submit(new Callable() { @Override public Long call() throws IOException { return getProxy().getJournalCTime(journalId); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java index 1ffe6f7def0..b36e547056e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java @@ -651,7 +651,8 @@ public class Journal implements Closeable { } } if (log != null && log.isInProgress()) { - logs.add(new RemoteEditLog(log.getStartTxId(), getHighestWrittenTxId())); + logs.add(new RemoteEditLog(log.getStartTxId(), getHighestWrittenTxId(), + true)); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java index 8e65dd0b548..b7f688dca4d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java @@ -201,6 +201,20 @@ public class BlockPoolSliceStorage extends Storage { writeProperties(bpSdir); } + /** + * Remove storage directories. + * @param storageDirs a set of storage directories to be removed. 
+ */ + void removeVolumes(Set storageDirs) { + for (Iterator it = this.storageDirs.iterator(); + it.hasNext(); ) { + StorageDirectory sd = it.next(); + if (storageDirs.contains(sd.getRoot())) { + it.remove(); + } + } + } + /** * Set layoutVersion, namespaceID and blockpoolID into block pool storage * VERSION file @@ -255,7 +269,14 @@ public class BlockPoolSliceStorage extends Storage { */ private void doTransition(DataNode datanode, StorageDirectory sd, NamespaceInfo nsInfo, StartupOption startOpt) throws IOException { - if (startOpt == StartupOption.ROLLBACK) { + if (startOpt == StartupOption.ROLLBACK && sd.getPreviousDir().exists()) { + // we will already restore everything in the trash by rolling back to + // the previous directory, so we must delete the trash to ensure + // that it's not restored by BPOfferService.signalRollingUpgrade() + if (!FileUtil.fullyDelete(getTrashRootDir(sd))) { + throw new IOException("Unable to delete trash directory prior to " + + "restoration of previous directory: " + getTrashRootDir(sd)); + } doRollback(sd, nsInfo); // rollback if applicable } else { // Restore all the files in the trash. The restored files are retained diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java index afa8bbba481..bfb22331250 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java @@ -825,15 +825,17 @@ class BlockReceiver implements Closeable { LOG.warn("Failed to delete restart meta file: " + restartMeta.getPath()); } + FileWriter out = null; try { - FileWriter out = new FileWriter(restartMeta); + out = new FileWriter(restartMeta); // write out the current time. out.write(Long.toString(Time.now() + restartBudget)); out.flush(); - out.close(); } catch (IOException ioe) { // The worst case is not recovering this RBW replica. // Client will fall back to regular pipeline recovery. 
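The removeVolumes() added to BlockPoolSliceStorage above, and the companions added to DataStorage and FsDatasetImpl further down in this patch, all drop entries from a live list through an explicit Iterator so that removal can happen in place without a ConcurrentModificationException. A standalone illustration of that pattern; StorageDir here is a stand-in type for the example, not the Hadoop class:

import java.io.File;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Set;

public class VolumeRemovalSketch {
  static class StorageDir {
    private final File root;
    StorageDir(File root) { this.root = root; }
    File getRoot() { return root; }
  }

  private final List<StorageDir> storageDirs = new ArrayList<StorageDir>();

  /** Drop every storage directory whose root is in the given set. */
  synchronized void removeVolumes(Set<File> roots) {
    for (Iterator<StorageDir> it = storageDirs.iterator(); it.hasNext(); ) {
      StorageDir sd = it.next();
      if (roots.contains(sd.getRoot())) {
        it.remove();  // safe in-place removal while iterating
      }
    }
  }
}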
+ } finally { + IOUtils.cleanup(LOG, out); } try { // Even if the connection is closed after the ack packet is diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java index df50eabacb7..381062171b0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java @@ -180,6 +180,7 @@ import org.apache.hadoop.util.ServicePlugin; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; import org.apache.hadoop.util.VersionInfo; +import org.apache.hadoop.tracing.SpanReceiverHost; import org.mortbay.util.ajax.JSON; import com.google.common.annotations.VisibleForTesting; @@ -243,10 +244,9 @@ public class DataNode extends Configured LogFactory.getLog(DataNode.class.getName() + ".clienttrace"); private static final String USAGE = - "Usage: java DataNode [-regular | -rollback | -rollingupgrade rollback]\n" + + "Usage: java DataNode [-regular | -rollback]\n" + " -regular : Normal DataNode startup (default).\n" + - " -rollback : Rollback a standard upgrade.\n" + - " -rollingupgrade rollback : Rollback a rolling upgrade operation.\n" + + " -rollback : Rollback a standard or rolling upgrade.\n" + " Refer to HDFS documentation for the difference between standard\n" + " and rolling upgrades."; @@ -326,6 +326,8 @@ public class DataNode extends Configured private boolean isPermissionEnabled; private String dnUserName = null; + private SpanReceiverHost spanReceiverHost; + /** * Create the DataNode given a configuration, an array of dataDirs, * and a namenode proxy @@ -823,6 +825,7 @@ public class DataNode extends Configured this.dataDirs = dataDirs; this.conf = conf; this.dnConf = new DNConf(conf); + this.spanReceiverHost = SpanReceiverHost.getInstance(conf); if (dnConf.maxLockedMemory > 0) { if (!NativeIO.POSIX.getCacheManipulator().verifyCanMlock()) { @@ -1510,6 +1513,9 @@ public class DataNode extends Configured MBeans.unregister(dataNodeInfoBeanName); dataNodeInfoBeanName = null; } + if (this.spanReceiverHost != null) { + this.spanReceiverHost.closeReceivers(); + } if (shortCircuitRegistry != null) shortCircuitRegistry.shutdown(); LOG.info("Shutdown complete."); synchronized(this) { @@ -1738,7 +1744,7 @@ public class DataNode extends Configured + b + " (numBytes=" + b.getNumBytes() + ")" + ", stage=" + stage + ", clientname=" + clientname - + ", targests=" + Arrays.asList(targets)); + + ", targets=" + Arrays.asList(targets)); } this.targets = targets; this.targetStorageTypes = targetStorageTypes; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java index 4b9656eb8e9..ceb2aa07953 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java @@ -336,6 +336,33 @@ public class DataStorage extends Storage { } } + /** + * Remove volumes from DataStorage. + * @param locations a collection of volumes. 
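The BlockReceiver hunk just above moves the close of the restart-meta FileWriter into a finally block, so the descriptor is released even when the write fails; the failure itself is tolerated because the client can fall back to ordinary pipeline recovery. A small sketch of the same pattern, with a plain quiet-close helper standing in for IOUtils.cleanup(LOG, out):

import java.io.Closeable;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;

public class RestartMetaSketch {
  /** Best-effort write of a restart deadline; the writer is always closed. */
  static void writeRestartDeadline(File restartMeta, long deadlineMillis) {
    FileWriter out = null;
    try {
      out = new FileWriter(restartMeta);
      out.write(Long.toString(deadlineMillis));
      out.flush();
    } catch (IOException ioe) {
      // Worst case: the replica is not recovered from this file on restart.
    } finally {
      closeQuietly(out);
    }
  }

  static void closeQuietly(Closeable c) {
    if (c == null) {
      return;
    }
    try {
      c.close();
    } catch (IOException ignored) {
      // nothing more we can do here
    }
  }
}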
+ */ + synchronized void removeVolumes(Collection locations) { + if (locations.isEmpty()) { + return; + } + + Set dataDirs = new HashSet(); + for (StorageLocation sl : locations) { + dataDirs.add(sl.getFile()); + } + + for (BlockPoolSliceStorage bpsStorage : this.bpStorageMap.values()) { + bpsStorage.removeVolumes(dataDirs); + } + + for (Iterator it = this.storageDirs.iterator(); + it.hasNext(); ) { + StorageDirectory sd = it.next(); + if (dataDirs.contains(sd.getRoot())) { + it.remove(); + } + } + } + /** * Analyze storage directories. * Recover from previous transitions if required. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java index a64f9c0d589..0fbfe190869 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java @@ -97,6 +97,9 @@ public interface FsDatasetSpi extends FSDatasetMBean { public void addVolumes(Collection volumes) throws IOException; + /** Removes a collection of volumes from FsDataset. */ + public void removeVolumes(Collection volumes); + /** @return a storage with the given storage ID */ public DatanodeStorage getStorage(final String storageUuid); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java index af467b93f09..57744073c2a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java @@ -340,7 +340,7 @@ class BlockPoolSlice { loadRwr = false; } sc.close(); - if (restartMeta.delete()) { + if (!restartMeta.delete()) { FsDatasetImpl.LOG.warn("Failed to delete restart meta file: " + restartMeta.getPath()); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java index 539e97be4a7..bee7bf70c3e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java @@ -118,6 +118,24 @@ class FsDatasetAsyncDiskService { } addExecutorForVolume(volume); } + + /** + * Stops AsyncDiskService for a volume. + * @param volume the root of the volume. 
+ */ + synchronized void removeVolume(File volume) { + if (executors == null) { + throw new RuntimeException("AsyncDiskService is already shutdown"); + } + ThreadPoolExecutor executor = executors.get(volume); + if (executor == null) { + throw new RuntimeException("Can not find volume " + volume + + " to remove."); + } else { + executor.shutdown(); + executors.remove(volume); + } + } synchronized long countPendingDeletions() { long count = 0; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java index 148055c6f9e..5306be77140 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java @@ -30,9 +30,11 @@ import java.nio.channels.FileChannel; import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; +import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.concurrent.Executor; import javax.management.NotCompliantMBeanException; @@ -314,6 +316,51 @@ class FsDatasetImpl implements FsDatasetSpi { } } + /** + * Removes a collection of volumes from FsDataset. + * @param volumes the root directories of the volumes. + * + * DataNode should call this function before calling + * {@link DataStorage#removeVolumes(java.util.Collection)}. + */ + @Override + public synchronized void removeVolumes(Collection volumes) { + Set volumeSet = new HashSet(); + for (StorageLocation sl : volumes) { + volumeSet.add(sl.getFile()); + } + for (int idx = 0; idx < dataStorage.getNumStorageDirs(); idx++) { + Storage.StorageDirectory sd = dataStorage.getStorageDir(idx); + if (volumeSet.contains(sd.getRoot())) { + String volume = sd.getRoot().toString(); + LOG.info("Removing " + volume + " from FsDataset."); + + this.volumes.removeVolume(volume); + storageMap.remove(sd.getStorageUuid()); + asyncDiskService.removeVolume(sd.getCurrentDir()); + + // Removed all replica information for the blocks on the volume. Unlike + // updating the volumeMap in addVolume(), this operation does not scan + // disks. + for (String bpid : volumeMap.getBlockPoolList()) { + List blocks = new ArrayList(); + for (Iterator it = volumeMap.replicas(bpid).iterator(); + it.hasNext(); ) { + ReplicaInfo block = it.next(); + if (block.getVolume().getBasePath().equals(volume)) { + invalidate(bpid, block.getBlockId()); + blocks.add(block); + it.remove(); + } + } + // Delete blocks from the block scanner in batch. + datanode.getBlockScanner().deleteBlocks(bpid, + blocks.toArray(new Block[blocks.size()])); + } + } + } + } + private StorageType getStorageTypeFromLocations( Collection dataLocations, File dir) { for (StorageLocation dataLocation : dataLocations) { @@ -1294,6 +1341,28 @@ class FsDatasetImpl implements FsDatasetSpi { } } + /** + * Invalidate a block but does not delete the actual on-disk block file. + * + * It should only be used for decommissioning disks. + * + * @param bpid the block pool ID. + * @param blockId the ID of the block. + */ + public void invalidate(String bpid, long blockId) { + // If a DFSClient has the replica in its cache of short-circuit file + // descriptors (and the client is using ShortCircuitShm), invalidate it. 
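FsDatasetAsyncDiskService above keeps one thread pool per volume, so removing a disk only needs to shut down and unmap that volume's executor. A simplified sketch of the add/remove bookkeeping; the fixed pool size and method names are assumptions for the example, not the service's real configuration:

import java.io.File;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class PerVolumeExecutorsSketch {
  private final Map<File, ExecutorService> executors =
      new HashMap<File, ExecutorService>();

  synchronized void addVolume(File volumeRoot) {
    if (executors.containsKey(volumeRoot)) {
      throw new IllegalStateException("Volume already exists: " + volumeRoot);
    }
    executors.put(volumeRoot, Executors.newFixedThreadPool(4));
  }

  synchronized void removeVolume(File volumeRoot) {
    ExecutorService executor = executors.remove(volumeRoot);
    if (executor == null) {
      throw new IllegalStateException("Cannot find volume " + volumeRoot);
    }
    executor.shutdown();  // lets already-queued work for this volume drain
  }

  synchronized void execute(File volumeRoot, Runnable task) {
    ExecutorService executor = executors.get(volumeRoot);
    if (executor == null) {
      throw new IllegalStateException("Cannot find volume " + volumeRoot);
    }
    executor.execute(task);
  }
}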
+ // The short-circuit registry is null in the unit tests, because the + // datanode is mock object. + if (datanode.getShortCircuitRegistry() != null) { + datanode.getShortCircuitRegistry().processBlockInvalidation( + new ExtendedBlockId(blockId, bpid)); + + // If the block is cached, start uncaching it. + cacheManager.uncacheBlock(bpid, blockId); + } + } + /** * Asynchronously attempts to cache a single block via {@link FsDatasetCache}. */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java index d4f8adc0113..90739c3f413 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java @@ -212,6 +212,25 @@ class FsVolumeList { FsDatasetImpl.LOG.info("Added new volume: " + newVolume.toString()); } + /** + * Dynamically remove volume to the list. + * @param volume the volume to be removed. + */ + synchronized void removeVolume(String volume) { + // Make a copy of volumes to remove one volume. + final List volumeList = new ArrayList(volumes); + for (Iterator it = volumeList.iterator(); it.hasNext(); ) { + FsVolumeImpl fsVolume = it.next(); + if (fsVolume.getBasePath().equals(volume)) { + fsVolume.shutdown(); + it.remove(); + volumes = Collections.unmodifiableList(volumeList); + FsDatasetImpl.LOG.info("Removed volume: " + volume); + break; + } + } + } + void addBlockPool(final String bpid, final Configuration conf) throws IOException { long totalStartTime = Time.monotonicNow(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupInputStream.java index 0f6396658f9..36494374cca 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupInputStream.java @@ -147,4 +147,9 @@ class EditLogBackupInputStream extends EditLogInputStream { public void setMaxOpSize(int maxOpSize) { reader.setMaxOpSize(maxOpSize); } + + @Override + public boolean isLocalLog() { + return true; + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java index fa25604d306..974860caf55 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java @@ -506,4 +506,9 @@ public class EditLogFileInputStream extends EditLogInputStream { reader.setMaxOpSize(maxOpSize); } } + + @Override + public boolean isLocalLog() { + return log instanceof FileLog; + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogInputStream.java index 969668d3d64..ac58616592c 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogInputStream.java @@ -203,4 +203,10 @@ public abstract class EditLogInputStream implements Closeable { * Set the maximum opcode size in bytes. */ public abstract void setMaxOpSize(int maxOpSize); + + /** + * Returns true if we are currently reading the log from a local disk or an + * even faster data source (e.g. a byte buffer). + */ + public abstract boolean isLocalLog(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index c7e46586b56..522d2a3488e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -457,7 +457,7 @@ public class FSDirectory implements Closeable { * @see #unprotectedRenameTo(String, String, long, Options.Rename...) */ void renameTo(String src, String dst, long mtime, - Options.Rename... options) + BlocksMapUpdateInfo collectedBlocks, Options.Rename... options) throws FileAlreadyExistsException, FileNotFoundException, ParentNotDirectoryException, QuotaExceededException, UnresolvedLinkException, IOException { @@ -467,7 +467,7 @@ public class FSDirectory implements Closeable { } writeLock(); try { - if (unprotectedRenameTo(src, dst, mtime, options)) { + if (unprotectedRenameTo(src, dst, mtime, collectedBlocks, options)) { namesystem.incrDeletedFileCount(1); } } finally { @@ -574,8 +574,9 @@ public class FSDirectory implements Closeable { /** * Rename src to dst. - * See {@link DistributedFileSystem#rename(Path, Path, Options.Rename...)} - * for details related to rename semantics and exceptions. + *
+ * Note: This is to be used by {@link FSEditLog} only. + *
* * @param src source path * @param dst destination path @@ -583,9 +584,34 @@ public class FSDirectory implements Closeable { * @param options Rename options */ boolean unprotectedRenameTo(String src, String dst, long timestamp, - Options.Rename... options) throws FileAlreadyExistsException, - FileNotFoundException, ParentNotDirectoryException, + Options.Rename... options) throws FileAlreadyExistsException, + FileNotFoundException, ParentNotDirectoryException, QuotaExceededException, UnresolvedLinkException, IOException { + BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo(); + boolean ret = unprotectedRenameTo(src, dst, timestamp, + collectedBlocks, options); + if (!collectedBlocks.getToDeleteList().isEmpty()) { + getFSNamesystem().removeBlocksAndUpdateSafemodeTotal(collectedBlocks); + } + return ret; + } + + /** + * Rename src to dst. + * See {@link DistributedFileSystem#rename(Path, Path, Options.Rename...)} + * for details related to rename semantics and exceptions. + * + * @param src source path + * @param dst destination path + * @param timestamp modification time + * @param collectedBlocks blocks to be removed + * @param options Rename options + */ + boolean unprotectedRenameTo(String src, String dst, long timestamp, + BlocksMapUpdateInfo collectedBlocks, Options.Rename... options) + throws FileAlreadyExistsException, FileNotFoundException, + ParentNotDirectoryException, QuotaExceededException, + UnresolvedLinkException, IOException { assert hasWriteLock(); boolean overwrite = options != null && Arrays.asList(options).contains (Rename.OVERWRITE); @@ -675,7 +701,6 @@ public class FSDirectory implements Closeable { if (removedDst != null) { undoRemoveDst = false; if (removedNum > 0) { - BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo(); List removedINodes = new ChunkedArrayList(); if (!removedDst.isInLatestSnapshot(dstIIP.getLatestSnapshotId())) { removedDst.destroyAndCollectBlocks(collectedBlocks, removedINodes); @@ -685,7 +710,7 @@ public class FSDirectory implements Closeable { dstIIP.getLatestSnapshotId(), collectedBlocks, removedINodes, true).get(Quota.NAMESPACE) >= 0; } - getFSNamesystem().removePathAndBlocks(src, collectedBlocks, + getFSNamesystem().removePathAndBlocks(src, null, removedINodes, false); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java index 7434a06dc6a..c4e05ed666c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java @@ -189,6 +189,13 @@ public class FSEditLog implements LogsPurgeable { */ private final List sharedEditsDirs; + /** + * Take this lock when adding journals to or closing the JournalSet. Allows + * us to ensure that the JournalSet isn't closed or updated underneath us + * in selectInputStreams(). 
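The comment above introduces journalSetLock: as the later FSEditLog hunks show, selectInputStreams() is no longer synchronized on the whole FSEditLog, and instead every path that adds to, closes, or reads the JournalSet takes this one dedicated monitor. A bare-bones sketch of the idea, with a toy Journals holder standing in for the real JournalSet:

import java.util.ArrayList;
import java.util.List;

public class JournalSetLockSketch {
  static class Journals {
    private boolean open = true;
    private final List<String> names = new ArrayList<String>();
    boolean isOpen() { return open; }
    void add(String name) { names.add(name); }
    void close() { open = false; names.clear(); }
    List<String> snapshot() { return new ArrayList<String>(names); }
  }

  private final Object journalsLock = new Object();
  private final Journals journals = new Journals();

  void addJournal(String name) {
    synchronized (journalsLock) {
      journals.add(name);
    }
  }

  void close() {
    synchronized (journalsLock) {
      journals.close();
    }
  }

  List<String> selectInputStreams() {
    synchronized (journalsLock) {
      if (!journals.isOpen()) {
        throw new IllegalStateException(
            "Cannot select streams from a closed journal set");
      }
      return journals.snapshot();
    }
  }
}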
+ */ + private final Object journalSetLock = new Object(); + private static class TransactionId { public long txid; @@ -253,20 +260,22 @@ public class FSEditLog implements LogsPurgeable { DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_MINIMUM_KEY, DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_MINIMUM_DEFAULT); - journalSet = new JournalSet(minimumRedundantJournals); + synchronized(journalSetLock) { + journalSet = new JournalSet(minimumRedundantJournals); - for (URI u : dirs) { - boolean required = FSNamesystem.getRequiredNamespaceEditsDirs(conf) - .contains(u); - if (u.getScheme().equals(NNStorage.LOCAL_URI_SCHEME)) { - StorageDirectory sd = storage.getStorageDirectory(u); - if (sd != null) { - journalSet.add(new FileJournalManager(conf, sd, storage), - required, sharedEditsDirs.contains(u)); + for (URI u : dirs) { + boolean required = FSNamesystem.getRequiredNamespaceEditsDirs(conf) + .contains(u); + if (u.getScheme().equals(NNStorage.LOCAL_URI_SCHEME)) { + StorageDirectory sd = storage.getStorageDirectory(u); + if (sd != null) { + journalSet.add(new FileJournalManager(conf, sd, storage), + required, sharedEditsDirs.contains(u)); + } + } else { + journalSet.add(createJournal(u), required, + sharedEditsDirs.contains(u)); } - } else { - journalSet.add(createJournal(u), required, - sharedEditsDirs.contains(u)); } } @@ -350,7 +359,9 @@ public class FSEditLog implements LogsPurgeable { } finally { if (journalSet != null && !journalSet.isEmpty()) { try { - journalSet.close(); + synchronized(journalSetLock) { + journalSet.close(); + } } catch (IOException ioe) { LOG.warn("Error closing journalSet", ioe); } @@ -607,7 +618,9 @@ public class FSEditLog implements LogsPurgeable { "due to " + e.getMessage() + ". " + "Unsynced transactions: " + (txid - synctxid); LOG.fatal(msg, new Exception()); - IOUtils.cleanup(LOG, journalSet); + synchronized(journalSetLock) { + IOUtils.cleanup(LOG, journalSet); + } terminate(1, msg); } } finally { @@ -631,7 +644,9 @@ public class FSEditLog implements LogsPurgeable { "Could not sync enough journals to persistent storage. " + "Unsynced transactions: " + (txid - synctxid); LOG.fatal(msg, new Exception()); - IOUtils.cleanup(LOG, journalSet); + synchronized(journalSetLock) { + IOUtils.cleanup(LOG, journalSet); + } terminate(1, msg); } } @@ -1311,9 +1326,8 @@ public class FSEditLog implements LogsPurgeable { /** * Return the txid of the last synced transaction. 
- * For test use only */ - synchronized long getSyncTxId() { + public synchronized long getSyncTxId() { return synctxid; } @@ -1350,7 +1364,9 @@ public class FSEditLog implements LogsPurgeable { LOG.info("Registering new backup node: " + bnReg); BackupJournalManager bjm = new BackupJournalManager(bnReg, nnReg); - journalSet.add(bjm, false); + synchronized(journalSetLock) { + journalSet.add(bjm, false); + } } synchronized void releaseBackupStream(NamenodeRegistration registration) @@ -1358,7 +1374,9 @@ public class FSEditLog implements LogsPurgeable { BackupJournalManager bjm = this.findBackupJournal(registration); if (bjm != null) { LOG.info("Removing backup journal " + bjm); - journalSet.remove(bjm); + synchronized(journalSetLock) { + journalSet.remove(bjm); + } } } @@ -1497,11 +1515,16 @@ public class FSEditLog implements LogsPurgeable { * @param recovery recovery context * @param inProgressOk set to true if in-progress streams are OK */ - public synchronized Collection selectInputStreams( + public Collection selectInputStreams( long fromTxId, long toAtLeastTxId, MetaRecoveryContext recovery, boolean inProgressOk) throws IOException { + List streams = new ArrayList(); - selectInputStreams(streams, fromTxId, inProgressOk); + synchronized(journalSetLock) { + Preconditions.checkState(journalSet.isOpen(), "Cannot call " + + "selectInputStreams() on closed FSEditLog"); + selectInputStreams(streams, fromTxId, inProgressOk); + } try { checkForGaps(streams, fromTxId, toAtLeastTxId, inProgressOk); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index b47350d922d..86a08f743ad 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -135,8 +135,6 @@ import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.crypto.CipherSuite; -import org.apache.hadoop.crypto.CryptoCodec; -import org.apache.hadoop.crypto.key.KeyProvider; import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension; import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries; import org.apache.hadoop.fs.CacheFlag; @@ -540,9 +538,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats, private final NNConf nnConf; private KeyProviderCryptoExtension provider = null; - private KeyProvider.Options providerOptions = null; - - private final CryptoCodec codec; private volatile boolean imageLoaded = false; private final Condition cond; @@ -769,8 +764,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats, } else { LOG.info("Found KeyProvider: " + provider.toString()); } - providerOptions = KeyProvider.options(conf); - this.codec = CryptoCodec.getInstance(conf); if (conf.getBoolean(DFS_NAMENODE_AUDIT_LOG_ASYNC_KEY, DFS_NAMENODE_AUDIT_LOG_ASYNC_DEFAULT)) { LOG.info("Enabling async auditlog"); @@ -3684,12 +3677,14 @@ public class FSNamesystem implements Namesystem, FSClusterStats, HdfsFileStatus resultingStat = null; boolean success = false; writeLock(); + BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo(); try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot rename " + src); src = resolvePath(src, 
srcComponents); dst = resolvePath(dst, dstComponents); - renameToInternal(pc, src, dst, cacheEntry != null, options); + renameToInternal(pc, src, dst, cacheEntry != null, + collectedBlocks, options); resultingStat = getAuditFileInfo(dst, false); success = true; } finally { @@ -3697,6 +3692,10 @@ public class FSNamesystem implements Namesystem, FSClusterStats, RetryCache.setState(cacheEntry, success); } getEditLog().logSync(); + if (!collectedBlocks.getToDeleteList().isEmpty()) { + removeBlocks(collectedBlocks); + collectedBlocks.clear(); + } if (resultingStat != null) { StringBuilder cmd = new StringBuilder("rename options="); for (Rename option : options) { @@ -3706,8 +3705,9 @@ public class FSNamesystem implements Namesystem, FSClusterStats, } } - private void renameToInternal(FSPermissionChecker pc, String src, String dst, - boolean logRetryCache, Options.Rename... options) throws IOException { + private void renameToInternal(FSPermissionChecker pc, String src, + String dst, boolean logRetryCache, BlocksMapUpdateInfo collectedBlocks, + Options.Rename... options) throws IOException { assert hasWriteLock(); if (isPermissionEnabled) { // Rename does not operates on link targets @@ -3722,7 +3722,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats, waitForLoadingFSImage(); long mtime = now(); - dir.renameTo(src, dst, mtime, options); + dir.renameTo(src, dst, mtime, collectedBlocks, options); getEditLog().logRename(src, dst, mtime, logRetryCache, options); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java index 362c316cc2c..6001db5ccea 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java @@ -187,17 +187,27 @@ public class FileJournalManager implements JournalManager { List allLogFiles = matchEditLogs(currentDir); List ret = Lists.newArrayListWithCapacity( allLogFiles.size()); - for (EditLogFile elf : allLogFiles) { if (elf.hasCorruptHeader() || (!inProgressOk && elf.isInProgress())) { continue; } + if (elf.isInProgress()) { + try { + elf.validateLog(); + } catch (IOException e) { + LOG.error("got IOException while trying to validate header of " + + elf + ". Skipping.", e); + continue; + } + } if (elf.getFirstTxId() >= firstTxId) { - ret.add(new RemoteEditLog(elf.firstTxId, elf.lastTxId)); + ret.add(new RemoteEditLog(elf.firstTxId, elf.lastTxId, + elf.isInProgress())); } else if (elf.getFirstTxId() < firstTxId && firstTxId <= elf.getLastTxId()) { // If the firstTxId is in the middle of an edit log segment. Return this // anyway and let the caller figure out whether it wants to use it. 
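The rename changes above thread a BlocksMapUpdateInfo through FSDirectory.renameTo() so that blocks of an overwritten destination are only collected while the namesystem write lock is held; FSNamesystem then syncs the edit log and removes the collected blocks after releasing the lock, keeping the expensive deletion out of the critical section. A generic sketch of that collect-now, delete-outside-the-lock pattern, using a plain ReentrantLock and made-up Block/deleteBlock names:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.locks.ReentrantLock;

public class DeferredDeleteSketch {
  static class Block {
    final long id;
    Block(long id) { this.id = id; }
  }

  private final ReentrantLock writeLock = new ReentrantLock();
  private final Map<String, List<Block>> namespace =
      new HashMap<String, List<Block>>();

  void rename(String src, String dst) {
    List<Block> collectedBlocks = new ArrayList<Block>();
    writeLock.lock();
    try {
      // Overwriting dst: remember its blocks, but do not delete them yet.
      List<Block> overwritten = namespace.remove(dst);
      if (overwritten != null) {
        collectedBlocks.addAll(overwritten);
      }
      // Assume src exists for the purposes of the sketch.
      namespace.put(dst, namespace.remove(src));
    } finally {
      writeLock.unlock();
    }
    // ... log and sync the rename here ...
    // Only then do the (potentially large) deletion, outside the lock.
    for (Block b : collectedBlocks) {
      deleteBlock(b);
    }
  }

  private void deleteBlock(Block b) {
    // stand-in for scheduling the block for deletion on the DataNodes
  }
}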
- ret.add(new RemoteEditLog(elf.firstTxId, elf.lastTxId)); + ret.add(new RemoteEditLog(elf.firstTxId, elf.lastTxId, + elf.isInProgress())); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/InotifyFSEditLogOpTranslator.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/InotifyFSEditLogOpTranslator.java new file mode 100644 index 00000000000..676f8874cf0 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/InotifyFSEditLogOpTranslator.java @@ -0,0 +1,146 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdfs.server.namenode; + +import com.google.common.collect.Lists; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hdfs.inotify.Event; +import org.apache.hadoop.hdfs.protocol.Block; + +import java.util.List; + +/** + * Translates from edit log ops to inotify events. + */ +@InterfaceAudience.Private +public class InotifyFSEditLogOpTranslator { + + private static long getSize(FSEditLogOp.AddCloseOp acOp) { + long size = 0; + for (Block b : acOp.getBlocks()) { + size += b.getNumBytes(); + } + return size; + } + + public static Event[] translate(FSEditLogOp op) { + switch(op.opCode) { + case OP_ADD: + FSEditLogOp.AddOp addOp = (FSEditLogOp.AddOp) op; + if (addOp.blocks.length == 0) { // create + return new Event[] { new Event.CreateEvent.Builder().path(addOp.path) + .ctime(addOp.atime) + .replication(addOp.replication) + .ownerName(addOp.permissions.getUserName()) + .groupName(addOp.permissions.getGroupName()) + .perms(addOp.permissions.getPermission()) + .iNodeType(Event.CreateEvent.INodeType.FILE).build() }; + } else { + return new Event[] { new Event.AppendEvent(addOp.path) }; + } + case OP_CLOSE: + FSEditLogOp.CloseOp cOp = (FSEditLogOp.CloseOp) op; + return new Event[] { + new Event.CloseEvent(cOp.path, getSize(cOp), cOp.mtime) }; + case OP_SET_REPLICATION: + FSEditLogOp.SetReplicationOp setRepOp = (FSEditLogOp.SetReplicationOp) op; + return new Event[] { new Event.MetadataUpdateEvent.Builder() + .metadataType(Event.MetadataUpdateEvent.MetadataType.REPLICATION) + .path(setRepOp.path) + .replication(setRepOp.replication).build() }; + case OP_CONCAT_DELETE: + FSEditLogOp.ConcatDeleteOp cdOp = (FSEditLogOp.ConcatDeleteOp) op; + List events = Lists.newArrayList(); + events.add(new Event.AppendEvent(cdOp.trg)); + for (String src : cdOp.srcs) { + events.add(new Event.UnlinkEvent(src, cdOp.timestamp)); + } + events.add(new Event.CloseEvent(cdOp.trg, -1, cdOp.timestamp)); + return events.toArray(new Event[0]); + case OP_RENAME_OLD: + FSEditLogOp.RenameOldOp rnOpOld = (FSEditLogOp.RenameOldOp) op; + return new Event[] { + new 
Event.RenameEvent(rnOpOld.src, rnOpOld.dst, rnOpOld.timestamp) }; + case OP_RENAME: + FSEditLogOp.RenameOp rnOp = (FSEditLogOp.RenameOp) op; + return new Event[] { + new Event.RenameEvent(rnOp.src, rnOp.dst, rnOp.timestamp) }; + case OP_DELETE: + FSEditLogOp.DeleteOp delOp = (FSEditLogOp.DeleteOp) op; + return new Event[] { new Event.UnlinkEvent(delOp.path, delOp.timestamp) }; + case OP_MKDIR: + FSEditLogOp.MkdirOp mkOp = (FSEditLogOp.MkdirOp) op; + return new Event[] { new Event.CreateEvent.Builder().path(mkOp.path) + .ctime(mkOp.timestamp) + .ownerName(mkOp.permissions.getUserName()) + .groupName(mkOp.permissions.getGroupName()) + .perms(mkOp.permissions.getPermission()) + .iNodeType(Event.CreateEvent.INodeType.DIRECTORY).build() }; + case OP_SET_PERMISSIONS: + FSEditLogOp.SetPermissionsOp permOp = (FSEditLogOp.SetPermissionsOp) op; + return new Event[] { new Event.MetadataUpdateEvent.Builder() + .metadataType(Event.MetadataUpdateEvent.MetadataType.PERMS) + .path(permOp.src) + .perms(permOp.permissions).build() }; + case OP_SET_OWNER: + FSEditLogOp.SetOwnerOp ownOp = (FSEditLogOp.SetOwnerOp) op; + return new Event[] { new Event.MetadataUpdateEvent.Builder() + .metadataType(Event.MetadataUpdateEvent.MetadataType.OWNER) + .path(ownOp.src) + .ownerName(ownOp.username).groupName(ownOp.groupname).build() }; + case OP_TIMES: + FSEditLogOp.TimesOp timesOp = (FSEditLogOp.TimesOp) op; + return new Event[] { new Event.MetadataUpdateEvent.Builder() + .metadataType(Event.MetadataUpdateEvent.MetadataType.TIMES) + .path(timesOp.path) + .atime(timesOp.atime).mtime(timesOp.mtime).build() }; + case OP_SYMLINK: + FSEditLogOp.SymlinkOp symOp = (FSEditLogOp.SymlinkOp) op; + return new Event[] { new Event.CreateEvent.Builder().path(symOp.path) + .ctime(symOp.atime) + .ownerName(symOp.permissionStatus.getUserName()) + .groupName(symOp.permissionStatus.getGroupName()) + .perms(symOp.permissionStatus.getPermission()) + .symlinkTarget(symOp.value) + .iNodeType(Event.CreateEvent.INodeType.SYMLINK).build() }; + case OP_REMOVE_XATTR: + FSEditLogOp.RemoveXAttrOp rxOp = (FSEditLogOp.RemoveXAttrOp) op; + return new Event[] { new Event.MetadataUpdateEvent.Builder() + .metadataType(Event.MetadataUpdateEvent.MetadataType.XATTRS) + .path(rxOp.src) + .xAttrs(rxOp.xAttrs) + .xAttrsRemoved(true).build() }; + case OP_SET_XATTR: + FSEditLogOp.SetXAttrOp sxOp = (FSEditLogOp.SetXAttrOp) op; + return new Event[] { new Event.MetadataUpdateEvent.Builder() + .metadataType(Event.MetadataUpdateEvent.MetadataType.XATTRS) + .path(sxOp.src) + .xAttrs(sxOp.xAttrs) + .xAttrsRemoved(false).build() }; + case OP_SET_ACL: + FSEditLogOp.SetAclOp saOp = (FSEditLogOp.SetAclOp) op; + return new Event[] { new Event.MetadataUpdateEvent.Builder() + .metadataType(Event.MetadataUpdateEvent.MetadataType.ACLS) + .path(saOp.src) + .acls(saOp.aclEntries).build() }; + default: + return null; + } + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java index 4e5bc666745..667b2e01ce8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java @@ -56,6 +56,17 @@ import com.google.common.collect.Sets; public class JournalSet implements JournalManager { static final Log LOG = LogFactory.getLog(FSEditLog.class); + + private static final 
Comparator + LOCAL_LOG_PREFERENCE_COMPARATOR = new Comparator() { + @Override + public int compare(EditLogInputStream elis1, EditLogInputStream elis2) { + // we want local logs to be ordered earlier in the collection, and true + // is considered larger than false, so we want to invert the booleans here + return ComparisonChain.start().compare(!elis1.isLocalLog(), + !elis2.isLocalLog()).result(); + } + }; static final public Comparator EDIT_LOG_INPUT_STREAM_COMPARATOR = new Comparator() { @@ -180,6 +191,8 @@ public class JournalSet implements JournalManager { private final List journals = new CopyOnWriteArrayList(); final int minimumRedundantJournals; + + private boolean closed; JournalSet(int minimumRedundantResources) { this.minimumRedundantJournals = minimumRedundantResources; @@ -233,6 +246,11 @@ public class JournalSet implements JournalManager { jas.close(); } }, "close journal"); + closed = true; + } + + public boolean isOpen() { + return !closed; } /** @@ -281,10 +299,25 @@ public class JournalSet implements JournalManager { if (acc.isEmpty()) { acc.add(elis); } else { - long accFirstTxId = acc.get(0).getFirstTxId(); + EditLogInputStream accFirst = acc.get(0); + long accFirstTxId = accFirst.getFirstTxId(); if (accFirstTxId == elis.getFirstTxId()) { - acc.add(elis); + // if we have a finalized log segment available at this txid, + // we should throw out all in-progress segments at this txid + if (elis.isInProgress()) { + if (accFirst.isInProgress()) { + acc.add(elis); + } + } else { + if (accFirst.isInProgress()) { + acc.clear(); + } + acc.add(elis); + } } else if (accFirstTxId < elis.getFirstTxId()) { + // try to read from the local logs first since the throughput should + // be higher + Collections.sort(acc, LOCAL_LOG_PREFERENCE_COMPARATOR); outStreams.add(new RedundantEditLogInputStream(acc, fromTxId)); acc.clear(); acc.add(elis); @@ -296,6 +329,7 @@ public class JournalSet implements JournalManager { } } if (!acc.isEmpty()) { + Collections.sort(acc, LOCAL_LOG_PREFERENCE_COMPARATOR); outStreams.add(new RedundantEditLogInputStream(acc, fromTxId)); acc.clear(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java index 4072b1720d7..bcb5a8697d1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java @@ -60,6 +60,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol; import org.apache.hadoop.ipc.RefreshCallQueueProtocol; import org.apache.hadoop.tools.GetUserMappingsProtocol; +import org.apache.hadoop.tracing.SpanReceiverHost; import org.apache.hadoop.util.ExitUtil.ExitException; import org.apache.hadoop.util.GenericOptionsParser; import org.apache.hadoop.util.JvmPauseMonitor; @@ -278,6 +279,7 @@ public class NameNode implements NameNodeStatusMXBean { private JvmPauseMonitor pauseMonitor; private ObjectName nameNodeStatusBeanName; + private SpanReceiverHost spanReceiverHost; /** * The namenode address that clients will use to access this namenode * or the name service. 
For HA configurations using logical URI, it @@ -586,6 +588,9 @@ public class NameNode implements NameNodeStatusMXBean { if (NamenodeRole.NAMENODE == role) { startHttpServer(conf); } + + this.spanReceiverHost = SpanReceiverHost.getInstance(conf); + loadNamesystem(conf); rpcServer = createRpcServer(conf); @@ -822,6 +827,9 @@ public class NameNode implements NameNodeStatusMXBean { MBeans.unregister(nameNodeStatusBeanName); nameNodeStatusBeanName = null; } + if (this.spanReceiverHost != null) { + this.spanReceiverHost.closeReceivers(); + } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index b55cec11990..ea43e86cffb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -34,6 +34,7 @@ import java.util.HashSet; import java.util.List; import java.util.Set; +import com.google.common.collect.Lists; import org.apache.commons.logging.Log; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.conf.Configuration; @@ -66,6 +67,8 @@ import org.apache.hadoop.ha.protocolPB.HAServiceProtocolServerSideTranslatorPB; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.HDFSPolicyProvider; +import org.apache.hadoop.hdfs.inotify.Event; +import org.apache.hadoop.hdfs.inotify.EventsList; import org.apache.hadoop.hdfs.protocol.AclException; import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException; import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; @@ -1079,6 +1082,7 @@ class NameNodeRpcServer implements NamenodeProtocols { } if (nn.getFSImage().isUpgradeFinalized() && + !namesystem.isRollingUpgrade() && !nn.isStandbyState() && noStaleStorages) { return new FinalizeCommand(poolId); @@ -1476,5 +1480,116 @@ class NameNodeRpcServer implements NamenodeProtocols { public void checkAccess(String path, FsAction mode) throws IOException { namesystem.checkAccess(path, mode); } + + @Override // ClientProtocol + public long getCurrentEditLogTxid() throws IOException { + namesystem.checkOperation(OperationCategory.READ); // only active + namesystem.checkSuperuserPrivilege(); + // if it's not yet open for write, we may be in the process of transitioning + // from standby to active and may not yet know what the latest committed + // txid is + return namesystem.getEditLog().isOpenForWrite() ? + namesystem.getEditLog().getLastWrittenTxId() : -1; + } + + private static FSEditLogOp readOp(EditLogInputStream elis) + throws IOException { + try { + return elis.readOp(); + // we can get the below two exceptions if a segment is deleted + // (because we have accumulated too many edits) or (for the local journal/ + // no-QJM case only) if a in-progress segment is finalized under us ... 
+ // no need to throw an exception back to the client in this case + } catch (FileNotFoundException e) { + LOG.debug("Tried to read from deleted or moved edit log segment", e); + return null; + } catch (TransferFsImage.HttpGetFailedException e) { + LOG.debug("Tried to read from deleted edit log segment", e); + return null; + } + } + + @Override // ClientProtocol + public EventsList getEditsFromTxid(long txid) throws IOException { + namesystem.checkOperation(OperationCategory.READ); // only active + namesystem.checkSuperuserPrivilege(); + int maxEventsPerRPC = nn.conf.getInt( + DFSConfigKeys.DFS_NAMENODE_INOTIFY_MAX_EVENTS_PER_RPC_KEY, + DFSConfigKeys.DFS_NAMENODE_INOTIFY_MAX_EVENTS_PER_RPC_DEFAULT); + FSEditLog log = namesystem.getFSImage().getEditLog(); + long syncTxid = log.getSyncTxId(); + // If we haven't synced anything yet, we can only read finalized + // segments since we can't reliably determine which txns in in-progress + // segments have actually been committed (e.g. written to a quorum of JNs). + // If we have synced txns, we can definitely read up to syncTxid since + // syncTxid is only updated after a transaction is committed to all + // journals. (In-progress segments written by old writers are already + // discarded for us, so if we read any in-progress segments they are + // guaranteed to have been written by this NameNode.) + boolean readInProgress = syncTxid > 0; + + List events = Lists.newArrayList(); + long maxSeenTxid = -1; + long firstSeenTxid = -1; + + if (syncTxid > 0 && txid > syncTxid) { + // we can't read past syncTxid, so there's no point in going any further + return new EventsList(events, firstSeenTxid, maxSeenTxid, syncTxid); + } + + Collection streams = null; + try { + streams = log.selectInputStreams(txid, 0, null, readInProgress); + } catch (IllegalStateException e) { // can happen if we have + // transitioned out of active and haven't yet transitioned to standby + // and are using QJM -- the edit log will be closed and this exception + // will result + LOG.info("NN is transitioning from active to standby and FSEditLog " + + "is closed -- could not read edits"); + return new EventsList(events, firstSeenTxid, maxSeenTxid, syncTxid); + } + + boolean breakOuter = false; + for (EditLogInputStream elis : streams) { + // our assumption in this code is the EditLogInputStreams are ordered by + // starting txid + try { + FSEditLogOp op = null; + while ((op = readOp(elis)) != null) { + // break out of here in the unlikely event that syncTxid is so + // out of date that its segment has already been deleted, so the first + // txid we get is greater than syncTxid + if (syncTxid > 0 && op.getTransactionId() > syncTxid) { + breakOuter = true; + break; + } + + Event[] eventsFromOp = InotifyFSEditLogOpTranslator.translate(op); + if (eventsFromOp != null) { + events.addAll(Arrays.asList(eventsFromOp)); + } + if (op.getTransactionId() > maxSeenTxid) { + maxSeenTxid = op.getTransactionId(); + } + if (firstSeenTxid == -1) { + firstSeenTxid = op.getTransactionId(); + } + if (events.size() >= maxEventsPerRPC || (syncTxid > 0 && + op.getTransactionId() == syncTxid)) { + // we're done + breakOuter = true; + break; + } + } + } finally { + elis.close(); + } + if (breakOuter) { + break; + } + } + + return new EventsList(events, firstSeenTxid, maxSeenTxid, syncTxid); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RedundantEditLogInputStream.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RedundantEditLogInputStream.java index 7c642c06961..674a9574499 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RedundantEditLogInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RedundantEditLogInputStream.java @@ -279,4 +279,9 @@ class RedundantEditLogInputStream extends EditLogInputStream { elis.setMaxOpSize(maxOpSize); } } + + @Override + public boolean isLocalLog() { + return streams[curIdx].isLocalLog(); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java index 242c7d75461..160371a646e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java @@ -63,7 +63,7 @@ import org.apache.http.client.utils.URIBuilder; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.Lists; - +import org.mortbay.jetty.EofException; /** * This class provides fetching a specified file from the NameNode. @@ -370,6 +370,9 @@ public class TransferFsImage { throttler.throttle(num, canceler); } } + } catch (EofException e) { + LOG.info("Connection closed by client"); + out = null; // so we don't close in the finally } finally { if (out != null) { out.close(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DomainSocketFactory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DomainSocketFactory.java index e067de7b4ad..5fd31a920cc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DomainSocketFactory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DomainSocketFactory.java @@ -33,6 +33,7 @@ import org.apache.hadoop.net.unix.DomainSocket; import com.google.common.base.Preconditions; import com.google.common.cache.Cache; import com.google.common.cache.CacheBuilder; +import org.apache.hadoop.util.PerformanceAdvisory; public class DomainSocketFactory { private static final Log LOG = LogFactory.getLog(DomainSocketFactory.class); @@ -105,7 +106,8 @@ public class DomainSocketFactory { } if (feature == null) { - LOG.debug("Both short-circuit local reads and UNIX domain socket are disabled."); + PerformanceAdvisory.LOG.debug( + "Both short-circuit local reads and UNIX domain socket are disabled."); } else { if (conf.getDomainSocketPath().isEmpty()) { throw new HadoopIllegalArgumentException(feature + " is enabled but " diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto index 670b83ff7e1..082e5bd8145 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto @@ -33,6 +33,7 @@ import "hdfs.proto"; import "acl.proto"; import "xattr.proto"; import "encryption.proto"; +import "inotify.proto"; /** * The ClientNamenodeProtocol Service defines the interface between a client @@ -672,6 +673,21 @@ message CheckAccessRequestProto { message CheckAccessResponseProto { // 
void response } +message GetCurrentEditLogTxidRequestProto { +} + +message GetCurrentEditLogTxidResponseProto { + required int64 txid = 1; +} + +message GetEditsFromTxidRequestProto { + required int64 txid = 1; +} + +message GetEditsFromTxidResponseProto { + required EventsListProto eventsList = 1; +} + service ClientNamenodeProtocol { rpc getBlockLocations(GetBlockLocationsRequestProto) returns(GetBlockLocationsResponseProto); @@ -811,4 +827,8 @@ service ClientNamenodeProtocol { returns(ListEncryptionZonesResponseProto); rpc getEZForPath(GetEZForPathRequestProto) returns(GetEZForPathResponseProto); + rpc getCurrentEditLogTxid(GetCurrentEditLogTxidRequestProto) + returns(GetCurrentEditLogTxidResponseProto); + rpc getEditsFromTxid(GetEditsFromTxidRequestProto) + returns(GetEditsFromTxidResponseProto); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/inotify.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/inotify.proto new file mode 100644 index 00000000000..b58bfcc3b5f --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/inotify.proto @@ -0,0 +1,117 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * These .proto interfaces are private and stable. + * Please see http://wiki.apache.org/hadoop/Compatibility + * for what changes are allowed for a *stable* .proto interface. + */ + +// This file contains protocol buffers used to communicate edits to clients +// as part of the inotify system. 
+ +option java_package = "org.apache.hadoop.hdfs.protocol.proto"; +option java_outer_classname = "InotifyProtos"; +option java_generate_equals_and_hash = true; +package hadoop.hdfs; + +import "acl.proto"; +import "xattr.proto"; +import "hdfs.proto"; + +enum EventType { + EVENT_CREATE = 0x0; + EVENT_CLOSE = 0x1; + EVENT_APPEND = 0x2; + EVENT_RENAME = 0x3; + EVENT_METADATA = 0x4; + EVENT_UNLINK = 0x5; +} + +message EventProto { + required EventType type = 1; + required bytes contents = 2; +} + +enum INodeType { + I_TYPE_FILE = 0x0; + I_TYPE_DIRECTORY = 0x1; + I_TYPE_SYMLINK = 0x2; +} + +enum MetadataUpdateType { + META_TYPE_TIMES = 0x0; + META_TYPE_REPLICATION = 0x1; + META_TYPE_OWNER = 0x2; + META_TYPE_PERMS = 0x3; + META_TYPE_ACLS = 0x4; + META_TYPE_XATTRS = 0x5; +} + +message CreateEventProto { + required INodeType type = 1; + required string path = 2; + required int64 ctime = 3; + required string ownerName = 4; + required string groupName = 5; + required FsPermissionProto perms = 6; + optional int32 replication = 7; + optional string symlinkTarget = 8; +} + +message CloseEventProto { + required string path = 1; + required int64 fileSize = 2; + required int64 timestamp = 3; +} + +message AppendEventProto { + required string path = 1; +} + +message RenameEventProto { + required string srcPath = 1; + required string destPath = 2; + required int64 timestamp = 3; +} + +message MetadataUpdateEventProto { + required string path = 1; + required MetadataUpdateType type = 2; + optional int64 mtime = 3; + optional int64 atime = 4; + optional int32 replication = 5; + optional string ownerName = 6; + optional string groupName = 7; + optional FsPermissionProto perms = 8; + repeated AclEntryProto acls = 9; + repeated XAttrProto xAttrs = 10; + optional bool xAttrsRemoved = 11; +} + +message UnlinkEventProto { + required string path = 1; + required int64 timestamp = 2; +} + +message EventsListProto { + repeated EventProto events = 1; + required int64 firstTxid = 2; + required int64 lastTxid = 3; + required int64 syncTxid = 4; +} \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml index efbfc376d75..2cd547c50e7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml @@ -524,6 +524,28 @@ + + dfs.client.block.write.replace-datanode-on-failure.best-effort + false + + This property is used only if the value of + dfs.client.block.write.replace-datanode-on-failure.enable is true. + + Best effort means that the client will try to replace a failed datanode + in write pipeline (provided that the policy is satisfied), however, it + continues the write operation in case that the datanode replacement also + fails. + + Suppose the datanode replacement fails. + false: An exception should be thrown so that the write will fail. + true : The write should be resumed with the remaining datandoes. + + Note that setting this property to true allows writing to a pipeline + with a smaller number of datanodes. As a result, it increases the + probability of data loss. + + + dfs.blockreport.intervalMsec 21600000 @@ -2067,4 +2089,14 @@ + + dfs.namenode.inotify.max.events.per.rpc + 1000 + Maximum number of events that will be sent to an inotify client + in a single RPC response. 
The default value attempts to amortize away + the overhead for this RPC while avoiding huge memory requirements for the + client and NameNode (1000 events should consume no more than 1 MB.) + + + diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsRollingUpgrade.xml b/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsRollingUpgrade.xml index c369f3bd735..61d7d067f91 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsRollingUpgrade.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsRollingUpgrade.xml @@ -206,7 +206,7 @@

  • Restore the pre-upgrade release in all machines.
  • Start NNs with the "-rollingUpgrade rollback" option.
- • Start DNs normally.
+ • Start DNs with the "-rollback" option.
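The hunks above add the server side of HDFS inotify: the getCurrentEditLogTxid and getEditsFromTxid RPCs in NameNodeRpcServer, the inotify.proto wire format, and the dfs.namenode.inotify.max.events.per.rpc batching limit. TestDFSInotifyEventInputStream, later in this patch, drives the feature through DFSClient#getInotifyEventStream(). The sketch below is not part of the patch; it is a minimal client-side illustration that mirrors the calls used in that test. The class name, NameNode address, and printed messages are placeholders.

import java.net.InetSocketAddress;
import java.util.concurrent.TimeUnit;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSInotifyEventInputStream;
import org.apache.hadoop.hdfs.inotify.Event;

public class InotifyTail {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder address; the tests obtain it from
    // cluster.getDfsCluster().getNameNode(0).getNameNodeAddress().
    // This DFSClient constructor is deprecated; the test suppresses the warning.
    DFSClient client = new DFSClient(
        new InetSocketAddress("namenode.example.com", 8020), conf);
    DFSInotifyEventInputStream eis = client.getInotifyEventStream();
    while (true) {
      // Blocking poll with a timeout, as in testReadEventsWithTimeout().
      Event event = eis.poll(5, TimeUnit.SECONDS);
      if (event == null) {
        continue; // no new committed transactions in the edit log yet
      }
      switch (event.getEventType()) {
      case CREATE:
        System.out.println("created " + ((Event.CreateEvent) event).getPath());
        break;
      case UNLINK:
        System.out.println("deleted " + ((Event.UnlinkEvent) event).getPath());
        break;
      default:
        System.out.println("event: " + event.getEventType());
        break;
      }
    }
  }
}

As in testBasic(), eis.getEventsBehindEstimate() gives a rough count of how far the reader lags behind the active NameNode, and each RPC response is capped at dfs.namenode.inotify.max.events.per.rpc events, which is the memory-versus-overhead trade-off the new hdfs-default.xml entry describes.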
  • diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java index dccc581d689..50e2e5b0a34 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java @@ -37,7 +37,8 @@ import org.junit.*; public class TestGlobPaths { private static final UserGroupInformation unprivilegedUser = - UserGroupInformation.createRemoteUser("myuser"); + UserGroupInformation.createUserForTesting("myuser", + new String[] { "mygroup" }); static class RegexPathFilter implements PathFilter { @@ -55,9 +56,9 @@ public class TestGlobPaths { static private MiniDFSCluster dfsCluster; static private FileSystem fs; - static private FileSystem unprivilegedFs; + static private FileSystem privilegedFs; static private FileContext fc; - static private FileContext unprivilegedFc; + static private FileContext privilegedFc; static final private int NUM_OF_PATHS = 4; static private String USER_DIR; private final Path[] path = new Path[NUM_OF_PATHS]; @@ -66,22 +67,15 @@ public class TestGlobPaths { public static void setUp() throws Exception { final Configuration conf = new HdfsConfiguration(); dfsCluster = new MiniDFSCluster.Builder(conf).build(); + + privilegedFs = FileSystem.get(conf); + privilegedFc = FileContext.getFileContext(conf); + // allow unpriviledged user ability to create paths + privilegedFs.setPermission(new Path("/"), + FsPermission.createImmutable((short)0777)); + UserGroupInformation.setLoginUser(unprivilegedUser); fs = FileSystem.get(conf); - unprivilegedFs = - unprivilegedUser.doAs(new PrivilegedExceptionAction() { - @Override - public FileSystem run() throws IOException { - return FileSystem.get(conf); - } - }); fc = FileContext.getFileContext(conf); - unprivilegedFc = - unprivilegedUser.doAs(new PrivilegedExceptionAction() { - @Override - public FileContext run() throws IOException { - return FileContext.getFileContext(conf); - } - }); USER_DIR = fs.getHomeDirectory().toUri().getPath().toString(); } @@ -443,8 +437,8 @@ public class TestGlobPaths { String[] files = new String[] { USER_DIR + "/a", USER_DIR + "/a/b" }; Path[] matchedPath = prepareTesting(USER_DIR + "/*/*", files, new RegexPathFilter("^.*" + Pattern.quote(USER_DIR) + "/a/b")); - assertEquals(matchedPath.length, 1); - assertEquals(matchedPath[0], path[1]); + assertEquals(1, matchedPath.length); + assertEquals(path[1], matchedPath[0]); } finally { cleanupDFS(); } @@ -793,9 +787,21 @@ public class TestGlobPaths { /** * A glob test that can be run on either FileContext or FileSystem. 
*/ - private static interface FSTestWrapperGlobTest { - void run(FSTestWrapper wrap, FSTestWrapper unprivilegedWrapper, - FileSystem fs, FileContext fc) throws Exception; + private abstract class FSTestWrapperGlobTest { + FSTestWrapperGlobTest(boolean useFc) { + if (useFc) { + this.privWrap = new FileContextTestWrapper(privilegedFc); + this.wrap = new FileContextTestWrapper(fc); + } else { + this.privWrap = new FileSystemTestWrapper(privilegedFs); + this.wrap = new FileSystemTestWrapper(fs); + } + } + + abstract void run() throws Exception; + + final FSTestWrapper privWrap; + final FSTestWrapper wrap; } /** @@ -804,8 +810,7 @@ public class TestGlobPaths { private void testOnFileSystem(FSTestWrapperGlobTest test) throws Exception { try { fc.mkdir(new Path(USER_DIR), FsPermission.getDefault(), true); - test.run(new FileSystemTestWrapper(fs), - new FileSystemTestWrapper(unprivilegedFs), fs, null); + test.run(); } finally { fc.delete(new Path(USER_DIR), true); } @@ -817,8 +822,7 @@ public class TestGlobPaths { private void testOnFileContext(FSTestWrapperGlobTest test) throws Exception { try { fs.mkdirs(new Path(USER_DIR)); - test.run(new FileContextTestWrapper(fc), - new FileContextTestWrapper(unprivilegedFc), null, fc); + test.run(); } finally { cleanupDFS(); } @@ -850,9 +854,12 @@ public class TestGlobPaths { /** * Test globbing through symlinks. */ - private static class TestGlobWithSymlinks implements FSTestWrapperGlobTest { - public void run(FSTestWrapper wrap, FSTestWrapper unprivilegedWrap, - FileSystem fs, FileContext fc) throws Exception { + private class TestGlobWithSymlinks extends FSTestWrapperGlobTest { + TestGlobWithSymlinks(boolean useFc) { + super(useFc); + } + + void run() throws Exception { // Test that globbing through a symlink to a directory yields a path // containing that symlink. wrap.mkdir(new Path(USER_DIR + "/alpha"), FsPermission.getDirDefault(), @@ -889,13 +896,13 @@ public class TestGlobPaths { @Ignore @Test public void testGlobWithSymlinksOnFS() throws Exception { - testOnFileSystem(new TestGlobWithSymlinks()); + testOnFileSystem(new TestGlobWithSymlinks(false)); } @Ignore @Test public void testGlobWithSymlinksOnFC() throws Exception { - testOnFileContext(new TestGlobWithSymlinks()); + testOnFileContext(new TestGlobWithSymlinks(true)); } /** @@ -903,10 +910,13 @@ public class TestGlobPaths { * * Also test globbing dangling symlinks. It should NOT throw any exceptions! 
*/ - private static class TestGlobWithSymlinksToSymlinks implements + private class TestGlobWithSymlinksToSymlinks extends FSTestWrapperGlobTest { - public void run(FSTestWrapper wrap, FSTestWrapper unprivilegedWrap, - FileSystem fs, FileContext fc) throws Exception { + TestGlobWithSymlinksToSymlinks(boolean useFc) { + super(useFc); + } + + void run() throws Exception { // Test that globbing through a symlink to a symlink to a directory // fully resolves wrap.mkdir(new Path(USER_DIR + "/alpha"), FsPermission.getDirDefault(), @@ -968,22 +978,25 @@ public class TestGlobPaths { @Ignore @Test public void testGlobWithSymlinksToSymlinksOnFS() throws Exception { - testOnFileSystem(new TestGlobWithSymlinksToSymlinks()); + testOnFileSystem(new TestGlobWithSymlinksToSymlinks(false)); } @Ignore @Test public void testGlobWithSymlinksToSymlinksOnFC() throws Exception { - testOnFileContext(new TestGlobWithSymlinksToSymlinks()); + testOnFileContext(new TestGlobWithSymlinksToSymlinks(true)); } /** * Test globbing symlinks with a custom PathFilter */ - private static class TestGlobSymlinksWithCustomPathFilter implements + private class TestGlobSymlinksWithCustomPathFilter extends FSTestWrapperGlobTest { - public void run(FSTestWrapper wrap, FSTestWrapper unprivilegedWrap, - FileSystem fs, FileContext fc) throws Exception { + TestGlobSymlinksWithCustomPathFilter(boolean useFc) { + super(useFc); + } + + void run() throws Exception { // Test that globbing through a symlink to a symlink to a directory // fully resolves wrap.mkdir(new Path(USER_DIR + "/alpha"), FsPermission.getDirDefault(), @@ -1019,21 +1032,24 @@ public class TestGlobPaths { @Ignore @Test public void testGlobSymlinksWithCustomPathFilterOnFS() throws Exception { - testOnFileSystem(new TestGlobSymlinksWithCustomPathFilter()); + testOnFileSystem(new TestGlobSymlinksWithCustomPathFilter(false)); } @Ignore @Test public void testGlobSymlinksWithCustomPathFilterOnFC() throws Exception { - testOnFileContext(new TestGlobSymlinksWithCustomPathFilter()); + testOnFileContext(new TestGlobSymlinksWithCustomPathFilter(true)); } /** * Test that globStatus fills in the scheme even when it is not provided. */ - private static class TestGlobFillsInScheme implements FSTestWrapperGlobTest { - public void run(FSTestWrapper wrap, FSTestWrapper unprivilegedWrap, - FileSystem fs, FileContext fc) throws Exception { + private class TestGlobFillsInScheme extends FSTestWrapperGlobTest { + TestGlobFillsInScheme(boolean useFc) { + super(useFc); + } + + void run() throws Exception { // Verify that the default scheme is hdfs, when we don't supply one. wrap.mkdir(new Path(USER_DIR + "/alpha"), FsPermission.getDirDefault(), false); @@ -1045,38 +1061,40 @@ public class TestGlobPaths { Path path = statuses[0].getPath(); Assert.assertEquals(USER_DIR + "/alpha", path.toUri().getPath()); Assert.assertEquals("hdfs", path.toUri().getScheme()); - if (fc != null) { - // If we're using FileContext, then we can list a file:/// URI. - // Since everyone should have the root directory, we list that. - statuses = wrap.globStatus(new Path("file:///"), - new AcceptAllPathFilter()); - Assert.assertEquals(1, statuses.length); - Path filePath = statuses[0].getPath(); - Assert.assertEquals("file", filePath.toUri().getScheme()); - Assert.assertEquals("/", filePath.toUri().getPath()); - } else { - // The FileSystem we passed in should have scheme 'hdfs' - Assert.assertEquals("hdfs", fs.getScheme()); - } + + // FileContext can list a file:/// URI. 
+ // Since everyone should have the root directory, we list that. + statuses = fc.util().globStatus(new Path("file:///"), + new AcceptAllPathFilter()); + Assert.assertEquals(1, statuses.length); + Path filePath = statuses[0].getPath(); + Assert.assertEquals("file", filePath.toUri().getScheme()); + Assert.assertEquals("/", filePath.toUri().getPath()); + + // The FileSystem should have scheme 'hdfs' + Assert.assertEquals("hdfs", fs.getScheme()); } } @Test public void testGlobFillsInSchemeOnFS() throws Exception { - testOnFileSystem(new TestGlobFillsInScheme()); + testOnFileSystem(new TestGlobFillsInScheme(false)); } @Test public void testGlobFillsInSchemeOnFC() throws Exception { - testOnFileContext(new TestGlobFillsInScheme()); + testOnFileContext(new TestGlobFillsInScheme(true)); } /** * Test that globStatus works with relative paths. **/ - private static class TestRelativePath implements FSTestWrapperGlobTest { - public void run(FSTestWrapper wrap, FSTestWrapper unprivilegedWrap, - FileSystem fs, FileContext fc) throws Exception { + private class TestRelativePath extends FSTestWrapperGlobTest { + TestRelativePath(boolean useFc) { + super(useFc); + } + + void run() throws Exception { String[] files = new String[] { "a", "abc", "abc.p", "bacd" }; Path[] path = new Path[files.length]; @@ -1095,19 +1113,26 @@ public class TestGlobPaths { } assertEquals(globResults.length, 3); - assertEquals(USER_DIR + "/a;" + USER_DIR + "/abc;" + USER_DIR + "/abc.p", - TestPath.mergeStatuses(globResults)); + + // The default working directory for FileSystem is the user's home + // directory. For FileContext, the default is based on the UNIX user that + // started the jvm. This is arguably a bug (see HADOOP-10944 for + // details). We work around it here by explicitly calling + // getWorkingDirectory and going from there. + String pwd = wrap.getWorkingDirectory().toUri().getPath(); + assertEquals(pwd + "/a;" + pwd + "/abc;" + pwd + "/abc.p", + TestPath.mergeStatuses(globResults)); } } @Test public void testRelativePathOnFS() throws Exception { - testOnFileSystem(new TestRelativePath()); + testOnFileSystem(new TestRelativePath(false)); } @Test public void testRelativePathOnFC() throws Exception { - testOnFileContext(new TestRelativePath()); + testOnFileContext(new TestRelativePath(true)); } /** @@ -1115,17 +1140,20 @@ public class TestGlobPaths { * to list fails with AccessControlException rather than succeeding or * throwing any other exception. 
**/ - private static class TestGlobAccessDenied implements FSTestWrapperGlobTest { - public void run(FSTestWrapper wrap, FSTestWrapper unprivilegedWrap, - FileSystem fs, FileContext fc) throws Exception { - wrap.mkdir(new Path("/nopermission/val"), + private class TestGlobAccessDenied extends FSTestWrapperGlobTest { + TestGlobAccessDenied(boolean useFc) { + super(useFc); + } + + void run() throws Exception { + privWrap.mkdir(new Path("/nopermission/val"), new FsPermission((short)0777), true); - wrap.mkdir(new Path("/norestrictions/val"), + privWrap.mkdir(new Path("/norestrictions/val"), new FsPermission((short)0777), true); - wrap.setPermission(new Path("/nopermission"), + privWrap.setPermission(new Path("/nopermission"), new FsPermission((short)0)); try { - unprivilegedWrap.globStatus(new Path("/no*/*"), + wrap.globStatus(new Path("/no*/*"), new AcceptAllPathFilter()); Assert.fail("expected to get an AccessControlException when " + "globbing through a directory we don't have permissions " + @@ -1134,7 +1162,7 @@ public class TestGlobPaths { } Assert.assertEquals("/norestrictions/val", - TestPath.mergeStatuses(unprivilegedWrap.globStatus( + TestPath.mergeStatuses(wrap.globStatus( new Path("/norestrictions/*"), new AcceptAllPathFilter()))); } @@ -1142,66 +1170,118 @@ public class TestGlobPaths { @Test public void testGlobAccessDeniedOnFS() throws Exception { - testOnFileSystem(new TestGlobAccessDenied()); + testOnFileSystem(new TestGlobAccessDenied(false)); } @Test public void testGlobAccessDeniedOnFC() throws Exception { - testOnFileContext(new TestGlobAccessDenied()); + testOnFileContext(new TestGlobAccessDenied(true)); } /** * Test that trying to list a reserved path on HDFS via the globber works. **/ - private static class TestReservedHdfsPaths implements FSTestWrapperGlobTest { - public void run(FSTestWrapper wrap, FSTestWrapper unprivilegedWrap, - FileSystem fs, FileContext fc) throws Exception { + private class TestReservedHdfsPaths extends FSTestWrapperGlobTest { + TestReservedHdfsPaths(boolean useFc) { + super(useFc); + } + + void run() throws Exception { String reservedRoot = "/.reserved/.inodes/" + INodeId.ROOT_INODE_ID; Assert.assertEquals(reservedRoot, - TestPath.mergeStatuses(unprivilegedWrap. + TestPath.mergeStatuses(wrap. globStatus(new Path(reservedRoot), new AcceptAllPathFilter()))); // These inodes don't show up via listStatus. Assert.assertEquals("", - TestPath.mergeStatuses(unprivilegedWrap. + TestPath.mergeStatuses(wrap. globStatus(new Path("/.reserved/*"), new AcceptAllPathFilter()))); } } @Test public void testReservedHdfsPathsOnFS() throws Exception { - testOnFileSystem(new TestReservedHdfsPaths()); + testOnFileSystem(new TestReservedHdfsPaths(false)); } @Test public void testReservedHdfsPathsOnFC() throws Exception { - testOnFileContext(new TestReservedHdfsPaths()); + testOnFileContext(new TestReservedHdfsPaths(true)); } /** * Test trying to glob the root. Regression test for HDFS-5888. 
**/ - private static class TestGlobRoot implements FSTestWrapperGlobTest { - public void run(FSTestWrapper wrap, FSTestWrapper unprivilegedWrap, - FileSystem fs, FileContext fc) throws Exception { + private class TestGlobRoot extends FSTestWrapperGlobTest { + TestGlobRoot (boolean useFc) { + super(useFc); + } + + void run() throws Exception { final Path rootPath = new Path("/"); FileStatus oldRootStatus = wrap.getFileStatus(rootPath); String newOwner = UUID.randomUUID().toString(); - wrap.setOwner(new Path("/"), newOwner, null); + privWrap.setOwner(new Path("/"), newOwner, null); FileStatus[] status = wrap.globStatus(rootPath, new AcceptAllPathFilter()); Assert.assertEquals(1, status.length); Assert.assertEquals(newOwner, status[0].getOwner()); - wrap.setOwner(new Path("/"), oldRootStatus.getOwner(), null); + privWrap.setOwner(new Path("/"), oldRootStatus.getOwner(), null); } } @Test public void testGlobRootOnFS() throws Exception { - testOnFileSystem(new TestGlobRoot()); + testOnFileSystem(new TestGlobRoot(false)); } @Test public void testGlobRootOnFC() throws Exception { - testOnFileContext(new TestGlobRoot()); + testOnFileContext(new TestGlobRoot(true)); + } + + /** + * Test glob expressions that don't appear at the end of the path. Regression + * test for HADOOP-10957. + **/ + private class TestNonTerminalGlobs extends FSTestWrapperGlobTest { + TestNonTerminalGlobs(boolean useFc) { + super(useFc); + } + + void run() throws Exception { + try { + privWrap.mkdir(new Path("/filed_away/alpha"), + new FsPermission((short)0777), true); + privWrap.createFile(new Path("/filed"), 0); + FileStatus[] statuses = + wrap.globStatus(new Path("/filed*/alpha"), + new AcceptAllPathFilter()); + Assert.assertEquals(1, statuses.length); + Assert.assertEquals("/filed_away/alpha", statuses[0].getPath() + .toUri().getPath()); + privWrap.mkdir(new Path("/filed_away/alphabet"), + new FsPermission((short)0777), true); + privWrap.mkdir(new Path("/filed_away/alphabet/abc"), + new FsPermission((short)0777), true); + statuses = wrap.globStatus(new Path("/filed*/alph*/*b*"), + new AcceptAllPathFilter()); + Assert.assertEquals(1, statuses.length); + Assert.assertEquals("/filed_away/alphabet/abc", statuses[0].getPath() + .toUri().getPath()); + } finally { + privWrap.delete(new Path("/filed"), true); + privWrap.delete(new Path("/filed_away"), true); + } + } + } + + @Test + public void testNonTerminalGlobsOnFS() throws Exception { + testOnFileSystem(new TestNonTerminalGlobs(false)); + } + + @Test + public void testNonTerminalGlobsOnFC() throws Exception { + testOnFileContext(new TestNonTerminalGlobs(true)); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java index 98ca3160047..0e49cfec053 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java @@ -93,6 +93,7 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi; import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetUtil; import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl; +import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.NameNode; import 
org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; @@ -172,6 +173,7 @@ public class MiniDFSCluster { private boolean checkDataNodeAddrConfig = false; private boolean checkDataNodeHostConfig = false; private Configuration[] dnConfOverlays; + private boolean skipFsyncForTesting = true; public Builder(Configuration conf) { this.conf = conf; @@ -405,6 +407,15 @@ public class MiniDFSCluster { this.dnConfOverlays = dnConfOverlays; return this; } + + /** + * Default: true + * When true, we skip fsync() calls for speed improvements. + */ + public Builder skipFsyncForTesting(boolean val) { + this.skipFsyncForTesting = val; + return this; + } /** * Construct the actual MiniDFSCluster @@ -472,7 +483,8 @@ public class MiniDFSCluster { builder.checkExitOnShutdown, builder.checkDataNodeAddrConfig, builder.checkDataNodeHostConfig, - builder.dnConfOverlays); + builder.dnConfOverlays, + builder.skipFsyncForTesting); } public class DataNodeProperties { @@ -727,7 +739,8 @@ public class MiniDFSCluster { manageNameDfsDirs, true, manageDataDfsDirs, manageDataDfsDirs, operation, null, racks, hosts, null, simulatedCapacities, null, true, false, - MiniDFSNNTopology.simpleSingleNN(nameNodePort, 0), true, false, false, null); + MiniDFSNNTopology.simpleSingleNN(nameNodePort, 0), + true, false, false, null, true); } private void initMiniDFSCluster( @@ -742,7 +755,8 @@ public class MiniDFSCluster { MiniDFSNNTopology nnTopology, boolean checkExitOnShutdown, boolean checkDataNodeAddrConfig, boolean checkDataNodeHostConfig, - Configuration[] dnConfOverlays) + Configuration[] dnConfOverlays, + boolean skipFsyncForTesting) throws IOException { boolean success = false; try { @@ -782,6 +796,8 @@ public class MiniDFSCluster { + "Standby node since no IPC ports have been specified."); conf.setInt(DFS_HA_LOGROLL_PERIOD_KEY, -1); } + + EditLogFileOutputStream.setShouldSkipFsyncForTesting(skipFsyncForTesting); federation = nnTopology.isFederated(); try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStream.java new file mode 100644 index 00000000000..c268281a0f7 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStream.java @@ -0,0 +1,430 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hdfs; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.XAttrSetFlag; +import org.apache.hadoop.fs.permission.AclEntry; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hdfs.inotify.Event; +import org.apache.hadoop.hdfs.inotify.MissingEventsException; +import org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes; +import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil; +import org.apache.hadoop.util.ExitUtil; +import org.junit.Assert; +import org.junit.Test; + +import java.io.IOException; +import java.io.OutputStream; +import java.net.URISyntaxException; +import java.util.EnumSet; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; + +public class TestDFSInotifyEventInputStream { + + private static final int BLOCK_SIZE = 1024; + private static final Log LOG = LogFactory.getLog( + TestDFSInotifyEventInputStream.class); + + private static Event waitForNextEvent(DFSInotifyEventInputStream eis) + throws IOException, MissingEventsException { + Event next = null; + while ((next = eis.poll()) == null); + return next; + } + + /** + * If this test fails, check whether the newly added op should map to an + * inotify event, and if so, establish the mapping in + * {@link org.apache.hadoop.hdfs.server.namenode.InotifyFSEditLogOpTranslator} + * and update testBasic() to include the new op. + */ + @Test + public void testOpcodeCount() { + Assert.assertTrue(FSEditLogOpCodes.values().length == 46); + } + + + /** + * Tests all FsEditLogOps that are converted to inotify events. 
+ */ + @Test(timeout = 120000) + @SuppressWarnings("deprecation") + public void testBasic() throws IOException, URISyntaxException, + InterruptedException, MissingEventsException { + Configuration conf = new HdfsConfiguration(); + conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE); + conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true); + // so that we can get an atime change + conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, 1); + + MiniQJMHACluster.Builder builder = new MiniQJMHACluster.Builder(conf); + builder.getDfsBuilder().numDataNodes(2); + MiniQJMHACluster cluster = builder.build(); + + try { + cluster.getDfsCluster().waitActive(); + cluster.getDfsCluster().transitionToActive(0); + DFSClient client = new DFSClient(cluster.getDfsCluster().getNameNode(0) + .getNameNodeAddress(), conf); + FileSystem fs = cluster.getDfsCluster().getFileSystem(0); + DFSTestUtil.createFile(fs, new Path("/file"), BLOCK_SIZE, (short) 1, 0L); + DFSTestUtil.createFile(fs, new Path("/file3"), BLOCK_SIZE, (short) 1, 0L); + DFSTestUtil.createFile(fs, new Path("/file5"), BLOCK_SIZE, (short) 1, 0L); + DFSInotifyEventInputStream eis = client.getInotifyEventStream(); + client.rename("/file", "/file4", null); // RenameOp -> RenameEvent + client.rename("/file4", "/file2"); // RenameOldOp -> RenameEvent + // DeleteOp, AddOp -> UnlinkEvent, CreateEvent + OutputStream os = client.create("/file2", true, (short) 2, BLOCK_SIZE); + os.write(new byte[BLOCK_SIZE]); + os.close(); // CloseOp -> CloseEvent + // AddOp -> AppendEvent + os = client.append("/file2", BLOCK_SIZE, null, null); + os.write(new byte[BLOCK_SIZE]); + os.close(); // CloseOp -> CloseEvent + Thread.sleep(10); // so that the atime will get updated on the next line + client.open("/file2").read(new byte[1]); // TimesOp -> MetadataUpdateEvent + // SetReplicationOp -> MetadataUpdateEvent + client.setReplication("/file2", (short) 1); + // ConcatDeleteOp -> AppendEvent, UnlinkEvent, CloseEvent + client.concat("/file2", new String[]{"/file3"}); + client.delete("/file2", false); // DeleteOp -> UnlinkEvent + client.mkdirs("/dir", null, false); // MkdirOp -> CreateEvent + // SetPermissionsOp -> MetadataUpdateEvent + client.setPermission("/dir", FsPermission.valueOf("-rw-rw-rw-")); + // SetOwnerOp -> MetadataUpdateEvent + client.setOwner("/dir", "username", "groupname"); + client.createSymlink("/dir", "/dir2", false); // SymlinkOp -> CreateEvent + client.setXAttr("/file5", "user.field", "value".getBytes(), EnumSet.of( + XAttrSetFlag.CREATE)); // SetXAttrOp -> MetadataUpdateEvent + // RemoveXAttrOp -> MetadataUpdateEvent + client.removeXAttr("/file5", "user.field"); + // SetAclOp -> MetadataUpdateEvent + client.setAcl("/file5", AclEntry.parseAclSpec( + "user::rwx,user:foo:rw-,group::r--,other::---", true)); + client.removeAcl("/file5"); // SetAclOp -> MetadataUpdateEvent + + Event next = null; + + // RenameOp + next = waitForNextEvent(eis); + Assert.assertTrue(next.getEventType() == Event.EventType.RENAME); + Event.RenameEvent re = (Event.RenameEvent) next; + Assert.assertTrue(re.getDstPath().equals("/file4")); + Assert.assertTrue(re.getSrcPath().equals("/file")); + Assert.assertTrue(re.getTimestamp() > 0); + + long eventsBehind = eis.getEventsBehindEstimate(); + + // RenameOldOp + next = waitForNextEvent(eis); + Assert.assertTrue(next.getEventType() == Event.EventType.RENAME); + Event.RenameEvent re2 = (Event.RenameEvent) next; + Assert.assertTrue(re2.getDstPath().equals("/file2")); + 
Assert.assertTrue(re2.getSrcPath().equals("/file4")); + Assert.assertTrue(re.getTimestamp() > 0); + + // DeleteOp + next = waitForNextEvent(eis); + Assert.assertTrue(next.getEventType() == Event.EventType.UNLINK); + Assert.assertTrue(((Event.UnlinkEvent) next).getPath().equals("/file2")); + + // AddOp + next = waitForNextEvent(eis); + Assert.assertTrue(next.getEventType() == Event.EventType.CREATE); + Event.CreateEvent ce = (Event.CreateEvent) next; + Assert.assertTrue(ce.getiNodeType() == Event.CreateEvent.INodeType.FILE); + Assert.assertTrue(ce.getPath().equals("/file2")); + Assert.assertTrue(ce.getCtime() > 0); + Assert.assertTrue(ce.getReplication() > 0); + Assert.assertTrue(ce.getSymlinkTarget() == null); + + // CloseOp + next = waitForNextEvent(eis); + Assert.assertTrue(next.getEventType() == Event.EventType.CLOSE); + Event.CloseEvent ce2 = (Event.CloseEvent) next; + Assert.assertTrue(ce2.getPath().equals("/file2")); + Assert.assertTrue(ce2.getFileSize() > 0); + Assert.assertTrue(ce2.getTimestamp() > 0); + + // AddOp + next = waitForNextEvent(eis); + Assert.assertTrue(next.getEventType() == Event.EventType.APPEND); + Assert.assertTrue(((Event.AppendEvent) next).getPath().equals("/file2")); + + // CloseOp + next = waitForNextEvent(eis); + Assert.assertTrue(next.getEventType() == Event.EventType.CLOSE); + Assert.assertTrue(((Event.CloseEvent) next).getPath().equals("/file2")); + + // TimesOp + next = waitForNextEvent(eis); + Assert.assertTrue(next.getEventType() == Event.EventType.METADATA); + Event.MetadataUpdateEvent mue = (Event.MetadataUpdateEvent) next; + Assert.assertTrue(mue.getPath().equals("/file2")); + Assert.assertTrue(mue.getMetadataType() == + Event.MetadataUpdateEvent.MetadataType.TIMES); + + // SetReplicationOp + next = waitForNextEvent(eis); + Assert.assertTrue(next.getEventType() == Event.EventType.METADATA); + Event.MetadataUpdateEvent mue2 = (Event.MetadataUpdateEvent) next; + Assert.assertTrue(mue2.getPath().equals("/file2")); + Assert.assertTrue(mue2.getMetadataType() == + Event.MetadataUpdateEvent.MetadataType.REPLICATION); + Assert.assertTrue(mue2.getReplication() == 1); + + // ConcatDeleteOp + next = waitForNextEvent(eis); + Assert.assertTrue(next.getEventType() == Event.EventType.APPEND); + Assert.assertTrue(((Event.AppendEvent) next).getPath().equals("/file2")); + next = waitForNextEvent(eis); + Assert.assertTrue(next.getEventType() == Event.EventType.UNLINK); + Event.UnlinkEvent ue2 = (Event.UnlinkEvent) next; + Assert.assertTrue(ue2.getPath().equals("/file3")); + Assert.assertTrue(ue2.getTimestamp() > 0); + next = waitForNextEvent(eis); + Assert.assertTrue(next.getEventType() == Event.EventType.CLOSE); + Event.CloseEvent ce3 = (Event.CloseEvent) next; + Assert.assertTrue(ce3.getPath().equals("/file2")); + Assert.assertTrue(ce3.getTimestamp() > 0); + + // DeleteOp + next = waitForNextEvent(eis); + Assert.assertTrue(next.getEventType() == Event.EventType.UNLINK); + Event.UnlinkEvent ue = (Event.UnlinkEvent) next; + Assert.assertTrue(ue.getPath().equals("/file2")); + Assert.assertTrue(ue.getTimestamp() > 0); + + // MkdirOp + next = waitForNextEvent(eis); + Assert.assertTrue(next.getEventType() == Event.EventType.CREATE); + Event.CreateEvent ce4 = (Event.CreateEvent) next; + Assert.assertTrue(ce4.getiNodeType() == + Event.CreateEvent.INodeType.DIRECTORY); + Assert.assertTrue(ce4.getPath().equals("/dir")); + Assert.assertTrue(ce4.getCtime() > 0); + Assert.assertTrue(ce4.getReplication() == 0); + Assert.assertTrue(ce4.getSymlinkTarget() == null); + + // 
SetPermissionsOp + next = waitForNextEvent(eis); + Assert.assertTrue(next.getEventType() == Event.EventType.METADATA); + Event.MetadataUpdateEvent mue3 = (Event.MetadataUpdateEvent) next; + Assert.assertTrue(mue3.getPath().equals("/dir")); + Assert.assertTrue(mue3.getMetadataType() == + Event.MetadataUpdateEvent.MetadataType.PERMS); + Assert.assertTrue(mue3.getPerms().toString().contains("rw-rw-rw-")); + + // SetOwnerOp + next = waitForNextEvent(eis); + Assert.assertTrue(next.getEventType() == Event.EventType.METADATA); + Event.MetadataUpdateEvent mue4 = (Event.MetadataUpdateEvent) next; + Assert.assertTrue(mue4.getPath().equals("/dir")); + Assert.assertTrue(mue4.getMetadataType() == + Event.MetadataUpdateEvent.MetadataType.OWNER); + Assert.assertTrue(mue4.getOwnerName().equals("username")); + Assert.assertTrue(mue4.getGroupName().equals("groupname")); + + // SymlinkOp + next = waitForNextEvent(eis); + Assert.assertTrue(next.getEventType() == Event.EventType.CREATE); + Event.CreateEvent ce5 = (Event.CreateEvent) next; + Assert.assertTrue(ce5.getiNodeType() == + Event.CreateEvent.INodeType.SYMLINK); + Assert.assertTrue(ce5.getPath().equals("/dir2")); + Assert.assertTrue(ce5.getCtime() > 0); + Assert.assertTrue(ce5.getReplication() == 0); + Assert.assertTrue(ce5.getSymlinkTarget().equals("/dir")); + + // SetXAttrOp + next = waitForNextEvent(eis); + Assert.assertTrue(next.getEventType() == Event.EventType.METADATA); + Event.MetadataUpdateEvent mue5 = (Event.MetadataUpdateEvent) next; + Assert.assertTrue(mue5.getPath().equals("/file5")); + Assert.assertTrue(mue5.getMetadataType() == + Event.MetadataUpdateEvent.MetadataType.XATTRS); + Assert.assertTrue(mue5.getxAttrs().size() == 1); + Assert.assertTrue(mue5.getxAttrs().get(0).getName().contains("field")); + Assert.assertTrue(!mue5.isxAttrsRemoved()); + + // RemoveXAttrOp + next = waitForNextEvent(eis); + Assert.assertTrue(next.getEventType() == Event.EventType.METADATA); + Event.MetadataUpdateEvent mue6 = (Event.MetadataUpdateEvent) next; + Assert.assertTrue(mue6.getPath().equals("/file5")); + Assert.assertTrue(mue6.getMetadataType() == + Event.MetadataUpdateEvent.MetadataType.XATTRS); + Assert.assertTrue(mue6.getxAttrs().size() == 1); + Assert.assertTrue(mue6.getxAttrs().get(0).getName().contains("field")); + Assert.assertTrue(mue6.isxAttrsRemoved()); + + // SetAclOp (1) + next = waitForNextEvent(eis); + Assert.assertTrue(next.getEventType() == Event.EventType.METADATA); + Event.MetadataUpdateEvent mue7 = (Event.MetadataUpdateEvent) next; + Assert.assertTrue(mue7.getPath().equals("/file5")); + Assert.assertTrue(mue7.getMetadataType() == + Event.MetadataUpdateEvent.MetadataType.ACLS); + Assert.assertTrue(mue7.getAcls().contains( + AclEntry.parseAclEntry("user::rwx", true))); + + // SetAclOp (2) + next = waitForNextEvent(eis); + Assert.assertTrue(next.getEventType() == Event.EventType.METADATA); + Event.MetadataUpdateEvent mue8 = (Event.MetadataUpdateEvent) next; + Assert.assertTrue(mue8.getPath().equals("/file5")); + Assert.assertTrue(mue8.getMetadataType() == + Event.MetadataUpdateEvent.MetadataType.ACLS); + Assert.assertTrue(mue8.getAcls() == null); + + // Returns null when there are no further events + Assert.assertTrue(eis.poll() == null); + + // make sure the estimate hasn't changed since the above assertion + // tells us that we are fully caught up to the current namesystem state + // and we should not have been behind at all when eventsBehind was set + // either, since there were few enough events that they should have all + // been read 
to the client during the first poll() call + Assert.assertTrue(eis.getEventsBehindEstimate() == eventsBehind); + + } finally { + cluster.shutdown(); + } + } + + @Test(timeout = 120000) + public void testNNFailover() throws IOException, URISyntaxException, + MissingEventsException { + Configuration conf = new HdfsConfiguration(); + MiniQJMHACluster cluster = new MiniQJMHACluster.Builder(conf).build(); + + try { + cluster.getDfsCluster().waitActive(); + cluster.getDfsCluster().transitionToActive(0); + DFSClient client = ((DistributedFileSystem) HATestUtil.configureFailoverFs + (cluster.getDfsCluster(), conf)).dfs; + DFSInotifyEventInputStream eis = client.getInotifyEventStream(); + for (int i = 0; i < 10; i++) { + client.mkdirs("/dir" + i, null, false); + } + cluster.getDfsCluster().shutdownNameNode(0); + cluster.getDfsCluster().transitionToActive(1); + Event next = null; + // we can read all of the edits logged by the old active from the new + // active + for (int i = 0; i < 10; i++) { + next = waitForNextEvent(eis); + Assert.assertTrue(next.getEventType() == Event.EventType.CREATE); + Assert.assertTrue(((Event.CreateEvent) next).getPath().equals("/dir" + + i)); + } + Assert.assertTrue(eis.poll() == null); + } finally { + cluster.shutdown(); + } + } + + @Test(timeout = 120000) + public void testTwoActiveNNs() throws IOException, MissingEventsException { + Configuration conf = new HdfsConfiguration(); + MiniQJMHACluster cluster = new MiniQJMHACluster.Builder(conf).build(); + + try { + cluster.getDfsCluster().waitActive(); + cluster.getDfsCluster().transitionToActive(0); + DFSClient client0 = new DFSClient(cluster.getDfsCluster().getNameNode(0) + .getNameNodeAddress(), conf); + DFSClient client1 = new DFSClient(cluster.getDfsCluster().getNameNode(1) + .getNameNodeAddress(), conf); + DFSInotifyEventInputStream eis = client0.getInotifyEventStream(); + for (int i = 0; i < 10; i++) { + client0.mkdirs("/dir" + i, null, false); + } + + cluster.getDfsCluster().transitionToActive(1); + for (int i = 10; i < 20; i++) { + client1.mkdirs("/dir" + i, null, false); + } + + // make sure that the old active can't read any further than the edits + // it logged itself (it has no idea whether the in-progress edits from + // the other writer have actually been committed) + Event next = null; + for (int i = 0; i < 10; i++) { + next = waitForNextEvent(eis); + Assert.assertTrue(next.getEventType() == Event.EventType.CREATE); + Assert.assertTrue(((Event.CreateEvent) next).getPath().equals("/dir" + + i)); + } + Assert.assertTrue(eis.poll() == null); + } finally { + try { + cluster.shutdown(); + } catch (ExitUtil.ExitException e) { + // expected because the old active will be unable to flush the + // end-of-segment op since it is fenced + } + } + } + + @Test(timeout = 120000) + public void testReadEventsWithTimeout() throws IOException, + InterruptedException, MissingEventsException { + Configuration conf = new HdfsConfiguration(); + MiniQJMHACluster cluster = new MiniQJMHACluster.Builder(conf).build(); + + try { + cluster.getDfsCluster().waitActive(); + cluster.getDfsCluster().transitionToActive(0); + final DFSClient client = new DFSClient(cluster.getDfsCluster() + .getNameNode(0).getNameNodeAddress(), conf); + DFSInotifyEventInputStream eis = client.getInotifyEventStream(); + ScheduledExecutorService ex = Executors + .newSingleThreadScheduledExecutor(); + ex.schedule(new Runnable() { + @Override + public void run() { + try { + client.mkdirs("/dir", null, false); + } catch (IOException e) { + // test will fail + 
LOG.error("Unable to create /dir", e); + } + } + }, 1, TimeUnit.SECONDS); + // a very generous wait period -- the edit will definitely have been + // processed by the time this is up + Event next = eis.poll(5, TimeUnit.SECONDS); + Assert.assertTrue(next != null); + Assert.assertTrue(next.getEventType() == Event.EventType.CREATE); + Assert.assertTrue(((Event.CreateEvent) next).getPath().equals("/dir")); + } finally { + cluster.shutdown(); + } + } + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java index 2e748b5b1c2..e7002c301c4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java @@ -131,6 +131,7 @@ public class TestDFSRename { /** * Check the blocks of dst file are cleaned after rename with overwrite + * Restart NN to check the rename successfully */ @Test(timeout = 120000) public void testRenameWithOverwrite() throws Exception { @@ -160,6 +161,11 @@ public class TestDFSRename { dfs.rename(srcPath, dstPath, Rename.OVERWRITE); assertTrue(bm.getStoredBlock(lbs.getLocatedBlocks().get(0).getBlock(). getLocalBlock()) == null); + + // Restart NN and check the rename successfully + cluster.restartNameNodes(); + assertFalse(dfs.exists(srcPath)); + assertTrue(dfs.exists(dstPath)); } finally { if (dfs != null) { dfs.close(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java index d8400778ec9..34c701d0bbd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java @@ -261,7 +261,9 @@ public class TestFileAppend{ start += 29; } stm.write(fileContents, start, AppendTestUtil.FILE_SIZE -start); - + // need to make sure we completely write out all full blocks before + // the checkFile() call (see FSOutputSummer#flush) + stm.flush(); // verify that full blocks are sane checkFile(fs, file1, 1); stm.close(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java index b685a895f2b..32a41966dc3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java @@ -125,7 +125,7 @@ public class TestFileAppend4 { while (!recovered && tries-- > 0) { try { out = fs.append(file1); - LOG.info("Successfully opened for appends"); + LOG.info("Successfully opened for append"); recovered = true; } catch (IOException e) { LOG.info("Failed open for append, waiting on lease recovery"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java index fbfd6d423f7..f92f287e28e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java @@ -31,6 +31,7 @@ import 
org.apache.hadoop.hdfs.client.HdfsDataOutputStream; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol; import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure; +import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure.Policy; import org.apache.hadoop.io.IOUtils; import org.apache.log4j.Level; import org.junit.Assert; @@ -54,7 +55,8 @@ public class TestReplaceDatanodeOnFailure { /** Test DEFAULT ReplaceDatanodeOnFailure policy. */ @Test public void testDefaultPolicy() throws Exception { - final ReplaceDatanodeOnFailure p = ReplaceDatanodeOnFailure.DEFAULT; + final Configuration conf = new HdfsConfiguration(); + final ReplaceDatanodeOnFailure p = ReplaceDatanodeOnFailure.get(conf); final DatanodeInfo[] infos = new DatanodeInfo[5]; final DatanodeInfo[][] datanodes = new DatanodeInfo[infos.length + 1][]; @@ -113,7 +115,7 @@ public class TestReplaceDatanodeOnFailure { final Configuration conf = new HdfsConfiguration(); //always replace a datanode - ReplaceDatanodeOnFailure.ALWAYS.write(conf); + ReplaceDatanodeOnFailure.write(Policy.ALWAYS, true, conf); final String[] racks = new String[REPLICATION]; Arrays.fill(racks, RACK0); @@ -239,8 +241,6 @@ public class TestReplaceDatanodeOnFailure { final Configuration conf = new HdfsConfiguration(); final short REPLICATION = (short)3; - Assert.assertEquals(ReplaceDatanodeOnFailure.DEFAULT, ReplaceDatanodeOnFailure.get(conf)); - final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf ).numDataNodes(1).build(); @@ -285,4 +285,41 @@ public class TestReplaceDatanodeOnFailure { if (cluster != null) {cluster.shutdown();} } } + + @Test + public void testBestEffort() throws Exception { + final Configuration conf = new HdfsConfiguration(); + + //always replace a datanode but do not throw exception + ReplaceDatanodeOnFailure.write(Policy.ALWAYS, true, conf); + + final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf + ).numDataNodes(1).build(); + + try { + final DistributedFileSystem fs = cluster.getFileSystem(); + final Path f = new Path(DIR, "testIgnoreReplaceFailure"); + + final byte[] bytes = new byte[1000]; + { + LOG.info("write " + bytes.length + " bytes to " + f); + final FSDataOutputStream out = fs.create(f, REPLICATION); + out.write(bytes); + out.close(); + + final FileStatus status = fs.getFileStatus(f); + Assert.assertEquals(REPLICATION, status.getReplication()); + Assert.assertEquals(bytes.length, status.getLen()); + } + + { + LOG.info("append another " + bytes.length + " bytes to " + f); + final FSDataOutputStream out = fs.append(f); + out.write(bytes); + out.close(); + } + } finally { + if (cluster != null) {cluster.shutdown();} + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java index 3166cccc94b..9380701c0f0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java @@ -56,7 +56,9 @@ public class MiniQJMHACluster { public Builder(Configuration conf) { this.conf = conf; - this.dfsBuilder = new MiniDFSCluster.Builder(conf); + // most QJMHACluster tests don't need DataNodes, so we'll make + // this the default + this.dfsBuilder = new MiniDFSCluster.Builder(conf).numDataNodes(0); } public 
MiniDFSCluster.Builder getDfsBuilder() { @@ -102,7 +104,7 @@ public class MiniQJMHACluster { cluster = builder.dfsBuilder.nnTopology(topology) .manageNameDfsSharedDirs(false).build(); cluster.waitActive(); - cluster.shutdown(); + cluster.shutdownNameNodes(); // initialize the journal nodes Configuration confNN0 = cluster.getConfiguration(0); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQJMWithFaults.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQJMWithFaults.java index 4783e8fb4fc..2e38d5fb406 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQJMWithFaults.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQJMWithFaults.java @@ -382,7 +382,7 @@ public class TestQJMWithFaults { } @Override - protected ExecutorService createExecutor() { + protected ExecutorService createSingleThreadExecutor() { return MoreExecutors.sameThreadExecutor(); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java index fcb8e55bebd..8bb39f8c6af 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java @@ -939,7 +939,7 @@ public class TestQuorumJournalManager { public AsyncLogger createLogger(Configuration conf, NamespaceInfo nsInfo, String journalId, InetSocketAddress addr) { AsyncLogger logger = new IPCLoggerChannel(conf, nsInfo, journalId, addr) { - protected ExecutorService createExecutor() { + protected ExecutorService createSingleThreadExecutor() { // Don't parallelize calls to the quorum in the tests. // This makes the tests more deterministic. 
return MoreExecutors.sameThreadExecutor(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java index 242934547d0..1fe7ba89851 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java @@ -394,6 +394,8 @@ public class TestBlockToken { Path filePath = new Path(fileName); FSDataOutputStream out = fs.create(filePath, (short) 1); out.write(new byte[1000]); + // ensure that the first block is written out (see FSOutputSummer#flush) + out.flush(); LocatedBlocks locatedBlocks = cluster.getNameNodeRpc().getBlockLocations( fileName, 0, 1000); while (locatedBlocks.getLastLocatedBlock() == null) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java index 109a0394fc6..a51342ea751 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java @@ -1120,6 +1120,11 @@ public class SimulatedFSDataset implements FsDatasetSpi { throw new UnsupportedOperationException(); } + @Override + public synchronized void removeVolumes(Collection volumes) { + throw new UnsupportedOperationException(); + } + @Override public void submitBackgroundSyncFileRangeRequest(ExtendedBlock block, FileDescriptor fd, long offset, long nbytes, int flags) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java index 5ac13eec270..d6e70d80037 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java @@ -108,7 +108,6 @@ public class TestFsDatasetCache { private static CacheManipulator prevCacheManipulator; static { - EditLogFileOutputStream.setShouldSkipFsyncForTesting(false); LogManager.getLogger(FsDatasetCache.class).setLevel(Level.DEBUG); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java index d9e99078be1..2c4c401205e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java @@ -18,12 +18,20 @@ package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystemTestHelper; import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.StorageType; +import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.hdfs.protocol.ExtendedBlock; +import 
org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.Storage; +import org.apache.hadoop.hdfs.server.common.StorageInfo; import org.apache.hadoop.hdfs.server.datanode.DNConf; +import org.apache.hadoop.hdfs.server.datanode.DataBlockScanner; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.DataStorage; import org.apache.hadoop.hdfs.server.datanode.StorageLocation; +import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.StringUtils; import org.junit.Before; import org.junit.Test; @@ -35,25 +43,44 @@ import java.util.ArrayList; import java.util.List; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyString; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; public class TestFsDatasetImpl { private static final String BASE_DIR = - System.getProperty("test.build.dir") + "/fsdatasetimpl"; + new FileSystemTestHelper().getTestRootDir(); private static final int NUM_INIT_VOLUMES = 2; + private static final String[] BLOCK_POOL_IDS = {"bpid-0", "bpid-1"}; + // Use to generate storageUuid + private static final DataStorage dsForStorageUuid = new DataStorage( + new StorageInfo(HdfsServerConstants.NodeType.DATA_NODE)); + + private Configuration conf; private DataStorage storage; + private DataBlockScanner scanner; private FsDatasetImpl dataset; + private static Storage.StorageDirectory createStorageDirectory(File root) { + Storage.StorageDirectory sd = new Storage.StorageDirectory(root); + dsForStorageUuid.createStorageID(sd); + return sd; + } + private static void createStorageDirs(DataStorage storage, Configuration conf, int numDirs) throws IOException { List dirs = new ArrayList(); List dirStrings = new ArrayList(); for (int i = 0; i < numDirs; i++) { - String loc = BASE_DIR + "/data" + i; - dirStrings.add(loc); - dirs.add(new Storage.StorageDirectory(new File(loc))); + File loc = new File(BASE_DIR + "/data" + i); + dirStrings.add(loc.toString()); + loc.mkdirs(); + dirs.add(createStorageDirectory(loc)); when(storage.getStorageDir(i)).thenReturn(dirs.get(i)); } @@ -66,14 +93,19 @@ public class TestFsDatasetImpl { public void setUp() throws IOException { final DataNode datanode = Mockito.mock(DataNode.class); storage = Mockito.mock(DataStorage.class); - Configuration conf = new Configuration(); + scanner = Mockito.mock(DataBlockScanner.class); + this.conf = new Configuration(); final DNConf dnConf = new DNConf(conf); when(datanode.getConf()).thenReturn(conf); when(datanode.getDnConf()).thenReturn(dnConf); + when(datanode.getBlockScanner()).thenReturn(scanner); createStorageDirs(storage, conf, NUM_INIT_VOLUMES); dataset = new FsDatasetImpl(datanode, storage, conf); + for (String bpid : BLOCK_POOL_IDS) { + dataset.addBlockPool(bpid, conf); + } assertEquals(NUM_INIT_VOLUMES, dataset.getVolumes().size()); assertEquals(0, dataset.getNumFailedVolumes()); @@ -89,15 +121,63 @@ public class TestFsDatasetImpl { String path = BASE_DIR + "/newData" + i; newLocations.add(StorageLocation.parse(path)); when(storage.getStorageDir(numExistingVolumes + i)) - .thenReturn(new Storage.StorageDirectory(new File(path))); + .thenReturn(createStorageDirectory(new File(path))); } when(storage.getNumStorageDirs()).thenReturn(totalVolumes); dataset.addVolumes(newLocations); assertEquals(totalVolumes, 
dataset.getVolumes().size()); + assertEquals(totalVolumes, dataset.storageMap.size()); for (int i = 0; i < numNewVolumes; i++) { assertEquals(newLocations.get(i).getFile().getPath(), dataset.getVolumes().get(numExistingVolumes + i).getBasePath()); } } + + @Test + public void testRemoveVolumes() throws IOException { + // Feed FsDataset with block metadata. + final int NUM_BLOCKS = 100; + for (int i = 0; i < NUM_BLOCKS; i++) { + String bpid = BLOCK_POOL_IDS[NUM_BLOCKS % BLOCK_POOL_IDS.length]; + ExtendedBlock eb = new ExtendedBlock(bpid, i); + dataset.createRbw(StorageType.DEFAULT, eb); + } + final String[] dataDirs = + conf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY).split(","); + final String volumePathToRemove = dataDirs[0]; + List volumesToRemove = new ArrayList(); + volumesToRemove.add(StorageLocation.parse(volumePathToRemove)); + + dataset.removeVolumes(volumesToRemove); + int expectedNumVolumes = dataDirs.length - 1; + assertEquals("The volume has been removed from the volumeList.", + expectedNumVolumes, dataset.getVolumes().size()); + assertEquals("The volume has been removed from the storageMap.", + expectedNumVolumes, dataset.storageMap.size()); + + try { + dataset.asyncDiskService.execute(volumesToRemove.get(0).getFile(), + new Runnable() { + @Override + public void run() {} + }); + fail("Expect RuntimeException: the volume has been removed from the " + + "AsyncDiskService."); + } catch (RuntimeException e) { + GenericTestUtils.assertExceptionContains("Cannot find root", e); + } + + int totalNumReplicas = 0; + for (String bpid : dataset.volumeMap.getBlockPoolList()) { + totalNumReplicas += dataset.volumeMap.size(bpid); + } + assertEquals("The replica infos on this volume has been removed from the " + + "volumeMap.", NUM_BLOCKS / NUM_INIT_VOLUMES, + totalNumReplicas); + + // Verify that every BlockPool deletes the removed blocks from the volume. 
+ verify(scanner, times(BLOCK_POOL_IDS.length)) + .deleteBlocks(anyString(), any(Block[].class)); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java index 5448e7a885e..872ff9c490f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java @@ -70,6 +70,9 @@ public class TestBlockUnderConstruction { long blocksBefore = stm.getPos() / BLOCK_SIZE; TestFileCreation.writeFile(stm, BLOCK_SIZE); + // need to make sure the full block is completely flushed to the DataNodes + // (see FSOutputSummer#flush) + stm.flush(); int blocksAfter = 0; // wait until the block is allocated by DataStreamer BlockLocation[] locatedBlocks; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java index d54b90e6631..93076928f3e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java @@ -110,7 +110,6 @@ public class TestCacheDirectives { static { NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator()); - EditLogFileOutputStream.setShouldSkipFsyncForTesting(false); } private static final long BLOCK_SIZE = 4096; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java index d01df75f794..2ee251bdc7c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java @@ -141,6 +141,9 @@ public class TestDecommissioningStatus { Random rand = new Random(seed); rand.nextBytes(buffer); stm.write(buffer); + // need to make sure that we actually write out both file blocks + // (see FSOutputSummer#flush) + stm.flush(); // Do not close stream, return it // so that it is not garbage collected return stm; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java index c8c1a02751e..d1bc2119b00 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java @@ -916,6 +916,10 @@ public class TestEditLog { public void setMaxOpSize(int maxOpSize) { reader.setMaxOpSize(maxOpSize); } + + @Override public boolean isLocalLog() { + return true; + } } @Test diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java index db8b3a94085..8b903af3fce 100644 
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java @@ -441,7 +441,7 @@ public class TestStartup { checkNameSpace(conf); // read an image compressed in Gzip and store it uncompressed - LOG.info("Read an compressed iamge and store it as uncompressed."); + LOG.info("Read a compressed image and store it as uncompressed."); conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, false); checkNameSpace(conf); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java index 72776e03ceb..ca67245371b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java @@ -30,7 +30,9 @@ import java.io.FileNotFoundException; import java.io.FileOutputStream; import java.io.IOException; import java.io.PrintWriter; +import java.io.UnsupportedEncodingException; import java.net.URL; +import java.net.URLDecoder; import java.util.ArrayList; import java.util.Arrays; import java.util.List; @@ -225,15 +227,17 @@ public class TestRefreshUserMappings { } private void addNewConfigResource(String rsrcName, String keyGroup, - String groups, String keyHosts, String hosts) throws FileNotFoundException { + String groups, String keyHosts, String hosts) + throws FileNotFoundException, UnsupportedEncodingException { // location for temp resource should be in CLASSPATH Configuration conf = new Configuration(); URL url = conf.getResource("hdfs-site.xml"); - Path p = new Path(url.getPath()); + + String urlPath = URLDecoder.decode(url.getPath().toString(), "UTF-8"); + Path p = new Path(urlPath); Path dir = p.getParent(); tempResource = dir.toString() + "/" + rsrcName; - - + String newResource = "<configuration>"+ "<property><name>" + keyGroup + "</name><value>"+groups+"</value></property>" + diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java new file mode 100644 index 00000000000..bb923a2c6be --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java @@ -0,0 +1,280 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.tracing; + +import org.apache.commons.lang.RandomStringUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.htrace.HTraceConfiguration; +import org.htrace.Sampler; +import org.htrace.Span; +import org.htrace.SpanReceiver; +import org.htrace.Trace; +import org.htrace.TraceScope; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Set; + +public class TestTracing { + + private static Configuration conf; + private static MiniDFSCluster cluster; + private static DistributedFileSystem dfs; + + @Test + public void testSpanReceiverHost() throws Exception { + Configuration conf = new Configuration(); + conf.set(SpanReceiverHost.SPAN_RECEIVERS_CONF_KEY, + SetSpanReceiver.class.getName()); + SpanReceiverHost spanReceiverHost = SpanReceiverHost.getInstance(conf); + } + + @Test + public void testWriteTraceHooks() throws Exception { + long startTime = System.currentTimeMillis(); + TraceScope ts = Trace.startSpan("testWriteTraceHooks", Sampler.ALWAYS); + Path file = new Path("traceWriteTest.dat"); + FSDataOutputStream stream = dfs.create(file); + + for (int i = 0; i < 10; i++) { + byte[] data = RandomStringUtils.randomAlphabetic(102400).getBytes(); + stream.write(data); + } + stream.hflush(); + stream.close(); + long endTime = System.currentTimeMillis(); + ts.close(); + + String[] expectedSpanNames = { + "testWriteTraceHooks", + "org.apache.hadoop.hdfs.protocol.ClientProtocol.create", + "org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ClientNamenodeProtocol.BlockingInterface.create", + "org.apache.hadoop.hdfs.protocol.ClientProtocol.fsync", + "org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ClientNamenodeProtocol.BlockingInterface.fsync", + "org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ClientNamenodeProtocol.BlockingInterface.complete" + }; + assertSpanNamesFound(expectedSpanNames); + + // The trace should last about the same amount of time as the test + Map> map = SetSpanReceiver.SetHolder.getMap(); + Span s = map.get("testWriteTraceHooks").get(0); + Assert.assertNotNull(s); + long spanStart = s.getStartTimeMillis(); + long spanEnd = s.getStopTimeMillis(); + Assert.assertTrue(spanStart - startTime < 100); + Assert.assertTrue(spanEnd - endTime < 100); + + // There should only be one trace id as it should all be homed in the + // top trace. 
+ for (Span span : SetSpanReceiver.SetHolder.spans) { + Assert.assertEquals(ts.getSpan().getTraceId(), span.getTraceId()); + } + } + + @Test + public void testWriteWithoutTraceHooks() throws Exception { + Path file = new Path("withoutTraceWriteTest.dat"); + FSDataOutputStream stream = dfs.create(file); + for (int i = 0; i < 10; i++) { + byte[] data = RandomStringUtils.randomAlphabetic(102400).getBytes(); + stream.write(data); + } + stream.hflush(); + stream.close(); + Assert.assertTrue(SetSpanReceiver.SetHolder.size() == 0); + } + + @Test + public void testReadTraceHooks() throws Exception { + String fileName = "traceReadTest.dat"; + Path filePath = new Path(fileName); + + // Create the file. + FSDataOutputStream ostream = dfs.create(filePath); + for (int i = 0; i < 50; i++) { + byte[] data = RandomStringUtils.randomAlphabetic(10240).getBytes(); + ostream.write(data); + } + ostream.close(); + + + long startTime = System.currentTimeMillis(); + TraceScope ts = Trace.startSpan("testReadTraceHooks", Sampler.ALWAYS); + FSDataInputStream istream = dfs.open(filePath, 10240); + ByteBuffer buf = ByteBuffer.allocate(10240); + + int count = 0; + try { + while (istream.read(buf) > 0) { + count += 1; + buf.clear(); + istream.seek(istream.getPos() + 5); + } + } catch (IOException ioe) { + // Ignore this it's probably a seek after eof. + } finally { + istream.close(); + } + ts.getSpan().addTimelineAnnotation("count: " + count); + long endTime = System.currentTimeMillis(); + ts.close(); + + String[] expectedSpanNames = { + "testReadTraceHooks", + "org.apache.hadoop.hdfs.protocol.ClientProtocol.getBlockLocations", + "org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ClientNamenodeProtocol.BlockingInterface.getBlockLocations" + }; + assertSpanNamesFound(expectedSpanNames); + + // The trace should last about the same amount of time as the test + Map> map = SetSpanReceiver.SetHolder.getMap(); + Span s = map.get("testReadTraceHooks").get(0); + Assert.assertNotNull(s); + + long spanStart = s.getStartTimeMillis(); + long spanEnd = s.getStopTimeMillis(); + Assert.assertTrue(spanStart - startTime < 100); + Assert.assertTrue(spanEnd - endTime < 100); + + // There should only be one trace id as it should all be homed in the + // top trace. + for (Span span : SetSpanReceiver.SetHolder.spans) { + Assert.assertEquals(ts.getSpan().getTraceId(), span.getTraceId()); + } + } + + @Test + public void testReadWithoutTraceHooks() throws Exception { + String fileName = "withoutTraceReadTest.dat"; + Path filePath = new Path(fileName); + + // Create the file. + FSDataOutputStream ostream = dfs.create(filePath); + for (int i = 0; i < 50; i++) { + byte[] data = RandomStringUtils.randomAlphabetic(10240).getBytes(); + ostream.write(data); + } + ostream.close(); + + FSDataInputStream istream = dfs.open(filePath, 10240); + ByteBuffer buf = ByteBuffer.allocate(10240); + + int count = 0; + try { + while (istream.read(buf) > 0) { + count += 1; + buf.clear(); + istream.seek(istream.getPos() + 5); + } + } catch (IOException ioe) { + // Ignore this it's probably a seek after eof. 
+ } finally { + istream.close(); + } + Assert.assertTrue(SetSpanReceiver.SetHolder.size() == 0); + } + + @Before + public void cleanSet() { + SetSpanReceiver.SetHolder.spans.clear(); + } + + @BeforeClass + public static void setupCluster() throws IOException { + conf = new Configuration(); + conf.setLong("dfs.blocksize", 100 * 1024); + conf.set(SpanReceiverHost.SPAN_RECEIVERS_CONF_KEY, + SetSpanReceiver.class.getName()); + + cluster = new MiniDFSCluster.Builder(conf) + .numDataNodes(3) + .build(); + + dfs = cluster.getFileSystem(); + } + + @AfterClass + public static void shutDown() throws IOException { + cluster.shutdown(); + } + + private void assertSpanNamesFound(String[] expectedSpanNames) { + Map> map = SetSpanReceiver.SetHolder.getMap(); + for (String spanName : expectedSpanNames) { + Assert.assertTrue("Should find a span with name " + spanName, map.get(spanName) != null); + } + } + + /** + * Span receiver that puts all spans into a single set. + * This is useful for testing. + *
    + * We're not using HTrace's POJOReceiver here so as that doesn't + * push all the metrics to a static place, and would make testing + * SpanReceiverHost harder. + */ + public static class SetSpanReceiver implements SpanReceiver { + + public void configure(HTraceConfiguration conf) { + } + + public void receiveSpan(Span span) { + SetHolder.spans.add(span); + } + + public void close() { + } + + public static class SetHolder { + public static Set spans = new HashSet(); + + public static int size() { + return spans.size(); + } + + public static Map> getMap() { + Map> map = new HashMap>(); + + for (Span s : spans) { + List l = map.get(s.getDescription()); + if (l == null) { + l = new LinkedList(); + map.put(s.getDescription(), l); + } + l.add(s); + } + return map; + } + } + } +} diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index de0767d2a45..63bc08be967 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -154,16 +154,6 @@ Trunk (Unreleased) MAPREDUCE-5867. Fix NPE in KillAMPreemptionPolicy related to ProportionalCapacityPreemptionPolicy (Sunil G via devaraj) - BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS - - MAPREDUCE-5890. Support for encrypting Intermediate - data and spills in local filesystem. (asuresh via tucu) - - MAPREDUCE-6007. Add support to distcp to preserve raw.* namespace - extended attributes. (clamb) - - MAPREDUCE-6041. Fix TestOptionsParser. (clamb) - Release 2.6.0 - UNRELEASED INCOMPATIBLE CHANGES @@ -261,6 +251,22 @@ Release 2.6.0 - UNRELEASED MAPREDUCE-5885. build/test/test.mapred.spill causes release audit warnings (Chen He via jlowe) + BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS + + MAPREDUCE-5890. Support for encrypting Intermediate + data and spills in local filesystem. (asuresh via tucu) + + MAPREDUCE-6007. Add support to distcp to preserve raw.* namespace + extended attributes. (clamb) + + MAPREDUCE-6041. Fix TestOptionsParser. (clamb) + -- + + MAPREDUCE-6051. Fix typos in log messages. (Ray Chiang via cdouglas) + + MAPREDUCE-5931. Validate SleepJob command line parameters (Gera Shegalov + via jlowe) + Release 2.5.1 - UNRELEASED INCOMPATIBLE CHANGES @@ -273,7 +279,7 @@ Release 2.5.1 - UNRELEASED BUG FIXES - MAPREDUCE-6033. Updated access check for displaying job information + MAPREDUCE-6033. Updated access check for displaying job information (Yu Gao via Eric Yang) Release 2.5.0 - 2014-08-11 diff --git a/hadoop-mapreduce-project/LICENSE.txt b/hadoop-mapreduce-project/LICENSE.txt deleted file mode 100644 index 3721a43c310..00000000000 --- a/hadoop-mapreduce-project/LICENSE.txt +++ /dev/null @@ -1,341 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
- - -APACHE HADOOP SUBCOMPONENTS: - -The Apache Hadoop project contains subcomponents with separate copyright -notices and license terms. Your use of the source code for the these -subcomponents is subject to the terms and conditions of the following -licenses. - -For the org.apache.hadoop.util.bloom.* classes: - -/** - * - * Copyright (c) 2005, European Commission project OneLab under contract - * 034819 (http://www.one-lab.org) - * All rights reserved. - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the distribution. - * - Neither the name of the University Catholique de Louvain - UCL - * nor the names of its contributors may be used to endorse or - * promote products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS - * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE - * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, - * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN - * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -The binary distribution of this product bundles binaries of leveldbjni -(https://github.com/fusesource/leveldbjni), which is available under the -following license: - -Copyright (c) 2011 FuseSource Corp. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of FuseSource Corp. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -The binary distribution of this product bundles binaries of leveldb -(http://code.google.com/p/leveldb/), which is available under the following -license: - -Copyright (c) 2011 The LevelDB Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -The binary distribution of this product bundles binaries of snappy -(http://code.google.com/p/snappy/), which is available under the following -license: - -Copyright 2011, Google Inc. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/hadoop-mapreduce-project/NOTICE.txt b/hadoop-mapreduce-project/NOTICE.txt deleted file mode 100644 index 62fc5816c99..00000000000 --- a/hadoop-mapreduce-project/NOTICE.txt +++ /dev/null @@ -1,2 +0,0 @@ -This product includes software developed by The Apache Software -Foundation (http://www.apache.org/). diff --git a/hadoop-mapreduce-project/bin/mapred b/hadoop-mapreduce-project/bin/mapred index cbfdc7e7d09..8f3063774f8 100755 --- a/hadoop-mapreduce-project/bin/mapred +++ b/hadoop-mapreduce-project/bin/mapred @@ -64,13 +64,15 @@ shift case ${COMMAND} in mradmin|jobtracker|tasktracker|groups) - echo "Sorry, the ${COMMAND} command is no longer supported." - echo "You may find similar functionality with the \"yarn\" shell command." + hadoop_error "Sorry, the ${COMMAND} command is no longer supported." + hadoop_error "You may find similar functionality with the \"yarn\" shell command." hadoop_exit_with_usage 1 ;; archive) CLASS=org.apache.hadoop.tools.HadoopArchives + hadoop_debug "Injecting TOOL_PATH into CLASSPATH" hadoop_add_classpath "${TOOL_PATH}" + hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS" HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}" ;; classpath) @@ -80,12 +82,15 @@ case ${COMMAND} in ;; distcp) CLASS=org.apache.hadoop.tools.DistCp + hadoop_debug "Injecting TOOL_PATH into CLASSPATH" hadoop_add_classpath "${TOOL_PATH}" + hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS" HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}" ;; historyserver) daemon="true" CLASS=org.apache.hadoop.mapreduce.v2.hs.JobHistoryServer + hadoop_debug "Appending HADOOP_JOB_HISTORYSERVER_OPTS onto HADOOP_OPTS" HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_JOB_HISTORYSERVER_OPTS}" if [ -n "${HADOOP_JOB_HISTORYSERVER_HEAPSIZE}" ]; then JAVA_HEAP_MAX="-Xmx${HADOOP_JOB_HISTORYSERVER_HEAPSIZE}m" @@ -97,6 +102,7 @@ case ${COMMAND} in ;; pipes) CLASS=org.apache.hadoop.mapred.pipes.Submitter + hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS" HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}" ;; queue) @@ -104,10 +110,12 @@ case ${COMMAND} in ;; sampler) CLASS=org.apache.hadoop.mapred.lib.InputSampler + hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS" HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}" ;; version) CLASS=org.apache.hadoop.util.VersionInfo + hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS" HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}" ;; -*|*) @@ -130,8 +138,6 @@ fi hadoop_add_param HADOOP_OPTS Xmx "${JAVA_HEAP_MAX}" hadoop_finalize -export CLASSPATH - if [[ -n "${daemon}" ]]; then if [[ -n "${secure_service}" ]]; then hadoop_secure_daemon_handler "${HADOOP_DAEMON_MODE}" "${COMMAND}"\ diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/commit/CommitterEventHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/commit/CommitterEventHandler.java index 
8c3be86cb11..d56c1e5aeb2 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/commit/CommitterEventHandler.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/commit/CommitterEventHandler.java @@ -202,7 +202,7 @@ public class CommitterEventHandler extends AbstractService private synchronized void cancelJobCommit() { Thread threadCommitting = jobCommitThread; if (threadCommitting != null && threadCommitting.isAlive()) { - LOG.info("Canceling commit"); + LOG.info("Cancelling commit"); threadCommitting.interrupt(); // wait up to configured timeout for commit thread to finish diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java index 4b32c045238..6e9f3138b4a 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java @@ -335,7 +335,7 @@ public abstract class RMCommunicator extends AbstractService public void setSignalled(boolean isSignalled) { this.isSignalled = isSignalled; - LOG.info("RMCommunicator notified that iSignalled is: " + LOG.info("RMCommunicator notified that isSignalled is: " + isSignalled); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/DefaultSpeculator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/DefaultSpeculator.java index 34dcb1294d3..392a51aebd6 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/DefaultSpeculator.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/DefaultSpeculator.java @@ -134,16 +134,16 @@ public class DefaultSpeculator extends AbstractService implements estimator.contextualize(conf, context); } catch (InstantiationException ex) { - LOG.error("Can't make a speculation runtime extimator", ex); + LOG.error("Can't make a speculation runtime estimator", ex); throw new YarnRuntimeException(ex); } catch (IllegalAccessException ex) { - LOG.error("Can't make a speculation runtime extimator", ex); + LOG.error("Can't make a speculation runtime estimator", ex); throw new YarnRuntimeException(ex); } catch (InvocationTargetException ex) { - LOG.error("Can't make a speculation runtime extimator", ex); + LOG.error("Can't make a speculation runtime estimator", ex); throw new YarnRuntimeException(ex); } catch (NoSuchMethodException ex) { - LOG.error("Can't make a speculation runtime extimator", ex); + LOG.error("Can't make a speculation runtime estimator", ex); throw new YarnRuntimeException(ex); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/BackupStore.java 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/BackupStore.java index be7fe181f90..e79ec664a56 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/BackupStore.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/BackupStore.java @@ -607,7 +607,7 @@ public class BackupStore { int reserve(int requestedSize, int minSize) { if (availableSize < minSize) { - LOG.debug("No Space available. Available: " + availableSize + + LOG.debug("No space available. Available: " + availableSize + " MinSize: " + minSize); return 0; } else { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/HadoopArchives.md.vm b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/HadoopArchives.md.vm index 0cc0f1c93aa..db0a25f7e4f 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/HadoopArchives.md.vm +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/HadoopArchives.md.vm @@ -38,7 +38,7 @@ Overview How to Create an Archive ------------------------ - `Usage: hadoop archive -archiveName name -p <parent> <src>* <dest>` + `Usage: hadoop archive -archiveName name -p <parent> [-r <replication factor>] <src>* <dest>` -archiveName is the name of the archive you would like to create. An example would be foo.har. The name should have a \*.har extension. The parent argument @@ -52,9 +52,12 @@ How to Create an Archive would need a map reduce cluster to run this. For a detailed example the later sections. + -r indicates the desired replication factor; if this optional argument is + not specified, a replication factor of 10 will be used. + If you just want to archive a single directory /foo/bar then you can just use - `hadoop archive -archiveName zoo.har -p /foo/bar /outputdir` + `hadoop archive -archiveName zoo.har -p /foo/bar -r 3 /outputdir` How to Look Up Files in Archives -------------------------------- @@ -90,14 +93,15 @@ Archives Examples $H3 Creating an Archive - `hadoop archive -archiveName foo.har -p /user/hadoop dir1 dir2 /user/zoo` + `hadoop archive -archiveName foo.har -p /user/hadoop -r 3 dir1 dir2 /user/zoo` The above example is creating an archive using /user/hadoop as the relative archive directory. The directories /user/hadoop/dir1 and /user/hadoop/dir2 will be archived in the following file system directory -- /user/zoo/foo.har. Archiving does not delete the input files. If you want to delete the input files after creating the archives (to reduce namespace), you will have to do - it on your own. + it on your own. In this example, because `-r 3` is specified, a replication + factor of 3 will be used. $H3 Looking Up Files diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/SleepJob.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/SleepJob.java deleted file mode 100644 index 40fab8ce0b7..00000000000 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/SleepJob.java +++ /dev/null @@ -1,275 +0,0 @@ -/** -* Licensed to the Apache Software Foundation (ASF) under one -* or more contributor license agreements. 
See the NOTICE file -* distributed with this work for additional information -* regarding copyright ownership. The ASF licenses this file -* to you under the Apache License, Version 2.0 (the -* "License"); you may not use this file except in compliance -* with the License. You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ - -package org.apache.hadoop; - -import java.io.IOException; -import java.io.DataInput; -import java.io.DataOutput; -import java.util.ArrayList; -import java.util.List; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.conf.Configured; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.io.IntWritable; -import org.apache.hadoop.io.NullWritable; -import org.apache.hadoop.io.Writable; -import org.apache.hadoop.mapreduce.InputFormat; -import org.apache.hadoop.mapreduce.InputSplit; -import org.apache.hadoop.mapreduce.Job; -import org.apache.hadoop.mapreduce.JobContext; -import org.apache.hadoop.mapreduce.MRJobConfig; -import org.apache.hadoop.mapreduce.Mapper; -import org.apache.hadoop.mapreduce.Partitioner; -import org.apache.hadoop.mapreduce.RecordReader; -import org.apache.hadoop.mapreduce.Reducer; -import org.apache.hadoop.mapreduce.TaskAttemptContext; -import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat; -import org.apache.hadoop.mapreduce.lib.input.FileInputFormat; -import org.apache.hadoop.util.Tool; -import org.apache.hadoop.util.ToolRunner; - -/** - * Dummy class for testing MR framefork. Sleeps for a defined period - * of time in mapper and reducer. Generates fake input for map / reduce - * jobs. Note that generated number of input pairs is in the order - * of numMappers * mapSleepTime / 100, so the job uses - * some disk space. - */ -public class SleepJob extends Configured implements Tool { - public static String MAP_SLEEP_COUNT = "mapreduce.sleepjob.map.sleep.count"; - public static String REDUCE_SLEEP_COUNT = - "mapreduce.sleepjob.reduce.sleep.count"; - public static String MAP_SLEEP_TIME = "mapreduce.sleepjob.map.sleep.time"; - public static String REDUCE_SLEEP_TIME = - "mapreduce.sleepjob.reduce.sleep.time"; - - public static class SleepJobPartitioner extends - Partitioner { - public int getPartition(IntWritable k, NullWritable v, int numPartitions) { - return k.get() % numPartitions; - } - } - - public static class EmptySplit extends InputSplit implements Writable { - public void write(DataOutput out) throws IOException { } - public void readFields(DataInput in) throws IOException { } - public long getLength() { return 0L; } - public String[] getLocations() { return new String[0]; } - } - - public static class SleepInputFormat - extends InputFormat { - - public List getSplits(JobContext jobContext) { - List ret = new ArrayList(); - int numSplits = jobContext.getConfiguration(). 
- getInt(MRJobConfig.NUM_MAPS, 1); - for (int i = 0; i < numSplits; ++i) { - ret.add(new EmptySplit()); - } - return ret; - } - - public RecordReader createRecordReader( - InputSplit ignored, TaskAttemptContext taskContext) - throws IOException { - Configuration conf = taskContext.getConfiguration(); - final int count = conf.getInt(MAP_SLEEP_COUNT, 1); - if (count < 0) throw new IOException("Invalid map count: " + count); - final int redcount = conf.getInt(REDUCE_SLEEP_COUNT, 1); - if (redcount < 0) - throw new IOException("Invalid reduce count: " + redcount); - final int emitPerMapTask = (redcount * taskContext.getNumReduceTasks()); - - return new RecordReader() { - private int records = 0; - private int emitCount = 0; - private IntWritable key = null; - private IntWritable value = null; - public void initialize(InputSplit split, TaskAttemptContext context) { - } - - public boolean nextKeyValue() - throws IOException { - if (count == 0) { - return false; - } - key = new IntWritable(); - key.set(emitCount); - int emit = emitPerMapTask / count; - if ((emitPerMapTask) % count > records) { - ++emit; - } - emitCount += emit; - value = new IntWritable(); - value.set(emit); - return records++ < count; - } - public IntWritable getCurrentKey() { return key; } - public IntWritable getCurrentValue() { return value; } - public void close() throws IOException { } - public float getProgress() throws IOException { - return count == 0 ? 100 : records / ((float)count); - } - }; - } - } - - public static class SleepMapper - extends Mapper { - private long mapSleepDuration = 100; - private int mapSleepCount = 1; - private int count = 0; - - protected void setup(Context context) - throws IOException, InterruptedException { - Configuration conf = context.getConfiguration(); - this.mapSleepCount = - conf.getInt(MAP_SLEEP_COUNT, mapSleepCount); - this.mapSleepDuration = mapSleepCount == 0 ? 0 : - conf.getLong(MAP_SLEEP_TIME , 100) / mapSleepCount; - } - - public void map(IntWritable key, IntWritable value, Context context - ) throws IOException, InterruptedException { - //it is expected that every map processes mapSleepCount number of records. - try { - context.setStatus("Sleeping... (" + - (mapSleepDuration * (mapSleepCount - count)) + ") ms left"); - Thread.sleep(mapSleepDuration); - } - catch (InterruptedException ex) { - throw (IOException)new IOException( - "Interrupted while sleeping").initCause(ex); - } - ++count; - // output reduceSleepCount * numReduce number of random values, so that - // each reducer will get reduceSleepCount number of keys. - int k = key.get(); - for (int i = 0; i < value.get(); ++i) { - context.write(new IntWritable(k + i), NullWritable.get()); - } - } - } - - public static class SleepReducer - extends Reducer { - private long reduceSleepDuration = 100; - private int reduceSleepCount = 1; - private int count = 0; - - protected void setup(Context context) - throws IOException, InterruptedException { - Configuration conf = context.getConfiguration(); - this.reduceSleepCount = - conf.getInt(REDUCE_SLEEP_COUNT, reduceSleepCount); - this.reduceSleepDuration = reduceSleepCount == 0 ? 0 : - conf.getLong(REDUCE_SLEEP_TIME , 100) / reduceSleepCount; - } - - public void reduce(IntWritable key, Iterable values, - Context context) - throws IOException { - try { - context.setStatus("Sleeping... 
(" + - (reduceSleepDuration * (reduceSleepCount - count)) + ") ms left"); - Thread.sleep(reduceSleepDuration); - - } - catch (InterruptedException ex) { - throw (IOException)new IOException( - "Interrupted while sleeping").initCause(ex); - } - count++; - } - } - - public static void main(String[] args) throws Exception { - int res = ToolRunner.run(new Configuration(), new SleepJob(), args); - System.exit(res); - } - - public Job createJob(int numMapper, int numReducer, - long mapSleepTime, int mapSleepCount, - long reduceSleepTime, int reduceSleepCount) - throws IOException { - Configuration conf = getConf(); - conf.setLong(MAP_SLEEP_TIME, mapSleepTime); - conf.setLong(REDUCE_SLEEP_TIME, reduceSleepTime); - conf.setInt(MAP_SLEEP_COUNT, mapSleepCount); - conf.setInt(REDUCE_SLEEP_COUNT, reduceSleepCount); - conf.setInt(MRJobConfig.NUM_MAPS, numMapper); - Job job = Job.getInstance(conf, "sleep"); - job.setNumReduceTasks(numReducer); - job.setJarByClass(SleepJob.class); - job.setMapperClass(SleepMapper.class); - job.setMapOutputKeyClass(IntWritable.class); - job.setMapOutputValueClass(NullWritable.class); - job.setReducerClass(SleepReducer.class); - job.setOutputFormatClass(NullOutputFormat.class); - job.setInputFormatClass(SleepInputFormat.class); - job.setPartitionerClass(SleepJobPartitioner.class); - job.setSpeculativeExecution(false); - job.setJobName("Sleep job"); - FileInputFormat.addInputPath(job, new Path("ignored")); - return job; - } - - public int run(String[] args) throws Exception { - - if(args.length < 1) { - System.err.println("SleepJob [-m numMapper] [-r numReducer]" + - " [-mt mapSleepTime (msec)] [-rt reduceSleepTime (msec)]" + - " [-recordt recordSleepTime (msec)]"); - ToolRunner.printGenericCommandUsage(System.err); - return 2; - } - - int numMapper = 1, numReducer = 1; - long mapSleepTime = 100, reduceSleepTime = 100, recSleepTime = 100; - int mapSleepCount = 1, reduceSleepCount = 1; - - for(int i=0; i < args.length; i++ ) { - if(args[i].equals("-m")) { - numMapper = Integer.parseInt(args[++i]); - } - else if(args[i].equals("-r")) { - numReducer = Integer.parseInt(args[++i]); - } - else if(args[i].equals("-mt")) { - mapSleepTime = Long.parseLong(args[++i]); - } - else if(args[i].equals("-rt")) { - reduceSleepTime = Long.parseLong(args[++i]); - } - else if (args[i].equals("-recordt")) { - recSleepTime = Long.parseLong(args[++i]); - } - } - - // sleep for *SleepTime duration in Task by recSleepTime per record - mapSleepCount = (int)Math.ceil(mapSleepTime / ((double)recSleepTime)); - reduceSleepCount = (int)Math.ceil(reduceSleepTime / ((double)recSleepTime)); - Job job = createJob(numMapper, numReducer, mapSleepTime, - mapSleepCount, reduceSleepTime, reduceSleepCount); - return job.waitForCompletion(true) ? 
0 : 1; - } - -} diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/JHLogAnalyzer.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/JHLogAnalyzer.java index 347dd066a98..8b3f4c895e1 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/JHLogAnalyzer.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/JHLogAnalyzer.java @@ -1076,7 +1076,7 @@ public class JHLogAnalyzer { long execTime, Path resFileName ) throws IOException { - LOG.info("Analizing results ..."); + LOG.info("Analyzing results ..."); DataOutputStream out = null; BufferedWriter writer = null; try { @@ -1118,7 +1118,7 @@ public class JHLogAnalyzer { if(writer != null) writer.close(); if(out != null) out.close(); } - LOG.info("Analizing results ... done."); + LOG.info("Analyzing results ... done."); } private static void cleanup(Configuration conf) throws IOException { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/SleepJob.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/SleepJob.java index 97b76369c74..2b321833566 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/SleepJob.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/SleepJob.java @@ -224,11 +224,7 @@ public class SleepJob extends Configured implements Tool { public int run(String[] args) throws Exception { if(args.length < 1) { - System.err.println("SleepJob [-m numMapper] [-r numReducer]" + - " [-mt mapSleepTime (msec)] [-rt reduceSleepTime (msec)]" + - " [-recordt recordSleepTime (msec)]"); - ToolRunner.printGenericCommandUsage(System.err); - return 2; + return printUsage("number of arguments must be > 0"); } int numMapper = 1, numReducer = 1; @@ -238,18 +234,34 @@ public class SleepJob extends Configured implements Tool { for(int i=0; i < args.length; i++ ) { if(args[i].equals("-m")) { numMapper = Integer.parseInt(args[++i]); + if (numMapper < 0) { + return printUsage(numMapper + ": numMapper must be >= 0"); + } } else if(args[i].equals("-r")) { numReducer = Integer.parseInt(args[++i]); + if (numReducer < 0) { + return printUsage(numReducer + ": numReducer must be >= 0"); + } } else if(args[i].equals("-mt")) { mapSleepTime = Long.parseLong(args[++i]); + if (mapSleepTime < 0) { + return printUsage(mapSleepTime + ": mapSleepTime must be >= 0"); + } } else if(args[i].equals("-rt")) { reduceSleepTime = Long.parseLong(args[++i]); + if (reduceSleepTime < 0) { + return printUsage( + reduceSleepTime + ": reduceSleepTime must be >= 0"); + } } else if (args[i].equals("-recordt")) { recSleepTime = Long.parseLong(args[++i]); + if (recSleepTime < 0) { + return printUsage(recSleepTime + ": recordSleepTime must be >= 0"); + } } } @@ -261,4 +273,14 @@ public class SleepJob extends Configured implements Tool { return job.waitForCompletion(true) ? 
0 : 1; } + private int printUsage(String error) { + if (error != null) { + System.err.println("ERROR: " + error); + } + System.err.println("SleepJob [-m numMapper] [-r numReducer]" + + " [-mt mapSleepTime (msec)] [-rt reduceSleepTime (msec)]" + + " [-recordt recordSleepTime (msec)]"); + ToolRunner.printGenericCommandUsage(System.err); + return 2; + } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRAMWithNonNormalizedCapabilities.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRAMWithNonNormalizedCapabilities.java index dcd59acb5a0..7aaaa1b8d93 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRAMWithNonNormalizedCapabilities.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRAMWithNonNormalizedCapabilities.java @@ -25,7 +25,7 @@ import org.junit.Assert; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.SleepJob; +import org.apache.hadoop.mapreduce.SleepJob; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobs.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobs.java index 32153996c8d..5699600acc0 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobs.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobs.java @@ -40,8 +40,8 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.FailingMapper; import org.apache.hadoop.RandomTextWriterJob; import org.apache.hadoop.RandomTextWriterJob.RandomInputFormat; -import org.apache.hadoop.SleepJob; -import org.apache.hadoop.SleepJob.SleepMapper; +import org.apache.hadoop.mapreduce.SleepJob; +import org.apache.hadoop.mapreduce.SleepJob.SleepMapper; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.FSDataOutputStream; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobsWithHistoryService.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobsWithHistoryService.java index b4581e65cb1..9fba91dbb1a 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobsWithHistoryService.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobsWithHistoryService.java @@ -28,7 +28,7 @@ import org.junit.Assert; import org.apache.avro.AvroRemoteException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.SleepJob; 
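A minimal usage sketch, not taken from this patch: after the move from org.apache.hadoop.SleepJob to org.apache.hadoop.mapreduce.SleepJob, a caller launches the job through ToolRunner exactly as SleepJob#main does above, with the -m/-r/-mt/-rt flags that run() validates. The class name and flag values below are illustrative assumptions.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.SleepJob;
    import org.apache.hadoop.util.ToolRunner;

    public class SleepJobSmokeTest {
      public static void main(String[] args) throws Exception {
        // Two mappers and one reducer, each sleeping for one second in total;
        // run() converts the sleep times into per-record sleep counts via Math.ceil.
        String[] sleepArgs = {"-m", "2", "-r", "1", "-mt", "1000", "-rt", "1000"};
        int rc = ToolRunner.run(new Configuration(), new SleepJob(), sleepArgs);
        System.exit(rc);
      }
    }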
+import org.apache.hadoop.mapreduce.SleepJob; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobsWithProfiler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobsWithProfiler.java index e91f5c98071..df55f509d9e 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobsWithProfiler.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobsWithProfiler.java @@ -29,7 +29,7 @@ import org.junit.Assert; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.SleepJob; +import org.apache.hadoop.mapreduce.SleepJob; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.*; import org.apache.hadoop.fs.permission.FsPermission; diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index be5b3d51b59..76448cfcadc 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -61,12 +61,17 @@ 1.9 + + 1.9.13 + 2.5.0 ${env.HADOOP_PROTOC_PATH} 3.4.6 + + 6.0.41 @@ -398,6 +403,16 @@ jetty-util 6.1.26 + + org.apache.tomcat.embed + tomcat-embed-core + 7.0.55 + + + org.apache.tomcat.embed + tomcat-embed-logging-juli + 7.0.55 + javax.servlet.jsp jsp-api @@ -557,6 +572,11 @@ jets3t 0.9.0 + + com.amazonaws + aws-java-sdk + 1.7.2 + org.apache.mina mina-core @@ -625,22 +645,22 @@ org.codehaus.jackson jackson-mapper-asl - 1.9.13 + ${jackson.version} org.codehaus.jackson jackson-core-asl - 1.9.13 + ${jackson.version} org.codehaus.jackson jackson-jaxrs - 1.9.13 + ${jackson.version} org.codehaus.jackson jackson-xc - 1.9.13 + ${jackson.version} org.mockito @@ -677,6 +697,11 @@ jsch 0.1.42 + + org.htrace + htrace-core + 3.0.4 + org.jdom jdom diff --git a/hadoop-project/src/site/site.xml b/hadoop-project/src/site/site.xml index 56288ee60ca..a42aff0a382 100644 --- a/hadoop-project/src/site/site.xml +++ b/hadoop-project/src/site/site.xml @@ -65,6 +65,7 @@ +
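The hadoop-project pom above pins com.amazonaws:aws-java-sdk 1.7.2 for the new hadoop-aws module. The sketch below is only an illustration of the kind of SDK client code that dependency brings onto the classpath; the class names are the standard AWS SDK 1.x public API, and the credentials are placeholders, none of this appears in the patch itself.

    import java.util.List;
    import com.amazonaws.auth.BasicAWSCredentials;
    import com.amazonaws.services.s3.AmazonS3Client;
    import com.amazonaws.services.s3.model.Bucket;

    public class S3SdkSmokeTest {
      public static void main(String[] args) {
        // Placeholder credentials; real code would read these from configuration.
        AmazonS3Client s3 = new AmazonS3Client(
            new BasicAWSCredentials("ACCESS_KEY", "SECRET_KEY"));
        List<Bucket> buckets = s3.listBuckets();
        for (Bucket b : buckets) {
          System.out.println(b.getName());
        }
      }
    }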

    diff --git a/hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java b/hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java index 93994b817a4..e53576d7d8f 100644 --- a/hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java +++ b/hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java @@ -97,9 +97,12 @@ public class HadoopArchives implements Tool { long partSize = 2 * 1024 * 1024 * 1024l; /** size of blocks in hadoop archives **/ long blockSize = 512 * 1024 * 1024l; + /** the desired replication degree; default is 10 **/ + short repl = 10; private static final String usage = "archive" - + " -archiveName NAME -p * " + + + " -archiveName NAME -p [-r ]" + + "* " + "\n"; @@ -542,7 +545,7 @@ public class HadoopArchives implements Tool { srcWriter.close(); } //increase the replication of src files - jobfs.setReplication(srcFiles, (short) 10); + jobfs.setReplication(srcFiles, repl); conf.setInt(SRC_COUNT_LABEL, numFiles); conf.setLong(TOTAL_SIZE_LABEL, totalSize); int numMaps = (int)(totalSize/partSize); @@ -835,6 +838,11 @@ public class HadoopArchives implements Tool { } i+=2; + + if ("-r".equals(args[i])) { + repl = Short.parseShort(args[i+1]); + i+=2; + } //read the rest of the paths for (; i < args.length; i++) { if (i == (args.length - 1)) { diff --git a/hadoop-tools/hadoop-archives/src/test/java/org/apache/hadoop/tools/TestHadoopArchives.java b/hadoop-tools/hadoop-archives/src/test/java/org/apache/hadoop/tools/TestHadoopArchives.java index 65bbbe451bf..e7eef3f9666 100644 --- a/hadoop-tools/hadoop-archives/src/test/java/org/apache/hadoop/tools/TestHadoopArchives.java +++ b/hadoop-tools/hadoop-archives/src/test/java/org/apache/hadoop/tools/TestHadoopArchives.java @@ -157,6 +157,24 @@ public class TestHadoopArchives { final List harPaths = lsr(shell, fullHarPathStr); Assert.assertEquals(originalPaths, harPaths); } + + @Test + public void testRelativePathWitRepl() throws Exception { + final Path sub1 = new Path(inputPath, "dir1"); + fs.mkdirs(sub1); + createFile(inputPath, fs, sub1.getName(), "a"); + final FsShell shell = new FsShell(conf); + + final List originalPaths = lsr(shell, "input"); + System.out.println("originalPaths: " + originalPaths); + + // make the archive: + final String fullHarPathStr = makeArchiveWithRepl(); + + // compare results: + final List harPaths = lsr(shell, fullHarPathStr); + Assert.assertEquals(originalPaths, harPaths); + } @Test public void testPathWithSpaces() throws Exception { @@ -625,6 +643,29 @@ public class TestHadoopArchives { assertEquals(0, ToolRunner.run(har, args)); return fullHarPathStr; } + + /* + * Run the HadoopArchives tool to create an archive on the + * given file system with a specified replication degree. 
+ */ + private String makeArchiveWithRepl() throws Exception { + final String inputPathStr = inputPath.toUri().getPath(); + System.out.println("inputPathStr = " + inputPathStr); + + final URI uri = fs.getUri(); + final String prefix = "har://hdfs-" + uri.getHost() + ":" + uri.getPort() + + archivePath.toUri().getPath() + Path.SEPARATOR; + + final String harName = "foo.har"; + final String fullHarPathStr = prefix + harName; + final String[] args = { "-archiveName", harName, "-p", inputPathStr, + "-r 3", "*", archivePath.toString() }; + System.setProperty(HadoopArchives.TEST_HADOOP_ARCHIVES_JAR_PATH, + HADOOP_ARCHIVES_JAR); + final HadoopArchives har = new HadoopArchives(conf); + assertEquals(0, ToolRunner.run(har, args)); + return fullHarPathStr; + } @Test /* diff --git a/hadoop-tools/hadoop-aws/dev-support/findbugs-exclude.xml b/hadoop-tools/hadoop-aws/dev-support/findbugs-exclude.xml new file mode 100644 index 00000000000..74e4923bf74 --- /dev/null +++ b/hadoop-tools/hadoop-aws/dev-support/findbugs-exclude.xml @@ -0,0 +1,19 @@ + + + + diff --git a/hadoop-tools/hadoop-aws/pom.xml b/hadoop-tools/hadoop-aws/pom.xml new file mode 100644 index 00000000000..c01a33ddd41 --- /dev/null +++ b/hadoop-tools/hadoop-aws/pom.xml @@ -0,0 +1,116 @@ + + + + 4.0.0 + + org.apache.hadoop + hadoop-project + 3.0.0-SNAPSHOT + ../../hadoop-project + + hadoop-aws + 3.0.0-SNAPSHOT + Apache Hadoop Amazon Web Services support + + This module contains code to support integration with Amazon Web Services. + It also declares the dependencies needed to work with AWS services. + + jar + + + UTF-8 + true + + + + + tests-off + + + src/test/resources/auth-keys.xml + + + + true + + + + tests-on + + + src/test/resources/auth-keys.xml + + + + false + + + + + + + + + org.codehaus.mojo + findbugs-maven-plugin + + true + true + ${basedir}/dev-support/findbugs-exclude.xml + + Max + + + + org.apache.maven.plugins + maven-project-info-reports-plugin + + false + false + + + + + + + + org.apache.hadoop + hadoop-common + compile + + + + org.apache.hadoop + hadoop-common + compile + test-jar + + + + com.amazonaws + aws-java-sdk + compile + + + + junit + junit + test + + + + diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java index 1d573822d9b..029fa877f0b 100644 --- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java +++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java @@ -32,7 +32,6 @@ import org.apache.hadoop.yarn.api.records.ContainerStatus; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.NodeState; import org.apache.hadoop.yarn.api.records.Resource; -import org.apache.hadoop.yarn.api.records.ResourceOption; import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.rmnode @@ -55,7 +54,7 @@ public class NodeInfo { private String nodeAddr; private String httpAddress; private int cmdPort; - private volatile ResourceOption perNode; + private volatile Resource perNode; private String rackName; private String healthReport; private NodeState state; @@ -63,7 +62,7 @@ public class NodeInfo { private List toCleanUpApplications; public FakeRMNodeImpl(NodeId nodeId, String nodeAddr, String httpAddress, - ResourceOption perNode, String 
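The new -r flag on HadoopArchives sets the replication applied to the staged source list (jobfs.setReplication(srcFiles, repl)); when omitted, the default of 10 is kept. Below is an illustrative sketch, not part of the patch, of driving the tool through ToolRunner as the test above does; paths and the archive name are placeholders, and the flag and its value are passed as separate argv elements, which is what the "-r".equals(args[i]) / args[i+1] parsing expects.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.tools.HadoopArchives;
    import org.apache.hadoop.util.ToolRunner;

    public class HarWithReplication {
      public static void main(String[] args) throws Exception {
        // archive -archiveName NAME -p <parent> [-r <replication>] <src>* <dest>
        String[] harArgs = {
            "-archiveName", "foo.har",
            "-p", "/user/hadoop/input",
            "-r", "3",                  // replication degree for the copied sources
            "*",                        // globs resolved relative to the parent path
            "/user/hadoop/archives"
        };
        Configuration conf = new Configuration();
        HadoopArchives har = new HadoopArchives(conf);
        int rc = ToolRunner.run(har, harArgs);
        System.exit(rc);
      }
    }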
rackName, String healthReport, + Resource perNode, String rackName, String healthReport, int cmdPort, String hostName, NodeState state) { this.nodeId = nodeId; this.nodeAddr = nodeAddr; @@ -111,10 +110,6 @@ public class NodeInfo { } public Resource getTotalCapability() { - return perNode.getResource(); - } - - public ResourceOption getResourceOption() { return perNode; } @@ -159,32 +154,26 @@ public class NodeInfo { return list; } - @Override - public String getNodeManagerVersion() { - // TODO Auto-generated method stub - return null; - } - @Override - public void setResourceOption(ResourceOption resourceOption) { - perNode = resourceOption; + public String getNodeManagerVersion() { + return null; } + } public static RMNode newNodeInfo(String rackName, String hostName, - final ResourceOption resourceOption, int port) { + final Resource resource, int port) { final NodeId nodeId = newNodeID(hostName, port); final String nodeAddr = hostName + ":" + port; final String httpAddress = hostName; return new FakeRMNodeImpl(nodeId, nodeAddr, httpAddress, - resourceOption, rackName, "Me good", + resource, rackName, "Me good", port, hostName, null); } public static RMNode newNodeInfo(String rackName, String hostName, final Resource resource) { - return newNodeInfo(rackName, hostName, ResourceOption.newInstance(resource, - RMNode.OVER_COMMIT_TIMEOUT_MILLIS_DEFAULT), NODE_ID++); + return newNodeInfo(rackName, hostName, resource, NODE_ID++); } } diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java index da9b56fd546..7eca66fb779 100644 --- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java +++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java @@ -26,7 +26,6 @@ import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.NodeState; import org.apache.hadoop.yarn.api.records.Resource; -import org.apache.hadoop.yarn.api.records.ResourceOption; import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.rmnode @@ -148,14 +147,4 @@ public class RMNodeWrapper implements RMNode { return node.getNodeManagerVersion(); } - @Override - public void setResourceOption(ResourceOption resourceOption) { - node.setResourceOption(resourceOption); - } - - @Override - public ResourceOption getResourceOption() { - return node.getResourceOption(); - } - } diff --git a/hadoop-tools/pom.xml b/hadoop-tools/pom.xml index bed0f9b6a97..25f06e04051 100644 --- a/hadoop-tools/pom.xml +++ b/hadoop-tools/pom.xml @@ -43,6 +43,7 @@ hadoop-openstack hadoop-sls hadoop-azure + hadoop-aws diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 871829ac369..64ccd288d57 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -58,6 +58,12 @@ Release 2.6.0 - UNRELEASED YARN-2393. FairScheduler: Add the notion of steady fair share. (Wei Yan via kasha) + YARN-2395. FairScheduler: Preemption timeout should be configurable per + queue. (Wei Yan via kasha) + + YARN-2394. FairScheduler: Configure fairSharePreemptionThreshold per queue. + (Wei Yan via kasha) + IMPROVEMENTS YARN-2197. 
Add a link to YARN CHANGES.txt in the left side of doc @@ -160,6 +166,15 @@ Release 2.6.0 - UNRELEASED YARN-2182. Updated ContainerId#toString() to append RM Epoch number. (Tsuyoshi OZAWA via jianhe) + YARN-2406. Move RM recovery related proto to + yarn_server_resourcemanager_recovery.proto. (Tsuyoshi Ozawa via jianhe) + + YARN-2360. Fair Scheduler: Display dynamic fair share for queues on the + scheduler page. (Ashwin Shankar and Wei Yan via kasha) + + YARN-1506. Changed RMNode/SchedulerNode to update resource with event + notification. (Junping Du via jianhe) + OPTIMIZATIONS BUG FIXES @@ -246,6 +261,20 @@ Release 2.6.0 - UNRELEASED YARN-2035. FileSystemApplicationHistoryStore should not make working dir when it already exists. (Jonathan Eagles via zjshen) + YARN-2405. NPE in FairSchedulerAppsBlock. (Tsuyoshi Ozawa via kasha) + + YARN-2449. Fixed the bug that TimelineAuthenticationFilterInitializer + is not automatically added when hadoop.http.filter.initializers is not + configured. (Varun Vasudev via zjshen) + + YARN-2450. Fix typos in log messages. (Ray Chiang via hitesh) + + YARN-2447. RM web service app submission doesn't pass secrets correctly. + (Varun Vasudev via jianhe) + + YARN-2462. TestNodeManagerResync#testBlockNewContainerRequestsOnStartAndResync + should have a test timeout (Eric Payne via jlowe) + Release 2.5.1 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-yarn-project/LICENSE.txt b/hadoop-yarn-project/LICENSE.txt deleted file mode 100644 index 3721a43c310..00000000000 --- a/hadoop-yarn-project/LICENSE.txt +++ /dev/null @@ -1,341 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - - -APACHE HADOOP SUBCOMPONENTS: - -The Apache Hadoop project contains subcomponents with separate copyright -notices and license terms. Your use of the source code for the these -subcomponents is subject to the terms and conditions of the following -licenses. - -For the org.apache.hadoop.util.bloom.* classes: - -/** - * - * Copyright (c) 2005, European Commission project OneLab under contract - * 034819 (http://www.one-lab.org) - * All rights reserved. - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the distribution. 
- * - Neither the name of the University Catholique de Louvain - UCL - * nor the names of its contributors may be used to endorse or - * promote products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS - * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE - * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, - * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN - * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -The binary distribution of this product bundles binaries of leveldbjni -(https://github.com/fusesource/leveldbjni), which is available under the -following license: - -Copyright (c) 2011 FuseSource Corp. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of FuseSource Corp. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -The binary distribution of this product bundles binaries of leveldb -(http://code.google.com/p/leveldb/), which is available under the following -license: - -Copyright (c) 2011 The LevelDB Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. 
nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -The binary distribution of this product bundles binaries of snappy -(http://code.google.com/p/snappy/), which is available under the following -license: - -Copyright 2011, Google Inc. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/hadoop-yarn-project/NOTICE.txt b/hadoop-yarn-project/NOTICE.txt deleted file mode 100644 index efa865f5bda..00000000000 --- a/hadoop-yarn-project/NOTICE.txt +++ /dev/null @@ -1,17 +0,0 @@ -This product includes software developed by The Apache Software -Foundation (http://www.apache.org/). - -The binary distribution of this product bundles binaries of -org.iq80.leveldb:leveldb-api (https://github.com/dain/leveldb), which has the -following notices: -* Copyright 2011 Dain Sundstrom -* Copyright 2011 FuseSource Corp. http://fusesource.com - -The binary distribution of this product bundles binaries of -org.fusesource.hawtjni:hawtjni-runtime (https://github.com/fusesource/hawtjni), -which has the following notices: -* This product includes software developed by FuseSource Corp. - http://fusesource.com -* This product includes software developed at - Progress Software Corporation and/or its subsidiaries or affiliates. 
-* This product includes software developed by IBM Corporation and others. diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn b/hadoop-yarn-project/hadoop-yarn/bin/yarn index dfef8112f05..371d23d712d 100644 --- a/hadoop-yarn-project/hadoop-yarn/bin/yarn +++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn @@ -72,6 +72,7 @@ shift case "${COMMAND}" in application|applicationattempt|container) CLASS=org.apache.hadoop.yarn.client.cli.ApplicationCLI + hadoop_debug "Append YARN_CLIENT_OPTS onto YARN_OPTS" YARN_OPTS="${YARN_OPTS} ${YARN_CLIENT_OPTS}" set -- "${COMMAND}" "$@" ;; @@ -82,10 +83,12 @@ case "${COMMAND}" in ;; daemonlog) CLASS=org.apache.hadoop.log.LogLevel + hadoop_debug "Append YARN_CLIENT_OPTS onto YARN_OPTS" YARN_OPTS="${YARN_OPTS} ${YARN_CLIENT_OPTS}" ;; jar) CLASS=org.apache.hadoop.util.RunJar + hadoop_debug "Append YARN_CLIENT_OPTS onto YARN_OPTS" YARN_OPTS="${YARN_OPTS} ${YARN_CLIENT_OPTS}" ;; historyserver) @@ -97,15 +100,18 @@ case "${COMMAND}" in ;; logs) CLASS=org.apache.hadoop.yarn.logaggregation.LogDumper + hadoop_debug "Append YARN_CLIENT_OPTS onto YARN_OPTS" YARN_OPTS="${YARN_OPTS} ${YARN_CLIENT_OPTS}" ;; node) CLASS=org.apache.hadoop.yarn.client.cli.NodeCLI + hadoop_debug "Append YARN_CLIENT_OPTS onto YARN_OPTS" YARN_OPTS="${YARN_OPTS} ${YARN_CLIENT_OPTS}" ;; nodemanager) daemon="true" CLASS='org.apache.hadoop.yarn.server.nodemanager.NodeManager' + hadoop_debug "Append YARN_NODEMANAGER_OPTS onto YARN_OPTS" YARN_OPTS="${YARN_OPTS} ${YARN_NODEMANAGER_OPTS}" if [[ -n "${YARN_NODEMANAGER_HEAPSIZE}" ]]; then JAVA_HEAP_MAX="-Xmx${YARN_NODEMANAGER_HEAPSIZE}m" @@ -114,6 +120,7 @@ case "${COMMAND}" in proxyserver) daemon="true" CLASS='org.apache.hadoop.yarn.server.webproxy.WebAppProxyServer' + hadoop_debug "Append YARN_PROXYSERVER_OPTS onto YARN_OPTS" YARN_OPTS="${YARN_OPTS} ${YARN_PROXYSERVER_OPTS}" if [[ -n "${YARN_PROXYSERVER_HEAPSIZE}" ]]; then JAVA_HEAP_MAX="-Xmx${YARN_PROXYSERVER_HEAPSIZE}m" @@ -123,17 +130,20 @@ case "${COMMAND}" in daemon="true" CLASS='org.apache.hadoop.yarn.server.resourcemanager.ResourceManager' YARN_OPTS="${YARN_OPTS} ${YARN_RESOURCEMANAGER_OPTS}" + hadoop_debug "Append YARN_RESOURCEMANAGER_OPTS onto YARN_OPTS" if [[ -n "${YARN_RESOURCEMANAGER_HEAPSIZE}" ]]; then JAVA_HEAP_MAX="-Xmx${YARN_RESOURCEMANAGER_HEAPSIZE}m" fi ;; rmadmin) CLASS='org.apache.hadoop.yarn.client.cli.RMAdminCLI' + hadoop_debug "Append YARN_CLIENT_OPTS onto YARN_OPTS" YARN_OPTS="${YARN_OPTS} ${YARN_CLIENT_OPTS}" ;; timelineserver) daemon="true" CLASS='org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryServer' + hadoop_debug "Append YARN_TIMELINESERVER_OPTS onto YARN_OPTS" YARN_OPTS="${YARN_OPTS} ${YARN_TIMELINESERVER_OPTS}" if [[ -n "${YARN_TIMELINESERVER_HEAPSIZE}" ]]; then JAVA_HEAP_MAX="-Xmx${YARN_TIMELINESERVER_HEAPSIZE}m" @@ -141,6 +151,7 @@ case "${COMMAND}" in ;; version) CLASS=org.apache.hadoop.util.VersionInfo + hadoop_debug "Append YARN_CLIENT_OPTS onto YARN_OPTS" YARN_OPTS="${YARN_OPTS} ${YARN_CLIENT_OPTS}" ;; -*) @@ -153,6 +164,8 @@ esac # set HADOOP_OPTS to YARN_OPTS so that we can use # finalize, etc, without doing anything funky +hadoop_debug "Resetting HADOOP_OPTS=YARN_OPTS" +# shellcheck disable=SC2034 HADOOP_OPTS="${YARN_OPTS}" daemon_outfile="${HADOOP_LOG_DIR}/hadoop-${HADOOP_IDENT_STRING}-${COMMAND}-${HOSTNAME}.out" @@ -180,8 +193,6 @@ hadoop_add_param HADOOP_OPTS yarn.root.logger "-Dyarn.root.logger=${YARN_ROOT_LO hadoop_finalize -export CLASSPATH - if [[ -n "${daemon}" ]]; then if [[ -n "${secure_service}" ]]; then 
hadoop_secure_daemon_handler "${HADOOP_DAEMON_MODE}" "${COMMAND}" \ diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceOption.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceOption.java index 380f38d74a1..1ca90ccedf9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceOption.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceOption.java @@ -62,6 +62,8 @@ public abstract class ResourceOption { @Evolving protected abstract void setOverCommitTimeout(int overCommitTimeout); + @Private + @Evolving protected abstract void build(); @Override diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/UpdateNodeResourceResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/UpdateNodeResourceResponse.java index 5155101d244..8603ea31c9e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/UpdateNodeResourceResponse.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/UpdateNodeResourceResponse.java @@ -17,9 +17,10 @@ */ package org.apache.hadoop.yarn.server.api.protocolrecords; -import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Evolving; import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocol; +import org.apache.hadoop.yarn.util.Records; /** *

    The response sent by the ResourceManager to Admin client on @@ -30,8 +31,13 @@ import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocol; * @see ResourceManagerAdministrationProtocol#updateNodeResource( * UpdateNodeResourceRequest) */ -@Public +@Private @Evolving -public interface UpdateNodeResourceResponse { +public abstract class UpdateNodeResourceResponse { + public static UpdateNodeResourceResponse newInstance(){ + UpdateNodeResourceResponse response = + Records.newRecord(UpdateNodeResourceResponse.class); + return response; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto index 08c937f68d0..4637f0348b6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto @@ -75,64 +75,6 @@ message UpdateNodeResourceRequestProto { message UpdateNodeResourceResponseProto { } -//////////////////////////////////////////////////////////////////////// -////// RM recovery related records ///////////////////////////////////// -//////////////////////////////////////////////////////////////////////// -enum RMAppAttemptStateProto { - RMATTEMPT_NEW = 1; - RMATTEMPT_SUBMITTED = 2; - RMATTEMPT_SCHEDULED = 3; - RMATTEMPT_ALLOCATED = 4; - RMATTEMPT_LAUNCHED = 5; - RMATTEMPT_FAILED = 6; - RMATTEMPT_RUNNING = 7; - RMATTEMPT_FINISHING = 8; - RMATTEMPT_FINISHED = 9; - RMATTEMPT_KILLED = 10; - RMATTEMPT_ALLOCATED_SAVING = 11; - RMATTEMPT_LAUNCHED_UNMANAGED_SAVING = 12; - RMATTEMPT_RECOVERED = 13; - RMATTEMPT_FINAL_SAVING = 14; -} - -enum RMAppStateProto { - RMAPP_NEW = 1; - RMAPP_NEW_SAVING = 2; - RMAPP_SUBMITTED = 3; - RMAPP_ACCEPTED = 4; - RMAPP_RUNNING = 5; - RMAPP_FINAL_SAVING = 6; - RMAPP_FINISHING = 7; - RMAPP_FINISHED = 8; - RMAPP_FAILED = 9; - RMAPP_KILLED = 10; -} - -message ApplicationStateDataProto { - optional int64 submit_time = 1; - optional ApplicationSubmissionContextProto application_submission_context = 2; - optional string user = 3; - optional int64 start_time = 4; - optional RMAppStateProto application_state = 5; - optional string diagnostics = 6 [default = "N/A"]; - optional int64 finish_time = 7; -} - -message ApplicationAttemptStateDataProto { - optional ApplicationAttemptIdProto attemptId = 1; - optional ContainerProto master_container = 2; - optional bytes app_attempt_tokens = 3; - optional RMAppAttemptStateProto app_attempt_state = 4; - optional string final_tracking_url = 5; - optional string diagnostics = 6 [default = "N/A"]; - optional int64 start_time = 7; - optional FinalApplicationStatusProto final_application_status = 8; - optional int32 am_container_exit_status = 9 [default = -1000]; -} - -message EpochProto { - optional int64 epoch = 1; -} ////////////////////////////////////////////////////////////////// ///////////// RM Failover related records //////////////////////// diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java 
index 4a842458691..2451030af2c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java @@ -519,7 +519,7 @@ public class ApplicationMaster { publishApplicationAttemptEvent(timelineClient, appAttemptID.toString(), DSEvent.DS_APP_ATTEMPT_START); } catch (Exception e) { - LOG.error("App Attempt start event coud not be pulished for " + LOG.error("App Attempt start event could not be published for " + appAttemptID.toString(), e); } @@ -616,7 +616,7 @@ public class ApplicationMaster { publishApplicationAttemptEvent(timelineClient, appAttemptID.toString(), DSEvent.DS_APP_ATTEMPT_END); } catch (Exception e) { - LOG.error("App Attempt start event coud not be pulished for " + LOG.error("App Attempt start event could not be published for " + appAttemptID.toString(), e); } } @@ -726,7 +726,7 @@ public class ApplicationMaster { try { publishContainerEndEvent(timelineClient, containerStatus); } catch (Exception e) { - LOG.error("Container start event could not be pulished for " + LOG.error("Container start event could not be published for " + containerStatus.getContainerId().toString(), e); } } @@ -847,7 +847,7 @@ public class ApplicationMaster { ApplicationMaster.publishContainerStartEvent( applicationMaster.timelineClient, container); } catch (Exception e) { - LOG.error("Container start event coud not be pulished for " + LOG.error("Container start event could not be published for " + container.getId().toString(), e); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java index 05fd883be93..a86b52132eb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java @@ -197,7 +197,7 @@ public class Client { } result = client.run(); } catch (Throwable t) { - LOG.fatal("Error running CLient", t); + LOG.fatal("Error running Client", t); System.exit(1); } if (result) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceOptionPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceOptionPBImpl.java index 79f479ee99d..5a4a44e648a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceOptionPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceOptionPBImpl.java @@ -22,14 +22,15 @@ import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceOption; import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto; import 
org.apache.hadoop.yarn.proto.YarnProtos.ResourceOptionProto; +import org.apache.hadoop.yarn.proto.YarnProtos.ResourceOptionProtoOrBuilder; import com.google.common.base.Preconditions; public class ResourceOptionPBImpl extends ResourceOption { - ResourceOptionProto proto = null; + ResourceOptionProto proto = ResourceOptionProto.getDefaultInstance(); ResourceOptionProto.Builder builder = null; - private Resource resource = null; + boolean viaProto = false; public ResourceOptionPBImpl() { builder = ResourceOptionProto.newBuilder(); @@ -37,39 +38,46 @@ public class ResourceOptionPBImpl extends ResourceOption { public ResourceOptionPBImpl(ResourceOptionProto proto) { this.proto = proto; - this.resource = convertFromProtoFormat(proto.getResource()); + viaProto = true; } public ResourceOptionProto getProto() { + proto = viaProto ? proto : builder.build(); + viaProto = true; return proto; } @Override public Resource getResource() { - return this.resource; + ResourceOptionProtoOrBuilder p = viaProto ? proto : builder; + return convertFromProtoFormat(p.getResource()); } @Override protected void setResource(Resource resource) { - if (resource != null) { - Preconditions.checkNotNull(builder); - builder.setResource(convertToProtoFormat(resource)); - } - this.resource = resource; + maybeInitBuilder(); + builder.setResource(convertToProtoFormat(resource)); } @Override public int getOverCommitTimeout() { - Preconditions.checkNotNull(proto); - return proto.getOverCommitTimeout(); + ResourceOptionProtoOrBuilder p = viaProto ? proto : builder; + return p.getOverCommitTimeout(); } @Override protected void setOverCommitTimeout(int overCommitTimeout) { - Preconditions.checkNotNull(builder); + maybeInitBuilder(); builder.setOverCommitTimeout(overCommitTimeout); } + private void maybeInitBuilder() { + if (viaProto || builder == null) { + builder = ResourceOptionProto.newBuilder(proto); + } + viaProto = false; + } + private ResourceProto convertToProtoFormat( Resource resource) { return ((ResourcePBImpl)resource).getProto(); @@ -83,6 +91,7 @@ public class ResourceOptionPBImpl extends ResourceOption { @Override protected void build() { proto = builder.build(); + viaProto = true; builder = null; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/UpdateNodeResourceResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/UpdateNodeResourceResponsePBImpl.java index f314f861b65..3e2aca559ea 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/UpdateNodeResourceResponsePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/UpdateNodeResourceResponsePBImpl.java @@ -20,7 +20,7 @@ package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.UpdateNodeResourceResponseProto; import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceResponse; -public class UpdateNodeResourceResponsePBImpl implements UpdateNodeResourceResponse { +public class UpdateNodeResourceResponsePBImpl extends UpdateNodeResourceResponse { UpdateNodeResourceResponseProto proto = UpdateNodeResourceResponseProto.getDefaultInstance(); UpdateNodeResourceResponseProto.Builder builder = null; diff --git 
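The ResourceOptionPBImpl rewrite above adopts the usual copy-on-write protobuf wrapper idiom: reads go to whichever of proto/builder is current, writes lazily fork a builder, and getProto() freezes pending writes. The self-contained analogue below mimics that idiom with a plain immutable stand-in instead of a generated protobuf message so it runs without protoc; FakeProto and FakeProtoWrapper are invented names for illustration only.

    public class ViaProtoIdiomDemo {
      // Stand-in for a generated, immutable protobuf message with a Builder.
      static final class FakeProto {
        final int timeout;
        FakeProto(int timeout) { this.timeout = timeout; }
        static final class Builder {
          int timeout;
          Builder(FakeProto base) { this.timeout = base.timeout; }
          Builder setTimeout(int t) { this.timeout = t; return this; }
          FakeProto build() { return new FakeProto(timeout); }
        }
        static FakeProto getDefaultInstance() { return new FakeProto(0); }
      }

      // Wrapper following the PBImpl idiom used above.
      static final class FakeProtoWrapper {
        private FakeProto proto = FakeProto.getDefaultInstance();
        private FakeProto.Builder builder = null;
        private boolean viaProto = false;

        FakeProtoWrapper(FakeProto proto) { this.proto = proto; viaProto = true; }

        int getTimeout() { return viaProto ? proto.timeout : builder.timeout; }

        void setTimeout(int t) {
          maybeInitBuilder();            // fork a mutable builder on first write
          builder.setTimeout(t);
        }

        FakeProto getProto() {
          proto = viaProto ? proto : builder.build();  // freeze pending writes
          viaProto = true;
          return proto;
        }

        private void maybeInitBuilder() {
          if (viaProto || builder == null) {
            builder = new FakeProto.Builder(proto);
          }
          viaProto = false;
        }
      }

      public static void main(String[] args) {
        FakeProtoWrapper w = new FakeProtoWrapper(new FakeProto(1000));
        w.setTimeout(2000);
        System.out.println(w.getProto().timeout);  // prints 2000
      }
    }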
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java index c61b80e1993..6ec0d4221f0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java @@ -197,6 +197,7 @@ public class ApplicationHistoryServer extends CompositeService { // the customized filter will be loaded by the timeline server to do Kerberos // + DT authentication. String initializers = conf.get("hadoop.http.filter.initializers"); + boolean modifiedInitialiers = false; initializers = initializers == null || initializers.length() == 0 ? "" : initializers; @@ -206,6 +207,7 @@ public class ApplicationHistoryServer extends CompositeService { initializers = TimelineAuthenticationFilterInitializer.class.getName() + "," + initializers; + modifiedInitialiers = true; } String[] parts = initializers.split(","); @@ -214,13 +216,14 @@ public class ApplicationHistoryServer extends CompositeService { filterInitializer = filterInitializer.trim(); if (filterInitializer.equals(AuthenticationFilterInitializer.class .getName())) { + modifiedInitialiers = true; continue; } target.add(filterInitializer); } String actualInitializers = org.apache.commons.lang.StringUtils.join(target, ","); - if (!actualInitializers.equals(initializers)) { + if (modifiedInitialiers) { conf.set("hadoop.http.filter.initializers", actualInitializers); } String bindAddress = WebAppUtils.getWebAppBindURL(conf, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryServer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryServer.java index bcd8e454c5e..807d2df3c8f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryServer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryServer.java @@ -23,6 +23,7 @@ import static org.junit.Assert.assertNotNull; import static org.junit.Assert.fail; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.http.lib.StaticUserWebFilter; import org.apache.hadoop.security.AuthenticationFilterInitializer; import org.apache.hadoop.service.Service.STATE; import org.apache.hadoop.util.ExitUtil; @@ -33,6 +34,9 @@ import org.junit.After; import org.junit.Assert; import org.junit.Test; +import java.util.HashMap; +import java.util.Map; + public class TestApplicationHistoryServer { ApplicationHistoryServer historyServer = null; @@ -75,23 +79,32 @@ public class 
TestApplicationHistoryServer { @Test(timeout = 50000) public void testFilteOverrides() throws Exception { - String[] filterInitializers = - { - AuthenticationFilterInitializer.class.getName(), - TimelineAuthenticationFilterInitializer.class.getName(), - AuthenticationFilterInitializer.class.getName() + "," - + TimelineAuthenticationFilterInitializer.class.getName(), - AuthenticationFilterInitializer.class.getName() + ", " - + TimelineAuthenticationFilterInitializer.class.getName() }; - for (String filterInitializer : filterInitializers) { + HashMap driver = new HashMap(); + driver.put("", TimelineAuthenticationFilterInitializer.class.getName()); + driver.put(StaticUserWebFilter.class.getName(), + TimelineAuthenticationFilterInitializer.class.getName() + "," + + StaticUserWebFilter.class.getName()); + driver.put(AuthenticationFilterInitializer.class.getName(), + TimelineAuthenticationFilterInitializer.class.getName()); + driver.put(TimelineAuthenticationFilterInitializer.class.getName(), + TimelineAuthenticationFilterInitializer.class.getName()); + driver.put(AuthenticationFilterInitializer.class.getName() + "," + + TimelineAuthenticationFilterInitializer.class.getName(), + TimelineAuthenticationFilterInitializer.class.getName()); + driver.put(AuthenticationFilterInitializer.class.getName() + ", " + + TimelineAuthenticationFilterInitializer.class.getName(), + TimelineAuthenticationFilterInitializer.class.getName()); + + for (Map.Entry entry : driver.entrySet()) { + String filterInitializer = entry.getKey(); + String expectedValue = entry.getValue(); historyServer = new ApplicationHistoryServer(); Configuration config = new YarnConfiguration(); config.set("hadoop.http.filter.initializers", filterInitializer); historyServer.init(config); historyServer.start(); Configuration tmp = historyServer.getConfig(); - assertEquals(TimelineAuthenticationFilterInitializer.class.getName(), - tmp.get("hadoop.http.filter.initializers")); + assertEquals(expectedValue, tmp.get("hadoop.http.filter.initializers")); historyServer.stop(); AHSWebApp.resetInstance(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java index 64a0b37cc31..a092b59650b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java @@ -801,7 +801,7 @@ public class ResourceLocalizationService extends CompositeService try { Path local = completed.get(); if (null == assoc) { - LOG.error("Localized unkonwn resource to " + completed); + LOG.error("Localized unknown resource to " + completed); // TODO delete return; } @@ -810,7 +810,7 @@ public class ResourceLocalizationService extends CompositeService .getDU(new File(local.toUri())))); assoc.getResource().unlock(); } catch (ExecutionException e) { - LOG.info("Failed to download rsrc " + assoc.getResource(), + LOG.info("Failed to download resource " + assoc.getResource(), e.getCause()); 
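The driver map in testFilteOverrides above pairs an initial value of hadoop.http.filter.initializers with the value the timeline server is expected to leave behind. A stand-alone approximation of that rewrite is sketched below; it is a hypothetical helper, not the actual ApplicationHistoryServer code, and the two filter class names are taken as parameters rather than hard-coded.

import java.util.ArrayList;
import java.util.List;

public class FilterInitializerRewriteSketch {
  /**
   * Ensure timelineInit leads the list, drop authInit, and report whether
   * anything changed; returns null when the configured value can stay as-is.
   */
  static String rewrite(String initializers, String timelineInit, String authInit) {
    boolean modified = false;
    initializers = (initializers == null) ? "" : initializers;
    if (!initializers.contains(timelineInit)) {
      // The timeline filter goes first so it handles Kerberos + DT authentication.
      initializers = timelineInit + "," + initializers;
      modified = true;
    }
    List<String> kept = new ArrayList<String>();
    for (String part : initializers.split(",")) {
      part = part.trim();
      if (part.equals(authInit)) {
        // The plain AuthenticationFilterInitializer is superseded and dropped.
        modified = true;
        continue;
      }
      if (!part.isEmpty()) {
        kept.add(part);
      }
    }
    return modified ? String.join(",", kept) : null;
  }
}

Feeding each key of the driver map through such a helper should reproduce the corresponding expected value, which is what the rewritten assertion checks.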
LocalResourceRequest req = assoc.getResource().getRequest(); publicRsrc.handle(new ResourceFailedLocalizationEvent(req, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java index bd531865815..acda2a9970c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java @@ -159,7 +159,7 @@ public class TestNodeManagerResync { // This test tests new container requests are blocked when NM starts from // scratch until it register with RM AND while NM is resyncing with RM @SuppressWarnings("unchecked") - @Test + @Test(timeout=60000) public void testBlockNewContainerRequestsOnStartAndResync() throws IOException, InterruptedException, YarnException { NodeManager nm = new TestNodeManager2(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java index c47f49e207e..ff0a249bce9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java @@ -72,6 +72,7 @@ import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshUserToGroupsMapp import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceResponse; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; +import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeResourceUpdateEvent; import org.apache.hadoop.yarn.server.resourcemanager.security.authorize.RMPolicyProvider; import com.google.common.annotations.VisibleForTesting; @@ -513,9 +514,20 @@ public class AdminService extends CompositeService implements return UserGroupInformation.createRemoteUser(user).getGroupNames(); } + @SuppressWarnings("unchecked") @Override public UpdateNodeResourceResponse updateNodeResource( UpdateNodeResourceRequest request) throws YarnException, IOException { + String argName = "updateNodeResource"; + UserGroupInformation user = checkAcls(argName); + + if (!isRMActive()) { + RMAuditLogger.logFailure(user.getShortUserName(), argName, + adminAcl.toString(), "AdminService", + "ResourceManager is not active. Can not update node resource."); + throwStandbyException(); + } + Map nodeResourceMap = request.getNodeResourceMap(); Set nodeIds = nodeResourceMap.keySet(); // verify nodes are all valid first. @@ -536,21 +548,31 @@ public class AdminService extends CompositeService implements // Notice: it is still possible to have invalid NodeIDs as nodes decommission // may happen just at the same time. 
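For orientation, the sketch below shows roughly how an administrative client could drive this entry point. The ResourceManagerAdministrationProtocol handle and the newInstance factory on UpdateNodeResourceRequest are assumed here rather than shown in this patch.

import java.util.Collections;
import java.util.Map;

import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceOption;
import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocol;
import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceRequest;

public class UpdateNodeResourceSketch {
  static void resize(ResourceManagerAdministrationProtocol admin,
      String host, int port, int memoryMb, int vcores) throws Exception {
    // New total capability for the node; -1 leaves the over-commit timeout unset,
    // the same value this patch uses when a node reconnects.
    ResourceOption option = ResourceOption.newInstance(
        Resource.newInstance(memoryMb, vcores), -1);
    Map<NodeId, ResourceOption> updates =
        Collections.singletonMap(NodeId.newInstance(host, port), option);
    // AdminService checks ACLs and RM HA state, then dispatches an
    // RMNodeResourceUpdateEvent for each recognized node.
    admin.updateNodeResource(UpdateNodeResourceRequest.newInstance(updates));
  }
}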
In that case, only log and skip absent // nodes without throwing any exceptions. + boolean allSuccess = true; for (Map.Entry entry : nodeResourceMap.entrySet()) { ResourceOption newResourceOption = entry.getValue(); NodeId nodeId = entry.getKey(); RMNode node = this.rmContext.getRMNodes().get(nodeId); + if (node == null) { LOG.warn("Resource update failed on an unrecognized node: " + nodeId); + allSuccess = false; } else { - node.setResourceOption(newResourceOption); - LOG.info("Update resource successfully on node(" + node.getNodeID() - +") with resource(" + newResourceOption.toString() + ")"); + // update resource on RMNode + this.rmContext.getDispatcher().getEventHandler() + .handle(new RMNodeResourceUpdateEvent(nodeId, newResourceOption)); + LOG.info("Update resource on node(" + node.getNodeID() + + ") with resource(" + newResourceOption.toString() + ")"); + } } - UpdateNodeResourceResponse response = recordFactory.newRecordInstance( - UpdateNodeResourceResponse.class); - return response; + if (allSuccess) { + RMAuditLogger.logSuccess(user.getShortUserName(), argName, + "AdminService"); + } + UpdateNodeResourceResponse response = + UpdateNodeResourceResponse.newInstance(); + return response; } private synchronized Configuration getConfiguration(Configuration conf, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java index b532dd56309..4798120c0da 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java @@ -36,7 +36,6 @@ import org.apache.hadoop.yarn.api.records.ContainerState; import org.apache.hadoop.yarn.api.records.ContainerStatus; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.Resource; -import org.apache.hadoop.yarn.api.records.ResourceOption; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; @@ -299,8 +298,7 @@ public class ResourceTrackerService extends AbstractService implements .getCurrentKey()); RMNode rmNode = new RMNodeImpl(nodeId, rmContext, host, cmPort, httpPort, - resolve(host), ResourceOption.newInstance(capability, RMNode.OVER_COMMIT_TIMEOUT_MILLIS_DEFAULT), - nodeManagerVersion); + resolve(host), capability, nodeManagerVersion); RMNode oldNode = this.rmContext.getRMNodes().putIfAbsent(nodeId, rmNode); if (oldNode == null) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java index d57669cce6e..0a3b269c97a 100644 ---
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java @@ -46,9 +46,9 @@ import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.VersionProto; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerRecoveryProtos.AMRMTokenSecretManagerStateProto; -import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.EpochProto; -import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ApplicationAttemptStateDataProto; -import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ApplicationStateDataProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerRecoveryProtos.EpochProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerRecoveryProtos.ApplicationAttemptStateDataProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerRecoveryProtos.ApplicationStateDataProto; import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier; import org.apache.hadoop.yarn.server.records.Version; import org.apache.hadoop.yarn.server.records.impl.pb.VersionPBImpl; @@ -300,7 +300,7 @@ public class FileSystemRMStateStore extends RMStateStore { assert appState != null; appState.attempts.put(attemptState.getAttemptId(), attemptState); } - LOG.info("Done Loading applications from FS state store"); + LOG.info("Done loading applications from FS state store"); } catch (Exception e) { LOG.error("Failed to load state.", e); throw e; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java index 1544dcc3458..1b1ec7629b1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java @@ -46,9 +46,9 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.VersionProto; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerRecoveryProtos.AMRMTokenSecretManagerStateProto; -import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ApplicationAttemptStateDataProto; -import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ApplicationStateDataProto; -import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.EpochProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerRecoveryProtos.ApplicationAttemptStateDataProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerRecoveryProtos.ApplicationStateDataProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerRecoveryProtos.EpochProto; import 
org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier; import org.apache.hadoop.yarn.server.records.impl.pb.VersionPBImpl; import org.apache.hadoop.yarn.server.records.Version; @@ -608,7 +608,7 @@ public class ZKRMStateStore extends RMStateStore { appState.attempts.put(attemptState.getAttemptId(), attemptState); } } - LOG.debug("Done Loading applications from ZK state store"); + LOG.debug("Done loading applications from ZK state store"); } @Override diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/ApplicationAttemptStateData.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/ApplicationAttemptStateData.java index 90fb3ec0d2c..5cb9787fac0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/ApplicationAttemptStateData.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/ApplicationAttemptStateData.java @@ -28,7 +28,7 @@ import org.apache.hadoop.security.Credentials; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; -import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ApplicationAttemptStateDataProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerRecoveryProtos.ApplicationAttemptStateDataProto; import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.ApplicationAttemptState; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState; import org.apache.hadoop.yarn.util.Records; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/ApplicationStateData.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/ApplicationStateData.java index 55b726ffd0d..eff0445155d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/ApplicationStateData.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/ApplicationStateData.java @@ -24,7 +24,7 @@ import org.apache.hadoop.classification.InterfaceStability.Stable; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; -import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ApplicationStateDataProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerRecoveryProtos.ApplicationStateDataProto; import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.ApplicationState; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState; import org.apache.hadoop.yarn.util.Records; diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/Epoch.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/Epoch.java index 066878918d0..80ec48ce634 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/Epoch.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/Epoch.java @@ -20,7 +20,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.recovery.records; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; -import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.EpochProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerRecoveryProtos.EpochProto; import org.apache.hadoop.yarn.util.Records; /** diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/impl/pb/ApplicationAttemptStateDataPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/impl/pb/ApplicationAttemptStateDataPBImpl.java index a90bda49030..5c62d634c32 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/impl/pb/ApplicationAttemptStateDataPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/impl/pb/ApplicationAttemptStateDataPBImpl.java @@ -27,9 +27,9 @@ import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationAttemptIdPBImpl; import org.apache.hadoop.yarn.api.records.impl.pb.ContainerPBImpl; import org.apache.hadoop.yarn.api.records.impl.pb.ProtoUtils; import org.apache.hadoop.yarn.proto.YarnProtos.FinalApplicationStatusProto; -import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ApplicationAttemptStateDataProto; -import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ApplicationAttemptStateDataProtoOrBuilder; -import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RMAppAttemptStateProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerRecoveryProtos.ApplicationAttemptStateDataProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerRecoveryProtos.ApplicationAttemptStateDataProtoOrBuilder; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerRecoveryProtos.RMAppAttemptStateProto; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.ApplicationAttemptStateData; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/impl/pb/ApplicationStateDataPBImpl.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/impl/pb/ApplicationStateDataPBImpl.java index 8aaf1a4a7ca..d8cbd2384e3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/impl/pb/ApplicationStateDataPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/impl/pb/ApplicationStateDataPBImpl.java @@ -20,9 +20,9 @@ package org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb; import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationSubmissionContextPBImpl; -import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ApplicationStateDataProto; -import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ApplicationStateDataProtoOrBuilder; -import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RMAppStateProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerRecoveryProtos.ApplicationStateDataProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerRecoveryProtos.ApplicationStateDataProtoOrBuilder; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerRecoveryProtos.RMAppStateProto; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.ApplicationStateData; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/impl/pb/EpochPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/impl/pb/EpochPBImpl.java index 4430672d079..a6ddeadb49c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/impl/pb/EpochPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/impl/pb/EpochPBImpl.java @@ -18,8 +18,8 @@ package org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb; -import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.EpochProto; -import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.EpochProtoOrBuilder; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerRecoveryProtos.EpochProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerRecoveryProtos.EpochProtoOrBuilder; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.Epoch; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java index 24793e86f17..a423ea50675 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java @@ -101,18 +101,6 @@ public interface RMNode { */ public Resource getTotalCapability(); - /** - * Set resource option with total available resource and overCommitTimoutMillis - * @param resourceOption - */ - public void setResourceOption(ResourceOption resourceOption); - - /** - * resource option with total available resource and overCommitTimoutMillis - * @return ResourceOption - */ - public ResourceOption getResourceOption(); - /** * The rack name for this node manager. * @return the rack name. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeEventType.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeEventType.java index ef644be7000..c0096b9b90d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeEventType.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeEventType.java @@ -24,6 +24,9 @@ public enum RMNodeEventType { // Source: AdminService DECOMMISSION, + + // Source: AdminService, ResourceTrackerService + RESOURCE_UPDATE, // ResourceTrackerService STATUS_UPDATE, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java index 9ead898db40..3ce641662cc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java @@ -58,6 +58,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppRunningOnNodeEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeRemovedSchedulerEvent; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeResourceUpdateSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent; import org.apache.hadoop.yarn.server.utils.BuilderUtils.ContainerIdComparator; import org.apache.hadoop.yarn.state.InvalidStateTransitonException; @@ -96,7 +97,7 @@ public class RMNodeImpl implements RMNode, EventHandler { private int httpPort; private final String nodeAddress; // The containerManager address private String httpAddress; - private volatile ResourceOption resourceOption; + private volatile Resource totalCapability; private final Node 
node; private String healthReport; @@ -129,6 +130,9 @@ public class RMNodeImpl implements RMNode, EventHandler { //Transitions from NEW state .addTransition(NodeState.NEW, NodeState.RUNNING, RMNodeEventType.STARTED, new AddNodeTransition()) + .addTransition(NodeState.NEW, NodeState.NEW, + RMNodeEventType.RESOURCE_UPDATE, + new UpdateNodeResourceWhenUnusableTransition()) //Transitions from RUNNING state .addTransition(NodeState.RUNNING, @@ -149,6 +153,23 @@ public class RMNodeImpl implements RMNode, EventHandler { RMNodeEventType.CLEANUP_CONTAINER, new CleanUpContainerTransition()) .addTransition(NodeState.RUNNING, NodeState.RUNNING, RMNodeEventType.RECONNECTED, new ReconnectNodeTransition()) + .addTransition(NodeState.RUNNING, NodeState.RUNNING, + RMNodeEventType.RESOURCE_UPDATE, new UpdateNodeResourceWhenRunningTransition()) + + //Transitions from REBOOTED state + .addTransition(NodeState.REBOOTED, NodeState.REBOOTED, + RMNodeEventType.RESOURCE_UPDATE, + new UpdateNodeResourceWhenUnusableTransition()) + + //Transitions from DECOMMISSIONED state + .addTransition(NodeState.DECOMMISSIONED, NodeState.DECOMMISSIONED, + RMNodeEventType.RESOURCE_UPDATE, + new UpdateNodeResourceWhenUnusableTransition()) + + //Transitions from LOST state + .addTransition(NodeState.LOST, NodeState.LOST, + RMNodeEventType.RESOURCE_UPDATE, + new UpdateNodeResourceWhenUnusableTransition()) //Transitions from UNHEALTHY state .addTransition(NodeState.UNHEALTHY, @@ -169,6 +190,8 @@ public class RMNodeImpl implements RMNode, EventHandler { RMNodeEventType.CLEANUP_APP, new CleanUpAppTransition()) .addTransition(NodeState.UNHEALTHY, NodeState.UNHEALTHY, RMNodeEventType.CLEANUP_CONTAINER, new CleanUpContainerTransition()) + .addTransition(NodeState.UNHEALTHY, NodeState.UNHEALTHY, + RMNodeEventType.RESOURCE_UPDATE, new UpdateNodeResourceWhenUnusableTransition()) // create the topology tables .installTopology(); @@ -177,13 +200,13 @@ public class RMNodeImpl implements RMNode, EventHandler { RMNodeEvent> stateMachine; public RMNodeImpl(NodeId nodeId, RMContext context, String hostName, - int cmPort, int httpPort, Node node, ResourceOption resourceOption, String nodeManagerVersion) { + int cmPort, int httpPort, Node node, Resource capability, String nodeManagerVersion) { this.nodeId = nodeId; this.context = context; this.hostName = hostName; this.commandPort = cmPort; this.httpPort = httpPort; - this.resourceOption = resourceOption; + this.totalCapability = capability; this.nodeAddress = hostName + ":" + cmPort; this.httpAddress = hostName + ":" + httpPort; this.node = node; @@ -239,17 +262,7 @@ public class RMNodeImpl implements RMNode, EventHandler { @Override public Resource getTotalCapability() { - return this.resourceOption.getResource(); - } - - @Override - public void setResourceOption(ResourceOption resourceOption) { - this.resourceOption = resourceOption; - } - - @Override - public ResourceOption getResourceOption(){ - return this.resourceOption; + return this.totalCapability; } @Override @@ -473,6 +486,13 @@ public class RMNodeImpl implements RMNode, EventHandler { context.getDispatcher().getEventHandler() .handle(new RMAppRunningOnNodeEvent(appId, nodeId)); } + + private static void updateNodeResourceFromEvent(RMNodeImpl rmNode, + RMNodeResourceUpdateEvent event){ + ResourceOption resourceOption = event.getResourceOption(); + // Set resource on RMNode + rmNode.totalCapability = resourceOption.getResource(); + } public static class AddNodeTransition implements SingleArcTransition { @@ -526,8 +546,8 @@ public class 
RMNodeImpl implements RMNode, EventHandler { rmNode.nodeManagerVersion = newNode.getNodeManagerVersion(); rmNode.httpPort = newNode.getHttpPort(); rmNode.httpAddress = newNode.getHttpAddress(); - rmNode.resourceOption = newNode.getResourceOption(); - + rmNode.totalCapability = newNode.getTotalCapability(); + // Reset heartbeat ID since node just restarted. rmNode.getLastNodeHeartBeatResponse().setResponseId(0); @@ -540,9 +560,43 @@ public class RMNodeImpl implements RMNode, EventHandler { rmNode.context.getDispatcher().getEventHandler().handle( new NodesListManagerEvent( NodesListManagerEventType.NODE_USABLE, rmNode)); + if (rmNode.getState().equals(NodeState.RUNNING)) { + // Update the scheduler node's capacity for the reconnected node. + rmNode.context.getDispatcher().getEventHandler().handle( + new NodeResourceUpdateSchedulerEvent(rmNode, + ResourceOption.newInstance(rmNode.totalCapability, -1))); + } + } } + + public static class UpdateNodeResourceWhenRunningTransition + implements SingleArcTransition { + @Override + public void transition(RMNodeImpl rmNode, RMNodeEvent event) { + RMNodeResourceUpdateEvent updateEvent = (RMNodeResourceUpdateEvent)event; + updateNodeResourceFromEvent(rmNode, updateEvent); + // Notify the scheduler of the new resourceOption + rmNode.context.getDispatcher().getEventHandler().handle( + new NodeResourceUpdateSchedulerEvent(rmNode, updateEvent.getResourceOption())); + } + } + + public static class UpdateNodeResourceWhenUnusableTransition + implements SingleArcTransition { + + @Override + public void transition(RMNodeImpl rmNode, RMNodeEvent event) { + // The node is not usable; only log a warning message + LOG.warn("Trying to update resource on a " + rmNode.getState().toString() + + " node: " + rmNode.toString()); + updateNodeResourceFromEvent(rmNode, (RMNodeResourceUpdateEvent)event); + // No need to notify the scheduler as the schedulerNode is not functioning now + // and can sync later from the RMNode. + } + } + public static class CleanUpAppTransition implements SingleArcTransition { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeResourceUpdateEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeResourceUpdateEvent.java new file mode 100644 index 00000000000..bf1f148b125 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeResourceUpdateEvent.java @@ -0,0 +1,37 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.apache.hadoop.yarn.server.resourcemanager.rmnode; + +import org.apache.hadoop.yarn.api.records.NodeId; +import org.apache.hadoop.yarn.api.records.ResourceOption; + +public class RMNodeResourceUpdateEvent extends RMNodeEvent { + + private final ResourceOption resourceOption; + + public RMNodeResourceUpdateEvent(NodeId nodeId, ResourceOption resourceOption) { + super(nodeId, RMNodeEventType.RESOURCE_UPDATE); + this.resourceOption = resourceOption; + } + + public ResourceOption getResourceOption() { + return resourceOption; + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java index ab56bb97212..ee5dcbe7ece 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java @@ -41,6 +41,7 @@ import org.apache.hadoop.yarn.api.records.ContainerState; import org.apache.hadoop.yarn.api.records.ContainerStatus; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.ResourceOption; import org.apache.hadoop.yarn.api.records.ResourceRequest; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnException; @@ -473,4 +474,32 @@ public abstract class AbstractYarnScheduler .handle(new RMAppEvent(app.getApplicationId(), RMAppEventType.KILL)); } } + + /** + * Process resource update on a node. 
+ */ + public synchronized void updateNodeResource(RMNode nm, + ResourceOption resourceOption) { + + SchedulerNode node = getSchedulerNode(nm.getNodeID()); + Resource newResource = resourceOption.getResource(); + Resource oldResource = node.getTotalResource(); + if(!oldResource.equals(newResource)) { + // Log resource change + LOG.info("Update resource on node: " + node.getNodeName() + + " from: " + oldResource + ", to: " + + newResource); + + // update resource to node + node.setTotalResource(newResource); + + // update resource to clusterResource + Resources.subtractFrom(clusterResource, oldResource); + Resources.addTo(clusterResource, newResource); + } else { + // Log resource change + LOG.warn("Update resource on node: " + node.getNodeName() + + " with the same resource: " + newResource); + } + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java index 7074059ecf4..f4d8731a012 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java @@ -77,6 +77,16 @@ public abstract class SchedulerNode { return this.rmNode; } + /** + * Set total resources on the node. + * @param resource total resources on the node. + */ + public synchronized void setTotalResource(Resource resource){ + this.totalResourceCapability = resource; + this.availableResource = Resources.subtract(totalResourceCapability, + this.usedResource); + } + /** * Get the ID of the node which contains both its hostname and port. * @@ -158,7 +168,7 @@ public abstract class SchedulerNode { * * @return total resources on the node. */ - public Resource getTotalResource() { + public synchronized Resource getTotalResource() { return this.totalResourceCapability; } @@ -259,19 +269,6 @@ public abstract class SchedulerNode { this.reservedContainer = reservedContainer; } - /** - * Apply delta resource on node's available resource. - * - * @param deltaResource - * the delta of resource need to apply to node - */ - public synchronized void - applyDeltaOnAvailableResource(Resource deltaResource) { - // we can only adjust available resource if total resource is changed. 
- Resources.addTo(this.availableResource, deltaResource); - } - - public synchronized void recoverContainer(RMContainer rmContainer) { if (rmContainer.getState().equals(RMContainerState.COMPLETED)) { return; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java index d3df93fcc6e..ac37c2f0bc9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java @@ -19,7 +19,6 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler; import java.util.List; -import org.apache.commons.logging.Log; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.api.records.ContainerExitStatus; @@ -147,42 +146,6 @@ public class SchedulerUtils { ask.setCapability(normalized); } - /** - * Update resource in SchedulerNode if any resource change in RMNode. - * @param node SchedulerNode with old resource view - * @param rmNode RMNode with new resource view - * @param clusterResource the cluster's resource that need to update - * @param log Scheduler's log for resource change - * @return true if the resources have changed - */ - public static boolean updateResourceIfChanged(SchedulerNode node, - RMNode rmNode, Resource clusterResource, Log log) { - boolean result = false; - Resource oldAvailableResource = node.getAvailableResource(); - Resource newAvailableResource = Resources.subtract( - rmNode.getTotalCapability(), node.getUsedResource()); - - if (!newAvailableResource.equals(oldAvailableResource)) { - result = true; - Resource deltaResource = Resources.subtract(newAvailableResource, - oldAvailableResource); - // Reflect resource change to scheduler node. - node.applyDeltaOnAvailableResource(deltaResource); - // Reflect resource change to clusterResource. - Resources.addTo(clusterResource, deltaResource); - // TODO process resource over-commitment case (allocated containers - // > total capacity) in different option by getting value of - // overCommitTimeoutMillis. - - // Log resource change - log.info("Resource change on node: " + rmNode.getNodeAddress() - + " with delta: CPU: " + deltaResource.getMemory() + "core, Memory: " - + deltaResource.getMemory() +"MB"); - } - - return result; - } - /** * Utility method to normalize a list of resource requests, by insuring that * the memory for each request is a multiple of minMemory and is not zero. 
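Taken together, AbstractYarnScheduler.updateNodeResource() and SchedulerNode.setTotalResource() above replace the delta-based helpers removed here: the cluster total swaps the node's old capability for the new one, and the node recomputes its available headroom from the new total. A worked example with illustrative numbers, using the same Resources helpers the patch itself calls:

import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.util.resource.Resources;

public class NodeResizeArithmetic {
  public static void main(String[] args) {
    Resource cluster  = Resource.newInstance(100 * 1024, 100); // 100 GB, 100 vcores
    Resource oldTotal = Resource.newInstance(8 * 1024, 8);     // node total before the update
    Resource newTotal = Resource.newInstance(16 * 1024, 16);   // node total after the update
    Resource used     = Resource.newInstance(6 * 1024, 4);     // currently allocated on the node

    // Scheduler-wide view: subtract the old total, add the new one.
    Resources.subtractFrom(cluster, oldTotal);                 // 92 GB, 92 vcores
    Resources.addTo(cluster, newTotal);                        // 108 GB, 108 vcores

    // Node-local view: available = new total - used.
    Resource available = Resources.subtract(newTotal, used);   // 10 GB, 12 vcores
    System.out.println("cluster=" + cluster + ", available=" + available);
  }
}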
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java index c8a73bfb530..a8ef94224b9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java @@ -50,6 +50,8 @@ import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.QueueACL; import org.apache.hadoop.yarn.api.records.QueueInfo; import org.apache.hadoop.yarn.api.records.QueueUserACLInfo; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.ResourceOption; import org.apache.hadoop.yarn.api.records.ResourceRequest; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnException; @@ -82,6 +84,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppRemovedS import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.ContainerExpiredSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeRemovedSchedulerEvent; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeResourceUpdateSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager; @@ -866,12 +869,6 @@ public class CapacityScheduler extends FiCaSchedulerNode node = getNode(nm.getNodeID()); - // Update resource if any change - if (SchedulerUtils.updateResourceIfChanged(node, nm, clusterResource, - LOG)) { - root.updateClusterResource(clusterResource); - } - List containerInfoList = nm.pullContainerUpdates(); List newlyLaunchedContainers = new ArrayList(); List completedContainers = new ArrayList(); @@ -899,6 +896,15 @@ public class CapacityScheduler extends + " availableResource: " + node.getAvailableResource()); } } + + /** + * Process resource update on a node. 
+ */ + private synchronized void updateNodeAndQueueResource(RMNode nm, + ResourceOption resourceOption) { + updateNodeResource(nm, resourceOption); + root.updateClusterResource(clusterResource); + } private synchronized void allocateContainersToNode(FiCaSchedulerNode node) { @@ -969,6 +975,14 @@ public class CapacityScheduler extends removeNode(nodeRemovedEvent.getRemovedRMNode()); } break; + case NODE_RESOURCE_UPDATE: + { + NodeResourceUpdateSchedulerEvent nodeResourceUpdatedEvent = + (NodeResourceUpdateSchedulerEvent)event; + updateNodeAndQueueResource(nodeResourceUpdatedEvent.getRMNode(), + nodeResourceUpdatedEvent.getResourceOption()); + } + break; case NODE_UPDATE: { NodeUpdateSchedulerEvent nodeUpdatedEvent = (NodeUpdateSchedulerEvent)event; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/NodeResourceUpdateSchedulerEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/NodeResourceUpdateSchedulerEvent.java new file mode 100644 index 00000000000..df32b283f6a --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/NodeResourceUpdateSchedulerEvent.java @@ -0,0 +1,43 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.yarn.server.resourcemanager.scheduler.event; + +import org.apache.hadoop.yarn.api.records.ResourceOption; +import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; + +public class NodeResourceUpdateSchedulerEvent extends SchedulerEvent { + + private final RMNode rmNode; + private final ResourceOption resourceOption; + + public NodeResourceUpdateSchedulerEvent(RMNode rmNode, + ResourceOption resourceOption) { + super(SchedulerEventType.NODE_RESOURCE_UPDATE); + this.rmNode = rmNode; + this.resourceOption = resourceOption; + } + + public RMNode getRMNode() { + return rmNode; + } + + public ResourceOption getResourceOption() { + return resourceOption; + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/SchedulerEventType.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/SchedulerEventType.java index 243c72ba676..062f831c4ca 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/SchedulerEventType.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/SchedulerEventType.java @@ -24,6 +24,7 @@ public enum SchedulerEventType { NODE_ADDED, NODE_REMOVED, NODE_UPDATE, + NODE_RESOURCE_UPDATE, // Source: RMApp APP_ADDED, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java index d4ba88faf14..de5a999c2dd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java @@ -65,13 +65,16 @@ public class AllocationConfiguration { // preempt other jobs' tasks. private final Map minSharePreemptionTimeouts; - // Default min share preemption timeout for queues where it is not set - // explicitly. - private final long defaultMinSharePreemptionTimeout; + // Fair share preemption timeout for each queue in seconds. If a job in the + // queue waits this long without receiving its fair share threshold, it is + // allowed to preempt other jobs' tasks. + private final Map fairSharePreemptionTimeouts; - // Preemption timeout for jobs below fair share in seconds. If a job remains - // below half its fair share for this long, it is allowed to preempt tasks. - private final long fairSharePreemptionTimeout; + // The fair share preemption threshold for each queue. If a queue waits + // fairSharePreemptionTimeout without receiving + // fairshare * fairSharePreemptionThreshold resources, it is allowed to + // preempt other queues' tasks. 
+ private final Map fairSharePreemptionThresholds; private final Map schedulingPolicies; @@ -94,8 +97,9 @@ public class AllocationConfiguration { Map schedulingPolicies, SchedulingPolicy defaultSchedulingPolicy, Map minSharePreemptionTimeouts, + Map fairSharePreemptionTimeouts, + Map fairSharePreemptionThresholds, Map> queueAcls, - long fairSharePreemptionTimeout, long defaultMinSharePreemptionTimeout, QueuePlacementPolicy placementPolicy, Map> configuredQueues) { this.minQueueResources = minQueueResources; @@ -110,9 +114,9 @@ public class AllocationConfiguration { this.defaultSchedulingPolicy = defaultSchedulingPolicy; this.schedulingPolicies = schedulingPolicies; this.minSharePreemptionTimeouts = minSharePreemptionTimeouts; + this.fairSharePreemptionTimeouts = fairSharePreemptionTimeouts; + this.fairSharePreemptionThresholds = fairSharePreemptionThresholds; this.queueAcls = queueAcls; - this.fairSharePreemptionTimeout = fairSharePreemptionTimeout; - this.defaultMinSharePreemptionTimeout = defaultMinSharePreemptionTimeout; this.placementPolicy = placementPolicy; this.configuredQueues = configuredQueues; } @@ -129,8 +133,8 @@ public class AllocationConfiguration { queueMaxAMShareDefault = -1.0f; queueAcls = new HashMap>(); minSharePreemptionTimeouts = new HashMap(); - defaultMinSharePreemptionTimeout = Long.MAX_VALUE; - fairSharePreemptionTimeout = Long.MAX_VALUE; + fairSharePreemptionTimeouts = new HashMap(); + fairSharePreemptionThresholds = new HashMap(); schedulingPolicies = new HashMap(); defaultSchedulingPolicy = SchedulingPolicy.DEFAULT_POLICY; configuredQueues = new HashMap>(); @@ -159,25 +163,35 @@ public class AllocationConfiguration { } /** - * Get a queue's min share preemption timeout, in milliseconds. This is the - * time after which jobs in the queue may kill other queues' tasks if they - * are below their min share. + * Get a queue's min share preemption timeout configured in the allocation + * file, in milliseconds. Return -1 if not set. */ public long getMinSharePreemptionTimeout(String queueName) { Long minSharePreemptionTimeout = minSharePreemptionTimeouts.get(queueName); - return (minSharePreemptionTimeout == null) ? defaultMinSharePreemptionTimeout - : minSharePreemptionTimeout; + return (minSharePreemptionTimeout == null) ? -1 : minSharePreemptionTimeout; } - + /** - * Get the fair share preemption, in milliseconds. This is the time - * after which any job may kill other jobs' tasks if it is below half - * its fair share. + * Get a queue's fair share preemption timeout configured in the allocation + * file, in milliseconds. Return -1 if not set. */ - public long getFairSharePreemptionTimeout() { - return fairSharePreemptionTimeout; + public long getFairSharePreemptionTimeout(String queueName) { + Long fairSharePreemptionTimeout = fairSharePreemptionTimeouts.get(queueName); + return (fairSharePreemptionTimeout == null) ? + -1 : fairSharePreemptionTimeout; } - + + /** + * Get a queue's fair share preemption threshold in the allocation file. + * Return -1f if not set. + */ + public float getFairSharePreemptionThreshold(String queueName) { + Float fairSharePreemptionThreshold = + fairSharePreemptionThresholds.get(queueName); + return (fairSharePreemptionThreshold == null) ? + -1f : fairSharePreemptionThreshold; + } + public ResourceWeights getQueueWeight(String queue) { ResourceWeights weight = queueWeights.get(queue); return (weight == null) ? 
ResourceWeights.NEUTRAL : weight; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java index 4cc88c140d4..c2dfc84a536 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java @@ -217,27 +217,31 @@ public class AllocationFileLoaderService extends AbstractService { Map queueWeights = new HashMap(); Map queuePolicies = new HashMap(); Map minSharePreemptionTimeouts = new HashMap(); + Map fairSharePreemptionTimeouts = new HashMap(); + Map fairSharePreemptionThresholds = + new HashMap(); Map> queueAcls = new HashMap>(); int userMaxAppsDefault = Integer.MAX_VALUE; int queueMaxAppsDefault = Integer.MAX_VALUE; float queueMaxAMShareDefault = -1.0f; - long fairSharePreemptionTimeout = Long.MAX_VALUE; + long defaultFairSharePreemptionTimeout = Long.MAX_VALUE; long defaultMinSharePreemptionTimeout = Long.MAX_VALUE; + float defaultFairSharePreemptionThreshold = 0.5f; SchedulingPolicy defaultSchedPolicy = SchedulingPolicy.DEFAULT_POLICY; - + QueuePlacementPolicy newPlacementPolicy = null; // Remember all queue names so we can display them on web UI, etc. // configuredQueues is segregated based on whether it is a leaf queue // or a parent queue. This information is used for creating queues // and also for making queue placement decisions(QueuePlacementRule.java). - Map> configuredQueues = + Map> configuredQueues = new HashMap>(); for (FSQueueType queueType : FSQueueType.values()) { configuredQueues.put(queueType, new HashSet()); } - + // Read and parse the allocations file. 
DocumentBuilderFactory docBuilderFactory = DocumentBuilderFactory.newInstance(); @@ -276,14 +280,28 @@ public class AllocationFileLoaderService extends AbstractService { String text = ((Text)element.getFirstChild()).getData().trim(); int val = Integer.parseInt(text); userMaxAppsDefault = val; - } else if ("fairSharePreemptionTimeout".equals(element.getTagName())) { + } else if ("defaultFairSharePreemptionTimeout" + .equals(element.getTagName())) { String text = ((Text)element.getFirstChild()).getData().trim(); long val = Long.parseLong(text) * 1000L; - fairSharePreemptionTimeout = val; - } else if ("defaultMinSharePreemptionTimeout".equals(element.getTagName())) { + defaultFairSharePreemptionTimeout = val; + } else if ("fairSharePreemptionTimeout".equals(element.getTagName())) { + if (defaultFairSharePreemptionTimeout == Long.MAX_VALUE) { + String text = ((Text)element.getFirstChild()).getData().trim(); + long val = Long.parseLong(text) * 1000L; + defaultFairSharePreemptionTimeout = val; + } + } else if ("defaultMinSharePreemptionTimeout" + .equals(element.getTagName())) { String text = ((Text)element.getFirstChild()).getData().trim(); long val = Long.parseLong(text) * 1000L; defaultMinSharePreemptionTimeout = val; + } else if ("defaultFairSharePreemptionThreshold" + .equals(element.getTagName())) { + String text = ((Text)element.getFirstChild()).getData().trim(); + float val = Float.parseFloat(text); + val = Math.max(Math.min(val, 1.0f), 0.0f); + defaultFairSharePreemptionThreshold = val; } else if ("queueMaxAppsDefault".equals(element.getTagName())) { String text = ((Text)element.getFirstChild()).getData().trim(); int val = Integer.parseInt(text); @@ -304,7 +322,7 @@ public class AllocationFileLoaderService extends AbstractService { } } } - + // Load queue elements. A root queue can either be included or omitted. If // it's included, all other queues must be inside it. 
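The new top-level elements read above, and the per-queue counterparts parsed in loadQueue() below, map onto the fair scheduler allocation file roughly as follows (illustrative values; timeouts are in seconds, thresholds are clamped to [0.0, 1.0]):

<allocations>
  <defaultMinSharePreemptionTimeout>120</defaultMinSharePreemptionTimeout>
  <defaultFairSharePreemptionTimeout>300</defaultFairSharePreemptionTimeout>
  <defaultFairSharePreemptionThreshold>0.5</defaultFairSharePreemptionThreshold>

  <queue name="latency">
    <!-- Per-queue overrides of the defaults above. -->
    <fairSharePreemptionTimeout>60</fairSharePreemptionTimeout>
    <fairSharePreemptionThreshold>0.8</fairSharePreemptionThreshold>
  </queue>
</allocations>

Read with the AllocationConfiguration comments earlier in this patch, a queue whose fair share is, say, 10 GB with a threshold of 0.8 may begin preempting once it has run below 8 GB for 60 seconds. The older top-level fairSharePreemptionTimeout element is still honored, but only while defaultFairSharePreemptionTimeout has not been set.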
for (Element element : queueElements) { @@ -318,10 +336,10 @@ public class AllocationFileLoaderService extends AbstractService { } loadQueue(parent, element, minQueueResources, maxQueueResources, queueMaxApps, userMaxApps, queueMaxAMShares, queueWeights, - queuePolicies, minSharePreemptionTimeouts, queueAcls, - configuredQueues); + queuePolicies, minSharePreemptionTimeouts, fairSharePreemptionTimeouts, + fairSharePreemptionThresholds, queueAcls, configuredQueues); } - + // Load placement policy and pass it configured queues Configuration conf = getConfig(); if (placementPolicyElement != null) { @@ -331,11 +349,29 @@ public class AllocationFileLoaderService extends AbstractService { newPlacementPolicy = QueuePlacementPolicy.fromConfiguration(conf, configuredQueues); } - - AllocationConfiguration info = new AllocationConfiguration(minQueueResources, maxQueueResources, - queueMaxApps, userMaxApps, queueWeights, queueMaxAMShares, userMaxAppsDefault, - queueMaxAppsDefault, queueMaxAMShareDefault, queuePolicies, defaultSchedPolicy, minSharePreemptionTimeouts, - queueAcls, fairSharePreemptionTimeout, defaultMinSharePreemptionTimeout, + + // Set the min/fair share preemption timeout for the root queue + if (!minSharePreemptionTimeouts.containsKey(QueueManager.ROOT_QUEUE)){ + minSharePreemptionTimeouts.put(QueueManager.ROOT_QUEUE, + defaultMinSharePreemptionTimeout); + } + if (!fairSharePreemptionTimeouts.containsKey(QueueManager.ROOT_QUEUE)) { + fairSharePreemptionTimeouts.put(QueueManager.ROOT_QUEUE, + defaultFairSharePreemptionTimeout); + } + + // Set the fair share preemption threshold for the root queue + if (!fairSharePreemptionThresholds.containsKey(QueueManager.ROOT_QUEUE)) { + fairSharePreemptionThresholds.put(QueueManager.ROOT_QUEUE, + defaultFairSharePreemptionThreshold); + } + + AllocationConfiguration info = new AllocationConfiguration(minQueueResources, + maxQueueResources, queueMaxApps, userMaxApps, queueWeights, + queueMaxAMShares, userMaxAppsDefault, queueMaxAppsDefault, + queueMaxAMShareDefault, queuePolicies, defaultSchedPolicy, + minSharePreemptionTimeouts, fairSharePreemptionTimeouts, + fairSharePreemptionThresholds, queueAcls, newPlacementPolicy, configuredQueues); lastSuccessfulReload = clock.getTime(); @@ -347,12 +383,15 @@ public class AllocationFileLoaderService extends AbstractService { /** * Loads a queue from a queue element in the configuration file */ - private void loadQueue(String parentName, Element element, Map minQueueResources, + private void loadQueue(String parentName, Element element, + Map minQueueResources, Map maxQueueResources, Map queueMaxApps, Map userMaxApps, Map queueMaxAMShares, Map queueWeights, Map queuePolicies, Map minSharePreemptionTimeouts, + Map fairSharePreemptionTimeouts, + Map fairSharePreemptionThresholds, Map> queueAcls, Map> configuredQueues) throws AllocationConfigurationException { @@ -395,6 +434,15 @@ public class AllocationFileLoaderService extends AbstractService { String text = ((Text)field.getFirstChild()).getData().trim(); long val = Long.parseLong(text) * 1000L; minSharePreemptionTimeouts.put(queueName, val); + } else if ("fairSharePreemptionTimeout".equals(field.getTagName())) { + String text = ((Text)field.getFirstChild()).getData().trim(); + long val = Long.parseLong(text) * 1000L; + fairSharePreemptionTimeouts.put(queueName, val); + } else if ("fairSharePreemptionThreshold".equals(field.getTagName())) { + String text = ((Text)field.getFirstChild()).getData().trim(); + float val = Float.parseFloat(text); + val = 
Math.max(Math.min(val, 1.0f), 0.0f); + fairSharePreemptionThresholds.put(queueName, val); } else if ("schedulingPolicy".equals(field.getTagName()) || "schedulingMode".equals(field.getTagName())) { String text = ((Text)field.getFirstChild()).getData().trim(); @@ -410,8 +458,9 @@ public class AllocationFileLoaderService extends AbstractService { "pool".equals(field.getTagName())) { loadQueue(queueName, field, minQueueResources, maxQueueResources, queueMaxApps, userMaxApps, queueMaxAMShares, queueWeights, - queuePolicies, minSharePreemptionTimeouts, queueAcls, - configuredQueues); + queuePolicies, minSharePreemptionTimeouts, + fairSharePreemptionTimeouts, fairSharePreemptionThresholds, + queueAcls, configuredQueues); configuredQueues.get(FSQueueType.PARENT).add(queueName); isLeaf = false; } @@ -426,11 +475,15 @@ public class AllocationFileLoaderService extends AbstractService { } } queueAcls.put(queueName, acls); - if (maxQueueResources.containsKey(queueName) && minQueueResources.containsKey(queueName) + if (maxQueueResources.containsKey(queueName) && + minQueueResources.containsKey(queueName) && !Resources.fitsIn(minQueueResources.get(queueName), maxQueueResources.get(queueName))) { - LOG.warn(String.format("Queue %s has max resources %s less than min resources %s", - queueName, maxQueueResources.get(queueName), minQueueResources.get(queueName))); + LOG.warn( + String.format( + "Queue %s has max resources %s less than min resources %s", + queueName, maxQueueResources.get(queueName), + minQueueResources.get(queueName))); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java index 49e8ef06122..345ea8b7c36 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java @@ -24,6 +24,7 @@ import java.util.Collections; import java.util.Comparator; import java.util.List; +import com.google.common.annotations.VisibleForTesting; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; @@ -54,7 +55,7 @@ public class FSLeafQueue extends FSQueue { // Variables used for preemption private long lastTimeAtMinShare; - private long lastTimeAtHalfFairShare; + private long lastTimeAtFairShareThreshold; // Track the AM resource usage for this queue private Resource amResourceUsage; @@ -65,7 +66,7 @@ public class FSLeafQueue extends FSQueue { FSParentQueue parent) { super(name, scheduler, parent); this.lastTimeAtMinShare = scheduler.getClock().getTime(); - this.lastTimeAtHalfFairShare = scheduler.getClock().getTime(); + this.lastTimeAtFairShareThreshold = scheduler.getClock().getTime(); activeUsersManager = new ActiveUsersManager(getMetrics()); amResourceUsage = Resource.newInstance(0, 0); } @@ -275,16 +276,17 @@ public class FSLeafQueue extends FSQueue { return lastTimeAtMinShare; } - public void setLastTimeAtMinShare(long lastTimeAtMinShare) { + private void setLastTimeAtMinShare(long 
lastTimeAtMinShare) { this.lastTimeAtMinShare = lastTimeAtMinShare; } - public long getLastTimeAtHalfFairShare() { - return lastTimeAtHalfFairShare; + public long getLastTimeAtFairShareThreshold() { + return lastTimeAtFairShareThreshold; } - public void setLastTimeAtHalfFairShare(long lastTimeAtHalfFairShare) { - this.lastTimeAtHalfFairShare = lastTimeAtHalfFairShare; + private void setLastTimeAtFairShareThreshold( + long lastTimeAtFairShareThreshold) { + this.lastTimeAtFairShareThreshold = lastTimeAtFairShareThreshold; } @Override @@ -328,6 +330,20 @@ public class FSLeafQueue extends FSQueue { // TODO Auto-generated method stub } + /** + * Update the preemption fields for the queue, i.e. the times since last was + * at its guaranteed share and over its fair share threshold. + */ + public void updateStarvationStats() { + long now = scheduler.getClock().getTime(); + if (!isStarvedForMinShare()) { + setLastTimeAtMinShare(now); + } + if (!isStarvedForFairShare()) { + setLastTimeAtFairShareThreshold(now); + } + } + /** * Helper method to check if the queue should preempt containers * @@ -337,4 +353,28 @@ public class FSLeafQueue extends FSQueue { return parent.getPolicy().checkIfUsageOverFairShare(getResourceUsage(), getFairShare()); } + + /** + * Is a queue being starved for its min share. + */ + @VisibleForTesting + boolean isStarvedForMinShare() { + return isStarved(getMinShare()); + } + + /** + * Is a queue being starved for its fair share threshold. + */ + @VisibleForTesting + boolean isStarvedForFairShare() { + return isStarved( + Resources.multiply(getFairShare(), getFairSharePreemptionThreshold())); + } + + private boolean isStarved(Resource share) { + Resource desiredShare = Resources.min(FairScheduler.getResourceCalculator(), + scheduler.getClusterResource(), share, getDemand()); + return Resources.lessThan(FairScheduler.getResourceCalculator(), + scheduler.getClusterResource(), getResourceUsage(), desiredShare); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java index 26a706c7f03..f74106a7da9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java @@ -77,6 +77,15 @@ public class FSParentQueue extends FSQueue { } } + @Override + public void updatePreemptionVariables() { + super.updatePreemptionVariables(); + // For child queues + for (FSQueue childQueue : childQueues) { + childQueue.updatePreemptionVariables(); + } + } + @Override public Resource getDemand() { return demand; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java index 00f0795e1da..d4e043d8850 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java @@ -52,6 +52,10 @@ public abstract class FSQueue implements Queue, Schedulable { protected SchedulingPolicy policy = SchedulingPolicy.DEFAULT_POLICY; + private long fairSharePreemptionTimeout = Long.MAX_VALUE; + private long minSharePreemptionTimeout = Long.MAX_VALUE; + private float fairSharePreemptionThreshold = 0.5f; + public FSQueue(String name, FairScheduler scheduler, FSParentQueue parent) { this.name = name; this.scheduler = scheduler; @@ -166,13 +170,61 @@ public abstract class FSQueue implements Queue, Schedulable { public boolean hasAccess(QueueACL acl, UserGroupInformation user) { return scheduler.getAllocationConfiguration().hasAccess(name, acl, user); } - + + public long getFairSharePreemptionTimeout() { + return fairSharePreemptionTimeout; + } + + public void setFairSharePreemptionTimeout(long fairSharePreemptionTimeout) { + this.fairSharePreemptionTimeout = fairSharePreemptionTimeout; + } + + public long getMinSharePreemptionTimeout() { + return minSharePreemptionTimeout; + } + + public void setMinSharePreemptionTimeout(long minSharePreemptionTimeout) { + this.minSharePreemptionTimeout = minSharePreemptionTimeout; + } + + public float getFairSharePreemptionThreshold() { + return fairSharePreemptionThreshold; + } + + public void setFairSharePreemptionThreshold(float fairSharePreemptionThreshold) { + this.fairSharePreemptionThreshold = fairSharePreemptionThreshold; + } + /** * Recomputes the shares for all child queues and applications based on this * queue's current share */ public abstract void recomputeShares(); + /** + * Update the min/fair share preemption timeouts and threshold for this queue. + */ + public void updatePreemptionVariables() { + // For min share timeout + minSharePreemptionTimeout = scheduler.getAllocationConfiguration() + .getMinSharePreemptionTimeout(getName()); + if (minSharePreemptionTimeout == -1 && parent != null) { + minSharePreemptionTimeout = parent.getMinSharePreemptionTimeout(); + } + // For fair share timeout + fairSharePreemptionTimeout = scheduler.getAllocationConfiguration() + .getFairSharePreemptionTimeout(getName()); + if (fairSharePreemptionTimeout == -1 && parent != null) { + fairSharePreemptionTimeout = parent.getFairSharePreemptionTimeout(); + } + // For fair share preemption threshold + fairSharePreemptionThreshold = scheduler.getAllocationConfiguration() + .getFairSharePreemptionThreshold(getName()); + if (fairSharePreemptionThreshold < 0 && parent != null) { + fairSharePreemptionThreshold = parent.getFairSharePreemptionThreshold(); + } + } + /** * Gets the children of this queue, if any. 
*/ diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java index 40c72a621e7..a35e49f282e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java @@ -45,6 +45,7 @@ import org.apache.hadoop.yarn.api.records.QueueACL; import org.apache.hadoop.yarn.api.records.QueueInfo; import org.apache.hadoop.yarn.api.records.QueueUserACLInfo; import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.ResourceOption; import org.apache.hadoop.yarn.api.records.ResourceRequest; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnException; @@ -79,6 +80,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppRemovedS import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.ContainerExpiredSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeRemovedSchedulerEvent; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeResourceUpdateSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager; @@ -297,7 +299,7 @@ public class FairScheduler extends */ protected synchronized void update() { long start = getClock().getTime(); - updatePreemptionVariables(); // Determine if any queues merit preemption + updateStarvationStats(); // Determine if any queues merit preemption FSQueue rootQueue = queueMgr.getRootQueue(); @@ -327,48 +329,20 @@ public class FairScheduler extends /** * Update the preemption fields for all QueueScheduables, i.e. the times since - * each queue last was at its guaranteed share and at > 1/2 of its fair share - * for each type of task. + * each queue last was at its guaranteed share and over its fair share + * threshold for each type of task. */ - private void updatePreemptionVariables() { - long now = getClock().getTime(); - lastPreemptionUpdateTime = now; + private void updateStarvationStats() { + lastPreemptionUpdateTime = clock.getTime(); for (FSLeafQueue sched : queueMgr.getLeafQueues()) { - if (!isStarvedForMinShare(sched)) { - sched.setLastTimeAtMinShare(now); - } - if (!isStarvedForFairShare(sched)) { - sched.setLastTimeAtHalfFairShare(now); - } + sched.updateStarvationStats(); } } - /** - * Is a queue below its min share for the given task type? 
- */ - boolean isStarvedForMinShare(FSLeafQueue sched) { - Resource desiredShare = Resources.min(RESOURCE_CALCULATOR, clusterResource, - sched.getMinShare(), sched.getDemand()); - return Resources.lessThan(RESOURCE_CALCULATOR, clusterResource, - sched.getResourceUsage(), desiredShare); - } - - /** - * Is a queue being starved for fair share for the given task type? This is - * defined as being below half its fair share. - */ - boolean isStarvedForFairShare(FSLeafQueue sched) { - Resource desiredFairShare = Resources.min(RESOURCE_CALCULATOR, - clusterResource, - Resources.multiply(sched.getFairShare(), .5), sched.getDemand()); - return Resources.lessThan(RESOURCE_CALCULATOR, clusterResource, - sched.getResourceUsage(), desiredFairShare); - } - /** * Check for queues that need tasks preempted, either because they have been * below their guaranteed share for minSharePreemptionTimeout or they have - * been below half their fair share for the fairSharePreemptionTimeout. If + * been below their fair share threshold for the fairSharePreemptionTimeout. If * such queues exist, compute how many tasks of each type need to be preempted * and then select the right ones using preemptTasks. */ @@ -497,16 +471,15 @@ public class FairScheduler extends * Return the resource amount that this queue is allowed to preempt, if any. * If the queue has been below its min share for at least its preemption * timeout, it should preempt the difference between its current share and - * this min share. If it has been below half its fair share for at least the - * fairSharePreemptionTimeout, it should preempt enough tasks to get up to its - * full fair share. If both conditions hold, we preempt the max of the two - * amounts (this shouldn't happen unless someone sets the timeouts to be - * identical for some reason). + * this min share. If it has been below its fair share preemption threshold + * for at least the fairSharePreemptionTimeout, it should preempt enough tasks + * to get up to its full fair share. If both conditions hold, we preempt the + * max of the two amounts (this shouldn't happen unless someone sets the + * timeouts to be identical for some reason). */ protected Resource resToPreempt(FSLeafQueue sched, long curTime) { - String queue = sched.getName(); - long minShareTimeout = allocConf.getMinSharePreemptionTimeout(queue); - long fairShareTimeout = allocConf.getFairSharePreemptionTimeout(); + long minShareTimeout = sched.getMinSharePreemptionTimeout(); + long fairShareTimeout = sched.getFairSharePreemptionTimeout(); Resource resDueToMinShare = Resources.none(); Resource resDueToFairShare = Resources.none(); if (curTime - sched.getLastTimeAtMinShare() > minShareTimeout) { @@ -515,7 +488,7 @@ public class FairScheduler extends resDueToMinShare = Resources.max(RESOURCE_CALCULATOR, clusterResource, Resources.none(), Resources.subtract(target, sched.getResourceUsage())); } - if (curTime - sched.getLastTimeAtHalfFairShare() > fairShareTimeout) { + if (curTime - sched.getLastTimeAtFairShareThreshold() > fairShareTimeout) { Resource target = Resources.min(RESOURCE_CALCULATOR, clusterResource, sched.getFairShare(), sched.getDemand()); resDueToFairShare = Resources.max(RESOURCE_CALCULATOR, clusterResource, @@ -956,7 +929,7 @@ public class FairScheduler extends allocation.getNMTokenList()); } } - + /** * Process a heartbeat update from a node. 
*/ @@ -967,9 +940,6 @@ public class FairScheduler extends } eventLog.log("HEARTBEAT", nm.getHostName()); FSSchedulerNode node = getFSSchedulerNode(nm.getNodeID()); - - // Update resource if any change - SchedulerUtils.updateResourceIfChanged(node, nm, clusterResource, LOG); List containerInfoList = nm.pullContainerUpdates(); List newlyLaunchedContainers = new ArrayList(); @@ -1096,7 +1066,11 @@ public class FairScheduler extends public FSAppAttempt getSchedulerApp(ApplicationAttemptId appAttemptId) { return super.getApplicationAttempt(appAttemptId); } - + + public static ResourceCalculator getResourceCalculator() { + return RESOURCE_CALCULATOR; + } + /** * Subqueue metrics might be a little out of date because fair shares are * recalculated at the update interval, but the root queue metrics needs to @@ -1173,6 +1147,15 @@ public class FairScheduler extends removeApplication(appRemovedEvent.getApplicationID(), appRemovedEvent.getFinalState()); break; + case NODE_RESOURCE_UPDATE: + if (!(event instanceof NodeResourceUpdateSchedulerEvent)) { + throw new RuntimeException("Unexpected event type: " + event); + } + NodeResourceUpdateSchedulerEvent nodeResourceUpdatedEvent = + (NodeResourceUpdateSchedulerEvent)event; + updateNodeResource(nodeResourceUpdatedEvent.getRMNode(), + nodeResourceUpdatedEvent.getResourceOption()); + break; case APP_ATTEMPT_ADDED: if (!(event instanceof AppAttemptAddedSchedulerEvent)) { throw new RuntimeException("Unexpected event type: " + event); @@ -1534,4 +1517,16 @@ public class FairScheduler extends } return queue1; // names are identical } + + /** + * Process resource update on a node and update Queue. + */ + @Override + public synchronized void updateNodeResource(RMNode nm, + ResourceOption resourceOption) { + super.updateNodeResource(nm, resourceOption); + updateRootQueueMetrics(); + queueMgr.getRootQueue().setSteadyFairShare(clusterResource); + queueMgr.getRootQueue().recomputeSteadyShares(); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueueManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueueManager.java index 490ba686598..61b3b6c325f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueueManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueueManager.java @@ -181,6 +181,7 @@ public class QueueManager { parent.addChildQueue(leafQueue); queues.put(leafQueue.getName(), leafQueue); leafQueues.add(leafQueue); + leafQueue.updatePreemptionVariables(); return leafQueue; } else { FSParentQueue newParent = new FSParentQueue(queueName, scheduler, parent); @@ -192,6 +193,7 @@ public class QueueManager { } parent.addChildQueue(newParent); queues.put(newParent.getName(), newParent); + newParent.updatePreemptionVariables(); parent = newParent; } } @@ -384,5 +386,8 @@ public class QueueManager { // Update steady fair shares for all queues rootQueue.recomputeSteadyShares(); + // Update the fair share preemption timeouts and preemption for all queues + // recursively + rootQueue.updatePreemptionVariables(); } } diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java index dd2ea433ebb..d72e7966064 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java @@ -85,6 +85,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppRemovedS import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.ContainerExpiredSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeRemovedSchedulerEvent; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeResourceUpdateSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent; import org.apache.hadoop.yarn.server.utils.BuilderUtils; @@ -681,9 +682,6 @@ public class FifoScheduler extends private synchronized void nodeUpdate(RMNode rmNode) { FiCaSchedulerNode node = getNode(rmNode.getNodeID()); - // Update resource if any change - SchedulerUtils.updateResourceIfChanged(node, rmNode, clusterResource, LOG); - List containerInfoList = rmNode.pullContainerUpdates(); List newlyLaunchedContainers = new ArrayList(); List completedContainers = new ArrayList(); @@ -750,6 +748,14 @@ public class FifoScheduler extends removeNode(nodeRemovedEvent.getRemovedRMNode()); } break; + case NODE_RESOURCE_UPDATE: + { + NodeResourceUpdateSchedulerEvent nodeResourceUpdatedEvent = + (NodeResourceUpdateSchedulerEvent)event; + updateNodeResource(nodeResourceUpdatedEvent.getRMNode(), + nodeResourceUpdatedEvent.getResourceOption()); + } + break; case NODE_UPDATE: { NodeUpdateSchedulerEvent nodeUpdatedEvent = diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java index bdcfd0460ef..e0c32247dfa 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java @@ -289,7 +289,7 @@ public class DelegationTokenRenewer extends AbstractService { tokenWithConf = queue.take(); final TokenWithConf current = tokenWithConf; if (LOG.isDebugEnabled()) { - LOG.debug("Canceling token " + tokenWithConf.token.getService()); + LOG.debug("Cancelling token " + tokenWithConf.token.getService()); } // need to use doAs so that http can find the kerberos tgt 
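Design note on the scheduler hunks above: the per-heartbeat SchedulerUtils.updateResourceIfChanged call is removed, and a node resize now reaches the scheduler as a dedicated NODE_RESOURCE_UPDATE event (see the new case added to FifoScheduler.handle above and the FairScheduler equivalent earlier). The toy sketch below only illustrates that dispatch shape; the enum and class names are invented and are not YARN classes.

    // Invented stand-ins showing the event-driven shape used above: capacity
    // changes are applied once, when the admin-driven event arrives, instead
    // of being re-derived from every node heartbeat.
    enum MiniEventType { NODE_UPDATE, NODE_RESOURCE_UPDATE }

    final class MiniScheduler {
      private int nodeCapacityMB;

      void handle(MiniEventType type, int payloadMB) {
        switch (type) {
          case NODE_RESOURCE_UPDATE:
            nodeCapacityMB = payloadMB; // apply the admin-supplied capacity once
            break;
          case NODE_UPDATE:
            // heartbeats carry container status only; capacity is not re-checked here
            break;
        }
      }

      int capacityMB() {
        return nodeCapacityMB;
      }
    }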
UserGroupInformation.getLoginUser() diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerAppsBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerAppsBlock.java index b1aff9078ca..2a1442ea09d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerAppsBlock.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerAppsBlock.java @@ -110,6 +110,10 @@ public class FairSchedulerAppsBlock extends HtmlBlock { String percent = String.format("%.1f", appInfo.getProgress()); ApplicationAttemptId attemptId = app.getCurrentAppAttempt().getAppAttemptId(); int fairShare = fsinfo.getAppFairShare(attemptId); + if (fairShare == FairSchedulerInfo.INVALID_FAIR_SHARE) { + // FairScheduler#applications don't have the entry. Skip it. + continue; + } //AppID numerical value parsed by parseHadoopID in yarn.dt.plugins.js appsTableData.append("[\"") diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerPage.java index aca3e448485..bcf7781fc47 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerPage.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerPage.java @@ -44,10 +44,12 @@ public class FairSchedulerPage extends RmView { static final float Q_MAX_WIDTH = 0.8f; static final float Q_STATS_POS = Q_MAX_WIDTH + 0.05f; static final String Q_END = "left:101%"; - static final String Q_GIVEN = "left:0%;background:none;border:1px dashed rgba(0,0,0,0.25)"; + static final String Q_GIVEN = "left:0%;background:none;border:1px solid rgba(0,0,0,1)"; + static final String Q_INSTANTANEOUS_FS = "left:0%;background:none;border:1px dashed rgba(0,0,0,1)"; static final String Q_OVER = "background:rgba(255, 140, 0, 0.8)"; static final String Q_UNDER = "background:rgba(50, 205, 50, 0.8)"; - + static final String STEADY_FAIR_SHARE = "Steady Fair Share"; + static final String INSTANTANEOUS_FAIR_SHARE = "Instantaneous Fair Share"; @RequestScoped static class FSQInfo { FairSchedulerQueueInfo qinfo; @@ -73,8 +75,8 @@ public class FairSchedulerPage extends RmView { if (maxApps < Integer.MAX_VALUE) { ri._("Max Running Applications:", qinfo.getMaxApplications()); } - ri._("Fair Share:", qinfo.getFairShare().toString()); - + ri._(STEADY_FAIR_SHARE + ":", qinfo.getSteadyFairShare().toString()); + ri._(INSTANTANEOUS_FAIR_SHARE + ":", qinfo.getFairShare().toString()); html._(InfoBlock.class); // clear the info contents so this queue's info doesn't accumulate into another queue's info @@ -95,16 +97,21 @@ public class FairSchedulerPage extends RmView { UL ul = html.ul("#pq"); for (FairSchedulerQueueInfo info : 
subQueues) { float capacity = info.getMaxResourcesFraction(); - float fairShare = info.getFairShareMemoryFraction(); + float steadyFairShare = info.getSteadyFairShareMemoryFraction(); + float instantaneousFairShare = info.getFairShareMemoryFraction(); float used = info.getUsedMemoryFraction(); LI> li = ul. li(). a(_Q).$style(width(capacity * Q_MAX_WIDTH)). - $title(join("Fair Share:", percent(fairShare))). - span().$style(join(Q_GIVEN, ";font-size:1px;", width(fairShare/capacity))). + $title(join(join(STEADY_FAIR_SHARE + ":", percent(steadyFairShare)), + join(" " + INSTANTANEOUS_FAIR_SHARE + ":", percent(instantaneousFairShare)))). + span().$style(join(Q_GIVEN, ";font-size:1px;", width(steadyFairShare / capacity))). + _('.')._(). + span().$style(join(Q_INSTANTANEOUS_FS, ";font-size:1px;", + width(instantaneousFairShare/capacity))). _('.')._(). span().$style(join(width(used/capacity), - ";font-size:1px;left:0%;", used > fairShare ? Q_OVER : Q_UNDER)). + ";font-size:1px;left:0%;", used > instantaneousFairShare ? Q_OVER : Q_UNDER)). _('.')._(). span(".q", info.getQueueName())._(). span().$class("qstats").$style(left(Q_STATS_POS)). @@ -156,7 +163,13 @@ public class FairSchedulerPage extends RmView { li().$style("margin-bottom: 1em"). span().$style("font-weight: bold")._("Legend:")._(). span().$class("qlegend ui-corner-all").$style(Q_GIVEN). - _("Fair Share")._(). + $title("The steady fair shares consider all queues, " + + "both active (with running applications) and inactive."). + _(STEADY_FAIR_SHARE)._(). + span().$class("qlegend ui-corner-all").$style(Q_INSTANTANEOUS_FS). + $title("The instantaneous fair shares consider only active " + + "queues (with running applications)."). + _(INSTANTANEOUS_FAIR_SHARE)._(). span().$class("qlegend ui-corner-all").$style(Q_UNDER). _("Used")._(). span().$class("qlegend ui-corner-all").$style(Q_OVER). 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java index a8ec19260ed..24a90bd69aa 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java @@ -1061,7 +1061,7 @@ public class RMWebServices { token.decodeFromUrlString(entry.getValue()); ret.addToken(alias, token); } - for (Map.Entry entry : credentials.getTokens().entrySet()) { + for (Map.Entry entry : credentials.getSecrets().entrySet()) { Text alias = new Text(entry.getKey()); Base64 decoder = new Base64(0, null, true); byte[] secret = decoder.decode(entry.getValue()); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerInfo.java index 23f8c01c38a..f97ff8ae64b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerInfo.java @@ -25,12 +25,14 @@ import javax.xml.bind.annotation.XmlTransient; import javax.xml.bind.annotation.XmlType; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSAppAttempt; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler; @XmlRootElement(name = "fairScheduler") @XmlType(name = "fairScheduler") @XmlAccessorType(XmlAccessType.FIELD) public class FairSchedulerInfo extends SchedulerInfo { + public static final int INVALID_FAIR_SHARE = -1; private FairSchedulerQueueInfo rootQueue; @XmlTransient @@ -44,9 +46,18 @@ public class FairSchedulerInfo extends SchedulerInfo { rootQueue = new FairSchedulerQueueInfo(scheduler.getQueueManager(). getRootQueue(), scheduler); } - + + /** + * Get the fair share assigned to the appAttemptId. + * @param appAttemptId + * @return The fair share assigned to the appAttemptId, + * FairSchedulerInfo#INVALID_FAIR_SHARE if the scheduler does + * not know about this application attempt. + */ public int getAppFairShare(ApplicationAttemptId appAttemptId) { - return scheduler.getSchedulerApp(appAttemptId).getFairShare().getMemory(); + FSAppAttempt fsAppAttempt = scheduler.getSchedulerApp(appAttemptId); + return fsAppAttempt == null ? 
+ INVALID_FAIR_SHARE : fsAppAttempt.getFairShare().getMemory(); } public FairSchedulerQueueInfo getRootQueueInfo() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java index 2c1bc4796d4..c62aaf08c64 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java @@ -28,7 +28,6 @@ import javax.xml.bind.annotation.XmlRootElement; import javax.xml.bind.annotation.XmlSeeAlso; import javax.xml.bind.annotation.XmlTransient; -import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.AllocationConfiguration; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSLeafQueue; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSQueue; @@ -44,6 +43,8 @@ public class FairSchedulerQueueInfo { @XmlTransient private float fractionMemUsed; @XmlTransient + private float fractionMemSteadyFairShare; + @XmlTransient private float fractionMemFairShare; @XmlTransient private float fractionMemMinShare; @@ -53,6 +54,7 @@ public class FairSchedulerQueueInfo { private ResourceInfo minResources; private ResourceInfo maxResources; private ResourceInfo usedResources; + private ResourceInfo steadyFairResources; private ResourceInfo fairResources; private ResourceInfo clusterResources; @@ -75,15 +77,19 @@ public class FairSchedulerQueueInfo { usedResources = new ResourceInfo(queue.getResourceUsage()); fractionMemUsed = (float)usedResources.getMemory() / clusterResources.getMemory(); - + + steadyFairResources = new ResourceInfo(queue.getSteadyFairShare()); fairResources = new ResourceInfo(queue.getFairShare()); minResources = new ResourceInfo(queue.getMinShare()); maxResources = new ResourceInfo(queue.getMaxShare()); maxResources = new ResourceInfo( Resources.componentwiseMin(queue.getMaxShare(), scheduler.getClusterResource())); - - fractionMemFairShare = (float)fairResources.getMemory() / clusterResources.getMemory(); + + fractionMemSteadyFairShare = + (float)steadyFairResources.getMemory() / clusterResources.getMemory(); + fractionMemFairShare = (float) fairResources.getMemory() + / clusterResources.getMemory(); fractionMemMinShare = (float)minResources.getMemory() / clusterResources.getMemory(); fractionMemMaxShare = (float)maxResources.getMemory() / clusterResources.getMemory(); @@ -100,20 +106,34 @@ public class FairSchedulerQueueInfo { } } + /** + * Returns the steady fair share as a fraction of the entire cluster capacity. + */ + public float getSteadyFairShareMemoryFraction() { + return fractionMemSteadyFairShare; + } + /** * Returns the fair share as a fraction of the entire cluster capacity. */ public float getFairShareMemoryFraction() { return fractionMemFairShare; } - + /** - * Returns the fair share of this queue in megabytes. + * Returns the steady fair share of this queue in megabytes. 
+ */ + public ResourceInfo getSteadyFairShare() { + return steadyFairResources; + } + + /** + * Returns the fair share of this queue in megabytes */ public ResourceInfo getFairShare() { return fairResources; } - + public ResourceInfo getMinResources() { return minResources; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/proto/yarn_server_resourcemanager_recovery.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/proto/yarn_server_resourcemanager_recovery.proto index ae56b9fd346..eab6af15787 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/proto/yarn_server_resourcemanager_recovery.proto +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/proto/yarn_server_resourcemanager_recovery.proto @@ -23,6 +23,66 @@ option java_generate_equals_and_hash = true; package hadoop.yarn; import "yarn_server_common_protos.proto"; +import "yarn_protos.proto"; + +//////////////////////////////////////////////////////////////////////// +////// RM recovery related records ///////////////////////////////////// +//////////////////////////////////////////////////////////////////////// +enum RMAppAttemptStateProto { + RMATTEMPT_NEW = 1; + RMATTEMPT_SUBMITTED = 2; + RMATTEMPT_SCHEDULED = 3; + RMATTEMPT_ALLOCATED = 4; + RMATTEMPT_LAUNCHED = 5; + RMATTEMPT_FAILED = 6; + RMATTEMPT_RUNNING = 7; + RMATTEMPT_FINISHING = 8; + RMATTEMPT_FINISHED = 9; + RMATTEMPT_KILLED = 10; + RMATTEMPT_ALLOCATED_SAVING = 11; + RMATTEMPT_LAUNCHED_UNMANAGED_SAVING = 12; + RMATTEMPT_RECOVERED = 13; + RMATTEMPT_FINAL_SAVING = 14; +} + +enum RMAppStateProto { + RMAPP_NEW = 1; + RMAPP_NEW_SAVING = 2; + RMAPP_SUBMITTED = 3; + RMAPP_ACCEPTED = 4; + RMAPP_RUNNING = 5; + RMAPP_FINAL_SAVING = 6; + RMAPP_FINISHING = 7; + RMAPP_FINISHED = 8; + RMAPP_FAILED = 9; + RMAPP_KILLED = 10; +} + +message ApplicationStateDataProto { + optional int64 submit_time = 1; + optional ApplicationSubmissionContextProto application_submission_context = 2; + optional string user = 3; + optional int64 start_time = 4; + optional RMAppStateProto application_state = 5; + optional string diagnostics = 6 [default = "N/A"]; + optional int64 finish_time = 7; +} + +message ApplicationAttemptStateDataProto { + optional ApplicationAttemptIdProto attemptId = 1; + optional ContainerProto master_container = 2; + optional bytes app_attempt_tokens = 3; + optional RMAppAttemptStateProto app_attempt_state = 4; + optional string final_tracking_url = 5; + optional string diagnostics = 6 [default = "N/A"]; + optional int64 start_time = 7; + optional FinalApplicationStatusProto final_application_status = 8; + optional int32 am_container_exit_status = 9 [default = -1000]; +} + +message EpochProto { + optional int64 epoch = 1; +} message AMRMTokenSecretManagerStateProto { optional MasterKeyProto current_master_key = 1; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java index 8ef01d998d7..79f909806ea 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java @@ -27,7 +27,6 @@ import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.NodeState; import org.apache.hadoop.yarn.api.records.Resource; -import org.apache.hadoop.yarn.api.records.ResourceOption; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse; @@ -94,14 +93,14 @@ public class MockNodes { private String nodeAddr; private String httpAddress; private int cmdPort; - private ResourceOption perNode; + private Resource perNode; private String rackName; private String healthReport; private long lastHealthReportTime; private NodeState state; public MockRMNodeImpl(NodeId nodeId, String nodeAddr, String httpAddress, - ResourceOption perNode, String rackName, String healthReport, + Resource perNode, String rackName, String healthReport, long lastHealthReportTime, int cmdPort, String hostName, NodeState state) { this.nodeId = nodeId; this.nodeAddr = nodeAddr; @@ -147,7 +146,7 @@ public class MockNodes { @Override public Resource getTotalCapability() { - return this.perNode.getResource(); + return this.perNode; } @Override @@ -203,16 +202,6 @@ public class MockNodes { public long getLastHealthReportTime() { return lastHealthReportTime; } - - @Override - public void setResourceOption(ResourceOption resourceOption) { - this.perNode = resourceOption; - } - - @Override - public ResourceOption getResourceOption(){ - return this.perNode; - } }; @@ -232,9 +221,8 @@ public class MockNodes { final String httpAddress = httpAddr; String healthReport = (state == NodeState.UNHEALTHY) ? 
null : "HealthyMe"; - return new MockRMNodeImpl(nodeID, nodeAddr, httpAddress, - ResourceOption.newInstance(perNode, RMNode.OVER_COMMIT_TIMEOUT_MILLIS_DEFAULT), - rackName, healthReport, 0, nid, hostName, state); + return new MockRMNodeImpl(nodeID, nodeAddr, httpAddress, perNode, + rackName, healthReport, 0, nid, hostName, state); } public static RMNode nodeInfo(int rack, final Resource perNode, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java index a7f624029b2..3817637676b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java @@ -457,7 +457,6 @@ public class MockRM extends ResourceManager { @Override protected ResourceTrackerService createResourceTrackerService() { - Configuration conf = new Configuration(); RMContainerTokenSecretManager containerTokenSecretManager = getRMContext().getContainerTokenSecretManager(); @@ -547,6 +546,10 @@ public class MockRM extends ResourceManager { public RMAppManager getRMAppManager() { return this.rmAppManager; } + + public AdminService getAdminService() { + return this.adminService; + } @Override protected void startWepApp() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestFifoScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestFifoScheduler.java index 420fc942ae2..12f7498b7b0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestFifoScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestFifoScheduler.java @@ -23,7 +23,9 @@ import static org.junit.Assert.fail; import java.util.ArrayList; import java.util.Collections; +import java.util.HashMap; import java.util.List; +import java.util.Map; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -35,9 +37,13 @@ import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerState; import org.apache.hadoop.yarn.api.records.ContainerStatus; +import org.apache.hadoop.yarn.api.records.NodeId; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.ResourceOption; import org.apache.hadoop.yarn.api.records.ResourceRequest; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; +import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceRequest; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; @@ -509,6 
+515,85 @@ public class TestFifoScheduler { rm.stop(); } + @Test + public void testResourceOverCommit() throws Exception { + MockRM rm = new MockRM(conf); + rm.start(); + + MockNM nm1 = rm.registerNode("127.0.0.1:1234", 4 * GB); + + RMApp app1 = rm.submitApp(2048); + // kick the scheduling, 2 GB given to AM1, remaining 2GB on nm1 + nm1.nodeHeartbeat(true); + RMAppAttempt attempt1 = app1.getCurrentAppAttempt(); + MockAM am1 = rm.sendAMLaunched(attempt1.getAppAttemptId()); + am1.registerAppAttempt(); + SchedulerNodeReport report_nm1 = rm.getResourceScheduler().getNodeReport( + nm1.getNodeId()); + // check node report, 2 GB used and 2 GB available + Assert.assertEquals(2 * GB, report_nm1.getUsedResource().getMemory()); + Assert.assertEquals(2 * GB, report_nm1.getAvailableResource().getMemory()); + + // add request for containers + am1.addRequests(new String[] { "127.0.0.1", "127.0.0.2" }, 2 * GB, 1, 1); + AllocateResponse alloc1Response = am1.schedule(); // send the request + + // kick the scheduler, 2 GB given to AM1, resource remaining 0 + nm1.nodeHeartbeat(true); + while (alloc1Response.getAllocatedContainers().size() < 1) { + LOG.info("Waiting for containers to be created for app 1..."); + Thread.sleep(1000); + alloc1Response = am1.schedule(); + } + + List allocated1 = alloc1Response.getAllocatedContainers(); + Assert.assertEquals(1, allocated1.size()); + Assert.assertEquals(2 * GB, allocated1.get(0).getResource().getMemory()); + Assert.assertEquals(nm1.getNodeId(), allocated1.get(0).getNodeId()); + + report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId()); + // check node report, 4 GB used and 0 GB available + Assert.assertEquals(0, report_nm1.getAvailableResource().getMemory()); + Assert.assertEquals(4 * GB, report_nm1.getUsedResource().getMemory()); + + // check container is assigned with 2 GB. + Container c1 = allocated1.get(0); + Assert.assertEquals(2 * GB, c1.getResource().getMemory()); + + // update node resource to 2 GB, so resource is over-consumed. + Map nodeResourceMap = + new HashMap(); + nodeResourceMap.put(nm1.getNodeId(), + ResourceOption.newInstance(Resource.newInstance(2 * GB, 1), -1)); + UpdateNodeResourceRequest request = + UpdateNodeResourceRequest.newInstance(nodeResourceMap); + AdminService as = rm.adminService; + as.updateNodeResource(request); + + // Now, the used resource is still 4 GB, and available resource is minus value. + report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId()); + Assert.assertEquals(4 * GB, report_nm1.getUsedResource().getMemory()); + Assert.assertEquals(-2 * GB, report_nm1.getAvailableResource().getMemory()); + + // Check container can complete successfully in case of resource over-commitment. + ContainerStatus containerStatus = BuilderUtils.newContainerStatus( + c1.getId(), ContainerState.COMPLETE, "", 0); + nm1.containerStatus(containerStatus); + int waitCount = 0; + while (attempt1.getJustFinishedContainers().size() < 1 + && waitCount++ != 20) { + LOG.info("Waiting for containers to be finished for app 1... Tried " + + waitCount + " times already.."); + Thread.sleep(100); + } + Assert.assertEquals(1, attempt1.getJustFinishedContainers().size()); + Assert.assertEquals(1, am1.schedule().getCompletedContainersStatuses().size()); + report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId()); + Assert.assertEquals(2 * GB, report_nm1.getUsedResource().getMemory()); + // As container return 2 GB back, the available resource becomes 0 again. 
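The assertions above pin down the over-commit semantics: after the node is shrunk from 4 GB to 2 GB while a 2 GB AM and a 2 GB container are still running, the node report keeps used at 4 GB and lets available go negative until a container completes. A worked version of those numbers, as an illustrative helper rather than scheduler code:

    // Worked numbers from the test above; the helper name is invented.
    final class OverCommitMath {
      static int availableMB(int totalMB, int usedMB) {
        return totalMB - usedMB;
      }

      public static void main(String[] args) {
        System.out.println(availableMB(2048, 4096)); // -2048 while over-committed
        System.out.println(availableMB(2048, 2048)); //     0 once the 2 GB container finishes
      }
    }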
+ Assert.assertEquals(0 * GB, report_nm1.getAvailableResource().getMemory()); + rm.stop(); + } public static void main(String[] args) throws Exception { TestFifoScheduler t = new TestFifoScheduler(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java index aa2cfc2eba1..d877e25c2d6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.yarn.server.resourcemanager; +import static org.junit.Assert.assertEquals; import static org.mockito.Matchers.any; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doReturn; @@ -48,6 +49,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEvent; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEventType; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeImpl; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeReconnectEvent; +import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeResourceUpdateEvent; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeStartedEvent; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeStatusEvent; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.UpdatedContainerInfo; @@ -463,8 +465,7 @@ public class TestRMNodeTransitions { NodeId nodeId = BuilderUtils.newNodeId("localhost", 0); Resource capability = Resource.newInstance(4096, 4); RMNodeImpl node = new RMNodeImpl(nodeId, rmContext,null, 0, 0, - null, ResourceOption.newInstance(capability, - RMNode.OVER_COMMIT_TIMEOUT_MILLIS_DEFAULT), nmVersion); + null, capability, nmVersion); node.handle(new RMNodeStartedEvent(node.getNodeID(), null, null)); Assert.assertEquals(NodeState.RUNNING, node.getState()); return node; @@ -486,6 +487,25 @@ public class TestRMNodeTransitions { RMNodeImpl node = new RMNodeImpl(nodeId, rmContext, null, 0, 0, null, null, null); return node; } + + private RMNodeImpl getNewNode(Resource capability) { + NodeId nodeId = BuilderUtils.newNodeId("localhost", 0); + RMNodeImpl node = new RMNodeImpl(nodeId, rmContext, null, 0, 0, null, + capability, null); + return node; + } + + private RMNodeImpl getRebootedNode() { + NodeId nodeId = BuilderUtils.newNodeId("localhost", 0); + Resource capability = Resource.newInstance(4096, 4); + RMNodeImpl node = new RMNodeImpl(nodeId, rmContext,null, 0, 0, + null, capability, null); + node.handle(new RMNodeStartedEvent(node.getNodeID(), null, null)); + Assert.assertEquals(NodeState.RUNNING, node.getState()); + node.handle(new RMNodeEvent(node.getNodeID(), RMNodeEventType.REBOOTING)); + Assert.assertEquals(NodeState.REBOOTED, node.getState()); + return node; + } @Test public void testAdd() { @@ -534,6 +554,57 @@ public class TestRMNodeTransitions { Assert.assertEquals(NodesListManagerEventType.NODE_USABLE, nodesListManagerEvent.getType()); } + + @Test + public void testResourceUpdateOnRunningNode() { + RMNodeImpl node = 
getRunningNode(); + Resource oldCapacity = node.getTotalCapability(); + assertEquals("Memory resource does not match.", oldCapacity.getMemory(), 4096); + assertEquals("CPU resource does not match.", oldCapacity.getVirtualCores(), 4); + node.handle(new RMNodeResourceUpdateEvent(node.getNodeID(), + ResourceOption.newInstance(Resource.newInstance(2048, 2), + RMNode.OVER_COMMIT_TIMEOUT_MILLIS_DEFAULT))); + Resource newCapacity = node.getTotalCapability(); + assertEquals("Memory resource does not match.", newCapacity.getMemory(), 2048); + assertEquals("CPU resource does not match.", newCapacity.getVirtualCores(), 2); + + Assert.assertEquals(NodeState.RUNNING, node.getState()); + Assert.assertNotNull(nodesListManagerEvent); + Assert.assertEquals(NodesListManagerEventType.NODE_USABLE, + nodesListManagerEvent.getType()); + } + + @Test + public void testResourceUpdateOnNewNode() { + RMNodeImpl node = getNewNode(Resource.newInstance(4096, 4)); + Resource oldCapacity = node.getTotalCapability(); + assertEquals("Memory resource does not match.", oldCapacity.getMemory(), 4096); + assertEquals("CPU resource does not match.", oldCapacity.getVirtualCores(), 4); + node.handle(new RMNodeResourceUpdateEvent(node.getNodeID(), + ResourceOption.newInstance(Resource.newInstance(2048, 2), + RMNode.OVER_COMMIT_TIMEOUT_MILLIS_DEFAULT))); + Resource newCapacity = node.getTotalCapability(); + assertEquals("Memory resource does not match.", newCapacity.getMemory(), 2048); + assertEquals("CPU resource does not match.", newCapacity.getVirtualCores(), 2); + + Assert.assertEquals(NodeState.NEW, node.getState()); + } + + @Test + public void testResourceUpdateOnRebootedNode() { + RMNodeImpl node = getRebootedNode(); + Resource oldCapacity = node.getTotalCapability(); + assertEquals("Memory resource does not match.", oldCapacity.getMemory(), 4096); + assertEquals("CPU resource does not match.", oldCapacity.getVirtualCores(), 4); + node.handle(new RMNodeResourceUpdateEvent(node.getNodeID(), + ResourceOption.newInstance(Resource.newInstance(2048, 2), + RMNode.OVER_COMMIT_TIMEOUT_MILLIS_DEFAULT))); + Resource newCapacity = node.getTotalCapability(); + assertEquals("Memory resource does not match.", newCapacity.getMemory(), 2048); + assertEquals("CPU resource does not match.", newCapacity.getVirtualCores(), 2); + + Assert.assertEquals(NodeState.REBOOTED, node.getState()); + } @Test public void testReconnnectUpdate() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMReconnect.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMReconnect.java index cced69aea36..d16d5510365 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMReconnect.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMReconnect.java @@ -18,6 +18,9 @@ package org.apache.hadoop.yarn.server.resourcemanager.resourcetracker; +import java.util.ArrayList; +import java.util.List; + import org.junit.Assert; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.records.NodeId; @@ -47,14 +50,14 @@ public class TestNMReconnect { private static final RecordFactory 
recordFactory = RecordFactoryProvider.getRecordFactory(null); - private RMNodeEvent rmNodeEvent = null; + private List rmNodeEvents = new ArrayList(); private class TestRMNodeEventDispatcher implements EventHandler { @Override public void handle(RMNodeEvent event) { - rmNodeEvent = event; + rmNodeEvents.add(event); } } @@ -109,16 +112,18 @@ public class TestNMReconnect { request1.setResource(capability); resourceTrackerService.registerNodeManager(request1); - Assert.assertEquals(RMNodeEventType.STARTED, rmNodeEvent.getType()); + Assert.assertEquals(RMNodeEventType.STARTED, rmNodeEvents.get(0).getType()); - rmNodeEvent = null; + rmNodeEvents.clear(); resourceTrackerService.registerNodeManager(request1); - Assert.assertEquals(RMNodeEventType.RECONNECTED, rmNodeEvent.getType()); + Assert.assertEquals(RMNodeEventType.RECONNECTED, + rmNodeEvents.get(0).getType()); - rmNodeEvent = null; + rmNodeEvents.clear(); resourceTrackerService.registerNodeManager(request1); capability = BuilderUtils.newResource(1024, 2); request1.setResource(capability); - Assert.assertEquals(RMNodeEventType.RECONNECTED, rmNodeEvent.getType()); + Assert.assertEquals(RMNodeEventType.RECONNECTED, + rmNodeEvents.get(0).getType()); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java index f64bd62e078..e029749f1db 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java @@ -47,23 +47,30 @@ import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.yarn.LocalConfigurationProvider; import org.apache.hadoop.yarn.api.ApplicationMasterProtocol; import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest; +import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest; import org.apache.hadoop.yarn.api.records.ApplicationAccessType; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.ContainerState; +import org.apache.hadoop.yarn.api.records.ContainerStatus; +import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.QueueInfo; import org.apache.hadoop.yarn.api.records.QueueState; import org.apache.hadoop.yarn.api.records.QueueUserACLInfo; import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.ResourceOption; import org.apache.hadoop.yarn.api.records.ResourceRequest; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.event.AsyncDispatcher; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import 
org.apache.hadoop.yarn.ipc.YarnRPC; +import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceRequest; +import org.apache.hadoop.yarn.server.resourcemanager.AdminService; import org.apache.hadoop.yarn.server.resourcemanager.Application; import org.apache.hadoop.yarn.server.resourcemanager.MockAM; import org.apache.hadoop.yarn.server.resourcemanager.MockNM; @@ -90,6 +97,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplication; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNodeReport; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.TestSchedulerUtils; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode; @@ -591,7 +599,6 @@ public class TestCapacityScheduler { return result; } - @SuppressWarnings("resource") @Test public void testBlackListNodes() throws Exception { Configuration conf = new Configuration(); @@ -627,6 +634,104 @@ public class TestCapacityScheduler { Assert.assertFalse(cs.getApplicationAttempt(appAttemptId).isBlacklisted(host)); rm.stop(); } + + @Test + public void testResourceOverCommit() throws Exception { + Configuration conf = new Configuration(); + conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class, + ResourceScheduler.class); + MockRM rm = new MockRM(conf); + rm.start(); + + MockNM nm1 = rm.registerNode("127.0.0.1:1234", 4 * GB); + RMApp app1 = rm.submitApp(2048); + // kick the scheduling, 2 GB given to AM1, remaining 2GB on nm1 + nm1.nodeHeartbeat(true); + RMAppAttempt attempt1 = app1.getCurrentAppAttempt(); + MockAM am1 = rm.sendAMLaunched(attempt1.getAppAttemptId()); + am1.registerAppAttempt(); + SchedulerNodeReport report_nm1 = rm.getResourceScheduler().getNodeReport( + nm1.getNodeId()); + // check node report, 2 GB used and 2 GB available + Assert.assertEquals(2 * GB, report_nm1.getUsedResource().getMemory()); + Assert.assertEquals(2 * GB, report_nm1.getAvailableResource().getMemory()); + + // add request for containers + am1.addRequests(new String[] { "127.0.0.1", "127.0.0.2" }, 2 * GB, 1, 1); + AllocateResponse alloc1Response = am1.schedule(); // send the request + + // kick the scheduler, 2 GB given to AM1, resource remaining 0 + nm1.nodeHeartbeat(true); + while (alloc1Response.getAllocatedContainers().size() < 1) { + LOG.info("Waiting for containers to be created for app 1..."); + Thread.sleep(100); + alloc1Response = am1.schedule(); + } + + List allocated1 = alloc1Response.getAllocatedContainers(); + Assert.assertEquals(1, allocated1.size()); + Assert.assertEquals(2 * GB, allocated1.get(0).getResource().getMemory()); + Assert.assertEquals(nm1.getNodeId(), allocated1.get(0).getNodeId()); + + report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId()); + // check node report, 4 GB used and 0 GB available + Assert.assertEquals(0, report_nm1.getAvailableResource().getMemory()); + Assert.assertEquals(4 * GB, report_nm1.getUsedResource().getMemory()); + + // check container is assigned with 2 GB. + Container c1 = allocated1.get(0); + Assert.assertEquals(2 * GB, c1.getResource().getMemory()); + + // update node resource to 2 GB, so resource is over-consumed. 
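+ // (The update does not kill running containers, so used resource stays at
+ // 4 GB and the node's available memory is allowed to go negative until
+ // containers complete.)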
+ Map nodeResourceMap = + new HashMap(); + nodeResourceMap.put(nm1.getNodeId(), + ResourceOption.newInstance(Resource.newInstance(2 * GB, 1), -1)); + UpdateNodeResourceRequest request = + UpdateNodeResourceRequest.newInstance(nodeResourceMap); + AdminService as = ((MockRM)rm).getAdminService(); + as.updateNodeResource(request); + + // Now, the used resource is still 4 GB, and available resource is minus value. + report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId()); + Assert.assertEquals(4 * GB, report_nm1.getUsedResource().getMemory()); + Assert.assertEquals(-2 * GB, report_nm1.getAvailableResource().getMemory()); + + // Check container can complete successfully in case of resource over-commitment. + ContainerStatus containerStatus = BuilderUtils.newContainerStatus( + c1.getId(), ContainerState.COMPLETE, "", 0); + nm1.containerStatus(containerStatus); + int waitCount = 0; + while (attempt1.getJustFinishedContainers().size() < 1 + && waitCount++ != 20) { + LOG.info("Waiting for containers to be finished for app 1... Tried " + + waitCount + " times already.."); + Thread.sleep(100); + } + Assert.assertEquals(1, attempt1.getJustFinishedContainers().size()); + Assert.assertEquals(1, am1.schedule().getCompletedContainersStatuses().size()); + report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId()); + Assert.assertEquals(2 * GB, report_nm1.getUsedResource().getMemory()); + // As container return 2 GB back, the available resource becomes 0 again. + Assert.assertEquals(0 * GB, report_nm1.getAvailableResource().getMemory()); + + // Verify no NPE is trigger in schedule after resource is updated. + am1.addRequests(new String[] { "127.0.0.1", "127.0.0.2" }, 3 * GB, 1, 1); + alloc1Response = am1.schedule(); + Assert.assertEquals("Shouldn't have enough resource to allocate containers", + 0, alloc1Response.getAllocatedContainers().size()); + int times = 0; + // try 10 times as scheduling is async process. + while (alloc1Response.getAllocatedContainers().size() < 1 + && times++ < 10) { + LOG.info("Waiting for containers to be allocated for app 1... 
Tried " + + times + " times already.."); + Thread.sleep(100); + } + Assert.assertEquals("Shouldn't have enough resource to allocate containers", + 0, alloc1Response.getAllocatedContainers().size()); + rm.stop(); + } @Test (timeout = 5000) public void testApplicationComparator() diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestAllocationFileLoaderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestAllocationFileLoaderService.java index 2a4992c32ab..656e20d4c7a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestAllocationFileLoaderService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestAllocationFileLoaderService.java @@ -186,9 +186,16 @@ public class TestAllocationFileLoaderService { //Make queue F a parent queue without configured leaf queues using the 'type' attribute out.println(""); out.println(""); - //Create hierarchical queues G,H + // Create hierarchical queues G,H, with different min/fair share preemption + // timeouts and preemption thresholds out.println(""); + out.println("120"); + out.println("50"); + out.println("0.6"); out.println(" "); + out.println(" 180"); + out.println(" 40"); + out.println(" 0.7"); out.println(" "); out.println(""); // Set default limit of apps per queue to 15 @@ -204,8 +211,10 @@ public class TestAllocationFileLoaderService { // Set default min share preemption timeout to 2 minutes out.println("120" + ""); - // Set fair share preemption timeout to 5 minutes - out.println("300"); + // Set default fair share preemption timeout to 5 minutes + out.println("300"); + // Set default fair share preemption threshold to 0.4 + out.println("0.4"); // Set default scheduling policy to DRF out.println("drf"); out.println(""); @@ -270,16 +279,50 @@ public class TestAllocationFileLoaderService { assertEquals("alice,bob admins", queueConf.getQueueAcl("root.queueC", QueueACL.SUBMIT_APPLICATIONS).getAclString()); - assertEquals(120000, queueConf.getMinSharePreemptionTimeout("root." + + assertEquals(120000, queueConf.getMinSharePreemptionTimeout("root")); + assertEquals(-1, queueConf.getMinSharePreemptionTimeout("root." 
+ YarnConfiguration.DEFAULT_QUEUE_NAME)); - assertEquals(120000, queueConf.getMinSharePreemptionTimeout("root.queueA")); - assertEquals(120000, queueConf.getMinSharePreemptionTimeout("root.queueB")); - assertEquals(120000, queueConf.getMinSharePreemptionTimeout("root.queueC")); - assertEquals(120000, queueConf.getMinSharePreemptionTimeout("root.queueD")); - assertEquals(120000, queueConf.getMinSharePreemptionTimeout("root.queueA")); + assertEquals(-1, queueConf.getMinSharePreemptionTimeout("root.queueA")); + assertEquals(-1, queueConf.getMinSharePreemptionTimeout("root.queueB")); + assertEquals(-1, queueConf.getMinSharePreemptionTimeout("root.queueC")); + assertEquals(-1, queueConf.getMinSharePreemptionTimeout("root.queueD")); assertEquals(60000, queueConf.getMinSharePreemptionTimeout("root.queueE")); - assertEquals(300000, queueConf.getFairSharePreemptionTimeout()); - + assertEquals(-1, queueConf.getMinSharePreemptionTimeout("root.queueF")); + assertEquals(50000, queueConf.getMinSharePreemptionTimeout("root.queueG")); + assertEquals(40000, queueConf.getMinSharePreemptionTimeout("root.queueG.queueH")); + + assertEquals(300000, queueConf.getFairSharePreemptionTimeout("root")); + assertEquals(-1, queueConf.getFairSharePreemptionTimeout("root." + + YarnConfiguration.DEFAULT_QUEUE_NAME)); + assertEquals(-1, queueConf.getFairSharePreemptionTimeout("root.queueA")); + assertEquals(-1, queueConf.getFairSharePreemptionTimeout("root.queueB")); + assertEquals(-1, queueConf.getFairSharePreemptionTimeout("root.queueC")); + assertEquals(-1, queueConf.getFairSharePreemptionTimeout("root.queueD")); + assertEquals(-1, queueConf.getFairSharePreemptionTimeout("root.queueE")); + assertEquals(-1, queueConf.getFairSharePreemptionTimeout("root.queueF")); + assertEquals(120000, queueConf.getFairSharePreemptionTimeout("root.queueG")); + assertEquals(180000, queueConf.getFairSharePreemptionTimeout("root.queueG.queueH")); + + assertEquals(.4f, queueConf.getFairSharePreemptionThreshold("root"), 0.01); + assertEquals(-1, queueConf.getFairSharePreemptionThreshold("root." 
+ + YarnConfiguration.DEFAULT_QUEUE_NAME), 0.01); + assertEquals(-1, + queueConf.getFairSharePreemptionThreshold("root.queueA"), 0.01); + assertEquals(-1, + queueConf.getFairSharePreemptionThreshold("root.queueB"), 0.01); + assertEquals(-1, + queueConf.getFairSharePreemptionThreshold("root.queueC"), 0.01); + assertEquals(-1, + queueConf.getFairSharePreemptionThreshold("root.queueD"), 0.01); + assertEquals(-1, + queueConf.getFairSharePreemptionThreshold("root.queueE"), 0.01); + assertEquals(-1, + queueConf.getFairSharePreemptionThreshold("root.queueF"), 0.01); + assertEquals(.6f, + queueConf.getFairSharePreemptionThreshold("root.queueG"), 0.01); + assertEquals(.7f, + queueConf.getFairSharePreemptionThreshold("root.queueG.queueH"), 0.01); + assertTrue(queueConf.getConfiguredQueues() .get(FSQueueType.PARENT) .contains("root.queueF")); @@ -327,9 +370,10 @@ public class TestAllocationFileLoaderService { out.println(""); out.println("3"); out.println(""); - // Give queue E a preemption timeout of one minute + // Give queue E a preemption timeout of one minute and 0.3f threshold out.println(""); out.println("60"); + out.println("0.3"); out.println(""); // Set default limit of apps per queue to 15 out.println("15"); @@ -344,6 +388,8 @@ public class TestAllocationFileLoaderService { + ""); // Set fair share preemption timeout to 5 minutes out.println("300"); + // Set default fair share preemption threshold to 0.6f + out.println("0.6"); out.println(""); out.close(); @@ -393,16 +439,37 @@ public class TestAllocationFileLoaderService { assertEquals("alice,bob admins", queueConf.getQueueAcl("root.queueC", QueueACL.SUBMIT_APPLICATIONS).getAclString()); - - assertEquals(120000, queueConf.getMinSharePreemptionTimeout("root." + + assertEquals(120000, queueConf.getMinSharePreemptionTimeout("root")); + assertEquals(-1, queueConf.getMinSharePreemptionTimeout("root." + YarnConfiguration.DEFAULT_QUEUE_NAME)); - assertEquals(120000, queueConf.getMinSharePreemptionTimeout("root.queueA")); - assertEquals(120000, queueConf.getMinSharePreemptionTimeout("root.queueB")); - assertEquals(120000, queueConf.getMinSharePreemptionTimeout("root.queueC")); - assertEquals(120000, queueConf.getMinSharePreemptionTimeout("root.queueD")); - assertEquals(120000, queueConf.getMinSharePreemptionTimeout("root.queueA")); + assertEquals(-1, queueConf.getMinSharePreemptionTimeout("root.queueA")); + assertEquals(-1, queueConf.getMinSharePreemptionTimeout("root.queueB")); + assertEquals(-1, queueConf.getMinSharePreemptionTimeout("root.queueC")); + assertEquals(-1, queueConf.getMinSharePreemptionTimeout("root.queueD")); assertEquals(60000, queueConf.getMinSharePreemptionTimeout("root.queueE")); - assertEquals(300000, queueConf.getFairSharePreemptionTimeout()); + + assertEquals(300000, queueConf.getFairSharePreemptionTimeout("root")); + assertEquals(-1, queueConf.getFairSharePreemptionTimeout("root." + + YarnConfiguration.DEFAULT_QUEUE_NAME)); + assertEquals(-1, queueConf.getFairSharePreemptionTimeout("root.queueA")); + assertEquals(-1, queueConf.getFairSharePreemptionTimeout("root.queueB")); + assertEquals(-1, queueConf.getFairSharePreemptionTimeout("root.queueC")); + assertEquals(-1, queueConf.getFairSharePreemptionTimeout("root.queueD")); + assertEquals(-1, queueConf.getFairSharePreemptionTimeout("root.queueE")); + + assertEquals(.6f, queueConf.getFairSharePreemptionThreshold("root"), 0.01); + assertEquals(-1, queueConf.getFairSharePreemptionThreshold("root." 
+ + YarnConfiguration.DEFAULT_QUEUE_NAME), 0.01); + assertEquals(-1, + queueConf.getFairSharePreemptionThreshold("root.queueA"), 0.01); + assertEquals(-1, + queueConf.getFairSharePreemptionThreshold("root.queueB"), 0.01); + assertEquals(-1, + queueConf.getFairSharePreemptionThreshold("root.queueC"), 0.01); + assertEquals(-1, + queueConf.getFairSharePreemptionThreshold("root.queueD"), 0.01); + assertEquals(.3f, + queueConf.getFairSharePreemptionThreshold("root.queueE"), 0.01); } @Test diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSLeafQueue.java index 7323b6ab050..97736bedd04 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSLeafQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSLeafQueue.java @@ -18,50 +18,66 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; +import java.io.File; +import java.io.FileWriter; import java.io.IOException; +import java.io.PrintWriter; +import java.util.Collection; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.conf.YarnConfiguration; -import org.apache.hadoop.yarn.event.AsyncDispatcher; -import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; +import org.apache.hadoop.yarn.server.resourcemanager.MockNodes; +import org.apache.hadoop.yarn.server.resourcemanager.MockRM; +import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent; import org.apache.hadoop.yarn.util.resource.Resources; +import org.junit.After; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; -public class TestFSLeafQueue { - private FSLeafQueue schedulable = null; - private Resource maxResource = Resources.createResource(10); +public class TestFSLeafQueue extends FairSchedulerTestBase { + private final static String ALLOC_FILE = new File(TEST_DIR, + TestFSLeafQueue.class.getName() + ".xml").getAbsolutePath(); + private Resource maxResource = Resources.createResource(1024 * 8); @Before public void setup() throws IOException { - FairScheduler scheduler = new FairScheduler(); - Configuration conf = createConfiguration(); - // All tests assume only one assignment per node update - conf.set(FairSchedulerConfiguration.ASSIGN_MULTIPLE, "false"); - ResourceManager resourceManager = new ResourceManager(); - resourceManager.init(conf); - ((AsyncDispatcher)resourceManager.getRMContext().getDispatcher()).start(); - scheduler.init(conf); - scheduler.start(); - scheduler.reinitialize(conf, 
resourceManager.getRMContext()); - - String queueName = "root.queue1"; - scheduler.allocConf = mock(AllocationConfiguration.class); - when(scheduler.allocConf.getMaxResources(queueName)).thenReturn(maxResource); - when(scheduler.allocConf.getMinResources(queueName)).thenReturn(Resources.none()); + conf = createConfiguration(); + conf.setClass(YarnConfiguration.RM_SCHEDULER, FairScheduler.class, + ResourceScheduler.class); + } - schedulable = new FSLeafQueue(queueName, scheduler, null); + @After + public void teardown() { + if (resourceManager != null) { + resourceManager.stop(); + resourceManager = null; + } + conf = null; } @Test public void testUpdateDemand() { + conf.set(FairSchedulerConfiguration.ASSIGN_MULTIPLE, "false"); + resourceManager = new MockRM(conf); + resourceManager.start(); + scheduler = (FairScheduler) resourceManager.getResourceScheduler(); + scheduler.allocConf = mock(AllocationConfiguration.class); + + String queueName = "root.queue1"; + when(scheduler.allocConf.getMaxResources(queueName)).thenReturn(maxResource); + when(scheduler.allocConf.getMinResources(queueName)).thenReturn(Resources.none()); + FSLeafQueue schedulable = new FSLeafQueue(queueName, scheduler, null); + FSAppAttempt app = mock(FSAppAttempt.class); Mockito.when(app.getDemand()).thenReturn(maxResource); @@ -73,11 +89,137 @@ public class TestFSLeafQueue { assertTrue("Demand is greater than max allowed ", Resources.equals(schedulable.getDemand(), maxResource)); } - - private Configuration createConfiguration() { - Configuration conf = new YarnConfiguration(); - conf.setClass(YarnConfiguration.RM_SCHEDULER, FairScheduler.class, - ResourceScheduler.class); - return conf; + + @Test (timeout = 5000) + public void test() throws Exception { + conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE); + PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE)); + out.println(""); + out.println(""); + out.println(""); + out.println("2048mb,0vcores"); + out.println(""); + out.println(""); + out.println("2048mb,0vcores"); + out.println(""); + out.println(""); + out.close(); + + resourceManager = new MockRM(conf); + resourceManager.start(); + scheduler = (FairScheduler) resourceManager.getResourceScheduler(); + + // Add one big node (only care about aggregate capacity) + RMNode node1 = + MockNodes.newNodeInfo(1, Resources.createResource(4 * 1024, 4), 1, + "127.0.0.1"); + NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1); + scheduler.handle(nodeEvent1); + + scheduler.update(); + + // Queue A wants 3 * 1024. Node update gives this all to A + createSchedulingRequest(3 * 1024, "queueA", "user1"); + scheduler.update(); + NodeUpdateSchedulerEvent nodeEvent2 = new NodeUpdateSchedulerEvent(node1); + scheduler.handle(nodeEvent2); + + // Queue B arrives and wants 1 * 1024 + createSchedulingRequest(1 * 1024, "queueB", "user1"); + scheduler.update(); + Collection queues = scheduler.getQueueManager().getLeafQueues(); + assertEquals(3, queues.size()); + + // Queue A should be above min share, B below. 
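+ // (A queue is considered starved for min share while its usage stays below
+ // the smaller of its configured min share and its current demand, which is
+ // why B, with demand 1024 MB, stops being starved once it holds 1024 MB.)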
+ FSLeafQueue queueA = + scheduler.getQueueManager().getLeafQueue("queueA", false); + FSLeafQueue queueB = + scheduler.getQueueManager().getLeafQueue("queueB", false); + assertFalse(queueA.isStarvedForMinShare()); + assertTrue(queueB.isStarvedForMinShare()); + + // Node checks in again, should allocate for B + scheduler.handle(nodeEvent2); + // Now B should have min share ( = demand here) + assertFalse(queueB.isStarvedForMinShare()); + } + + @Test (timeout = 5000) + public void testIsStarvedForFairShare() throws Exception { + conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE); + PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE)); + out.println(""); + out.println(""); + out.println(""); + out.println(".2"); + out.println(""); + out.println(""); + out.println(".8"); + out.println(".4"); + out.println(""); + out.println(""); + out.println(""); + out.println(".6"); + out.println(""); + out.println(""); + out.println(".5"); + out.println(""); + out.close(); + + resourceManager = new MockRM(conf); + resourceManager.start(); + scheduler = (FairScheduler) resourceManager.getResourceScheduler(); + + // Add one big node (only care about aggregate capacity) + RMNode node1 = + MockNodes.newNodeInfo(1, Resources.createResource(10 * 1024, 10), 1, + "127.0.0.1"); + NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1); + scheduler.handle(nodeEvent1); + + scheduler.update(); + + // Queue A wants 4 * 1024. Node update gives this all to A + createSchedulingRequest(1 * 1024, "queueA", "user1", 4); + scheduler.update(); + NodeUpdateSchedulerEvent nodeEvent2 = new NodeUpdateSchedulerEvent(node1); + for (int i = 0; i < 4; i ++) { + scheduler.handle(nodeEvent2); + } + + QueueManager queueMgr = scheduler.getQueueManager(); + FSLeafQueue queueA = queueMgr.getLeafQueue("queueA", false); + assertEquals(4 * 1024, queueA.getResourceUsage().getMemory()); + + // Both queue B1 and queue B2 want 3 * 1024 + createSchedulingRequest(1 * 1024, "queueB.queueB1", "user1", 3); + createSchedulingRequest(1 * 1024, "queueB.queueB2", "user1", 3); + scheduler.update(); + for (int i = 0; i < 4; i ++) { + scheduler.handle(nodeEvent2); + } + + FSLeafQueue queueB1 = queueMgr.getLeafQueue("queueB.queueB1", false); + FSLeafQueue queueB2 = queueMgr.getLeafQueue("queueB.queueB2", false); + assertEquals(2 * 1024, queueB1.getResourceUsage().getMemory()); + assertEquals(2 * 1024, queueB2.getResourceUsage().getMemory()); + + // For queue B1, the fairSharePreemptionThreshold is 0.4, and the fair share + // threshold is 1.6 * 1024 + assertFalse(queueB1.isStarvedForFairShare()); + + // For queue B2, the fairSharePreemptionThreshold is 0.6, and the fair share + // threshold is 2.4 * 1024 + assertTrue(queueB2.isStarvedForFairShare()); + + // Node checks in again + scheduler.handle(nodeEvent2); + scheduler.handle(nodeEvent2); + assertEquals(3 * 1024, queueB1.getResourceUsage().getMemory()); + assertEquals(3 * 1024, queueB2.getResourceUsage().getMemory()); + + // Both queue B1 and queue B2 usages go to 3 * 1024 + assertFalse(queueB1.isStarvedForFairShare()); + assertFalse(queueB2.isStarvedForFairShare()); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java index 
79e3184e79c..05b1925575c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java @@ -1059,7 +1059,13 @@ public class TestFairScheduler extends FairSchedulerTestBase { out.println(" "); out.println(" 1024mb,4vcores"); out.println(" "); + out.println(" 100"); + out.println(" 120"); + out.println(" .5"); out.println(""); + out.println("300"); + out.println("200"); + out.println(".6"); out.println(""); out.close(); @@ -1073,125 +1079,10 @@ public class TestFairScheduler extends FairSchedulerTestBase { assertNotNull(queueManager.getLeafQueue("child1", false)); assertNotNull(queueManager.getLeafQueue("child2", false)); - } - - @Test (timeout = 5000) - public void testIsStarvedForMinShare() throws Exception { - conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE); - PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE)); - out.println(""); - out.println(""); - out.println(""); - out.println("2048mb,0vcores"); - out.println(""); - out.println(""); - out.println("2048mb,0vcores"); - out.println(""); - out.println(""); - out.close(); - - scheduler.init(conf); - scheduler.start(); - scheduler.reinitialize(conf, resourceManager.getRMContext()); - - // Add one big node (only care about aggregate capacity) - RMNode node1 = - MockNodes.newNodeInfo(1, Resources.createResource(4 * 1024, 4), 1, - "127.0.0.1"); - NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1); - scheduler.handle(nodeEvent1); - - // Queue A wants 3 * 1024. Node update gives this all to A - createSchedulingRequest(3 * 1024, "queueA", "user1"); - scheduler.update(); - NodeUpdateSchedulerEvent nodeEvent2 = new NodeUpdateSchedulerEvent(node1); - scheduler.handle(nodeEvent2); - - // Queue B arrives and wants 1 * 1024 - createSchedulingRequest(1 * 1024, "queueB", "user1"); - scheduler.update(); - Collection queues = scheduler.getQueueManager().getLeafQueues(); - assertEquals(3, queues.size()); - - // Queue A should be above min share, B below. 
- for (FSLeafQueue p : queues) { - if (p.getName().equals("root.queueA")) { - assertEquals(false, scheduler.isStarvedForMinShare(p)); - } - else if (p.getName().equals("root.queueB")) { - assertEquals(true, scheduler.isStarvedForMinShare(p)); - } - } - - // Node checks in again, should allocate for B - scheduler.handle(nodeEvent2); - // Now B should have min share ( = demand here) - for (FSLeafQueue p : queues) { - if (p.getName().equals("root.queueB")) { - assertEquals(false, scheduler.isStarvedForMinShare(p)); - } - } - } - - @Test (timeout = 5000) - public void testIsStarvedForFairShare() throws Exception { - conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE); - - PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE)); - out.println(""); - out.println(""); - out.println(""); - out.println(".25"); - out.println(""); - out.println(""); - out.println(".75"); - out.println(""); - out.println(""); - out.close(); - - scheduler.init(conf); - scheduler.start(); - scheduler.reinitialize(conf, resourceManager.getRMContext()); - - // Add one big node (only care about aggregate capacity) - RMNode node1 = - MockNodes.newNodeInfo(1, Resources.createResource(4 * 1024, 4), 1, - "127.0.0.1"); - NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1); - scheduler.handle(nodeEvent1); - - // Queue A wants 3 * 1024. Node update gives this all to A - createSchedulingRequest(3 * 1024, "queueA", "user1"); - scheduler.update(); - NodeUpdateSchedulerEvent nodeEvent2 = new NodeUpdateSchedulerEvent(node1); - scheduler.handle(nodeEvent2); - - // Queue B arrives and wants 1 * 1024 - createSchedulingRequest(1 * 1024, "queueB", "user1"); - scheduler.update(); - Collection queues = scheduler.getQueueManager().getLeafQueues(); - assertEquals(3, queues.size()); - - // Queue A should be above fair share, B below. - for (FSLeafQueue p : queues) { - if (p.getName().equals("root.queueA")) { - assertEquals(false, scheduler.isStarvedForFairShare(p)); - } - else if (p.getName().equals("root.queueB")) { - assertEquals(true, scheduler.isStarvedForFairShare(p)); - } - } - - // Node checks in again, should allocate for B - scheduler.handle(nodeEvent2); - // B should not be starved for fair share, since entire demand is - // satisfied. 
- for (FSLeafQueue p : queues) { - if (p.getName().equals("root.queueB")) { - assertEquals(false, scheduler.isStarvedForFairShare(p)); - } - } + assertEquals(100000, root.getFairSharePreemptionTimeout()); + assertEquals(120000, root.getMinSharePreemptionTimeout()); + assertEquals(0.5f, root.getFairSharePreemptionThreshold(), 0.01); } @Test (timeout = 5000) @@ -1378,7 +1269,8 @@ public class TestFairScheduler extends FairSchedulerTestBase { out.println(""); out.println("2"); out.println(""); - out.print("10"); + out.println("10"); + out.println(".5"); out.println(""); out.close(); @@ -1461,8 +1353,9 @@ public class TestFairScheduler extends FairSchedulerTestBase { out.println(".25"); out.println("1024mb,0vcores"); out.println(""); - out.print("5"); - out.print("10"); + out.println("5"); + out.println("10"); + out.println(".5"); out.println(""); out.close(); @@ -1489,7 +1382,6 @@ public class TestFairScheduler extends FairSchedulerTestBase { NodeAddedSchedulerEvent nodeEvent3 = new NodeAddedSchedulerEvent(node3); scheduler.handle(nodeEvent3); - // Queue A and B each request three containers ApplicationAttemptId app1 = createSchedulingRequest(1 * 1024, "queueA", "user1", 1, 1); @@ -1563,6 +1455,303 @@ public class TestFairScheduler extends FairSchedulerTestBase { 1536, scheduler.resToPreempt(schedD, clock.getTime()).getMemory()); } + @Test + /** + * Tests the various timing of decision to preempt tasks. + */ + public void testPreemptionDecisionWithVariousTimeout() throws Exception { + conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE); + MockClock clock = new MockClock(); + scheduler.setClock(clock); + + PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE)); + out.println(""); + out.println(""); + out.println(""); + out.println("0mb,0vcores"); + out.println(""); + out.println(""); + out.println("1"); + out.println("1024mb,0vcores"); + out.println(""); + out.println(""); + out.println("2"); + out.println("10"); + out.println("25"); + out.println(""); + out.println("1024mb,0vcores"); + out.println("5"); + out.println(""); + out.println(""); + out.println("1024mb,0vcores"); + out.println("20"); + out.println(""); + out.println(""); + out.println(""); + out.println("1"); + out.println("1024mb,0vcores"); + out.println(""); + out.print("15"); + out.print("30"); + out.println(""); + out.close(); + + scheduler.init(conf); + scheduler.start(); + scheduler.reinitialize(conf, resourceManager.getRMContext()); + + // Check the min/fair share preemption timeout for each queue + QueueManager queueMgr = scheduler.getQueueManager(); + assertEquals(30000, queueMgr.getQueue("root") + .getFairSharePreemptionTimeout()); + assertEquals(30000, queueMgr.getQueue("default") + .getFairSharePreemptionTimeout()); + assertEquals(30000, queueMgr.getQueue("queueA") + .getFairSharePreemptionTimeout()); + assertEquals(25000, queueMgr.getQueue("queueB") + .getFairSharePreemptionTimeout()); + assertEquals(25000, queueMgr.getQueue("queueB.queueB1") + .getFairSharePreemptionTimeout()); + assertEquals(20000, queueMgr.getQueue("queueB.queueB2") + .getFairSharePreemptionTimeout()); + assertEquals(30000, queueMgr.getQueue("queueC") + .getFairSharePreemptionTimeout()); + assertEquals(15000, queueMgr.getQueue("root") + .getMinSharePreemptionTimeout()); + assertEquals(15000, queueMgr.getQueue("default") + .getMinSharePreemptionTimeout()); + assertEquals(15000, queueMgr.getQueue("queueA") + .getMinSharePreemptionTimeout()); + assertEquals(10000, queueMgr.getQueue("queueB") + .getMinSharePreemptionTimeout()); + 
assertEquals(5000, queueMgr.getQueue("queueB.queueB1") + .getMinSharePreemptionTimeout()); + assertEquals(10000, queueMgr.getQueue("queueB.queueB2") + .getMinSharePreemptionTimeout()); + assertEquals(15000, queueMgr.getQueue("queueC") + .getMinSharePreemptionTimeout()); + + // Create one big node + RMNode node1 = + MockNodes.newNodeInfo(1, Resources.createResource(6 * 1024, 6), 1, + "127.0.0.1"); + NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1); + scheduler.handle(nodeEvent1); + + // Queue A takes all resources + for (int i = 0; i < 6; i ++) { + createSchedulingRequest(1 * 1024, "queueA", "user1", 1, 1); + } + + scheduler.update(); + + // Sufficient node check-ins to fully schedule containers + NodeUpdateSchedulerEvent nodeUpdate1 = new NodeUpdateSchedulerEvent(node1); + for (int i = 0; i < 6; i++) { + scheduler.handle(nodeUpdate1); + } + + // Now new requests arrive from queues B1, B2 and C + createSchedulingRequest(1 * 1024, "queueB.queueB1", "user1", 1, 1); + createSchedulingRequest(1 * 1024, "queueB.queueB1", "user1", 1, 2); + createSchedulingRequest(1 * 1024, "queueB.queueB1", "user1", 1, 3); + createSchedulingRequest(1 * 1024, "queueB.queueB2", "user1", 1, 1); + createSchedulingRequest(1 * 1024, "queueB.queueB2", "user1", 1, 2); + createSchedulingRequest(1 * 1024, "queueB.queueB2", "user1", 1, 3); + createSchedulingRequest(1 * 1024, "queueC", "user1", 1, 1); + createSchedulingRequest(1 * 1024, "queueC", "user1", 1, 2); + createSchedulingRequest(1 * 1024, "queueC", "user1", 1, 3); + + scheduler.update(); + + FSLeafQueue queueB1 = queueMgr.getLeafQueue("queueB.queueB1", true); + FSLeafQueue queueB2 = queueMgr.getLeafQueue("queueB.queueB2", true); + FSLeafQueue queueC = queueMgr.getLeafQueue("queueC", true); + + assertTrue(Resources.equals( + Resources.none(), scheduler.resToPreempt(queueB1, clock.getTime()))); + assertTrue(Resources.equals( + Resources.none(), scheduler.resToPreempt(queueB2, clock.getTime()))); + assertTrue(Resources.equals( + Resources.none(), scheduler.resToPreempt(queueC, clock.getTime()))); + + // After 5 seconds, queueB1 wants to preempt min share + scheduler.update(); + clock.tick(6); + assertEquals( + 1024, scheduler.resToPreempt(queueB1, clock.getTime()).getMemory()); + assertEquals( + 0, scheduler.resToPreempt(queueB2, clock.getTime()).getMemory()); + assertEquals( + 0, scheduler.resToPreempt(queueC, clock.getTime()).getMemory()); + + // After 10 seconds, queueB2 wants to preempt min share + scheduler.update(); + clock.tick(5); + assertEquals( + 1024, scheduler.resToPreempt(queueB1, clock.getTime()).getMemory()); + assertEquals( + 1024, scheduler.resToPreempt(queueB2, clock.getTime()).getMemory()); + assertEquals( + 0, scheduler.resToPreempt(queueC, clock.getTime()).getMemory()); + + // After 15 seconds, queueC wants to preempt min share + scheduler.update(); + clock.tick(5); + assertEquals( + 1024, scheduler.resToPreempt(queueB1, clock.getTime()).getMemory()); + assertEquals( + 1024, scheduler.resToPreempt(queueB2, clock.getTime()).getMemory()); + assertEquals( + 1024, scheduler.resToPreempt(queueC, clock.getTime()).getMemory()); + + // After 20 seconds, queueB2 should want to preempt fair share + scheduler.update(); + clock.tick(5); + assertEquals( + 1024, scheduler.resToPreempt(queueB1, clock.getTime()).getMemory()); + assertEquals( + 1536, scheduler.resToPreempt(queueB2, clock.getTime()).getMemory()); + assertEquals( + 1024, scheduler.resToPreempt(queueC, clock.getTime()).getMemory()); + + // After 25 seconds, queueB1 should want 
to preempt fair share + scheduler.update(); + clock.tick(5); + assertEquals( + 1536, scheduler.resToPreempt(queueB1, clock.getTime()).getMemory()); + assertEquals( + 1536, scheduler.resToPreempt(queueB2, clock.getTime()).getMemory()); + assertEquals( + 1024, scheduler.resToPreempt(queueC, clock.getTime()).getMemory()); + + // After 30 seconds, queueC should want to preempt fair share + scheduler.update(); + clock.tick(5); + assertEquals( + 1536, scheduler.resToPreempt(queueB1, clock.getTime()).getMemory()); + assertEquals( + 1536, scheduler.resToPreempt(queueB2, clock.getTime()).getMemory()); + assertEquals( + 1536, scheduler.resToPreempt(queueC, clock.getTime()).getMemory()); + } + + @Test + public void testBackwardsCompatiblePreemptionConfiguration() throws Exception { + conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE); + + PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE)); + out.println(""); + out.println(""); + out.println(""); + out.println(""); + out.println(""); + out.println(""); + out.println(""); + out.println(""); + out.println("5"); + out.println(""); + out.println(""); + out.println(""); + out.println(""); + out.println(""); + out.println(""); + out.print("15"); + out.print("30"); + out.print("40"); + out.println(""); + out.close(); + + scheduler.init(conf); + scheduler.start(); + scheduler.reinitialize(conf, resourceManager.getRMContext()); + + // Check the min/fair share preemption timeout for each queue + QueueManager queueMgr = scheduler.getQueueManager(); + assertEquals(30000, queueMgr.getQueue("root") + .getFairSharePreemptionTimeout()); + assertEquals(30000, queueMgr.getQueue("default") + .getFairSharePreemptionTimeout()); + assertEquals(30000, queueMgr.getQueue("queueA") + .getFairSharePreemptionTimeout()); + assertEquals(30000, queueMgr.getQueue("queueB") + .getFairSharePreemptionTimeout()); + assertEquals(30000, queueMgr.getQueue("queueB.queueB1") + .getFairSharePreemptionTimeout()); + assertEquals(30000, queueMgr.getQueue("queueB.queueB2") + .getFairSharePreemptionTimeout()); + assertEquals(30000, queueMgr.getQueue("queueC") + .getFairSharePreemptionTimeout()); + assertEquals(15000, queueMgr.getQueue("root") + .getMinSharePreemptionTimeout()); + assertEquals(15000, queueMgr.getQueue("default") + .getMinSharePreemptionTimeout()); + assertEquals(15000, queueMgr.getQueue("queueA") + .getMinSharePreemptionTimeout()); + assertEquals(15000, queueMgr.getQueue("queueB") + .getMinSharePreemptionTimeout()); + assertEquals(5000, queueMgr.getQueue("queueB.queueB1") + .getMinSharePreemptionTimeout()); + assertEquals(15000, queueMgr.getQueue("queueB.queueB2") + .getMinSharePreemptionTimeout()); + assertEquals(15000, queueMgr.getQueue("queueC") + .getMinSharePreemptionTimeout()); + + // If both exist, we take the default one + out = new PrintWriter(new FileWriter(ALLOC_FILE)); + out.println(""); + out.println(""); + out.println(""); + out.println(""); + out.println(""); + out.println(""); + out.println(""); + out.println(""); + out.println("5"); + out.println(""); + out.println(""); + out.println(""); + out.println(""); + out.println(""); + out.println(""); + out.print("15"); + out.print("25"); + out.print("30"); + out.println(""); + out.close(); + + scheduler.reinitialize(conf, resourceManager.getRMContext()); + + assertEquals(25000, queueMgr.getQueue("root") + .getFairSharePreemptionTimeout()); + } + + @Test + public void testPreemptionVariablesForQueueCreatedRuntime() throws Exception { + conf.set(FairSchedulerConfiguration.USER_AS_DEFAULT_QUEUE, 
"true"); + scheduler.init(conf); + scheduler.start(); + scheduler.reinitialize(conf, resourceManager.getRMContext()); + + // Set preemption variables for the root queue + FSParentQueue root = scheduler.getQueueManager().getRootQueue(); + root.setMinSharePreemptionTimeout(10000); + root.setFairSharePreemptionTimeout(15000); + root.setFairSharePreemptionThreshold(.6f); + + // User1 submits one application + ApplicationAttemptId appAttemptId = createAppAttemptId(1, 1); + createApplicationWithAMResource(appAttemptId, "default", "user1", null); + + // The user1 queue should inherit the configurations from the root queue + FSLeafQueue userQueue = + scheduler.getQueueManager().getLeafQueue("user1", true); + assertEquals(1, userQueue.getRunnableAppSchedulables().size()); + assertEquals(10000, userQueue.getMinSharePreemptionTimeout()); + assertEquals(15000, userQueue.getFairSharePreemptionTimeout()); + assertEquals(.6f, userQueue.getFairSharePreemptionThreshold(), 0.001); + } + @Test (timeout = 5000) public void testMultipleContainersWaitingForReservation() throws IOException { scheduler.init(conf); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java index a0e22799290..3d383647ba4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java @@ -68,6 +68,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaS import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAddedSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptAddedSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeResourceUpdateSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.security.NMTokenSecretManagerInRM; @@ -278,17 +279,16 @@ public class TestFifoScheduler { (Map) method.invoke(scheduler); assertEquals(schedulerNodes.values().size(), 1); - // set resource of RMNode to 1024 and verify it works. - node0.setResourceOption(ResourceOption.newInstance( - Resources.createResource(1024, 4), RMNode.OVER_COMMIT_TIMEOUT_MILLIS_DEFAULT)); - assertEquals(node0.getTotalCapability().getMemory(), 1024); - // verify that SchedulerNode's resource hasn't been changed. - assertEquals(schedulerNodes.get(node0.getNodeID()). - getAvailableResource().getMemory(), 2048); - // now, NM heartbeat comes. - NodeUpdateSchedulerEvent node0Update = new NodeUpdateSchedulerEvent(node0); - scheduler.handle(node0Update); - // SchedulerNode's available resource is changed. 
+ Resource newResource = Resources.createResource(1024, 4); + + NodeResourceUpdateSchedulerEvent node0ResourceUpdate = new + NodeResourceUpdateSchedulerEvent(node0, ResourceOption.newInstance( + newResource, RMNode.OVER_COMMIT_TIMEOUT_MILLIS_DEFAULT)); + scheduler.handle(node0ResourceUpdate); + + // SchedulerNode's total resource and available resource are changed. + assertEquals(schedulerNodes.get(node0.getNodeID()).getTotalResource() + .getMemory(), 1024); assertEquals(schedulerNodes.get(node0.getNodeID()). getAvailableResource().getMemory(), 1024); QueueInfo queueInfo = scheduler.getQueueInfo(null, false, false); @@ -324,6 +324,7 @@ public class TestFifoScheduler { // Before the node update event, there are one local request Assert.assertEquals(1, nodeLocal.getNumContainers()); + NodeUpdateSchedulerEvent node0Update = new NodeUpdateSchedulerEvent(node0); // Now schedule. scheduler.handle(node0Update); @@ -544,7 +545,6 @@ public class TestFifoScheduler { LOG.info("--- END: testFifoScheduler ---"); } - @SuppressWarnings("resource") @Test public void testBlackListNodes() throws Exception { Configuration conf = new Configuration(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebAppFairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebAppFairScheduler.java index 1de64896c55..111bf47d2b1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebAppFairScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebAppFairScheduler.java @@ -22,20 +22,29 @@ import com.google.common.collect.Maps; import com.google.inject.Binder; import com.google.inject.Injector; import com.google.inject.Module; +import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.YarnApplicationState; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.MockRMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppMetrics; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; + +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSAppAttempt; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairSchedulerConfiguration; import org.apache.hadoop.yarn.server.resourcemanager.security.ClientToAMTokenSecretManagerInRM; import org.apache.hadoop.yarn.server.resourcemanager.security.NMTokenSecretManagerInRM; import 
org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager; import org.apache.hadoop.yarn.webapp.test.WebAppTests; +import org.junit.Assert; import org.junit.Test; import java.io.IOException; @@ -75,12 +84,67 @@ public class TestRMWebAppFairScheduler { WebAppTests.flushOutput(injector); } + + /** + * Testing inconsistent state between AbstractYarnScheduler#applications and + * RMContext#applications + */ + @Test + public void testFairSchedulerWebAppPageInInconsistentState() { + List appStates = Arrays.asList( + RMAppState.NEW, + RMAppState.NEW_SAVING, + RMAppState.SUBMITTED, + RMAppState.RUNNING, + RMAppState.FINAL_SAVING, + RMAppState.ACCEPTED, + RMAppState.FINISHED + ); + final RMContext rmContext = mockRMContext(appStates); + Injector injector = WebAppTests.createMockInjector(RMContext.class, + rmContext, + new Module() { + @Override + public void configure(Binder binder) { + try { + ResourceManager mockRmWithFairScheduler = + mockRmWithApps(rmContext); + binder.bind(ResourceManager.class).toInstance + (mockRmWithFairScheduler); + + } catch (IOException e) { + throw new IllegalStateException(e); + } + } + }); + FairSchedulerPage fsViewInstance = + injector.getInstance(FairSchedulerPage.class); + try { + fsViewInstance.render(); + } catch (Exception e) { + Assert.fail("Failed to render FairSchedulerPage: " + + StringUtils.stringifyException(e)); + } + WebAppTests.flushOutput(injector); + } + private static RMContext mockRMContext(List states) { final ConcurrentMap applicationsMaps = Maps .newConcurrentMap(); int i = 0; for (RMAppState state : states) { - MockRMApp app = new MockRMApp(i, i, state); + MockRMApp app = new MockRMApp(i, i, state) { + @Override + public RMAppMetrics getRMAppMetrics() { + return new RMAppMetrics(Resource.newInstance(0, 0), 0, 0); + } + @Override + public YarnApplicationState createApplicationState() { + return YarnApplicationState.ACCEPTED; + } + }; + RMAppAttempt attempt = mock(RMAppAttempt.class); + app.setCurrentAppAttempt(attempt); applicationsMaps.put(app.getApplicationId(), app); i++; } @@ -113,4 +177,34 @@ public class TestRMWebAppFairScheduler { fs.init(conf); return fs; } + + private static ResourceManager mockRmWithApps(RMContext rmContext) throws + IOException { + ResourceManager rm = mock(ResourceManager.class); + ResourceScheduler rs = mockFairSchedulerWithoutApps(rmContext); + when(rm.getResourceScheduler()).thenReturn(rs); + when(rm.getRMContext()).thenReturn(rmContext); + return rm; + } + + private static FairScheduler mockFairSchedulerWithoutApps(RMContext rmContext) + throws IOException { + FairScheduler fs = new FairScheduler() { + @Override + public FSAppAttempt getSchedulerApp(ApplicationAttemptId + applicationAttemptId) { + return null ; + } + @Override + public FSAppAttempt getApplicationAttempt(ApplicationAttemptId + applicationAttemptId) { + return null; + } + }; + FairSchedulerConfiguration conf = new FairSchedulerConfiguration(); + fs.setRMContext(rmContext); + fs.init(conf); + return fs; + } + } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesAppsModification.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesAppsModification.java index 12c5686e3ee..e02e410c5a7 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesAppsModification.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesAppsModification.java @@ -22,9 +22,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import static org.junit.Assume.assumeTrue; -import java.io.IOException; -import java.io.StringReader; -import java.io.StringWriter; +import java.io.*; import java.net.URI; import java.util.ArrayList; import java.util.Arrays; @@ -47,6 +45,9 @@ import javax.xml.parsers.ParserConfigurationException; import org.apache.commons.codec.binary.Base64; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.io.DataInputBuffer; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.authentication.server.AuthenticationFilter; import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler; import org.apache.hadoop.yarn.api.records.ApplicationAccessType; @@ -77,6 +78,7 @@ import org.apache.hadoop.yarn.webapp.WebServicesTestUtils; import org.codehaus.jettison.json.JSONException; import org.codehaus.jettison.json.JSONObject; import org.junit.After; +import org.junit.Assert; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; @@ -684,7 +686,8 @@ public class TestRMWebServicesAppsModification extends JerseyTest { CredentialsInfo credentials = new CredentialsInfo(); HashMap tokens = new HashMap(); HashMap secrets = new HashMap(); - secrets.put("secret1", Base64.encodeBase64URLSafeString("secret1".getBytes("UTF8"))); + secrets.put("secret1", Base64.encodeBase64String( + "mysecret".getBytes("UTF8"))); credentials.setSecrets(secrets); credentials.setTokens(tokens); ApplicationSubmissionContextInfo appInfo = new ApplicationSubmissionContextInfo(); @@ -757,6 +760,16 @@ public class TestRMWebServicesAppsModification extends JerseyTest { assertEquals(y.getType(), exampleLR.getType()); assertEquals(y.getPattern(), exampleLR.getPattern()); assertEquals(y.getVisibility(), exampleLR.getVisibility()); + Credentials cs = new Credentials(); + ByteArrayInputStream str = + new ByteArrayInputStream(app.getApplicationSubmissionContext() + .getAMContainerSpec().getTokens().array()); + DataInputStream di = new DataInputStream(str); + cs.readTokenStorageStream(di); + Text key = new Text("secret1"); + assertTrue("Secrets missing from credentials object", cs + .getAllSecretKeys().contains(key)); + assertEquals("mysecret", new String(cs.getSecretKey(key), "UTF-8")); response = this.constructWebResource("apps", appId).accept(acceptMedia) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/FairScheduler.apt.vm b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/FairScheduler.apt.vm index 9bb85631584..df614228381 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/FairScheduler.apt.vm +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/FairScheduler.apt.vm @@ -271,6 +271,17 @@ Allocation file format * minSharePreemptionTimeout: number of seconds the queue is under its minimum share before it will try to preempt containers to take resources from other queues. + If not set, the queue will inherit the value from its parent queue. 
+ + * fairSharePreemptionTimeout: number of seconds the queue is under its fair share + threshold before it will try to preempt containers to take resources from other + queues. If not set, the queue will inherit the value from its parent queue. + + * fairSharePreemptionThreshold: the fair share preemption threshold for the + queue. If the queue waits fairSharePreemptionTimeout without receiving + fairSharePreemptionThreshold*fairShare resources, it is allowed to preempt + containers to take resources from other queues. If not set, the queue will + inherit the value from its parent queue. * <<user elements>>, which represent settings governing the behavior of individual users. They can contain a single property: maxRunningApps, a limit on the @@ -279,14 +290,17 @@ Allocation file format * <<userMaxAppsDefault element>>, which sets the default running app limit for any users whose limit is not otherwise specified. - * <<fairSharePreemptionTimeout element>>, number of seconds a queue is under - its fair share before it will try to preempt containers to take resources from - other queues. + * <<defaultFairSharePreemptionTimeout element>>, which sets the fair share + preemption timeout for the root queue; overridden by fairSharePreemptionTimeout + element in root queue. - * <<defaultMinSharePreemptionTimeout element>>, which sets the default number - of seconds the queue is under its minimum share before it will try to preempt - containers to take resources from other queues; overriden by - minSharePreemptionTimeout element in each queue if specified. + * <<defaultMinSharePreemptionTimeout element>>, which sets the min share + preemption timeout for the root queue; overridden by minSharePreemptionTimeout + element in root queue. + + * <<defaultFairSharePreemptionThreshold element>>, which sets the fair share + preemption threshold for the root queue; overridden by fairSharePreemptionThreshold + element in root queue. * <<queueMaxAppsDefault element>>, which sets the default running app limit for queues; overriden by maxRunningApps element in each queue. @@ -429,13 +443,19 @@ Monitoring through web UI * Max Resources - The configured maximum resources that are allowed to the queue. - * Fair Share - The queue's fair share of resources. Queues may be allocated - resources beyond their fair share when other queues aren't using them. A - queue whose resource consumption lies at or below its fair share will never - have its containers preempted. + * Instantaneous Fair Share - The queue's instantaneous fair share of resources. + These shares consider only active queues (those with running applications), + and are used for scheduling decisions. Queues may be allocated resources + beyond their shares when other queues aren't using them. A queue whose + resource consumption lies at or below its instantaneous fair share will never + have its containers preempted. - In addition to the information that the ResourceManager normally displays - about each application, the web interface includes the application's fair share. + * Steady Fair Share - The queue's steady fair share of resources. These shares + consider all the queues irrespective of whether they are active (have + running applications) or not. These are computed less frequently and + change only when the configuration or capacity changes. They are meant to + provide visibility into resources the user can expect, and hence are displayed + in the Web UI. Moving applications between queues
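The allocation-file elements documented above can be set per queue and as root-level defaults. The following is an illustrative sketch only, not taken from the patch: the queue names and concrete values are invented for the example, while the element names are the ones described in this section.

  <?xml version="1.0"?>
  <allocations>
    <queue name="queueA">
      <!-- queueA may preempt after spending 60s below 0.5 * its fair share -->
      <fairSharePreemptionTimeout>60</fairSharePreemptionTimeout>
      <fairSharePreemptionThreshold>0.5</fairSharePreemptionThreshold>
      <queue name="queueA1">
        <!-- child queues inherit queueA's settings unless they override them -->
        <minSharePreemptionTimeout>30</minSharePreemptionTimeout>
      </queue>
    </queue>
    <!-- root-level defaults, inherited by queues that configure nothing themselves -->
    <defaultFairSharePreemptionTimeout>300</defaultFairSharePreemptionTimeout>
    <defaultMinSharePreemptionTimeout>120</defaultMinSharePreemptionTimeout>
    <defaultFairSharePreemptionThreshold>0.6</defaultFairSharePreemptionThreshold>
  </allocations>

As a worked instance of the threshold, a queue whose instantaneous fair share is 4096 MB and whose fairSharePreemptionThreshold is 0.6 is treated as starved for fair share once its usage stays below 0.6 * 4096 = 2457.6 MB for longer than its fairSharePreemptionTimeout, which is the 2.4 * 1024 boundary checked for queueB2 in TestFSLeafQueue#testIsStarvedForFairShare above.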
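For the node-resource update path exercised by testResourceOverCommit and the RMNodeResourceUpdateEvent tests above, a self-contained sketch of the admin-side call follows. It is not part of the patch; it assumes a running MockRM ("rm") and a registered MockNM ("nm1") as in those tests, and the values (2 GB, 1 vcore, -1 over-commit timeout) simply mirror the test.

  import java.util.HashMap;
  import java.util.Map;

  import org.apache.hadoop.yarn.api.records.NodeId;
  import org.apache.hadoop.yarn.api.records.Resource;
  import org.apache.hadoop.yarn.api.records.ResourceOption;
  import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceRequest;
  import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
  import org.apache.hadoop.yarn.server.resourcemanager.MockRM;

  class NodeResourceUpdateSketch {
    static void shrinkNodeTo2Gb(MockRM rm, MockNM nm1) throws Exception {
      // New capacity for the node: 2 GB memory, 1 vcore. The -1 over-commit
      // timeout mirrors the test and leaves running containers untouched.
      Map<NodeId, ResourceOption> nodeResourceMap =
          new HashMap<NodeId, ResourceOption>();
      nodeResourceMap.put(nm1.getNodeId(),
          ResourceOption.newInstance(Resource.newInstance(2 * 1024, 1), -1));
      UpdateNodeResourceRequest request =
          UpdateNodeResourceRequest.newInstance(nodeResourceMap);
      // The RM propagates this as an RMNodeResourceUpdateEvent to the node and
      // a NodeResourceUpdateSchedulerEvent to the scheduler, the same events
      // the tests above drive directly.
      rm.getAdminService().updateNodeResource(request);
    }
  }

After the call, the scheduler's node report shows the new 2 GB total capacity while used resource stays at whatever is currently allocated, so available memory can go negative until containers finish, as the over-commit test verifies.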