From 88f6837fa5fcea92532f915ccf1036b9012e2fed Mon Sep 17 00:00:00 2001 From: Sanford Ryza Date: Wed, 14 Aug 2013 04:37:06 +0000 Subject: [PATCH 01/53] YARN-1060. Two tests in TestFairScheduler are missing @Test annotation (Niranjan Singh via Sandy Ryza) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1513724 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 3 +++ .../resourcemanager/scheduler/fair/TestFairScheduler.java | 2 ++ 2 files changed, 5 insertions(+) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index c34f1494ce0..a0876b7e44b 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -30,6 +30,9 @@ Release 2.3.0 - UNRELEASED YARN-758. Augment MockNM to use multiple cores (Karthik Kambatla via Sandy Ryza) + YARN-1060. Two tests in TestFairScheduler are missing @Test annotation + (Niranjan Singh via Sandy Ryza) + Release 2.1.1-beta - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java index 8be344c9e31..1d68338a026 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java @@ -1991,6 +1991,7 @@ public class TestFairScheduler { assertEquals(0, app.getReservedContainers().size()); } + @Test public void testNoMoreCpuOnNode() { RMNode node1 = MockNodes.newNodeInfo(1, Resources.createResource(2048, 1), 1, "127.0.0.1"); @@ -2009,6 +2010,7 @@ public class TestFairScheduler { assertEquals(1, app.getLiveContainers().size()); } + @Test public void testBasicDRFAssignment() throws Exception { RMNode node = MockNodes.newNodeInfo(1, BuilderUtils.newResource(8192, 5)); NodeAddedSchedulerEvent nodeEvent = new NodeAddedSchedulerEvent(node); From 8937fd537a0c5471b44db621bb9d702478e68a29 Mon Sep 17 00:00:00 2001 From: Jason Darrell Lowe Date: Wed, 14 Aug 2013 14:18:16 +0000 Subject: [PATCH 02/53] YARN-337. RM handles killed application tracking URL poorly. Contributed by Jason Lowe git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1513888 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 4 ++++ .../rmapp/attempt/RMAppAttemptImpl.java | 10 ++++------ .../attempt/TestRMAppAttemptTransitions.java | 20 +++++++++++++++++++ 3 files changed, 28 insertions(+), 6 deletions(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index a0876b7e44b..457ec5fdac9 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -68,6 +68,8 @@ Release 2.1.1-beta - UNRELEASED YARN-994. HeartBeat thread in AMRMClientAsync does not handle runtime exception correctly (Xuan Gong via bikas) + YARN-337. RM handles killed application tracking URL poorly (jlowe) + Release 2.1.0-beta - 2013-08-06 INCOMPATIBLE CHANGES @@ -1178,6 +1180,8 @@ Release 0.23.10 - UNRELEASED BUG FIXES + YARN-337. 
RM handles killed application tracking URL poorly (jlowe) + Release 0.23.9 - 2013-07-08 INCOMPATIBLE CHANGES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java index 4a877cadae7..e287c203728 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java @@ -865,6 +865,8 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable { break; case KILLED: { + // don't leave the tracking URL pointing to a non-existent AM + appAttempt.setTrackingUrlToRMAppPage(); appEvent = new RMAppFailedAttemptEvent(applicationId, RMAppEventType.ATTEMPT_KILLED, @@ -873,6 +875,8 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable { break; case FAILED: { + // don't leave the tracking URL pointing to a non-existent AM + appAttempt.setTrackingUrlToRMAppPage(); appEvent = new RMAppFailedAttemptEvent(applicationId, RMAppEventType.ATTEMPT_FAILED, @@ -1063,7 +1067,6 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable { RMAppAttemptEvent event) { appAttempt.diagnostics.append("ApplicationMaster for attempt " + appAttempt.getAppAttemptId() + " timed out"); - appAttempt.setTrackingUrlToRMAppPage(); super.transition(appAttempt, event); } } @@ -1182,11 +1185,6 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable { " due to: " + containerStatus.getDiagnostics() + "." + "Failing this attempt."); - // When the AM dies, the trackingUrl is left pointing to the AM's URL, - // which shows up in the scheduler UI as a broken link. Direct the - // user to the app page on the RM so they can see the status and logs. 
- appAttempt.setTrackingUrlToRMAppPage(); - new FinalTransition(RMAppAttemptState.FAILED).transition( appAttempt, containerFinishedEvent); return RMAppAttemptState.FAILED; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java index cafe4f9a705..d61b5c9e6f6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java @@ -691,6 +691,26 @@ public class TestRMAppAttemptTransitions { assertEquals(rmAppPageUrl, applicationAttempt.getTrackingUrl()); } + @Test + public void testRunningToKilled() { + Container amContainer = allocateApplicationAttempt(); + launchApplicationAttempt(amContainer); + runApplicationAttempt(amContainer, "host", 8042, "oldtrackingurl"); + applicationAttempt.handle( + new RMAppAttemptEvent( + applicationAttempt.getAppAttemptId(), + RMAppAttemptEventType.KILL)); + assertEquals(RMAppAttemptState.KILLED, + applicationAttempt.getAppAttemptState()); + assertEquals(0,applicationAttempt.getJustFinishedContainers().size()); + assertEquals(amContainer, applicationAttempt.getMasterContainer()); + assertEquals(0, applicationAttempt.getRanNodes().size()); + String rmAppPageUrl = pjoin(RM_WEBAPP_ADDR, "cluster", "app", + applicationAttempt.getAppAttemptId().getApplicationId()); + assertEquals(rmAppPageUrl, applicationAttempt.getOriginalTrackingUrl()); + assertEquals(rmAppPageUrl, applicationAttempt.getTrackingUrl()); + } + @Test(timeout=10000) public void testLaunchedExpire() { Container amContainer = allocateApplicationAttempt(); From 6390e64abd7978741c7326a972516451bfb76214 Mon Sep 17 00:00:00 2001 From: Luke Lu Date: Wed, 14 Aug 2013 20:23:07 +0000 Subject: [PATCH 03/53] HADOOP 9871. Fix intermittent findbugs warnings in DefaultMetricsSystem. (Junping Du via llu) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1514024 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++ .../org/apache/hadoop/metrics2/lib/DefaultMetricsSystem.java | 4 ++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 5b557b9a06b..3ba295873ff 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -293,6 +293,9 @@ Release 2.3.0 - UNRELEASED IMPROVEMENTS + HADOOP 9871. Fix intermittent findbugs warnings in DefaultMetricsSystem. + (Junping Du via llu) + HADOOP-9319. Update bundled LZ4 source to r99. (Binglin Chang via llu) HADOOP-9241. 
DU refresh interval is not configurable (harsh) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/DefaultMetricsSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/DefaultMetricsSystem.java index ce38a95b213..be785b98544 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/DefaultMetricsSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/DefaultMetricsSystem.java @@ -46,8 +46,8 @@ public enum DefaultMetricsSystem { @VisibleForTesting volatile boolean miniClusterMode = false; - final UniqueNames mBeanNames = new UniqueNames(); - final UniqueNames sourceNames = new UniqueNames(); + transient final UniqueNames mBeanNames = new UniqueNames(); + transient final UniqueNames sourceNames = new UniqueNames(); /** * Convenience method to initialize the metrics system From 8451ab5c01fdd290b33881daae8757c5f77a0d0d Mon Sep 17 00:00:00 2001 From: Alejandro Abdelnur Date: Wed, 14 Aug 2013 22:15:04 +0000 Subject: [PATCH 04/53] HADOOP-9872. Improve protoc version handling and detection. (tucu) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1514068 13f79535-47bb-0310-9956-ffa450edef68 --- BUILDING.txt | 12 +++++++++++- hadoop-common-project/hadoop-common/CHANGES.txt | 6 ++++-- hadoop-common-project/hadoop-common/pom.xml | 2 ++ .../org/apache/hadoop/util/VersionInfo.java | 14 +++++++++++++- .../resources/common-version-info.properties | 1 + .../src/site/apt/CLIMiniCluster.apt.vm | 2 +- .../src/site/apt/SingleCluster.apt.vm | 2 +- hadoop-hdfs-project/hadoop-hdfs/pom.xml | 12 ++++++++---- .../hadoop-hdfs/src/contrib/bkjournal/pom.xml | 3 ++- .../hadoop-mapreduce-client-common/pom.xml | 3 ++- .../hadoop-mapreduce-client-hs/pom.xml | 3 ++- .../hadoop/maven/plugin/protoc/ProtocMojo.java | 17 +++++++++-------- .../apache/hadoop/maven/plugin/util/Exec.java | 7 +++---- hadoop-project/pom.xml | 1 + hadoop-yarn-project/hadoop-yarn/README | 8 +------- .../hadoop-yarn/hadoop-yarn-api/pom.xml | 3 ++- .../hadoop-yarn-server-common/pom.xml | 3 ++- .../hadoop-yarn-server-nodemanager/pom.xml | 3 ++- 18 files changed, 67 insertions(+), 35 deletions(-) diff --git a/BUILDING.txt b/BUILDING.txt index 7e3d450953f..ff6dea26ad4 100644 --- a/BUILDING.txt +++ b/BUILDING.txt @@ -7,7 +7,7 @@ Requirements: * JDK 1.6 * Maven 3.0 * Findbugs 1.3.9 (if running findbugs) -* ProtocolBuffer 2.4.1+ (for MapReduce and HDFS) +* ProtocolBuffer 2.5.0 * CMake 2.6 or newer (if compiling native code) * Internet connection for first build (to fetch all Maven and Hadoop dependencies) @@ -99,6 +99,16 @@ level once; and then work from the submodule. Keep in mind that SNAPSHOTs time out after a while, using the Maven '-nsu' will stop Maven from trying to update SNAPSHOTs from external repos. +---------------------------------------------------------------------------------- +Protocol Buffer compiler + +The version of Protocol Buffer compiler, protoc, must match the version of the +protobuf JAR. + +If you have multiple versions of protoc in your system, you can set in your +build shell the HADOOP_PROTOC_PATH environment variable to point to the one you +want to use for the Hadoop build. If you don't define this environment variable, +protoc is looked up in the PATH. 
---------------------------------------------------------------------------------- Importing projects to eclipse diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 3ba295873ff..77f735504b6 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -105,8 +105,6 @@ Trunk (Unreleased) HADOOP-9833 move slf4j to version 1.7.5 (Kousuke Saruta via stevel) - HADOOP-9845. Update protobuf to 2.5 from 2.4.x. (tucu) - BUG FIXES HADOOP-9451. Fault single-layer config if node group topology is enabled. @@ -584,6 +582,10 @@ Release 2.1.0-beta - 2013-08-06 HADOOP-9150. Avoid unnecessary DNS resolution attempts for logical URIs (todd) + HADOOP-9845. Update protobuf to 2.5 from 2.4.x. (tucu) + + HADOOP-9872. Improve protoc version handling and detection. (tucu) + BUG FIXES HADOOP-9294. GetGroupsTestBase fails on Windows. (Chris Nauroth via suresh) diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml index 36c32607b9b..a7462ea5a3b 100644 --- a/hadoop-common-project/hadoop-common/pom.xml +++ b/hadoop-common-project/hadoop-common/pom.xml @@ -309,6 +309,7 @@ ${protobuf.version} + ${protoc.path} ${basedir}/src/main/proto @@ -338,6 +339,7 @@ ${protobuf.version} + ${protoc.path} ${basedir}/src/test/proto diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionInfo.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionInfo.java index 5d7614f1ebf..0f08f15ffa4 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionInfo.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionInfo.java @@ -90,6 +90,10 @@ public class VersionInfo { " source checksum " + _getSrcChecksum(); } + protected String _getProtocVersion() { + return info.getProperty("protocVersion", "Unknown"); + } + private static VersionInfo COMMON_VERSION_INFO = new VersionInfo("common"); /** * Get the Hadoop version. @@ -153,12 +157,20 @@ public class VersionInfo { public static String getBuildVersion(){ return COMMON_VERSION_INFO._getBuildVersion(); } - + + /** + * Returns the protoc version used for the build. 
+ */ + public static String getProtocVersion(){ + return COMMON_VERSION_INFO._getProtocVersion(); + } + public static void main(String[] args) { LOG.debug("version: "+ getVersion()); System.out.println("Hadoop " + getVersion()); System.out.println("Subversion " + getUrl() + " -r " + getRevision()); System.out.println("Compiled by " + getUser() + " on " + getDate()); + System.out.println("Compiled with protoc " + getProtocVersion()); System.out.println("From source with checksum " + getSrcChecksum()); System.out.println("This command was run using " + ClassUtil.findContainingJar(VersionInfo.class)); diff --git a/hadoop-common-project/hadoop-common/src/main/resources/common-version-info.properties b/hadoop-common-project/hadoop-common/src/main/resources/common-version-info.properties index 9a8575c6dea..ad9a24d590b 100644 --- a/hadoop-common-project/hadoop-common/src/main/resources/common-version-info.properties +++ b/hadoop-common-project/hadoop-common/src/main/resources/common-version-info.properties @@ -23,3 +23,4 @@ user=${user.name} date=${version-info.build.time} url=${version-info.scm.uri} srcChecksum=${version-info.source.md5} +protocVersion=${protobuf.version} \ No newline at end of file diff --git a/hadoop-common-project/hadoop-common/src/site/apt/CLIMiniCluster.apt.vm b/hadoop-common-project/hadoop-common/src/site/apt/CLIMiniCluster.apt.vm index 957b99463f0..51a5a9afac2 100644 --- a/hadoop-common-project/hadoop-common/src/site/apt/CLIMiniCluster.apt.vm +++ b/hadoop-common-project/hadoop-common/src/site/apt/CLIMiniCluster.apt.vm @@ -42,7 +42,7 @@ Hadoop MapReduce Next Generation - CLI MiniCluster. $ mvn clean install -DskipTests $ mvn package -Pdist -Dtar -DskipTests -Dmaven.javadoc.skip +---+ - <> You will need protoc installed of version 2.4.1 or greater. + <> You will need protoc 2.5.0 installed. The tarball should be available in <<>> directory. diff --git a/hadoop-common-project/hadoop-common/src/site/apt/SingleCluster.apt.vm b/hadoop-common-project/hadoop-common/src/site/apt/SingleCluster.apt.vm index 0cec916039c..d7058d94914 100644 --- a/hadoop-common-project/hadoop-common/src/site/apt/SingleCluster.apt.vm +++ b/hadoop-common-project/hadoop-common/src/site/apt/SingleCluster.apt.vm @@ -32,7 +32,7 @@ $ mvn clean install -DskipTests $ cd hadoop-mapreduce-project $ mvn clean install assembly:assembly -Pnative +---+ - <> You will need protoc installed of version 2.4.1 or greater. + <> You will need protoc 2.5.0 installed. To ignore the native builds in mapreduce you can omit the <<<-Pnative>>> argument for maven. The tarball should be available in <<>> directory. 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml index dd9b960eece..f0e3ac882b5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml @@ -417,7 +417,8 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> protoc - 2.5.0 + ${protobuf.version} + ${protoc.path} ${basedir}/../../hadoop-common-project/hadoop-common/src/main/proto ${basedir}/src/main/proto @@ -442,7 +443,8 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> protoc - 2.5.0 + ${protobuf.version} + ${protoc.path} ${basedir}/../../hadoop-common-project/hadoop-common/src/main/proto ${basedir}/src/main/proto @@ -464,7 +466,8 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> protoc - 2.5.0 + ${protobuf.version} + ${protoc.path} ${basedir}/../../hadoop-common-project/hadoop-common/src/main/proto ${basedir}/src/main/proto @@ -486,7 +489,8 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> protoc - 2.5.0 + ${protobuf.version} + ${protoc.path} ${basedir}/../../hadoop-common-project/hadoop-common/src/main/proto ${basedir}/src/main/proto diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml index a306433776e..537dee79c4d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml @@ -103,7 +103,8 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> protoc - 2.5.0 + ${protobuf.version} + ${protoc.path} ${basedir}/../../../../../hadoop-common-project/hadoop-common/src/main/proto ${basedir}/../../../../../hadoop-hdfs-project/hadoop-hdfs/src/main/proto diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml index 39a2ecbe942..7ef7d3b038d 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml @@ -64,7 +64,8 @@ protoc - 2.5.0 + ${protobuf.version} + ${protoc.path} ${basedir}/../../../hadoop-common-project/hadoop-common/src/main/proto ${basedir}/../../../hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml index 0d9ccf8c7f3..31587782f43 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml @@ -78,7 +78,8 @@ protoc - 2.5.0 + ${protobuf.version} + ${protoc.path} ${basedir}/../../../../hadoop-common-project/hadoop-common/src/main/proto diff --git a/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/protoc/ProtocMojo.java b/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/protoc/ProtocMojo.java index 51901ff14d5..bd62f55d191 100644 --- a/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/protoc/ProtocMojo.java +++ b/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/protoc/ProtocMojo.java @@ -45,7 +45,7 @@ public class ProtocMojo extends AbstractMojo { @Parameter(required=true) private FileSet source; - @Parameter(defaultValue="protoc") + @Parameter private String protocCommand; @Parameter(required=true) @@ -53,21 +53,22 @@ public class ProtocMojo extends 
AbstractMojo { public void execute() throws MojoExecutionException { try { + if (protocCommand == null || protocCommand.trim().isEmpty()) { + protocCommand = "protoc"; + } List command = new ArrayList(); command.add(protocCommand); command.add("--version"); Exec exec = new Exec(this); List out = new ArrayList(); - if (exec.run(command, out) != 0) { - getLog().error("protoc, could not get version"); - for (String s : out) { - getLog().error(s); - } + if (exec.run(command, out) == 127) { + getLog().error("protoc, not found at: " + protocCommand); throw new MojoExecutionException("protoc failure"); } else { - if (out.size() == 0) { + if (out.isEmpty()) { + getLog().error("stdout: " + out); throw new MojoExecutionException( - "'protoc -version' did not return a version"); + "'protoc --version' did not return a version"); } else { if (!out.get(0).endsWith(protocVersion)) { throw new MojoExecutionException( diff --git a/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/util/Exec.java b/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/util/Exec.java index 144ee135623..7dafe817bc3 100644 --- a/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/util/Exec.java +++ b/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/util/Exec.java @@ -63,11 +63,10 @@ public class Exec { for (String s : stdErr.getOutput()) { mojo.getLog().debug(s); } - } else { - stdOut.join(); - stdErr.join(); - output.addAll(stdOut.getOutput()); } + stdOut.join(); + stdErr.join(); + output.addAll(stdOut.getOutput()); } catch (Exception ex) { mojo.getLog().warn(command + " failed: " + ex.toString()); } diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index 1a923896c15..e1bb833c630 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -62,6 +62,7 @@ 2.5.0 + ${env.HADOOP_PROTOC_PATH} diff --git a/hadoop-yarn-project/hadoop-yarn/README b/hadoop-yarn-project/hadoop-yarn/README index b15870fa83a..4e6aaa523e7 100644 --- a/hadoop-yarn-project/hadoop-yarn/README +++ b/hadoop-yarn-project/hadoop-yarn/README @@ -8,15 +8,9 @@ Maven: Maven 3 Setup ----- -Install protobuf 2.4.0a or higher (Download from http://code.google.com/p/protobuf/downloads/list) +Install protobuf 2.5.0 (Download from http://code.google.com/p/protobuf/downloads/list) - install the protoc executable (configure, make, make install) - install the maven artifact (cd java; mvn install) -Installing protoc requires gcc 4.1.x or higher. 
-If the make step fails with (Valid until a fix is released for protobuf 2.4.0a) - ./google/protobuf/descriptor.h:1152: error: - `google::protobuf::internal::Mutex*google::protobuf::DescriptorPool::mutex_' - is private - Replace descriptor.cc with http://protobuf.googlecode.com/svn-history/r380/trunk/src/google/protobuf/descriptor.cc Quick Maven Tips diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml index 9c716cde260..c0df5d8eb34 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml @@ -45,7 +45,8 @@ protoc - 2.5.0 + ${protobuf.version} + ${protoc.path} ${basedir}/../../../hadoop-common-project/hadoop-common/src/main/proto ${basedir}/src/main/proto diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml index d987f407c0d..3d3686e4283 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml @@ -73,7 +73,8 @@ protoc - 2.5.0 + ${protobuf.version} + ${protoc.path} ${basedir}/../../../../hadoop-common-project/hadoop-common/src/main/proto ${basedir}/../../hadoop-yarn-api/src/main/proto diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml index f42d567b16e..9eacd20f02a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml @@ -165,7 +165,8 @@ protoc - 2.5.0 + ${protobuf.version} + ${protoc.path} ${basedir}/../../../../hadoop-common-project/hadoop-common/src/main/proto ${basedir}/../../hadoop-yarn-api/src/main/proto From fdba5fac263f9bf79fccf566c36bbc42ef67e875 Mon Sep 17 00:00:00 2001 From: Colin McCabe Date: Wed, 14 Aug 2013 23:12:55 +0000 Subject: [PATCH 05/53] HADOOP-9652. RawLocalFs#getFileLinkStatus does not fill in the link owner and mode. (Andrew Wang via Colin Patrick McCabe) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1514088 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 4 + .../hadoop/fs/DelegateToFileSystem.java | 19 +- .../java/org/apache/hadoop/fs/HardLink.java | 40 +---- .../apache/hadoop/fs/RawLocalFileSystem.java | 74 +++++++- .../main/java/org/apache/hadoop/fs/Stat.java | 167 ++++++++++++++++++ .../apache/hadoop/fs/local/RawLocalFs.java | 94 +--------- .../java/org/apache/hadoop/util/Shell.java | 56 +++++- .../apache/hadoop/fs/TestLocalFileSystem.java | 19 +- .../java/org/apache/hadoop/fs/TestStat.java | 122 +++++++++++++ .../apache/hadoop/fs/TestSymlinkLocalFS.java | 15 +- 10 files changed, 453 insertions(+), 157 deletions(-) create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Stat.java create mode 100644 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestStat.java diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 77f735504b6..52073fa31c8 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -331,6 +331,10 @@ Release 2.3.0 - UNRELEASED HADOOP-9817. 
FileSystem#globStatus and FileContext#globStatus need to work with symlinks. (Colin Patrick McCabe via Andrew Wang) + HADOOP-9652. RawLocalFs#getFileLinkStatus does not fill in the link owner + and mode. (Andrew Wang via Colin Patrick McCabe) + + Release 2.1.1-beta - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegateToFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegateToFileSystem.java index 1293448eea3..708ca4ada5b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegateToFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegateToFileSystem.java @@ -113,7 +113,14 @@ public abstract class DelegateToFileSystem extends AbstractFileSystem { @Override public FileStatus getFileLinkStatus(final Path f) throws IOException { - return getFileStatus(f); + FileStatus status = fsImpl.getFileLinkStatus(f); + // FileSystem#getFileLinkStatus qualifies the link target + // AbstractFileSystem needs to return it plain since it's qualified + // in FileContext, so re-get and set the plain target + if (status.isSymlink()) { + status.setSymlink(fsImpl.getLinkTarget(f)); + } + return status; } @Override @@ -199,22 +206,18 @@ public abstract class DelegateToFileSystem extends AbstractFileSystem { @Override public boolean supportsSymlinks() { - return false; + return fsImpl.supportsSymlinks(); } @Override public void createSymlink(Path target, Path link, boolean createParent) throws IOException { - throw new IOException("File system does not support symlinks"); + fsImpl.createSymlink(target, link, createParent); } @Override public Path getLinkTarget(final Path f) throws IOException { - /* We should never get here. Any file system that threw an - * UnresolvedLinkException, causing this function to be called, - * should override getLinkTarget. 
- */ - throw new AssertionError(); + return fsImpl.getLinkTarget(f); } @Override //AbstractFileSystem diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HardLink.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HardLink.java index 5e462cdc441..bf5ed6d58f7 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HardLink.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HardLink.java @@ -41,15 +41,6 @@ import org.apache.hadoop.util.Shell; */ public class HardLink { - public enum OSType { - OS_TYPE_UNIX, - OS_TYPE_WIN, - OS_TYPE_SOLARIS, - OS_TYPE_MAC, - OS_TYPE_FREEBSD - } - - public static OSType osType; private static HardLinkCommandGetter getHardLinkCommand; public final LinkStats linkStats; //not static @@ -57,19 +48,18 @@ public class HardLink { //initialize the command "getters" statically, so can use their //methods without instantiating the HardLink object static { - osType = getOSType(); - if (osType == OSType.OS_TYPE_WIN) { + if (Shell.WINDOWS) { // Windows getHardLinkCommand = new HardLinkCGWin(); } else { - // Unix + // Unix or Linux getHardLinkCommand = new HardLinkCGUnix(); //override getLinkCountCommand for the particular Unix variant //Linux is already set as the default - {"stat","-c%h", null} - if (osType == OSType.OS_TYPE_MAC || osType == OSType.OS_TYPE_FREEBSD) { + if (Shell.MAC || Shell.FREEBSD) { String[] linkCountCmdTemplate = {"/usr/bin/stat","-f%l", null}; HardLinkCGUnix.setLinkCountCmdTemplate(linkCountCmdTemplate); - } else if (osType == OSType.OS_TYPE_SOLARIS) { + } else if (Shell.SOLARIS) { String[] linkCountCmdTemplate = {"ls","-l", null}; HardLinkCGUnix.setLinkCountCmdTemplate(linkCountCmdTemplate); } @@ -80,26 +70,6 @@ public class HardLink { linkStats = new LinkStats(); } - static private OSType getOSType() { - String osName = System.getProperty("os.name"); - if (Shell.WINDOWS) { - return OSType.OS_TYPE_WIN; - } - else if (osName.contains("SunOS") - || osName.contains("Solaris")) { - return OSType.OS_TYPE_SOLARIS; - } - else if (osName.contains("Mac")) { - return OSType.OS_TYPE_MAC; - } - else if (osName.contains("FreeBSD")) { - return OSType.OS_TYPE_FREEBSD; - } - else { - return OSType.OS_TYPE_UNIX; - } - } - /** * This abstract class bridges the OS-dependent implementations of the * needed functionality for creating hardlinks and querying link counts. 
@@ -548,7 +518,7 @@ public class HardLink { if (inpMsg == null || exitValue != 0) { throw createIOException(fileName, inpMsg, errMsg, exitValue, null); } - if (osType == OSType.OS_TYPE_SOLARIS) { + if (Shell.SOLARIS) { String[] result = inpMsg.split("\\s+"); return Integer.parseInt(result[1]); } else { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java index d693214163b..42f77fc3508 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java @@ -51,6 +51,7 @@ import org.apache.hadoop.util.StringUtils; public class RawLocalFileSystem extends FileSystem { static final URI NAME = URI.create("file:///"); private Path workingDir; + private static final boolean useDeprecatedFileStatus = !Stat.isAvailable(); public RawLocalFileSystem() { workingDir = getInitialWorkingDirectory(); @@ -385,8 +386,11 @@ public class RawLocalFileSystem extends FileSystem { throw new FileNotFoundException("File " + f + " does not exist"); } if (localf.isFile()) { + if (!useDeprecatedFileStatus) { + return new FileStatus[] { getFileStatus(f) }; + } return new FileStatus[] { - new RawLocalFileStatus(localf, getDefaultBlockSize(f), this) }; + new DeprecatedRawLocalFileStatus(localf, getDefaultBlockSize(f), this)}; } File[] names = localf.listFiles(); @@ -516,15 +520,22 @@ public class RawLocalFileSystem extends FileSystem { @Override public FileStatus getFileStatus(Path f) throws IOException { + return getFileLinkStatusInternal(f, true); + } + + @Deprecated + private FileStatus deprecatedGetFileStatus(Path f) throws IOException { File path = pathToFile(f); if (path.exists()) { - return new RawLocalFileStatus(pathToFile(f), getDefaultBlockSize(f), this); + return new DeprecatedRawLocalFileStatus(pathToFile(f), + getDefaultBlockSize(f), this); } else { throw new FileNotFoundException("File " + f + " does not exist"); } } - static class RawLocalFileStatus extends FileStatus { + @Deprecated + static class DeprecatedRawLocalFileStatus extends FileStatus { /* We can add extra fields here. It breaks at least CopyFiles.FilePair(). * We recognize if the information is already loaded by check if * onwer.equals(""). @@ -533,7 +544,7 @@ public class RawLocalFileSystem extends FileSystem { return !super.getOwner().isEmpty(); } - RawLocalFileStatus(File f, long defaultBlockSize, FileSystem fs) { + DeprecatedRawLocalFileStatus(File f, long defaultBlockSize, FileSystem fs) { super(f.length(), f.isDirectory(), 1, defaultBlockSize, f.lastModified(), new Path(f.getPath()).makeQualified(fs.getUri(), fs.getWorkingDirectory())); @@ -699,7 +710,7 @@ public class RawLocalFileSystem extends FileSystem { */ @Override public FileStatus getFileLinkStatus(final Path f) throws IOException { - FileStatus fi = getFileLinkStatusInternal(f); + FileStatus fi = getFileLinkStatusInternal(f, false); // getFileLinkStatus is supposed to return a symlink with a // qualified path if (fi.isSymlink()) { @@ -710,7 +721,35 @@ public class RawLocalFileSystem extends FileSystem { return fi; } - private FileStatus getFileLinkStatusInternal(final Path f) throws IOException { + /** + * Public {@link FileStatus} methods delegate to this function, which in turn + * either call the new {@link Stat} based implementation or the deprecated + * methods based on platform support. 
+ * + * @param f Path to stat + * @param dereference whether to dereference the final path component if a + * symlink + * @return FileStatus of f + * @throws IOException + */ + private FileStatus getFileLinkStatusInternal(final Path f, + boolean dereference) throws IOException { + if (!useDeprecatedFileStatus) { + return getNativeFileLinkStatus(f, dereference); + } else if (dereference) { + return deprecatedGetFileStatus(f); + } else { + return deprecatedGetFileLinkStatusInternal(f); + } + } + + /** + * Deprecated. Remains for legacy support. Should be removed when {@link Stat} + * gains support for Windows and other operating systems. + */ + @Deprecated + private FileStatus deprecatedGetFileLinkStatusInternal(final Path f) + throws IOException { String target = FileUtil.readLink(new File(f.toString())); try { @@ -746,10 +785,31 @@ public class RawLocalFileSystem extends FileSystem { throw e; } } + /** + * Calls out to platform's native stat(1) implementation to get file metadata + * (permissions, user, group, atime, mtime, etc). This works around the lack + * of lstat(2) in Java 6. + * + * Currently, the {@link Stat} class used to do this only supports Linux + * and FreeBSD, so the old {@link #deprecatedGetFileLinkStatusInternal(Path)} + * implementation (deprecated) remains further OS support is added. + * + * @param f File to stat + * @param dereference whether to dereference symlinks + * @return FileStatus of f + * @throws IOException + */ + private FileStatus getNativeFileLinkStatus(final Path f, + boolean dereference) throws IOException { + checkPath(f); + Stat stat = new Stat(f, getDefaultBlockSize(f), dereference, this); + FileStatus status = stat.getFileStatus(); + return status; + } @Override public Path getLinkTarget(Path f) throws IOException { - FileStatus fi = getFileLinkStatusInternal(f); + FileStatus fi = getFileLinkStatusInternal(f, false); // return an unqualified symlink target return fi.getSymlink(); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Stat.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Stat.java new file mode 100644 index 00000000000..36dd8811e77 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Stat.java @@ -0,0 +1,167 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.fs; + +import java.io.BufferedReader; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.util.NoSuchElementException; +import java.util.StringTokenizer; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.util.Shell; + +import com.google.common.annotations.VisibleForTesting; + +/** + * Wrapper for the Unix stat(1) command. Used to workaround the lack of + * lstat(2) in Java 6. + */ +@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"}) +@InterfaceStability.Evolving +public class Stat extends Shell { + + private final Path original; + private final Path qualified; + private final Path path; + private final long blockSize; + private final boolean dereference; + + private FileStatus stat; + + public Stat(Path path, long blockSize, boolean deref, FileSystem fs) + throws IOException { + super(0L, true); + // Original path + this.original = path; + // Qualify the original and strip out URI fragment via toUri().getPath() + Path stripped = new Path( + original.makeQualified(fs.getUri(), fs.getWorkingDirectory()) + .toUri().getPath()); + // Re-qualify the bare stripped path and store it + this.qualified = + stripped.makeQualified(fs.getUri(), fs.getWorkingDirectory()); + // Strip back down to a plain path + this.path = new Path(qualified.toUri().getPath()); + this.blockSize = blockSize; + this.dereference = deref; + } + + public FileStatus getFileStatus() throws IOException { + run(); + return stat; + } + + /** + * Whether Stat is supported on the current platform + * @return + */ + public static boolean isAvailable() { + if (Shell.LINUX || Shell.FREEBSD) { + return true; + } + return false; + } + + @VisibleForTesting + FileStatus getFileStatusForTesting() { + return stat; + } + + @Override + protected String[] getExecString() { + String derefFlag = "-"; + if (dereference) { + derefFlag = "-L"; + } + if (Shell.LINUX) { + return new String[] { + "stat", derefFlag + "c", "%s,%F,%Y,%X,%a,%U,%G,%N", path.toString() }; + } else if (Shell.FREEBSD) { + return new String[] { + "stat", derefFlag + "f", "%z,%HT,%m,%a,%Op,%Su,%Sg,`link' -> `%Y'", + path.toString() }; + } else { + throw new UnsupportedOperationException( + "stat is not supported on this platform"); + } + } + + @Override + protected void parseExecResult(BufferedReader lines) throws IOException { + // Reset stat + stat = null; + + String line = lines.readLine(); + if (line == null) { + throw new IOException("Unable to stat path: " + original); + } + if (line.endsWith("No such file or directory") || + line.endsWith("Not a directory")) { + throw new FileNotFoundException("File " + original + " does not exist"); + } + if (line.endsWith("Too many levels of symbolic links")) { + throw new IOException("Possible cyclic loop while following symbolic" + + " link " + original); + } + // 6,symbolic link,6,1373584236,1373584236,lrwxrwxrwx,andrew,andrew,`link' -> `target' + StringTokenizer tokens = new StringTokenizer(line, ","); + try { + long length = Long.parseLong(tokens.nextToken()); + boolean isDir = tokens.nextToken().equalsIgnoreCase("directory") ? 
true + : false; + // Convert from seconds to milliseconds + long modTime = Long.parseLong(tokens.nextToken())*1000; + long accessTime = Long.parseLong(tokens.nextToken())*1000; + String octalPerms = tokens.nextToken(); + // FreeBSD has extra digits beyond 4, truncate them + if (octalPerms.length() > 4) { + int len = octalPerms.length(); + octalPerms = octalPerms.substring(len-4, len); + } + FsPermission perms = new FsPermission(Short.parseShort(octalPerms, 8)); + String owner = tokens.nextToken(); + String group = tokens.nextToken(); + String symStr = tokens.nextToken(); + // 'notalink' + // 'link' -> `target' + // '' -> '' + Path symlink = null; + StringTokenizer symTokens = new StringTokenizer(symStr, "`"); + symTokens.nextToken(); + try { + String target = symTokens.nextToken(); + target = target.substring(0, target.length()-1); + if (!target.isEmpty()) { + symlink = new Path(target); + } + } catch (NoSuchElementException e) { + // null if not a symlink + } + // Set stat + stat = new FileStatus(length, isDir, 1, blockSize, modTime, accessTime, + perms, owner, group, symlink, qualified); + } catch (NumberFormatException e) { + throw new IOException("Unexpected stat output: " + line, e); + } catch (NoSuchElementException e) { + throw new IOException("Unexpected stat output: " + line, e); + } + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/local/RawLocalFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/local/RawLocalFs.java index 605bade09a8..6cb2792eebc 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/local/RawLocalFs.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/local/RawLocalFs.java @@ -17,8 +17,6 @@ */ package org.apache.hadoop.fs.local; -import java.io.File; -import java.io.FileNotFoundException; import java.io.IOException; import java.net.URI; import java.net.URISyntaxException; @@ -28,13 +26,9 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.AbstractFileSystem; import org.apache.hadoop.fs.DelegateToFileSystem; -import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.FsConstants; import org.apache.hadoop.fs.FsServerDefaults; -import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.RawLocalFileSystem; -import org.apache.hadoop.fs.permission.FsPermission; /** * The RawLocalFs implementation of AbstractFileSystem. @@ -72,90 +66,12 @@ public class RawLocalFs extends DelegateToFileSystem { public FsServerDefaults getServerDefaults() throws IOException { return LocalConfigKeys.getServerDefaults(); } - + @Override - public boolean supportsSymlinks() { + public boolean isValidName(String src) { + // Different local file systems have different validation rules. Skip + // validation here and just let the OS handle it. This is consistent with + // RawLocalFileSystem. 
return true; } - - @Override - public void createSymlink(Path target, Path link, boolean createParent) - throws IOException { - final String targetScheme = target.toUri().getScheme(); - if (targetScheme != null && !"file".equals(targetScheme)) { - throw new IOException("Unable to create symlink to non-local file "+ - "system: "+target.toString()); - } - - if (createParent) { - mkdir(link.getParent(), FsPermission.getDirDefault(), true); - } - - // NB: Use createSymbolicLink in java.nio.file.Path once available - int result = FileUtil.symLink(target.toString(), link.toString()); - if (result != 0) { - throw new IOException("Error " + result + " creating symlink " + - link + " to " + target); - } - } - - /** - * Return a FileStatus representing the given path. If the path refers - * to a symlink return a FileStatus representing the link rather than - * the object the link refers to. - */ - @Override - public FileStatus getFileLinkStatus(final Path f) throws IOException { - String target = FileUtil.readLink(new File(f.toString())); - try { - FileStatus fs = getFileStatus(f); - // If f refers to a regular file or directory - if (target.isEmpty()) { - return fs; - } - // Otherwise f refers to a symlink - return new FileStatus(fs.getLen(), - false, - fs.getReplication(), - fs.getBlockSize(), - fs.getModificationTime(), - fs.getAccessTime(), - fs.getPermission(), - fs.getOwner(), - fs.getGroup(), - new Path(target), - f); - } catch (FileNotFoundException e) { - /* The exists method in the File class returns false for dangling - * links so we can get a FileNotFoundException for links that exist. - * It's also possible that we raced with a delete of the link. Use - * the readBasicFileAttributes method in java.nio.file.attributes - * when available. - */ - if (!target.isEmpty()) { - return new FileStatus(0, false, 0, 0, 0, 0, FsPermission.getDefault(), - "", "", new Path(target), f); - } - // f refers to a file or directory that does not exist - throw e; - } - } - - @Override - public boolean isValidName(String src) { - // Different local file systems have different validation rules. Skip - // validation here and just let the OS handle it. This is consistent with - // RawLocalFileSystem. - return true; - } - - @Override - public Path getLinkTarget(Path f) throws IOException { - /* We should never get here. Valid local links are resolved transparently - * by the underlying local file system and accessing a dangling link will - * result in an IOException, not an UnresolvedLinkException, so FileContext - * should never call this function. 
- */ - throw new AssertionError(); - } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java index 2817736f281..0a8ce2e9983 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java @@ -58,6 +58,45 @@ abstract public class Shell { /** Windows CreateProcess synchronization object */ public static final Object WindowsProcessLaunchLock = new Object(); + // OSType detection + + public enum OSType { + OS_TYPE_LINUX, + OS_TYPE_WIN, + OS_TYPE_SOLARIS, + OS_TYPE_MAC, + OS_TYPE_FREEBSD, + OS_TYPE_OTHER + } + + public static final OSType osType = getOSType(); + + static private OSType getOSType() { + String osName = System.getProperty("os.name"); + if (osName.startsWith("Windows")) { + return OSType.OS_TYPE_WIN; + } else if (osName.contains("SunOS") || osName.contains("Solaris")) { + return OSType.OS_TYPE_SOLARIS; + } else if (osName.contains("Mac")) { + return OSType.OS_TYPE_MAC; + } else if (osName.contains("FreeBSD")) { + return OSType.OS_TYPE_FREEBSD; + } else if (osName.startsWith("Linux")) { + return OSType.OS_TYPE_LINUX; + } else { + // Some other form of Unix + return OSType.OS_TYPE_OTHER; + } + } + + // Helper static vars for each platform + public static final boolean WINDOWS = (osType == OSType.OS_TYPE_WIN); + public static final boolean SOLARIS = (osType == OSType.OS_TYPE_SOLARIS); + public static final boolean MAC = (osType == OSType.OS_TYPE_MAC); + public static final boolean FREEBSD = (osType == OSType.OS_TYPE_FREEBSD); + public static final boolean LINUX = (osType == OSType.OS_TYPE_LINUX); + public static final boolean OTHER = (osType == OSType.OS_TYPE_OTHER); + /** a Unix command to get the current user's groups list */ public static String[] getGroupsCommand() { return (WINDOWS)? new String[]{"cmd", "/c", "groups"} @@ -282,13 +321,6 @@ abstract public class Shell { return exeFile.getCanonicalPath(); } - /** Set to true on Windows platforms */ - public static final boolean WINDOWS /* borrowed from Path.WINDOWS */ - = System.getProperty("os.name").startsWith("Windows"); - - public static final boolean LINUX - = System.getProperty("os.name").startsWith("Linux"); - /** a Windows utility to emulate Unix commands */ public static final String WINUTILS = getWinUtilsPath(); @@ -336,6 +368,7 @@ abstract public class Shell { private long interval; // refresh interval in msec private long lastTime; // last time the command was performed + final private boolean redirectErrorStream; // merge stdout and stderr private Map environment; // env for the command execution private File dir; private Process process; // sub process used to execute the command @@ -348,13 +381,18 @@ abstract public class Shell { this(0L); } + public Shell(long interval) { + this(interval, false); + } + /** * @param interval the minimum duration to wait before re-executing the * command. */ - public Shell( long interval ) { + public Shell(long interval, boolean redirectErrorStream) { this.interval = interval; this.lastTime = (interval<0) ? 
0 : -interval; + this.redirectErrorStream = redirectErrorStream; } /** set the environment for the command @@ -393,6 +431,8 @@ abstract public class Shell { if (dir != null) { builder.directory(this.dir); } + + builder.redirectErrorStream(redirectErrorStream); if (Shell.WINDOWS) { synchronized (WindowsProcessLaunchLock) { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java index cb6a6421134..dacb2c9b82f 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java @@ -26,6 +26,7 @@ import org.apache.hadoop.util.StringUtils; import static org.apache.hadoop.fs.FileSystemTestHelper.*; import java.io.*; +import java.net.URI; import java.util.Arrays; import java.util.Random; @@ -363,12 +364,12 @@ public class TestLocalFileSystem { FileStatus status = fileSys.getFileStatus(path); assertTrue("check we're actually changing something", newModTime != status.getModificationTime()); - assertEquals(0, status.getAccessTime()); + long accessTime = status.getAccessTime(); fileSys.setTimes(path, newModTime, -1); status = fileSys.getFileStatus(path); assertEquals(newModTime, status.getModificationTime()); - assertEquals(0, status.getAccessTime()); + assertEquals(accessTime, status.getAccessTime()); } /** @@ -520,4 +521,18 @@ public class TestLocalFileSystem { fail(s); } } + + @Test + public void testStripFragmentFromPath() throws Exception { + FileSystem fs = FileSystem.getLocal(new Configuration()); + Path pathQualified = TEST_PATH.makeQualified(fs.getUri(), + fs.getWorkingDirectory()); + Path pathWithFragment = new Path( + new URI(pathQualified.toString() + "#glacier")); + // Create test file with fragment + FileSystemTestHelper.createFile(fs, pathWithFragment); + Path resolved = fs.resolvePath(pathWithFragment); + assertEquals("resolvePath did not strip fragment from Path", pathQualified, + resolved); + } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestStat.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestStat.java new file mode 100644 index 00000000000..4397f2d534c --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestStat.java @@ -0,0 +1,122 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.fs; + +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.BufferedReader; +import java.io.FileNotFoundException; +import java.io.StringReader; + +import org.apache.hadoop.conf.Configuration; +import org.junit.BeforeClass; +import org.junit.Test; + +public class TestStat { + + private static Stat stat; + + @BeforeClass + public static void setup() throws Exception { + stat = new Stat(new Path("/dummypath"), + 4096l, false, FileSystem.get(new Configuration())); + } + + private class StatOutput { + final String doesNotExist; + final String directory; + final String file; + final String symlink; + final String stickydir; + + StatOutput(String doesNotExist, String directory, String file, + String symlink, String stickydir) { + this.doesNotExist = doesNotExist; + this.directory = directory; + this.file = file; + this.symlink = symlink; + this.stickydir = stickydir; + } + + void test() throws Exception { + BufferedReader br; + FileStatus status; + + try { + br = new BufferedReader(new StringReader(doesNotExist)); + stat.parseExecResult(br); + } catch (FileNotFoundException e) { + // expected + } + + br = new BufferedReader(new StringReader(directory)); + stat.parseExecResult(br); + status = stat.getFileStatusForTesting(); + assertTrue(status.isDirectory()); + + br = new BufferedReader(new StringReader(file)); + stat.parseExecResult(br); + status = stat.getFileStatusForTesting(); + assertTrue(status.isFile()); + + br = new BufferedReader(new StringReader(symlink)); + stat.parseExecResult(br); + status = stat.getFileStatusForTesting(); + assertTrue(status.isSymlink()); + + br = new BufferedReader(new StringReader(stickydir)); + stat.parseExecResult(br); + status = stat.getFileStatusForTesting(); + assertTrue(status.isDirectory()); + assertTrue(status.getPermission().getStickyBit()); + } + } + + @Test(timeout=10000) + public void testStatLinux() throws Exception { + StatOutput linux = new StatOutput( + "stat: cannot stat `watermelon': No such file or directory", + "4096,directory,1373584236,1373586485,755,andrew,root,`.'", + "0,regular empty file,1373584228,1373584228,644,andrew,andrew,`target'", + "6,symbolic link,1373584236,1373584236,777,andrew,andrew,`link' -> `target'", + "4096,directory,1374622334,1375124212,1755,andrew,andrew,`stickydir'"); + linux.test(); + } + + @Test(timeout=10000) + public void testStatFreeBSD() throws Exception { + StatOutput freebsd = new StatOutput( + "stat: symtest/link: stat: No such file or directory", + "512,Directory,1373583695,1373583669,40755,awang,awang,`link' -> `'", + "0,Regular File,1373508937,1373508937,100644,awang,awang,`link' -> `'", + "6,Symbolic Link,1373508941,1373508941,120755,awang,awang,`link' -> `target'", + "512,Directory,1375139537,1375139537,41755,awang,awang,`link' -> `'"); + freebsd.test(); + } + + @Test(timeout=10000) + public void testStatFileNotFound() throws Exception { + try { + stat.getFileStatus(); + fail("Expected FileNotFoundException"); + } catch (FileNotFoundException e) { + // expected + } + } +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestSymlinkLocalFS.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestSymlinkLocalFS.java index eb0e1089bf3..c82dcc8a124 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestSymlinkLocalFS.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestSymlinkLocalFS.java @@ -31,6 +31,7 @@ 
import java.net.URISyntaxException; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.util.Shell; +import org.apache.hadoop.security.UserGroupInformation; import org.junit.Test; /** @@ -134,6 +135,7 @@ abstract public class TestSymlinkLocalFS extends SymlinkBaseTest { Path fileAbs = new Path(testBaseDir1()+"/file"); Path fileQual = new Path(testURI().toString(), fileAbs); Path link = new Path(testBaseDir1()+"/linkToFile"); + Path linkQual = new Path(testURI().toString(), link.toString()); wrapper.createSymlink(fileAbs, link, false); // Deleting the link using FileContext currently fails because // resolve looks up LocalFs rather than RawLocalFs for the path @@ -151,18 +153,15 @@ abstract public class TestSymlinkLocalFS extends SymlinkBaseTest { // Expected. File's exists method returns false for dangling links } // We can stat a dangling link + UserGroupInformation user = UserGroupInformation.getCurrentUser(); FileStatus fsd = wrapper.getFileLinkStatus(link); assertEquals(fileQual, fsd.getSymlink()); assertTrue(fsd.isSymlink()); assertFalse(fsd.isDirectory()); - assertEquals("", fsd.getOwner()); - assertEquals("", fsd.getGroup()); - assertEquals(link, fsd.getPath()); - assertEquals(0, fsd.getLen()); - assertEquals(0, fsd.getBlockSize()); - assertEquals(0, fsd.getReplication()); - assertEquals(0, fsd.getAccessTime()); - assertEquals(FsPermission.getDefault(), fsd.getPermission()); + assertEquals(user.getUserName(), fsd.getOwner()); + // Compare against user's primary group + assertEquals(user.getGroupNames()[0], fsd.getGroup()); + assertEquals(linkQual, fsd.getPath()); // Accessing the link try { readFile(link); From 0cb5f081496d449b2ceccc72b6fea81e32adca2a Mon Sep 17 00:00:00 2001 From: Suresh Srinivas Date: Wed, 14 Aug 2013 23:17:55 +0000 Subject: [PATCH 06/53] HADOOP-9381. Document dfs cp -f option. Contributed by Keegan Witt and Suresh Srinivas. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1514089 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 2 + .../apache/hadoop/fs/shell/CopyCommands.java | 6 +- .../src/site/apt/FileSystemShell.apt.vm | 61 +++++++++++++------ .../src/test/resources/testConf.xml | 12 +++- 4 files changed, 58 insertions(+), 23 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 52073fa31c8..15000ec72c6 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -389,6 +389,8 @@ Release 2.1.1-beta - UNRELEASED HADOOP-9857. Tests block and sometimes timeout on Windows due to invalid entropy source. (cnauroth) + HADOOP-9381. Document dfs cp -f option. (Keegan Witt, suresh via suresh) + Release 2.1.0-beta - 2013-08-06 INCOMPATIBLE CHANGES diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java index 6c39cf897ef..db15d467ec3 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java @@ -133,7 +133,8 @@ class CopyCommands { "Copy files that match the file pattern to a\n" + "destination. When copying multiple files, the destination\n" + "must be a directory. 
Passing -p preserves access and\n" + - "modification times, ownership and the mode.\n"; + "modification times, ownership and the mode. Passing -f\n" + + "overwrites the destination if it already exists.\n"; @Override protected void processOptions(LinkedList args) throws IOException { @@ -186,7 +187,8 @@ class CopyCommands { "into fs. Copying fails if the file already\n" + "exists, unless the -f flag is given. Passing\n" + "-p preserves access and modification times,\n" + - "ownership and the mode.\n"; + "ownership and the mode. Passing -f overwrites\n" + + "the destination if it already exists.\n"; @Override protected void processOptions(LinkedList args) throws IOException { diff --git a/hadoop-common-project/hadoop-common/src/site/apt/FileSystemShell.apt.vm b/hadoop-common-project/hadoop-common/src/site/apt/FileSystemShell.apt.vm index 0c56cf328fb..5c0869c0ae2 100644 --- a/hadoop-common-project/hadoop-common/src/site/apt/FileSystemShell.apt.vm +++ b/hadoop-common-project/hadoop-common/src/site/apt/FileSystemShell.apt.vm @@ -86,11 +86,14 @@ chgrp Usage: <<>> - Change group association of files. With -R, make the change recursively - through the directory structure. The user must be the owner of files, or + Change group association of files. The user must be the owner of files, or else a super-user. Additional information is in the {{{betterurl}Permissions Guide}}. + Options + + * The -R option will make the change recursively through the directory structure. + chmod Usage: << URI [URI ...]>>> @@ -100,14 +103,21 @@ chmod else a super-user. Additional information is in the {{{betterurl}Permissions Guide}}. + Options + + * The -R option will make the change recursively through the directory structure. + chown Usage: <<>> - Change the owner of files. With -R, make the change recursively through the - directory structure. The user must be a super-user. Additional information + Change the owner of files. The user must be a super-user. Additional information is in the {{{betterurl}Permissions Guide}}. + Options + + * The -R option will make the change recursively through the directory structure. + copyFromLocal Usage: << URI>>> @@ -115,6 +125,10 @@ copyFromLocal Similar to put command, except that the source is restricted to a local file reference. + Options: + + * The -f option will overwrite the destination if it already exists. + copyToLocal Usage: << >>> @@ -145,11 +159,15 @@ count cp - Usage: << >>> + Usage: << >>> Copy files from source to destination. This command allows multiple sources as well in which case the destination must be a directory. + Options: + + * The -f option will overwrite the destination if it already exists. + Example: * <<>> @@ -232,7 +250,7 @@ ls permissions number_of_replicas userid groupid filesize modification_date modification_time filename +---+ - For a directory it returns list of its direct children as in unix.A directory is listed as: + For a directory it returns list of its direct children as in Unix. A directory is listed as: +---+ permissions userid groupid modification_date modification_time dirname @@ -256,8 +274,11 @@ mkdir Usage: << >>> - Takes path uri's as argument and creates directories. With -p the behavior - is much like unix mkdir -p creating parent directories along the path. + Takes path uri's as argument and creates directories. + + Options: + + * The -p option behavior is much like Unix mkdir -p, creating parent directories along the path. Example: @@ -362,8 +383,11 @@ setrep Usage: << >>> - Changes the replication factor of a file. 
-R option is for recursively - increasing the replication factor of files within a directory. + Changes the replication factor of a file. + + Options: + + * The -R option will recursively increase the replication factor of files within a directory. Example: @@ -390,8 +414,11 @@ tail Usage: <<>> - Displays last kilobyte of the file to stdout. -f option can be used as in - Unix. + Displays last kilobyte of the file to stdout. + + Options: + + * The -f option will output appended data as the file grows, as in Unix. Example: @@ -406,13 +433,9 @@ test Options: -*----+------------+ -| -e | check to see if the file exists. Return 0 if true. -*----+------------+ -| -z | check to see if the file is zero length. Return 0 if true. -*----+------------+ -| -d | check to see if the path is directory. Return 0 if true. -*----+------------+ + * The -e option will check to see if the file exists, returning 0 if true. + * The -z option will check to see if the file is zero length, returning 0 if true. + * The -d option will check to see if the path is directory, returning 0 if true. Example: diff --git a/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml b/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml index 69886abb84b..62d94474f55 100644 --- a/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml +++ b/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml @@ -296,7 +296,11 @@ RegexpComparator - ^( |\t)*modification times, ownership and the mode.( )* + ^( |\t)*modification times, ownership and the mode. Passing -f( )* + + + RegexpComparator + ^( |\t)*overwrites the destination if it already exists.( )* @@ -400,7 +404,11 @@ RegexpComparator - ^( |\t)*ownership and the mode.( )* + ^( |\t)*ownership and the mode. Passing -f overwrites( )* + + + RegexpComparator + ^( |\t)*the destination if it already exists.( )* From 49a892056df7d73207f7a65ae5b4b905ba5e6ab8 Mon Sep 17 00:00:00 2001 From: Andrew Wang Date: Wed, 14 Aug 2013 23:32:29 +0000 Subject: [PATCH 07/53] HDFS-4816. transitionToActive blocks if the SBN is doing checkpoint image transfer. (Andrew Wang) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1514095 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 ++ .../namenode/ha/StandbyCheckpointer.java | 39 ++++++++++++++++--- .../namenode/ha/TestStandbyCheckpoints.java | 28 +++++++++++++ 3 files changed, 65 insertions(+), 5 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 54e65b538b9..5f2fb795a39 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -267,6 +267,9 @@ Release 2.3.0 - UNRELEASED HDFS-5065. TestSymlinkHdfsDisable fails on Windows. (ivanmi) + HDFS-4816. transitionToActive blocks if the SBN is doing checkpoint image + transfer. 
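The HDFS-4816 fix (in StandbyCheckpointer, below) runs the blocking image upload on a single-threaded daemon executor so the checkpointer thread stays interruptible and transitionToActive is no longer stuck behind the transfer. A minimal sketch of that pattern, assuming Guava's ThreadFactoryBuilder on the classpath as in the patch; transferImage() is a hypothetical stand-in for TransferFsImage.uploadImageFromStorage():

import java.io.IOException;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

import com.google.common.util.concurrent.ThreadFactoryBuilder;

public class UploadOffThread {
  // Hypothetical stand-in for TransferFsImage.uploadImageFromStorage().
  static void transferImage() throws IOException {
  }

  static void doUpload() throws IOException, InterruptedException {
    ExecutorService executor = Executors.newSingleThreadExecutor(
        new ThreadFactoryBuilder().setDaemon(true)
            .setNameFormat("TransferFsImageUpload-%d").build());
    Future<Void> upload = executor.submit(new Callable<Void>() {
      @Override
      public Void call() throws IOException {
        transferImage();   // the blocking transfer runs on the executor thread
        return null;
      }
    });
    executor.shutdown();
    try {
      upload.get();        // the caller remains interruptible while it waits
    } catch (ExecutionException e) {
      throw new IOException("Exception during image upload: " + e.getMessage(),
          e.getCause());
    }
  }
}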
(Andrew Wang) + Release 2.1.1-beta - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java index 46f13f0416b..c4651efa242 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java @@ -17,9 +17,17 @@ */ package org.apache.hadoop.hdfs.server.namenode.ha; +import static org.apache.hadoop.util.Time.now; + import java.io.IOException; import java.net.InetSocketAddress; import java.security.PrivilegedAction; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.ThreadFactory; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -38,10 +46,10 @@ import org.apache.hadoop.hdfs.util.Canceler; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; -import static org.apache.hadoop.util.Time.now; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; +import com.google.common.util.concurrent.ThreadFactoryBuilder; /** * Thread which runs inside the NN when it's in Standby state, @@ -57,6 +65,7 @@ public class StandbyCheckpointer { private final FSNamesystem namesystem; private long lastCheckpointTime; private final CheckpointerThread thread; + private final ThreadFactory uploadThreadFactory; private String activeNNAddress; private InetSocketAddress myNNAddress; @@ -72,6 +81,8 @@ public class StandbyCheckpointer { this.namesystem = ns; this.checkpointConf = new CheckpointConf(conf); this.thread = new CheckpointerThread(); + this.uploadThreadFactory = new ThreadFactoryBuilder().setDaemon(true) + .setNameFormat("TransferFsImageUpload-%d").build(); setNameNodeAddresses(conf); } @@ -142,7 +153,7 @@ public class StandbyCheckpointer { private void doCheckpoint() throws InterruptedException, IOException { assert canceler != null; - long txid; + final long txid; namesystem.writeLockInterruptibly(); try { @@ -171,9 +182,26 @@ public class StandbyCheckpointer { } // Upload the saved checkpoint back to the active - TransferFsImage.uploadImageFromStorage( - activeNNAddress, myNNAddress, - namesystem.getFSImage().getStorage(), txid); + // Do this in a separate thread to avoid blocking transition to active + // See HDFS-4816 + ExecutorService executor = + Executors.newSingleThreadExecutor(uploadThreadFactory); + Future upload = executor.submit(new Callable() { + @Override + public Void call() throws IOException { + TransferFsImage.uploadImageFromStorage( + activeNNAddress, myNNAddress, + namesystem.getFSImage().getStorage(), txid); + return null; + } + }); + executor.shutdown(); + try { + upload.get(); + } catch (ExecutionException e) { + throw new IOException("Exception during image upload: " + e.getMessage(), + e.getCause()); + } } /** @@ -301,6 +329,7 @@ public class StandbyCheckpointer { LOG.info("Checkpoint was cancelled: " + ce.getMessage()); canceledCount++; } catch (InterruptedException ie) { + LOG.info("Interrupted during checkpointing", ie); // Probably requested 
shutdown. continue; } catch (Throwable t) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java index 470284b077e..dff28740690 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java @@ -239,6 +239,34 @@ public class TestStandbyCheckpoints { assertTrue(canceledOne); } + + /** + * Test cancellation of ongoing checkpoints when failover happens + * mid-checkpoint during image upload from standby to active NN. + */ + @Test(timeout=60000) + public void testCheckpointCancellationDuringUpload() throws Exception { + // don't compress, we want a big image + cluster.getConfiguration(0).setBoolean( + DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, false); + cluster.getConfiguration(1).setBoolean( + DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, false); + // Throttle SBN upload to make it hang during upload to ANN + cluster.getConfiguration(1).setLong( + DFSConfigKeys.DFS_IMAGE_TRANSFER_RATE_KEY, 100); + cluster.restartNameNode(0); + cluster.restartNameNode(1); + nn0 = cluster.getNameNode(0); + nn1 = cluster.getNameNode(1); + + cluster.transitionToActive(0); + + doEdits(0, 100); + HATestUtil.waitForStandbyToCatchUp(nn0, nn1); + HATestUtil.waitForCheckpoint(cluster, 1, ImmutableList.of(104)); + cluster.transitionToStandby(0); + cluster.transitionToActive(1); + } /** * Make sure that clients will receive StandbyExceptions even when a From b32ace11f1fe4540767ee69f74e321977a9ae37a Mon Sep 17 00:00:00 2001 From: Suresh Srinivas Date: Thu, 15 Aug 2013 00:45:48 +0000 Subject: [PATCH 08/53] HDFS-5051. nn fails to download checkpointed image from snn in some setups. Contributed by Vinay and Suresh Srinivas. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1514110 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ .../hdfs/server/namenode/GetImageServlet.java | 20 +++++++++++++------ 2 files changed, 17 insertions(+), 6 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 5f2fb795a39..66eab12040d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -315,6 +315,9 @@ Release 2.1.1-beta - UNRELEASED HDFS-5091. Support for spnego keytab separate from the JournalNode keytab for secure HA. (jing9) + HDFS-5051. nn fails to download checkpointed image from snn in some + setups. 
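In the HDFS-5051 change (GetImageServlet, below), the node uploading the checkpoint advertises its own hostname in an explicit machine= parameter instead of leaving the NameNode to reverse-resolve the connection's remote address, which is presumably what breaks in the "some setups" of the summary; the parameter is skipped when the image servlet is bound to a wildcard address. A rough sketch of that parameter-building logic, using a hypothetical putImageParams() helper with illustrative host and port values (the storage-info parameter of the real method is omitted):

import java.net.InetSocketAddress;

public class PutImageParams {
  // Mirrors the new getParamStringToPutImage() logic: only advertise an
  // explicit machine when the image servlet is bound to a concrete address.
  static String putImageParams(long txid, InetSocketAddress imageListenAddress) {
    String machine = !imageListenAddress.isUnresolved()
        && imageListenAddress.getAddress().isAnyLocalAddress()
        ? null : imageListenAddress.getHostName();
    return "putimage=1"
        + "&txid=" + txid
        + "&port=" + imageListenAddress.getPort()
        + (machine != null ? "&machine=" + machine : "");
  }

  public static void main(String[] args) {
    // Wildcard bind: no machine parameter, receiver falls back to the request's remote host.
    System.out.println(putImageParams(42, new InetSocketAddress("0.0.0.0", 50090)));
    // Concrete bind: the machine name is sent explicitly.
    System.out.println(putImageParams(42, new InetSocketAddress("snn.example.com", 50090)));
  }
}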
(Vinay and suresh via suresh) + Release 2.1.0-beta - 2013-08-06 INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java index dfe1c6af69a..35c3cfb2884 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java @@ -310,11 +310,14 @@ public class GetImageServlet extends HttpServlet { static String getParamStringToPutImage(long txid, InetSocketAddress imageListenAddress, Storage storage) { - + String machine = !imageListenAddress.isUnresolved() + && imageListenAddress.getAddress().isAnyLocalAddress() ? null + : imageListenAddress.getHostName(); return "putimage=1" + "&" + TXID_PARAM + "=" + txid + "&port=" + imageListenAddress.getPort() + - "&" + STORAGEINFO_PARAM + "=" + + (machine != null ? "&machine=" + machine : "") + + "&" + STORAGEINFO_PARAM + "=" + storage.toColonSeparatedString(); } @@ -341,10 +344,6 @@ public class GetImageServlet extends HttpServlet { Map pmap = request.getParameterMap(); isGetImage = isGetEdit = isPutImage = fetchLatest = false; remoteport = 0; - machineName = request.getRemoteHost(); - if (InetAddresses.isInetAddress(machineName)) { - machineName = NetUtils.getHostNameOfIP(machineName); - } for (Map.Entry entry : pmap.entrySet()) { String key = entry.getKey(); @@ -369,11 +368,20 @@ public class GetImageServlet extends HttpServlet { txId = ServletUtil.parseLongParam(request, TXID_PARAM); } else if (key.equals("port")) { remoteport = new Integer(val[0]).intValue(); + } else if (key.equals("machine")) { + machineName = val[0]; } else if (key.equals(STORAGEINFO_PARAM)) { storageInfoString = val[0]; } } + if (machineName == null) { + machineName = request.getRemoteHost(); + if (InetAddresses.isInetAddress(machineName)) { + machineName = NetUtils.getHostNameOfIP(machineName); + } + } + int numGets = (isGetImage?1:0) + (isGetEdit?1:0); if ((numGets > 1) || (numGets == 0) && !isPutImage) { throw new IOException("Illegal parameters to TransferFsImage"); From 9cf82b6a7b5742802c451e53af7ec718f74ee58f Mon Sep 17 00:00:00 2001 From: Konstantin Shvachko Date: Thu, 15 Aug 2013 01:12:21 +0000 Subject: [PATCH 09/53] HDFS-5068. Convert NNThroughputBenchmark to a Tool to allow generic options. Contributed by Konstantin Shvachko. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1514114 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + .../namenode/NNThroughputBenchmark.java | 72 +++++++++++++------ 2 files changed, 55 insertions(+), 20 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 66eab12040d..c6f160d263f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -256,6 +256,9 @@ Release 2.3.0 - UNRELEASED HDFS-4817. Make HDFS advisory caching configurable on a per-file basis. (Colin Patrick McCabe) + HDFS-5068. Convert NNThroughputBenchmark to a Tool to allow generic options. 
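Implementing Tool is what lets ToolRunner consume the generic Hadoop options (-conf, -D key=value, and so on) and hand the program a pre-populated Configuration before run() sees the remaining arguments, which is the point of HDFS-5068 below. A minimal sketch of the pattern with a hypothetical MyBenchmark; it extends the stock Configured helper for setConf()/getConf(), whereas NNThroughputBenchmark implements those methods itself:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class MyBenchmark extends Configured implements Tool {
  @Override
  public int run(String[] args) throws Exception {
    Configuration conf = getConf();  // already carries any -D / -conf overrides
    // ... run the benchmark against conf using the remaining args ...
    return 0;
  }

  public static void main(String[] args) throws Exception {
    // Generic options are parsed and removed before run() is invoked.
    System.exit(ToolRunner.run(new MyBenchmark(), args));
  }
}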
+ (shv) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java index 77f8560816f..3156de4e93a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java @@ -61,6 +61,8 @@ import org.apache.hadoop.net.NetworkTopology; import org.apache.hadoop.security.Groups; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.Tool; +import org.apache.hadoop.util.ToolRunner; import org.apache.hadoop.util.VersionInfo; import org.apache.log4j.Level; import org.apache.log4j.LogManager; @@ -100,7 +102,7 @@ import org.apache.log4j.LogManager; * Then the benchmark executes the specified number of operations using * the specified number of threads and outputs the resulting stats. */ -public class NNThroughputBenchmark { +public class NNThroughputBenchmark implements Tool { private static final Log LOG = LogFactory.getLog(NNThroughputBenchmark.class); private static final int BLOCK_SIZE = 16; private static final String GENERAL_OPTIONS_USAGE = @@ -115,6 +117,8 @@ public class NNThroughputBenchmark { // We do not need many handlers, since each thread simulates a handler // by calling name-node methods directly config.setInt(DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY, 1); + // Turn off minimum block size verification + config.setInt(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0); // set exclude file config.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, "${hadoop.tmp.dir}/dfs/hosts/exclude"); @@ -129,14 +133,11 @@ public class NNThroughputBenchmark { config.set(DFSConfigKeys.DFS_HOSTS, "${hadoop.tmp.dir}/dfs/hosts/include"); File includeFile = new File(config.get(DFSConfigKeys.DFS_HOSTS, "include")); new FileOutputStream(includeFile).close(); - // Start the NameNode - String[] argv = new String[] {}; - nameNode = NameNode.createNameNode(argv, config); - nameNodeProto = nameNode.getRpcServer(); } void close() { - nameNode.stop(); + if(nameNode != null) + nameNode.stop(); } static void setNameNodeLoggingLevel(Level logLevel) { @@ -1290,52 +1291,69 @@ public class NNThroughputBenchmark { System.exit(-1); } + public static void runBenchmark(Configuration conf, List args) + throws Exception { + NNThroughputBenchmark bench = null; + try { + bench = new NNThroughputBenchmark(conf); + bench.run(args.toArray(new String[]{})); + } finally { + if(bench != null) + bench.close(); + } + } + /** * Main method of the benchmark. * @param args command line parameters */ - public static void runBenchmark(Configuration conf, List args) throws Exception { + @Override // Tool + public int run(String[] aArgs) throws Exception { + List args = new ArrayList(Arrays.asList(aArgs)); if(args.size() < 2 || ! 
args.get(0).startsWith("-op")) printUsage(); String type = args.get(1); boolean runAll = OperationStatsBase.OP_ALL_NAME.equals(type); - NNThroughputBenchmark bench = null; + // Start the NameNode + String[] argv = new String[] {}; + nameNode = NameNode.createNameNode(argv, config); + nameNodeProto = nameNode.getRpcServer(); + List ops = new ArrayList(); OperationStatsBase opStat = null; try { - bench = new NNThroughputBenchmark(conf); if(runAll || CreateFileStats.OP_CREATE_NAME.equals(type)) { - opStat = bench.new CreateFileStats(args); + opStat = new CreateFileStats(args); ops.add(opStat); } if(runAll || OpenFileStats.OP_OPEN_NAME.equals(type)) { - opStat = bench.new OpenFileStats(args); + opStat = new OpenFileStats(args); ops.add(opStat); } if(runAll || DeleteFileStats.OP_DELETE_NAME.equals(type)) { - opStat = bench.new DeleteFileStats(args); + opStat = new DeleteFileStats(args); ops.add(opStat); } if(runAll || FileStatusStats.OP_FILE_STATUS_NAME.equals(type)) { - opStat = bench.new FileStatusStats(args); + opStat = new FileStatusStats(args); ops.add(opStat); } if(runAll || RenameFileStats.OP_RENAME_NAME.equals(type)) { - opStat = bench.new RenameFileStats(args); + opStat = new RenameFileStats(args); ops.add(opStat); } if(runAll || BlockReportStats.OP_BLOCK_REPORT_NAME.equals(type)) { - opStat = bench.new BlockReportStats(args); + opStat = new BlockReportStats(args); ops.add(opStat); } if(runAll || ReplicationStats.OP_REPLICATION_NAME.equals(type)) { - opStat = bench.new ReplicationStats(args); + opStat = new ReplicationStats(args); ops.add(opStat); } if(runAll || CleanAllStats.OP_CLEAN_NAME.equals(type)) { - opStat = bench.new CleanAllStats(args); + opStat = new CleanAllStats(args); ops.add(opStat); } if(ops.size() == 0) @@ -1354,14 +1372,28 @@ public class NNThroughputBenchmark { } catch(Exception e) { LOG.error(StringUtils.stringifyException(e)); throw e; + } + return 0; + } + + public static void main(String[] args) throws Exception { + NNThroughputBenchmark bench = null; + try { + bench = new NNThroughputBenchmark(new HdfsConfiguration()); + ToolRunner.run(bench, args); } finally { if(bench != null) bench.close(); } } - public static void main(String[] args) throws Exception { - runBenchmark(new HdfsConfiguration(), - new ArrayList(Arrays.asList(args))); + @Override // Configurable + public void setConf(Configuration conf) { + config = conf; + } + + @Override // Configurable + public Configuration getConf() { + return config; } } From 4c8db6009291001b685b63f05b59a084972df8d4 Mon Sep 17 00:00:00 2001 From: Konstantin Shvachko Date: Thu, 15 Aug 2013 01:29:16 +0000 Subject: [PATCH 10/53] HDFS-5079. Cleaning up NNHAStatusHeartbeat.State from DatanodeProtocolProtos. Contributed by Tao Luo. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1514118 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ .../java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java | 5 +++-- .../hadoop-hdfs/src/main/proto/DatanodeProtocol.proto | 7 ++----- 3 files changed, 8 insertions(+), 7 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index c6f160d263f..4d62131b8cb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -6,6 +6,9 @@ Trunk (Unreleased) HDFS-3034. Remove the deprecated DFSOutputStream.sync() method. (szetszwo) + HDFS-5079. Cleaning up NNHAStatusHeartbeat.State from + DatanodeProtocolProtos. 
(Tao Luo via shv) + NEW FEATURES HDFS-3125. Add JournalService to enable Journal Daemon. (suresh) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java index 613edb1fa18..a3b60a765df 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java @@ -30,6 +30,7 @@ import org.apache.hadoop.fs.CreateFlag; import org.apache.hadoop.fs.FsServerDefaults; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState; +import org.apache.hadoop.ha.proto.HAServiceProtocolProtos; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.ClientProtocol; @@ -1311,10 +1312,10 @@ public class PBHelper { NNHAStatusHeartbeatProto.newBuilder(); switch (hb.getState()) { case ACTIVE: - builder.setState(NNHAStatusHeartbeatProto.State.ACTIVE); + builder.setState(HAServiceProtocolProtos.HAServiceStateProto.ACTIVE); break; case STANDBY: - builder.setState(NNHAStatusHeartbeatProto.State.STANDBY); + builder.setState(HAServiceProtocolProtos.HAServiceStateProto.STANDBY); break; default: throw new IllegalArgumentException("Unexpected NNHAStatusHeartbeat.State:" + diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto index 3b9b90b5d27..bc5461be567 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto @@ -31,6 +31,7 @@ option java_generic_services = true; option java_generate_equals_and_hash = true; package hadoop.hdfs; +import "HAServiceProtocol.proto"; import "hdfs.proto"; /** @@ -185,11 +186,7 @@ message StorageReportProto { * txid - Highest transaction ID this NN has seen */ message NNHAStatusHeartbeatProto { - enum State { - ACTIVE = 0; - STANDBY = 1; - } - required State state = 1; + required hadoop.common.HAServiceStateProto state = 1; required uint64 txid = 2; } From 472d8bc9839b9cbe262e41743f40b0cb4912f5f2 Mon Sep 17 00:00:00 2001 From: Arun Murthy Date: Thu, 15 Aug 2013 02:35:48 +0000 Subject: [PATCH 11/53] YARN-1056. Remove dual use of string 'resourcemanager' in yarn.resourcemanager.connect.{max.wait.secs|retry_interval.secs}. Contributed by Karthik Kambatla. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1514135 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 4 +++ .../hadoop/yarn/conf/YarnConfiguration.java | 24 +++++++------- .../apache/hadoop/yarn/client/RMProxy.java | 24 +++++--------- .../src/main/resources/yarn-default.xml | 16 ++++++++- .../nodemanager/TestNodeStatusUpdater.java | 33 +++++++++---------- 5 files changed, 55 insertions(+), 46 deletions(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 457ec5fdac9..92744c0537b 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -875,6 +875,10 @@ Release 2.1.0-beta - 2013-08-06 YARN-1043. Push all metrics consistently. (Jian He via acmurthy) + YARN-1056. 
Remove dual use of string 'resourcemanager' in + yarn.resourcemanager.connect.{max.wait.secs|retry_interval.secs} + (Karthik Kambatla via acmurthy) + Release 2.0.5-alpha - 06/06/2013 INCOMPATIBLE CHANGES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java index 065fb6344c6..ec9eb19c4f7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -259,7 +259,7 @@ public class YarnConfiguration extends Configuration { /** URI for FileSystemRMStateStore */ public static final String FS_RM_STATE_STORE_URI = - RM_PREFIX + "fs.rm-state-store.uri"; + RM_PREFIX + "fs.state-store.uri"; /** The maximum number of completed applications RM keeps. */ public static final String RM_MAX_COMPLETED_APPLICATIONS = @@ -655,19 +655,17 @@ public class YarnConfiguration extends Configuration { public static final long DEFAULT_NM_PROCESS_KILL_WAIT_MS = 2000; - /** Max time to wait to establish a connection to RM - */ - public static final String RESOURCEMANAGER_CONNECT_MAX_WAIT_SECS = - RM_PREFIX + "resourcemanager.connect.max.wait.secs"; - public static final int DEFAULT_RESOURCEMANAGER_CONNECT_MAX_WAIT_SECS = - 15*60; + /** Max time to wait to establish a connection to RM */ + public static final String RESOURCEMANAGER_CONNECT_MAX_WAIT_MS = + RM_PREFIX + "connect.max-wait.ms"; + public static final int DEFAULT_RESOURCEMANAGER_CONNECT_MAX_WAIT_MS = + 15 * 60 * 1000; - /** Time interval between each attempt to connect to RM - */ - public static final String RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_SECS = - RM_PREFIX + "resourcemanager.connect.retry_interval.secs"; - public static final long DEFAULT_RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_SECS - = 30; + /** Time interval between each attempt to connect to RM */ + public static final String RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS = + RM_PREFIX + "connect.retry-interval.ms"; + public static final long DEFAULT_RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS + = 30 * 1000; /** * CLASSPATH for YARN applications. 
A comma-separated list of CLASSPATH diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RMProxy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RMProxy.java index 3b166a8806c..5fff760eb2d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RMProxy.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RMProxy.java @@ -35,14 +35,10 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.retry.RetryPolicies; import org.apache.hadoop.io.retry.RetryPolicy; import org.apache.hadoop.io.retry.RetryProxy; -import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.security.token.Token; -import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.ipc.YarnRPC; -import org.apache.hadoop.yarn.security.AMRMTokenIdentifier; import com.google.common.annotations.VisibleForTesting; @@ -79,38 +75,36 @@ public class RMProxy { public static RetryPolicy createRetryPolicy(Configuration conf) { long rmConnectWaitMS = conf.getInt( - YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_SECS, - YarnConfiguration.DEFAULT_RESOURCEMANAGER_CONNECT_MAX_WAIT_SECS) - * 1000; + YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_MS, + YarnConfiguration.DEFAULT_RESOURCEMANAGER_CONNECT_MAX_WAIT_MS); long rmConnectionRetryIntervalMS = conf.getLong( - YarnConfiguration.RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_SECS, + YarnConfiguration.RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS, YarnConfiguration - .DEFAULT_RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_SECS) - * 1000; + .DEFAULT_RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS); if (rmConnectionRetryIntervalMS < 0) { throw new YarnRuntimeException("Invalid Configuration. " + - YarnConfiguration.RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_SECS + + YarnConfiguration.RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS + " should not be negative."); } - boolean waitForEver = (rmConnectWaitMS == -1000); + boolean waitForEver = (rmConnectWaitMS == -1); if (waitForEver) { return RetryPolicies.RETRY_FOREVER; } else { if (rmConnectWaitMS < 0) { throw new YarnRuntimeException("Invalid Configuration. " - + YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_SECS + + YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_MS + " can be -1, but can not be other negative numbers"); } // try connect once if (rmConnectWaitMS < rmConnectionRetryIntervalMS) { - LOG.warn(YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_SECS + LOG.warn(YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_MS + " is smaller than " - + YarnConfiguration.RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_SECS + + YarnConfiguration.RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS + ". Only try connect once."); rmConnectWaitMS = 0; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml index b6753bc4adc..ab8d50aab10 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml @@ -140,6 +140,20 @@ 1000 + + Maximum time to wait to establish connection to + ResourceManager. 
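Both of the renamed properties added to yarn-default.xml below are expressed in milliseconds, and as the RMProxy code above shows, a max-wait of -1 selects RetryPolicies.RETRY_FOREVER. A small sketch of overriding them in code, mirroring what TestNodeStatusUpdater does, in a hypothetical RmConnectSettings class with illustrative values:

import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class RmConnectSettings {
  public static void main(String[] args) {
    YarnConfiguration conf = new YarnConfiguration();
    // Stop retrying the RM connection after 5 seconds, with one second between attempts.
    conf.setLong(YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_MS, 5000);
    conf.setLong(YarnConfiguration.RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS, 1000);
  }
}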
+ yarn.resourcemanager.connect.max-wait.ms + 900000 + + + + How often to try connecting to the + ResourceManager. + yarn.resourcemanager.connect.retry-interval.ms + 30000 + + The maximum number of application attempts. It's a global setting for all application masters. Each application master can specify @@ -249,7 +263,7 @@ RM state will be stored. This must be supplied when using org.apache.hadoop.yarn.server.resourcemanager.recovery.FileSystemRMStateStore as the value for yarn.resourcemanager.store.class - yarn.resourcemanager.fs.rm-state-store.uri + yarn.resourcemanager.fs.state-store.uri ${hadoop.tmp.dir}/yarn/system/rmstore diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java index 78ab13ea835..d2119a75072 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java @@ -957,15 +957,14 @@ public class TestNodeStatusUpdater { @Test (timeout = 150000) public void testNMConnectionToRM() throws Exception { final long delta = 50000; - final long connectionWaitSecs = 5; - final long connectionRetryIntervalSecs = 1; + final long connectionWaitMs = 5000; + final long connectionRetryIntervalMs = 1000; //Waiting for rmStartIntervalMS, RM will be started final long rmStartIntervalMS = 2*1000; - conf.setLong(YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_SECS, - connectionWaitSecs); - conf.setLong(YarnConfiguration - .RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_SECS, - connectionRetryIntervalSecs); + conf.setLong(YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_MS, + connectionWaitMs); + conf.setLong(YarnConfiguration.RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS, + connectionRetryIntervalMs); //Test NM try to connect to RM Several times, but finally fail NodeManagerWithCustomNodeStatusUpdater nmWithUpdater; @@ -987,15 +986,15 @@ public class TestNodeStatusUpdater { } catch(Exception e) { long t = System.currentTimeMillis(); long duration = t - waitStartTime; - boolean waitTimeValid = (duration >= connectionWaitSecs * 1000) - && (duration < (connectionWaitSecs * 1000 + delta)); + boolean waitTimeValid = (duration >= connectionWaitMs) + && (duration < (connectionWaitMs + delta)); if(!waitTimeValid) { //either the exception was too early, or it had a different cause. 
//reject with the inner stack trace throw new Exception("NM should have tried re-connecting to RM during " + - "period of at least " + connectionWaitSecs + " seconds, but " + - "stopped retrying within " + (connectionWaitSecs + delta/1000) + - " seconds: " + e, e); + "period of at least " + connectionWaitMs + " ms, but " + + "stopped retrying within " + (connectionWaitMs + delta) + + " ms: " + e, e); } } @@ -1149,14 +1148,14 @@ public class TestNodeStatusUpdater { @Test(timeout = 200000) public void testNodeStatusUpdaterRetryAndNMShutdown() throws Exception { - final long connectionWaitSecs = 1; - final long connectionRetryIntervalSecs = 1; + final long connectionWaitSecs = 1000; + final long connectionRetryIntervalMs = 1000; YarnConfiguration conf = createNMConfig(); - conf.setLong(YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_SECS, + conf.setLong(YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_MS, connectionWaitSecs); conf.setLong(YarnConfiguration - .RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_SECS, - connectionRetryIntervalSecs); + .RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS, + connectionRetryIntervalMs); conf.setLong(YarnConfiguration.NM_SLEEP_DELAY_BEFORE_SIGKILL_MS, 5000); CyclicBarrier syncBarrier = new CyclicBarrier(2); nm = new MyNodeManager2(syncBarrier, conf); From d0b61a169ef5988a77148d1d071e21de9f7bb8e1 Mon Sep 17 00:00:00 2001 From: Colin McCabe Date: Thu, 15 Aug 2013 03:58:23 +0000 Subject: [PATCH 12/53] HADOOP-9875. TestDoAsEffectiveUser can fail on JDK 7. (Aaron T. Myers via Colin Patrick McCabe) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1514147 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++ .../org/apache/hadoop/security/TestDoAsEffectiveUser.java | 7 ++++++- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 15000ec72c6..3a5736a1f53 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -334,6 +334,9 @@ Release 2.3.0 - UNRELEASED HADOOP-9652. RawLocalFs#getFileLinkStatus does not fill in the link owner and mode. (Andrew Wang via Colin Patrick McCabe) + HADOOP-9875. TestDoAsEffectiveUser can fail on JDK 7. (Aaron T. 
Myers via + Colin Patrick McCabe) + Release 2.1.1-beta - UNRELEASED diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestDoAsEffectiveUser.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestDoAsEffectiveUser.java index 217174de497..830106dcee2 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestDoAsEffectiveUser.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestDoAsEffectiveUser.java @@ -38,6 +38,7 @@ import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; import org.apache.hadoop.security.authorize.ProxyUsers; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenInfo; +import org.junit.Before; import org.junit.Test; import org.apache.hadoop.ipc.TestSaslRPC.TestTokenSecretManager; import org.apache.hadoop.ipc.TestSaslRPC.TestTokenIdentifier; @@ -58,7 +59,7 @@ public class TestDoAsEffectiveUser { GROUP2_NAME }; private static final String ADDRESS = "0.0.0.0"; private TestProtocol proxy; - private static Configuration masterConf = new Configuration(); + private static final Configuration masterConf = new Configuration(); public static final Log LOG = LogFactory @@ -70,6 +71,10 @@ public class TestDoAsEffectiveUser { "RULE:[2:$1@$0](.*@HADOOP.APACHE.ORG)s/@.*//" + "RULE:[1:$1@$0](.*@HADOOP.APACHE.ORG)s/@.*//" + "DEFAULT"); + } + + @Before + public void setMasterConf() { UserGroupInformation.setConfiguration(masterConf); } From 0182ea16d359b41c065bf9cbf740f8b23f6381e3 Mon Sep 17 00:00:00 2001 From: Tsz-wo Sze Date: Thu, 15 Aug 2013 04:52:52 +0000 Subject: [PATCH 13/53] HDFS-4898. BlockPlacementPolicyWithNodeGroup.chooseRemoteRack() fails to properly fallback to local rack. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1514156 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ .../BlockPlacementPolicyWithNodeGroup.java | 15 ++++++++------- 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 4d62131b8cb..9b50809b47a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -324,6 +324,9 @@ Release 2.1.1-beta - UNRELEASED HDFS-5051. nn fails to download checkpointed image from snn in some setups. (Vinay and suresh via suresh) + HDFS-4898. BlockPlacementPolicyWithNodeGroup.chooseRemoteRack() fails to + properly fallback to local rack. 
(szetszwo) + Release 2.1.0-beta - 2013-08-06 INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java index 643d2b401cd..e98318b9783 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java @@ -169,16 +169,17 @@ public class BlockPlacementPolicyWithNodeGroup extends BlockPlacementPolicyDefau long blocksize, int maxReplicasPerRack, List results, boolean avoidStaleNodes) throws NotEnoughReplicasException { int oldNumOfReplicas = results.size(); - // randomly choose one node from remote racks + + final String rackLocation = NetworkTopology.getFirstHalf( + localMachine.getNetworkLocation()); try { - chooseRandom( - numOfReplicas, - "~" + NetworkTopology.getFirstHalf(localMachine.getNetworkLocation()), - excludedNodes, blocksize, maxReplicasPerRack, results, - avoidStaleNodes); + // randomly choose from remote racks + chooseRandom(numOfReplicas, "~" + rackLocation, excludedNodes, blocksize, + maxReplicasPerRack, results, avoidStaleNodes); } catch (NotEnoughReplicasException e) { + // fall back to the local rack chooseRandom(numOfReplicas - (results.size() - oldNumOfReplicas), - localMachine.getNetworkLocation(), excludedNodes, blocksize, + rackLocation, excludedNodes, blocksize, maxReplicasPerRack, results, avoidStaleNodes); } } From 97d04ae132135bdba276759b89cfc60851f4e6a0 Mon Sep 17 00:00:00 2001 From: Chris Nauroth Date: Thu, 15 Aug 2013 06:03:07 +0000 Subject: [PATCH 14/53] HDFS-4632. globStatus using backslash for escaping does not work on Windows. Contributed by Chuan Liu. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1514168 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ .../src/test/java/org/apache/hadoop/fs/TestGlobPaths.java | 3 +++ 2 files changed, 6 insertions(+) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 9b50809b47a..17554e93ed8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -327,6 +327,9 @@ Release 2.1.1-beta - UNRELEASED HDFS-4898. BlockPlacementPolicyWithNodeGroup.chooseRemoteRack() fails to properly fallback to local rack. (szetszwo) + HDFS-4632. globStatus using backslash for escaping does not work on Windows. + (Chuan Liu via cnauroth) + Release 2.1.0-beta - 2013-08-06 INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java index 5989b9be000..4f5474d2b64 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java @@ -466,6 +466,9 @@ public class TestGlobPaths { @Test public void pTestEscape() throws IOException { + // Skip the test case on Windows because backslash will be treated as a + // path separator instead of an escaping character on Windows. 
+ org.junit.Assume.assumeTrue(!Path.WINDOWS); try { String [] files = new String[] {USER_DIR+"/ab\\[c.d"}; Path[] matchedPath = prepareTesting(USER_DIR+"/ab\\[c.d", files); From b776bd46aed2f5b3aa226af36c0081a7d1f69eda Mon Sep 17 00:00:00 2001 From: Siddharth Seth Date: Thu, 15 Aug 2013 07:20:14 +0000 Subject: [PATCH 15/53] YARN-1045. Improve toString implementation for PBImpls. Contributed by Jian He. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1514185 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 2 ++ .../impl/pb/AllocateRequestPBImpl.java | 4 +++- .../impl/pb/AllocateResponsePBImpl.java | 4 +++- .../impl/pb/CancelDelegationTokenRequestPBImpl.java | 4 +++- .../impl/pb/CancelDelegationTokenResponsePBImpl.java | 4 +++- .../impl/pb/FinishApplicationMasterRequestPBImpl.java | 4 +++- .../pb/FinishApplicationMasterResponsePBImpl.java | 4 +++- .../impl/pb/GetApplicationReportRequestPBImpl.java | 4 +++- .../impl/pb/GetApplicationReportResponsePBImpl.java | 4 +++- .../impl/pb/GetApplicationsRequestPBImpl.java | 4 +++- .../impl/pb/GetApplicationsResponsePBImpl.java | 4 +++- .../impl/pb/GetClusterMetricsRequestPBImpl.java | 4 +++- .../impl/pb/GetClusterMetricsResponsePBImpl.java | 4 +++- .../impl/pb/GetClusterNodesRequestPBImpl.java | 11 +++++++---- .../impl/pb/GetClusterNodesResponsePBImpl.java | 4 +++- .../impl/pb/GetContainerStatusesRequestPBImpl.java | 5 +++-- .../impl/pb/GetContainerStatusesResponsePBImpl.java | 5 +++-- .../impl/pb/GetDelegationTokenRequestPBImpl.java | 4 +++- .../impl/pb/GetDelegationTokenResponsePBImpl.java | 4 +++- .../impl/pb/GetNewApplicationRequestPBImpl.java | 4 +++- .../impl/pb/GetNewApplicationResponsePBImpl.java | 4 +++- .../impl/pb/GetQueueInfoRequestPBImpl.java | 4 +++- .../impl/pb/GetQueueInfoResponsePBImpl.java | 4 +++- .../impl/pb/GetQueueUserAclsInfoRequestPBImpl.java | 4 +++- .../impl/pb/GetQueueUserAclsInfoResponsePBImpl.java | 4 +++- .../impl/pb/KillApplicationRequestPBImpl.java | 4 +++- .../impl/pb/KillApplicationResponsePBImpl.java | 4 +++- .../pb/RegisterApplicationMasterRequestPBImpl.java | 4 +++- .../pb/RegisterApplicationMasterResponsePBImpl.java | 3 ++- .../impl/pb/RenewDelegationTokenRequestPBImpl.java | 4 +++- .../impl/pb/RenewDelegationTokenResponsePBImpl.java | 4 +++- .../impl/pb/StartContainerRequestPBImpl.java | 4 +++- .../impl/pb/StartContainersResponsePBImpl.java | 4 ++-- .../impl/pb/StopContainersRequestPBImpl.java | 5 +++-- .../impl/pb/StopContainersResponsePBImpl.java | 5 +++-- .../impl/pb/SubmitApplicationRequestPBImpl.java | 4 +++- .../impl/pb/SubmitApplicationResponsePBImpl.java | 4 +++- .../api/protocolrecords/impl/pb/package-info.java | 1 + .../api/records/impl/pb/ApplicationReportPBImpl.java | 4 +++- .../impl/pb/ApplicationResourceUsageReportPBImpl.java | 4 +++- .../impl/pb/ApplicationSubmissionContextPBImpl.java | 4 +++- .../records/impl/pb/ContainerLaunchContextPBImpl.java | 3 ++- .../api/records/impl/pb/ContainerStatusPBImpl.java | 4 +++- .../yarn/api/records/impl/pb/LocalResourcePBImpl.java | 4 +++- .../yarn/api/records/impl/pb/NodeReportPBImpl.java | 4 +++- .../records/impl/pb/PreemptionContainerPBImpl.java | 4 +++- .../api/records/impl/pb/PreemptionContractPBImpl.java | 4 +++- .../api/records/impl/pb/PreemptionMessagePBImpl.java | 4 +++- .../impl/pb/PreemptionResourceRequestPBImpl.java | 4 +++- .../hadoop/yarn/api/records/impl/pb/ProtoBase.java | 3 ++- .../yarn/api/records/impl/pb/QueueInfoPBImpl.java | 4 +++- .../api/records/impl/pb/QueueUserACLInfoPBImpl.java | 4 +++- 
.../impl/pb/StrictPreemptionContractPBImpl.java | 4 +++- .../hadoop/yarn/api/records/impl/pb/URLPBImpl.java | 4 +++- .../api/records/impl/pb/YarnClusterMetricsPBImpl.java | 4 +++- .../hadoop/yarn/api/records/impl/pb/package-info.java | 1 + .../impl/pb/RefreshAdminAclsRequestPBImpl.java | 4 +++- .../impl/pb/RefreshAdminAclsResponsePBImpl.java | 4 +++- .../impl/pb/RefreshNodesRequestPBImpl.java | 4 +++- .../impl/pb/RefreshNodesResponsePBImpl.java | 4 +++- .../impl/pb/RefreshQueuesRequestPBImpl.java | 4 +++- .../impl/pb/RefreshQueuesResponsePBImpl.java | 4 +++- .../impl/pb/RefreshServiceAclsRequestPBImpl.java | 4 +++- .../impl/pb/RefreshServiceAclsResponsePBImpl.java | 4 +++- ...reshSuperUserGroupsConfigurationRequestPBImpl.java | 4 +++- ...eshSuperUserGroupsConfigurationResponsePBImpl.java | 4 +++- .../pb/RefreshUserToGroupsMappingsRequestPBImpl.java | 4 +++- .../pb/RefreshUserToGroupsMappingsResponsePBImpl.java | 4 +++- .../api/records/impl/pb/NodeHealthStatusPBImpl.java | 4 +++- 69 files changed, 202 insertions(+), 74 deletions(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 92744c0537b..51b03ef53cc 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -42,6 +42,8 @@ Release 2.1.1-beta - UNRELEASED IMPROVEMENTS YARN-589. Expose a REST API for monitoring the fair scheduler (Sandy Ryza). + + YARN-1045. Improve toString implementation for PBImpls. (Jian He via sseth) OPTIMIZATIONS diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateRequestPBImpl.java index 723ab5cf6cd..bff252f38a6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateRequestPBImpl.java @@ -38,6 +38,8 @@ import org.apache.hadoop.yarn.proto.YarnProtos.ResourceRequestProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.AllocateRequestProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.AllocateRequestProtoOrBuilder; +import com.google.protobuf.TextFormat; + @Private @Unstable public class AllocateRequestPBImpl extends AllocateRequest { @@ -83,7 +85,7 @@ public class AllocateRequestPBImpl extends AllocateRequest { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void mergeLocalToBuilder() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateResponsePBImpl.java index f38f91e064b..37d59713670 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateResponsePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateResponsePBImpl.java @@ -49,6 +49,8 @@ import org.apache.hadoop.yarn.proto.YarnServiceProtos.AllocateResponseProto; import 
org.apache.hadoop.yarn.proto.YarnServiceProtos.AllocateResponseProtoOrBuilder; import org.apache.hadoop.yarn.proto.YarnServiceProtos.NMTokenProto; +import com.google.protobuf.TextFormat; + @Private @Unstable public class AllocateResponsePBImpl extends AllocateResponse { @@ -99,7 +101,7 @@ public class AllocateResponsePBImpl extends AllocateResponse { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private synchronized void mergeLocalToBuilder() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/CancelDelegationTokenRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/CancelDelegationTokenRequestPBImpl.java index e8e926beb4a..d44578418a2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/CancelDelegationTokenRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/CancelDelegationTokenRequestPBImpl.java @@ -26,6 +26,8 @@ import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenRequest; import org.apache.hadoop.yarn.api.records.Token; import org.apache.hadoop.yarn.api.records.impl.pb.TokenPBImpl; +import com.google.protobuf.TextFormat; + @Private @Unstable public class CancelDelegationTokenRequestPBImpl extends @@ -90,7 +92,7 @@ public class CancelDelegationTokenRequestPBImpl extends @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void mergeLocalToBuilder() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/CancelDelegationTokenResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/CancelDelegationTokenResponsePBImpl.java index 334e1edd8ea..ec2b2b2081b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/CancelDelegationTokenResponsePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/CancelDelegationTokenResponsePBImpl.java @@ -22,6 +22,8 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenResponseProto; import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenResponse; +import com.google.protobuf.TextFormat; + @Private @Unstable public class CancelDelegationTokenResponsePBImpl extends CancelDelegationTokenResponse { @@ -58,6 +60,6 @@ public class CancelDelegationTokenResponsePBImpl extends CancelDelegationTokenRe @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/FinishApplicationMasterRequestPBImpl.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/FinishApplicationMasterRequestPBImpl.java index 233c3114834..2805f82e2e5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/FinishApplicationMasterRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/FinishApplicationMasterRequestPBImpl.java @@ -28,6 +28,8 @@ import org.apache.hadoop.yarn.proto.YarnProtos.FinalApplicationStatusProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.FinishApplicationMasterRequestProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.FinishApplicationMasterRequestProtoOrBuilder; +import com.google.protobuf.TextFormat; + @Private @Unstable public class FinishApplicationMasterRequestPBImpl extends FinishApplicationMasterRequest { @@ -68,7 +70,7 @@ public class FinishApplicationMasterRequestPBImpl extends FinishApplicationMaste @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void mergeLocalToBuilder() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/FinishApplicationMasterResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/FinishApplicationMasterResponsePBImpl.java index 70f4f96dd58..ff57eb42d77 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/FinishApplicationMasterResponsePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/FinishApplicationMasterResponsePBImpl.java @@ -24,6 +24,8 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse; import org.apache.hadoop.yarn.proto.YarnServiceProtos.FinishApplicationMasterResponseProto; +import com.google.protobuf.TextFormat; + @Private @Unstable public class FinishApplicationMasterResponsePBImpl extends FinishApplicationMasterResponse { @@ -63,6 +65,6 @@ public class FinishApplicationMasterResponsePBImpl extends FinishApplicationMast @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationReportRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationReportRequestPBImpl.java index e1e3308293f..47f43180869 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationReportRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationReportRequestPBImpl.java @@ -28,6 +28,8 @@ import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationReportRequestProto; import 
org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationReportRequestProtoOrBuilder; +import com.google.protobuf.TextFormat; + @Private @Unstable public class GetApplicationReportRequestPBImpl extends GetApplicationReportRequest { @@ -71,7 +73,7 @@ public class GetApplicationReportRequestPBImpl extends GetApplicationReportReque @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void mergeLocalToBuilder() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationReportResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationReportResponsePBImpl.java index ef61dfae54c..8a54898278a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationReportResponsePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationReportResponsePBImpl.java @@ -28,6 +28,8 @@ import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationReportResponseProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationReportResponseProtoOrBuilder; +import com.google.protobuf.TextFormat; + @Private @Unstable public class GetApplicationReportResponsePBImpl extends GetApplicationReportResponse { @@ -71,7 +73,7 @@ public class GetApplicationReportResponsePBImpl extends GetApplicationReportResp @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void mergeLocalToBuilder() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationsRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationsRequestPBImpl.java index dda5e2137d7..48a8d85ab8e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationsRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationsRequestPBImpl.java @@ -28,6 +28,8 @@ import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationsRequestProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationsRequestProtoOrBuilder; +import com.google.protobuf.TextFormat; + @Private @Unstable public class GetApplicationsRequestPBImpl extends GetApplicationsRequest { @@ -123,6 +125,6 @@ public class GetApplicationsRequestPBImpl extends GetApplicationsRequest { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationsResponsePBImpl.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationsResponsePBImpl.java index b0897c67820..453fc894a1d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationsResponsePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationsResponsePBImpl.java @@ -31,6 +31,8 @@ import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationsResponseProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationsResponseProtoOrBuilder; +import com.google.protobuf.TextFormat; + @Private @Unstable public class GetApplicationsResponsePBImpl @@ -90,7 +92,7 @@ extends GetApplicationsResponse { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void mergeLocalToBuilder() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetClusterMetricsRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetClusterMetricsRequestPBImpl.java index 9dcad99269e..2288da84a34 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetClusterMetricsRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetClusterMetricsRequestPBImpl.java @@ -24,6 +24,8 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsRequest; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetClusterMetricsRequestProto; +import com.google.protobuf.TextFormat; + @Private @Unstable public class GetClusterMetricsRequestPBImpl extends GetClusterMetricsRequest { @@ -63,6 +65,6 @@ public class GetClusterMetricsRequestPBImpl extends GetClusterMetricsRequest { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetClusterMetricsResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetClusterMetricsResponsePBImpl.java index 635307aad6b..7502753a00f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetClusterMetricsResponsePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetClusterMetricsResponsePBImpl.java @@ -28,6 +28,8 @@ import org.apache.hadoop.yarn.proto.YarnProtos.YarnClusterMetricsProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetClusterMetricsResponseProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetClusterMetricsResponseProtoOrBuilder; +import com.google.protobuf.TextFormat; + @Private @Unstable public class GetClusterMetricsResponsePBImpl extends GetClusterMetricsResponse { @@ 
-71,7 +73,7 @@ public class GetClusterMetricsResponsePBImpl extends GetClusterMetricsResponse { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void mergeLocalToBuilder() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetClusterNodesRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetClusterNodesRequestPBImpl.java index 4e51320bf17..09c0fc7a136 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetClusterNodesRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetClusterNodesRequestPBImpl.java @@ -18,17 +18,20 @@ package org.apache.hadoop.yarn.api.protocolrecords.impl.pb; -import org.apache.hadoop.classification.InterfaceAudience.Private; -import org.apache.hadoop.classification.InterfaceStability.Unstable; import java.util.EnumSet; import java.util.Iterator; import java.util.List; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest; import org.apache.hadoop.yarn.api.records.NodeState; +import org.apache.hadoop.yarn.api.records.impl.pb.ProtoUtils; import org.apache.hadoop.yarn.proto.YarnProtos.NodeStateProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetClusterNodesRequestProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetClusterNodesRequestProtoOrBuilder; -import org.apache.hadoop.yarn.api.records.impl.pb.ProtoUtils; + +import com.google.protobuf.TextFormat; @Private @Unstable @@ -152,6 +155,6 @@ public class GetClusterNodesRequestPBImpl extends GetClusterNodesRequest { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetClusterNodesResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetClusterNodesResponsePBImpl.java index 23210d46f6c..04530e53334 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetClusterNodesResponsePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetClusterNodesResponsePBImpl.java @@ -31,6 +31,8 @@ import org.apache.hadoop.yarn.proto.YarnProtos.NodeReportProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetClusterNodesResponseProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetClusterNodesResponseProtoOrBuilder; +import com.google.protobuf.TextFormat; + @Private @Unstable public class GetClusterNodesResponsePBImpl extends GetClusterNodesResponse { @@ -89,7 +91,7 @@ public class GetClusterNodesResponsePBImpl extends GetClusterNodesResponse { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } 
private void mergeLocalToBuilder() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetContainerStatusesRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetContainerStatusesRequestPBImpl.java index 0c305ca8862..bbc1492c4d6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetContainerStatusesRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetContainerStatusesRequestPBImpl.java @@ -30,6 +30,8 @@ import org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetContainerStatusesRequestProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetContainerStatusesRequestProtoOrBuilder; +import com.google.protobuf.TextFormat; + @Private @Unstable public class GetContainerStatusesRequestPBImpl extends @@ -75,8 +77,7 @@ public class GetContainerStatusesRequestPBImpl extends @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ") - .replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void mergeLocalToBuilder() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetContainerStatusesResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetContainerStatusesResponsePBImpl.java index 18df2146612..fb8885be62b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetContainerStatusesResponsePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetContainerStatusesResponsePBImpl.java @@ -39,6 +39,8 @@ import org.apache.hadoop.yarn.proto.YarnServiceProtos.ContainerExceptionMapProto import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetContainerStatusesResponseProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetContainerStatusesResponseProtoOrBuilder; +import com.google.protobuf.TextFormat; + @Private @Unstable public class GetContainerStatusesResponsePBImpl extends @@ -85,8 +87,7 @@ public class GetContainerStatusesResponsePBImpl extends @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ") - .replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void mergeLocalToBuilder() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetDelegationTokenRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetDelegationTokenRequestPBImpl.java index 8e98c88efd9..435b807d3b7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetDelegationTokenRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetDelegationTokenRequestPBImpl.java @@ -23,6 +23,8 @@ import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequest import 
org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProtoOrBuilder; import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenRequest; +import com.google.protobuf.TextFormat; + @Private @Unstable public class GetDelegationTokenRequestPBImpl extends GetDelegationTokenRequest { @@ -86,7 +88,7 @@ public class GetDelegationTokenRequestPBImpl extends GetDelegationTokenRequest { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void mergeLocalToBuilder() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetDelegationTokenResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetDelegationTokenResponsePBImpl.java index c51d80d1fba..93f4b5bba27 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetDelegationTokenResponsePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetDelegationTokenResponsePBImpl.java @@ -27,6 +27,8 @@ import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenResponse; import org.apache.hadoop.yarn.api.records.Token; import org.apache.hadoop.yarn.api.records.impl.pb.TokenPBImpl; +import com.google.protobuf.TextFormat; + @Private @Unstable public class GetDelegationTokenResponsePBImpl extends GetDelegationTokenResponse { @@ -94,7 +96,7 @@ public class GetDelegationTokenResponsePBImpl extends GetDelegationTokenResponse @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void mergeLocalToBuilder() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetNewApplicationRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetNewApplicationRequestPBImpl.java index 1fb3b70ba85..a5699f3d8af 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetNewApplicationRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetNewApplicationRequestPBImpl.java @@ -24,6 +24,8 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetNewApplicationRequestProto; +import com.google.protobuf.TextFormat; + @Private @Unstable public class GetNewApplicationRequestPBImpl extends GetNewApplicationRequest { @@ -63,6 +65,6 @@ public class GetNewApplicationRequestPBImpl extends GetNewApplicationRequest { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetNewApplicationResponsePBImpl.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetNewApplicationResponsePBImpl.java index bf1a6c283ac..eb8ca2c5968 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetNewApplicationResponsePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetNewApplicationResponsePBImpl.java @@ -31,6 +31,8 @@ import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetNewApplicationResponseProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetNewApplicationResponseProtoOrBuilder; +import com.google.protobuf.TextFormat; + @Private @Unstable public class GetNewApplicationResponsePBImpl extends GetNewApplicationResponse { @@ -74,7 +76,7 @@ public class GetNewApplicationResponsePBImpl extends GetNewApplicationResponse { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void mergeLocalToBuilder() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetQueueInfoRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetQueueInfoRequestPBImpl.java index 664ea23157d..c79e0c40e0c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetQueueInfoRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetQueueInfoRequestPBImpl.java @@ -24,6 +24,8 @@ import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetQueueInfoRequestProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetQueueInfoRequestProtoOrBuilder; +import com.google.protobuf.TextFormat; + @Private @Unstable public class GetQueueInfoRequestPBImpl extends GetQueueInfoRequest { @@ -124,6 +126,6 @@ public class GetQueueInfoRequestPBImpl extends GetQueueInfoRequest { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetQueueInfoResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetQueueInfoResponsePBImpl.java index 7d60ae16a89..7193d92fe3e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetQueueInfoResponsePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetQueueInfoResponsePBImpl.java @@ -27,6 +27,8 @@ import org.apache.hadoop.yarn.proto.YarnProtos.QueueInfoProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetQueueInfoResponseProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetQueueInfoResponseProtoOrBuilder; +import com.google.protobuf.TextFormat; + @Private @Unstable public class GetQueueInfoResponsePBImpl extends 
GetQueueInfoResponse { @@ -71,7 +73,7 @@ public class GetQueueInfoResponsePBImpl extends GetQueueInfoResponse { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } @Override diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetQueueUserAclsInfoRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetQueueUserAclsInfoRequestPBImpl.java index d964e85f064..e11ba5d10fc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetQueueUserAclsInfoRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetQueueUserAclsInfoRequestPBImpl.java @@ -23,6 +23,8 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetQueueUserAclsInfoRequestProto; +import com.google.protobuf.TextFormat; + @Private @Unstable public class GetQueueUserAclsInfoRequestPBImpl extends GetQueueUserAclsInfoRequest { @@ -64,6 +66,6 @@ public class GetQueueUserAclsInfoRequestPBImpl extends GetQueueUserAclsInfoReque @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetQueueUserAclsInfoResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetQueueUserAclsInfoResponsePBImpl.java index d9fdadaf1c6..8d405d9726a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetQueueUserAclsInfoResponsePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetQueueUserAclsInfoResponsePBImpl.java @@ -31,6 +31,8 @@ import org.apache.hadoop.yarn.proto.YarnProtos.QueueUserACLInfoProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetQueueUserAclsInfoResponseProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetQueueUserAclsInfoResponseProtoOrBuilder; +import com.google.protobuf.TextFormat; + @Private @Unstable public class GetQueueUserAclsInfoResponsePBImpl extends GetQueueUserAclsInfoResponse { @@ -90,7 +92,7 @@ public class GetQueueUserAclsInfoResponsePBImpl extends GetQueueUserAclsInfoResp @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void mergeLocalToBuilder() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/KillApplicationRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/KillApplicationRequestPBImpl.java index 2b13a72085b..db973676828 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/KillApplicationRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/KillApplicationRequestPBImpl.java @@ -28,6 +28,8 @@ import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.KillApplicationRequestProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.KillApplicationRequestProtoOrBuilder; +import com.google.protobuf.TextFormat; + @Private @Unstable public class KillApplicationRequestPBImpl extends KillApplicationRequest { @@ -71,7 +73,7 @@ public class KillApplicationRequestPBImpl extends KillApplicationRequest { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void mergeLocalToBuilder() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/KillApplicationResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/KillApplicationResponsePBImpl.java index f16d4177ec3..14e0c1f74af 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/KillApplicationResponsePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/KillApplicationResponsePBImpl.java @@ -24,6 +24,8 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationResponse; import org.apache.hadoop.yarn.proto.YarnServiceProtos.KillApplicationResponseProto; +import com.google.protobuf.TextFormat; + @Private @Unstable public class KillApplicationResponsePBImpl extends KillApplicationResponse { @@ -63,6 +65,6 @@ public class KillApplicationResponsePBImpl extends KillApplicationResponse { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RegisterApplicationMasterRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RegisterApplicationMasterRequestPBImpl.java index a2d2024d381..037dfd98760 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RegisterApplicationMasterRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RegisterApplicationMasterRequestPBImpl.java @@ -25,6 +25,8 @@ import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterReque import org.apache.hadoop.yarn.proto.YarnServiceProtos.RegisterApplicationMasterRequestProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.RegisterApplicationMasterRequestProtoOrBuilder; +import com.google.protobuf.TextFormat; + @Private @Unstable public class RegisterApplicationMasterRequestPBImpl extends RegisterApplicationMasterRequest { @@ -65,7 +67,7 @@ public class RegisterApplicationMasterRequestPBImpl extends 
RegisterApplicationM @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void mergeLocalToBuilder() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RegisterApplicationMasterResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RegisterApplicationMasterResponsePBImpl.java index 75ce2009f33..486304c7fb1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RegisterApplicationMasterResponsePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RegisterApplicationMasterResponsePBImpl.java @@ -38,6 +38,7 @@ import org.apache.hadoop.yarn.proto.YarnServiceProtos.RegisterApplicationMasterR import org.apache.hadoop.yarn.proto.YarnServiceProtos.RegisterApplicationMasterResponseProtoOrBuilder; import com.google.protobuf.ByteString; +import com.google.protobuf.TextFormat; @Private @@ -85,7 +86,7 @@ public class RegisterApplicationMasterResponsePBImpl extends @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void mergeLocalToProto() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RenewDelegationTokenRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RenewDelegationTokenRequestPBImpl.java index 3e109c5d275..dac3c9b9b89 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RenewDelegationTokenRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RenewDelegationTokenRequestPBImpl.java @@ -26,6 +26,8 @@ import org.apache.hadoop.yarn.api.protocolrecords.RenewDelegationTokenRequest; import org.apache.hadoop.yarn.api.records.Token; import org.apache.hadoop.yarn.api.records.impl.pb.TokenPBImpl; +import com.google.protobuf.TextFormat; + @Private @Unstable public class RenewDelegationTokenRequestPBImpl extends @@ -89,7 +91,7 @@ public class RenewDelegationTokenRequestPBImpl extends @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void mergeLocalToBuilder() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RenewDelegationTokenResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RenewDelegationTokenResponsePBImpl.java index ecf0b30a172..9d20b469873 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RenewDelegationTokenResponsePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RenewDelegationTokenResponsePBImpl.java @@ -23,6 +23,8 @@ import 
org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRespo import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenResponseProtoOrBuilder; import org.apache.hadoop.yarn.api.protocolrecords.RenewDelegationTokenResponse; +import com.google.protobuf.TextFormat; + @Private @Unstable public class RenewDelegationTokenResponsePBImpl extends @@ -66,7 +68,7 @@ public class RenewDelegationTokenResponsePBImpl extends @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void maybeInitBuilder() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/StartContainerRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/StartContainerRequestPBImpl.java index acb9d34cfed..c1cd0ebbfc2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/StartContainerRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/StartContainerRequestPBImpl.java @@ -31,6 +31,8 @@ import org.apache.hadoop.yarn.proto.YarnProtos.ContainerLaunchContextProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.StartContainerRequestProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.StartContainerRequestProtoOrBuilder; +import com.google.protobuf.TextFormat; + @Private @Unstable public class StartContainerRequestPBImpl extends StartContainerRequest { @@ -75,7 +77,7 @@ public class StartContainerRequestPBImpl extends StartContainerRequest { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void mergeLocalToBuilder() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/StartContainersResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/StartContainersResponsePBImpl.java index 1482cd779b0..8f5c740bd2e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/StartContainersResponsePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/StartContainersResponsePBImpl.java @@ -41,6 +41,7 @@ import org.apache.hadoop.yarn.proto.YarnServiceProtos.StartContainersResponsePro import org.apache.hadoop.yarn.proto.YarnServiceProtos.StartContainersResponseProtoOrBuilder; import com.google.protobuf.ByteString; +import com.google.protobuf.TextFormat; @Private @Unstable @@ -87,8 +88,7 @@ public class StartContainersResponsePBImpl extends StartContainersResponse { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ") - .replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void mergeLocalToBuilder() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/StopContainersRequestPBImpl.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/StopContainersRequestPBImpl.java index 5c758e80629..27e092b706a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/StopContainersRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/StopContainersRequestPBImpl.java @@ -30,6 +30,8 @@ import org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.StopContainersRequestProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.StopContainersRequestProtoOrBuilder; +import com.google.protobuf.TextFormat; + @Private @Unstable public class StopContainersRequestPBImpl extends StopContainersRequest { @@ -73,8 +75,7 @@ public class StopContainersRequestPBImpl extends StopContainersRequest { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ") - .replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void mergeLocalToBuilder() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/StopContainersResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/StopContainersResponsePBImpl.java index 5385d0a0ab2..dd28b06443d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/StopContainersResponsePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/StopContainersResponsePBImpl.java @@ -37,6 +37,8 @@ import org.apache.hadoop.yarn.proto.YarnServiceProtos.ContainerExceptionMapProto import org.apache.hadoop.yarn.proto.YarnServiceProtos.StopContainersResponseProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.StopContainersResponseProtoOrBuilder; +import com.google.protobuf.TextFormat; + @Private @Unstable public class StopContainersResponsePBImpl extends StopContainersResponse { @@ -80,8 +82,7 @@ public class StopContainersResponsePBImpl extends StopContainersResponse { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ") - .replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void mergeLocalToProto() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/SubmitApplicationRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/SubmitApplicationRequestPBImpl.java index ab33412514d..ad45d9adb75 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/SubmitApplicationRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/SubmitApplicationRequestPBImpl.java @@ -28,6 +28,8 @@ import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationSubmissionContextProto import org.apache.hadoop.yarn.proto.YarnServiceProtos.SubmitApplicationRequestProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.SubmitApplicationRequestProtoOrBuilder; +import com.google.protobuf.TextFormat; 
+ @Private @Unstable public class SubmitApplicationRequestPBImpl extends SubmitApplicationRequest { @@ -71,7 +73,7 @@ public class SubmitApplicationRequestPBImpl extends SubmitApplicationRequest { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void mergeLocalToBuilder() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/SubmitApplicationResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/SubmitApplicationResponsePBImpl.java index 8ac49f5dfb1..9e127767662 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/SubmitApplicationResponsePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/SubmitApplicationResponsePBImpl.java @@ -24,6 +24,8 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationResponse; import org.apache.hadoop.yarn.proto.YarnServiceProtos.SubmitApplicationResponseProto; +import com.google.protobuf.TextFormat; + @Private @Unstable public class SubmitApplicationResponsePBImpl extends SubmitApplicationResponse { @@ -63,6 +65,6 @@ public class SubmitApplicationResponsePBImpl extends SubmitApplicationResponse { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/package-info.java index f9c7e5c7d28..4b29e4f740e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/package-info.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/package-info.java @@ -18,3 +18,4 @@ @InterfaceAudience.Private package org.apache.hadoop.yarn.api.protocolrecords.impl.pb; import org.apache.hadoop.classification.InterfaceAudience; + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationReportPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationReportPBImpl.java index c68c2447799..9716f74a681 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationReportPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationReportPBImpl.java @@ -36,6 +36,8 @@ import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationResourceUsageReportPro import org.apache.hadoop.yarn.proto.YarnProtos.FinalApplicationStatusProto; import org.apache.hadoop.yarn.proto.YarnProtos.YarnApplicationStateProto; +import com.google.protobuf.TextFormat; + @Private @Unstable public class ApplicationReportPBImpl extends ApplicationReport { @@ -424,7 +426,7 @@ public class ApplicationReportPBImpl 
extends ApplicationReport { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void mergeLocalToBuilder() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationResourceUsageReportPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationResourceUsageReportPBImpl.java index eb834241ef0..ada716593e7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationResourceUsageReportPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationResourceUsageReportPBImpl.java @@ -26,6 +26,8 @@ import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationResourceUsageReportPro import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationResourceUsageReportProtoOrBuilder; import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto; +import com.google.protobuf.TextFormat; + @Private @Unstable public class ApplicationResourceUsageReportPBImpl @@ -73,7 +75,7 @@ extends ApplicationResourceUsageReport { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void mergeLocalToBuilder() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationSubmissionContextPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationSubmissionContextPBImpl.java index 96f280ac282..5b48141adda 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationSubmissionContextPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationSubmissionContextPBImpl.java @@ -32,6 +32,8 @@ import org.apache.hadoop.yarn.proto.YarnProtos.ContainerLaunchContextProto; import org.apache.hadoop.yarn.proto.YarnProtos.PriorityProto; import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto; +import com.google.protobuf.TextFormat; + @Private @Unstable public class ApplicationSubmissionContextPBImpl @@ -80,7 +82,7 @@ extends ApplicationSubmissionContext { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void mergeLocalToBuilder() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerLaunchContextPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerLaunchContextPBImpl.java index 02e0d3b23e3..12dcfcd9f8f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerLaunchContextPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerLaunchContextPBImpl.java @@ -39,6 +39,7 @@ import org.apache.hadoop.yarn.proto.YarnProtos.StringLocalResourceMapProto; import 
org.apache.hadoop.yarn.proto.YarnProtos.StringStringMapProto; import com.google.protobuf.ByteString; +import com.google.protobuf.TextFormat; @Private @Unstable @@ -89,7 +90,7 @@ extends ContainerLaunchContext { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } protected final ByteBuffer convertFromProtoFormat(ByteString byteString) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerStatusPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerStatusPBImpl.java index a52ae4c2f4a..9cb28f4d0d6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerStatusPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerStatusPBImpl.java @@ -29,6 +29,8 @@ import org.apache.hadoop.yarn.proto.YarnProtos.ContainerStateProto; import org.apache.hadoop.yarn.proto.YarnProtos.ContainerStatusProto; import org.apache.hadoop.yarn.proto.YarnProtos.ContainerStatusProtoOrBuilder; +import com.google.protobuf.TextFormat; + @Private @Unstable public class ContainerStatusPBImpl extends ContainerStatus { @@ -72,7 +74,7 @@ public class ContainerStatusPBImpl extends ContainerStatus { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void mergeLocalToBuilder() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/LocalResourcePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/LocalResourcePBImpl.java index d2caf0b4a44..16bd59740d2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/LocalResourcePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/LocalResourcePBImpl.java @@ -30,6 +30,8 @@ import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceTypeProto; import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceVisibilityProto; import org.apache.hadoop.yarn.proto.YarnProtos.URLProto; +import com.google.protobuf.TextFormat; + @Private @Unstable public class LocalResourcePBImpl extends LocalResource { @@ -72,7 +74,7 @@ public class LocalResourcePBImpl extends LocalResource { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private synchronized void mergeLocalToBuilder() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/NodeReportPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/NodeReportPBImpl.java index e1a66d76956..7a1b1b1c5fd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/NodeReportPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/NodeReportPBImpl.java @@ -29,6 +29,8 @@ import 
org.apache.hadoop.yarn.proto.YarnProtos.NodeReportProto; import org.apache.hadoop.yarn.proto.YarnProtos.NodeReportProtoOrBuilder; import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto; +import com.google.protobuf.TextFormat; + @Private @Unstable public class NodeReportPBImpl extends NodeReport { @@ -234,7 +236,7 @@ public class NodeReportPBImpl extends NodeReport { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void mergeLocalToBuilder() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/PreemptionContainerPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/PreemptionContainerPBImpl.java index 55ead2061c3..8d46cbdaf27 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/PreemptionContainerPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/PreemptionContainerPBImpl.java @@ -25,6 +25,8 @@ import org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto; import org.apache.hadoop.yarn.proto.YarnProtos.PreemptionContainerProto; import org.apache.hadoop.yarn.proto.YarnProtos.PreemptionContainerProtoOrBuilder; +import com.google.protobuf.TextFormat; + @Private @Unstable public class PreemptionContainerPBImpl extends PreemptionContainer { @@ -69,7 +71,7 @@ public class PreemptionContainerPBImpl extends PreemptionContainer { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void mergeLocalToProto() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/PreemptionContractPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/PreemptionContractPBImpl.java index 07a0af95c47..2d234f2f8c9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/PreemptionContractPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/PreemptionContractPBImpl.java @@ -33,6 +33,8 @@ import org.apache.hadoop.yarn.proto.YarnProtos.PreemptionContractProto; import org.apache.hadoop.yarn.proto.YarnProtos.PreemptionContractProtoOrBuilder; import org.apache.hadoop.yarn.proto.YarnProtos.PreemptionResourceRequestProto; +import com.google.protobuf.TextFormat; + @Private @Unstable public class PreemptionContractPBImpl extends PreemptionContract { @@ -77,7 +79,7 @@ public class PreemptionContractPBImpl extends PreemptionContract { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void mergeLocalToProto() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/PreemptionMessagePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/PreemptionMessagePBImpl.java index 65dc820b90b..6cac49ae685 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/PreemptionMessagePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/PreemptionMessagePBImpl.java @@ -27,6 +27,8 @@ import org.apache.hadoop.yarn.proto.YarnProtos.PreemptionMessageProto; import org.apache.hadoop.yarn.proto.YarnProtos.PreemptionMessageProtoOrBuilder; import org.apache.hadoop.yarn.proto.YarnProtos.StrictPreemptionContractProto; +import com.google.protobuf.TextFormat; + @Private @Unstable public class PreemptionMessagePBImpl extends PreemptionMessage { @@ -71,7 +73,7 @@ public class PreemptionMessagePBImpl extends PreemptionMessage { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void mergeLocalToProto() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/PreemptionResourceRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/PreemptionResourceRequestPBImpl.java index 97930c3dae4..a8972b15f96 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/PreemptionResourceRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/PreemptionResourceRequestPBImpl.java @@ -25,6 +25,8 @@ import org.apache.hadoop.yarn.proto.YarnProtos.PreemptionResourceRequestProto; import org.apache.hadoop.yarn.proto.YarnProtos.PreemptionResourceRequestProtoOrBuilder; import org.apache.hadoop.yarn.proto.YarnProtos.ResourceRequestProto; +import com.google.protobuf.TextFormat; + @Private @Unstable public class PreemptionResourceRequestPBImpl extends PreemptionResourceRequest { @@ -69,7 +71,7 @@ public class PreemptionResourceRequestPBImpl extends PreemptionResourceRequest { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void mergeLocalToProto() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoBase.java index 4e8c39bc9f6..bd3cc54bb0c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoBase.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoBase.java @@ -25,6 +25,7 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable; import com.google.protobuf.ByteString; import com.google.protobuf.Message; +import com.google.protobuf.TextFormat; @Private @Unstable @@ -51,7 +52,7 @@ public abstract class ProtoBase { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } protected final ByteBuffer convertFromProtoFormat(ByteString byteString) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/QueueInfoPBImpl.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/QueueInfoPBImpl.java index 529bd8b848a..56a5b584324 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/QueueInfoPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/QueueInfoPBImpl.java @@ -32,6 +32,8 @@ import org.apache.hadoop.yarn.proto.YarnProtos.QueueInfoProto; import org.apache.hadoop.yarn.proto.YarnProtos.QueueInfoProtoOrBuilder; import org.apache.hadoop.yarn.proto.YarnProtos.QueueStateProto; +import com.google.protobuf.TextFormat; + @Private @Unstable public class QueueInfoPBImpl extends QueueInfo { @@ -175,7 +177,7 @@ public class QueueInfoPBImpl extends QueueInfo { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void initLocalApplicationsList() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/QueueUserACLInfoPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/QueueUserACLInfoPBImpl.java index cf484103008..4aa9b0955e6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/QueueUserACLInfoPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/QueueUserACLInfoPBImpl.java @@ -30,6 +30,8 @@ import org.apache.hadoop.yarn.proto.YarnProtos.QueueACLProto; import org.apache.hadoop.yarn.proto.YarnProtos.QueueUserACLInfoProto; import org.apache.hadoop.yarn.proto.YarnProtos.QueueUserACLInfoProtoOrBuilder; +import com.google.protobuf.TextFormat; + @Private @Unstable public class QueueUserACLInfoPBImpl extends QueueUserACLInfo { @@ -103,7 +105,7 @@ public class QueueUserACLInfoPBImpl extends QueueUserACLInfo { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void initLocalQueueUserAclsList() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/StrictPreemptionContractPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/StrictPreemptionContractPBImpl.java index 0d80921396b..28569b46b74 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/StrictPreemptionContractPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/StrictPreemptionContractPBImpl.java @@ -30,6 +30,8 @@ import org.apache.hadoop.yarn.proto.YarnProtos.PreemptionContainerProto; import org.apache.hadoop.yarn.proto.YarnProtos.StrictPreemptionContractProto; import org.apache.hadoop.yarn.proto.YarnProtos.StrictPreemptionContractProtoOrBuilder; +import com.google.protobuf.TextFormat; + @Private @Unstable public class StrictPreemptionContractPBImpl extends StrictPreemptionContract { @@ -74,7 +76,7 @@ public class StrictPreemptionContractPBImpl extends StrictPreemptionContract { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " 
"); + return TextFormat.shortDebugString(getProto()); } private void mergeLocalToProto() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/URLPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/URLPBImpl.java index 14cc762e020..c5586c766d5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/URLPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/URLPBImpl.java @@ -25,6 +25,8 @@ import org.apache.hadoop.yarn.api.records.URL; import org.apache.hadoop.yarn.proto.YarnProtos.URLProto; import org.apache.hadoop.yarn.proto.YarnProtos.URLProtoOrBuilder; +import com.google.protobuf.TextFormat; + @Private @Unstable public class URLPBImpl extends URL { @@ -64,7 +66,7 @@ public class URLPBImpl extends URL { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void maybeInitBuilder() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/YarnClusterMetricsPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/YarnClusterMetricsPBImpl.java index cef6c62a46c..ce2f7483331 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/YarnClusterMetricsPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/YarnClusterMetricsPBImpl.java @@ -25,6 +25,8 @@ import org.apache.hadoop.yarn.api.records.YarnClusterMetrics; import org.apache.hadoop.yarn.proto.YarnProtos.YarnClusterMetricsProto; import org.apache.hadoop.yarn.proto.YarnProtos.YarnClusterMetricsProtoOrBuilder; +import com.google.protobuf.TextFormat; + @Private @Unstable public class YarnClusterMetricsPBImpl extends YarnClusterMetrics { @@ -64,7 +66,7 @@ public class YarnClusterMetricsPBImpl extends YarnClusterMetrics { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void maybeInitBuilder() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/package-info.java index 1f14e60d800..2571db8e8dc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/package-info.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/package-info.java @@ -18,3 +18,4 @@ @InterfaceAudience.Private package org.apache.hadoop.yarn.api.records.impl.pb; import org.apache.hadoop.classification.InterfaceAudience; + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshAdminAclsRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshAdminAclsRequestPBImpl.java index 5aba8e3150a..c2af34e622e 100644 
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshAdminAclsRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshAdminAclsRequestPBImpl.java @@ -23,6 +23,8 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshAdminAclsRequestProto; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsRequest; +import com.google.protobuf.TextFormat; + @Private @Unstable public class RefreshAdminAclsRequestPBImpl @@ -64,6 +66,6 @@ extends RefreshAdminAclsRequest { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshAdminAclsResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshAdminAclsResponsePBImpl.java index f1288c50a16..b9c8a278e3b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshAdminAclsResponsePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshAdminAclsResponsePBImpl.java @@ -23,6 +23,8 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshAdminAclsResponseProto; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsResponse; +import com.google.protobuf.TextFormat; + @Private @Unstable public class RefreshAdminAclsResponsePBImpl extends RefreshAdminAclsResponse { @@ -63,6 +65,6 @@ public class RefreshAdminAclsResponsePBImpl extends RefreshAdminAclsResponse { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshNodesRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshNodesRequestPBImpl.java index 6231d9b4919..2cea95a2b12 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshNodesRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshNodesRequestPBImpl.java @@ -23,6 +23,8 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesRequestProto; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesRequest; +import com.google.protobuf.TextFormat; + @Private @Unstable public class RefreshNodesRequestPBImpl extends RefreshNodesRequest { @@ -63,6 +65,6 @@ public class RefreshNodesRequestPBImpl extends RefreshNodesRequest { @Override public String toString() { - return 
getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshNodesResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshNodesResponsePBImpl.java index 4ac1f8a6729..0b4bf2749d4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshNodesResponsePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshNodesResponsePBImpl.java @@ -23,6 +23,8 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesResponseProto; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesResponse; +import com.google.protobuf.TextFormat; + @Private @Unstable public class RefreshNodesResponsePBImpl extends RefreshNodesResponse { @@ -63,6 +65,6 @@ public class RefreshNodesResponsePBImpl extends RefreshNodesResponse { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshQueuesRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshQueuesRequestPBImpl.java index 87f881c9fba..62067277c80 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshQueuesRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshQueuesRequestPBImpl.java @@ -23,6 +23,8 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshQueuesRequestProto; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshQueuesRequest; +import com.google.protobuf.TextFormat; + @Private @Unstable public class RefreshQueuesRequestPBImpl extends RefreshQueuesRequest { @@ -63,6 +65,6 @@ public class RefreshQueuesRequestPBImpl extends RefreshQueuesRequest { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshQueuesResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshQueuesResponsePBImpl.java index 4ff71088b7f..6d50b8311b2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshQueuesResponsePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshQueuesResponsePBImpl.java @@ -23,6 +23,8 @@ import 
org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshQueuesResponseProto; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshQueuesResponse; +import com.google.protobuf.TextFormat; + @Private @Unstable public class RefreshQueuesResponsePBImpl extends RefreshQueuesResponse { @@ -63,6 +65,6 @@ public class RefreshQueuesResponsePBImpl extends RefreshQueuesResponse { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshServiceAclsRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshServiceAclsRequestPBImpl.java index 8add1ec5555..7a0bb2e4453 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshServiceAclsRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshServiceAclsRequestPBImpl.java @@ -23,6 +23,8 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshServiceAclsRequestProto; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshServiceAclsRequest; +import com.google.protobuf.TextFormat; + @Private @Unstable public class RefreshServiceAclsRequestPBImpl extends RefreshServiceAclsRequest { @@ -65,6 +67,6 @@ public class RefreshServiceAclsRequestPBImpl extends RefreshServiceAclsRequest { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshServiceAclsResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshServiceAclsResponsePBImpl.java index f24645937d3..d3ea3a4ee09 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshServiceAclsResponsePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshServiceAclsResponsePBImpl.java @@ -23,6 +23,8 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshServiceAclsResponseProto; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshServiceAclsResponse; +import com.google.protobuf.TextFormat; + @Private @Unstable public class RefreshServiceAclsResponsePBImpl extends @@ -66,6 +68,6 @@ public class RefreshServiceAclsResponsePBImpl extends @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } } diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshSuperUserGroupsConfigurationRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshSuperUserGroupsConfigurationRequestPBImpl.java index a93a7b7320b..7620f48262d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshSuperUserGroupsConfigurationRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshSuperUserGroupsConfigurationRequestPBImpl.java @@ -23,6 +23,8 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshSuperUserGroupsConfigurationRequestProto; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshSuperUserGroupsConfigurationRequest; +import com.google.protobuf.TextFormat; + @Private @Unstable public class RefreshSuperUserGroupsConfigurationRequestPBImpl @@ -64,6 +66,6 @@ extends RefreshSuperUserGroupsConfigurationRequest { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshSuperUserGroupsConfigurationResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshSuperUserGroupsConfigurationResponsePBImpl.java index b899ff169a1..dca301a4fad 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshSuperUserGroupsConfigurationResponsePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshSuperUserGroupsConfigurationResponsePBImpl.java @@ -23,6 +23,8 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshSuperUserGroupsConfigurationResponseProto; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshSuperUserGroupsConfigurationResponse; +import com.google.protobuf.TextFormat; + @Private @Unstable public class RefreshSuperUserGroupsConfigurationResponsePBImpl extends RefreshSuperUserGroupsConfigurationResponse { @@ -63,6 +65,6 @@ public class RefreshSuperUserGroupsConfigurationResponsePBImpl extends RefreshSu @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshUserToGroupsMappingsRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshUserToGroupsMappingsRequestPBImpl.java index f1a41beefbc..080f97c4cf7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshUserToGroupsMappingsRequestPBImpl.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshUserToGroupsMappingsRequestPBImpl.java @@ -23,6 +23,8 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshUserToGroupsMappingsRequestProto; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshUserToGroupsMappingsRequest; +import com.google.protobuf.TextFormat; + @Private @Unstable public class RefreshUserToGroupsMappingsRequestPBImpl @@ -64,6 +66,6 @@ extends RefreshUserToGroupsMappingsRequest { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshUserToGroupsMappingsResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshUserToGroupsMappingsResponsePBImpl.java index 52a8b99f1f0..972ebfaab99 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshUserToGroupsMappingsResponsePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshUserToGroupsMappingsResponsePBImpl.java @@ -23,6 +23,8 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshUserToGroupsMappingsResponseProto; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshUserToGroupsMappingsResponse; +import com.google.protobuf.TextFormat; + @Private @Unstable public class RefreshUserToGroupsMappingsResponsePBImpl extends RefreshUserToGroupsMappingsResponse { @@ -63,6 +65,6 @@ public class RefreshUserToGroupsMappingsResponsePBImpl extends RefreshUserToGrou @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/NodeHealthStatusPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/NodeHealthStatusPBImpl.java index 52e378c89b9..75aa3d1cfd0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/NodeHealthStatusPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/NodeHealthStatusPBImpl.java @@ -22,6 +22,8 @@ import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.NodeHealthStatusProto import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.NodeHealthStatusProtoOrBuilder; import org.apache.hadoop.yarn.server.api.records.NodeHealthStatus; +import com.google.protobuf.TextFormat; + public class NodeHealthStatusPBImpl extends NodeHealthStatus { private NodeHealthStatusProto.Builder builder; @@ -62,7 +64,7 @@ public class NodeHealthStatusPBImpl extends NodeHealthStatus { @Override public String 
toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void mergeLocalToProto() { From 26c5a490e5f39377749aff90a22efab626c081df Mon Sep 17 00:00:00 2001 From: Chris Nauroth Date: Thu, 15 Aug 2013 16:51:07 +0000 Subject: [PATCH 16/53] HDFS-5093. TestGlobPaths should re-use the MiniDFSCluster to avoid failure on Windows. Contributed by Chuan Liu. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1514366 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + .../org/apache/hadoop/fs/TestGlobPaths.java | 218 +++++++++--------- 2 files changed, 116 insertions(+), 105 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 17554e93ed8..effb3a62a4f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -330,6 +330,9 @@ Release 2.1.1-beta - UNRELEASED HDFS-4632. globStatus using backslash for escaping does not work on Windows. (Chuan Liu via cnauroth) + HDFS-5093. TestGlobPaths should re-use the MiniDFSCluster to avoid failure + on Windows. (Chuan Liu via cnauroth) + Release 2.1.0-beta - 2013-08-06 INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java index 4f5474d2b64..b712be10f0f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java @@ -20,7 +20,6 @@ package org.apache.hadoop.fs; import static org.junit.Assert.*; import java.io.IOException; -import java.util.Arrays; import java.util.regex.Pattern; import org.apache.commons.lang.StringUtils; @@ -30,8 +29,6 @@ import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.junit.*; -import com.google.common.base.Joiner; - public class TestGlobPaths { static class RegexPathFilter implements PathFilter { @@ -50,6 +47,7 @@ public class TestGlobPaths { static private MiniDFSCluster dfsCluster; static private FileSystem fs; + static private FileContext fc; static final private int NUM_OF_PATHS = 4; static private String USER_DIR; private Path[] path = new Path[NUM_OF_PATHS]; @@ -59,6 +57,7 @@ public class TestGlobPaths { Configuration conf = new HdfsConfiguration(); dfsCluster = new MiniDFSCluster.Builder(conf).build(); fs = FileSystem.get(conf); + fc = FileContext.getFileContext(conf); USER_DIR = fs.getHomeDirectory().toUri().getPath().toString(); } @@ -803,28 +802,24 @@ public class TestGlobPaths { /** * Run a glob test on FileSystem. */ - private static void testOnFileSystem(FSTestWrapperGlobTest test) throws Exception { - Configuration conf = new HdfsConfiguration(); - MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build(); + private void testOnFileSystem(FSTestWrapperGlobTest test) throws Exception { try { - FileSystem fs = FileSystem.get(conf); + fc.mkdir(new Path(USER_DIR), FsPermission.getDefault(), true); test.run(new FileSystemTestWrapper(fs), fs, null); } finally { - cluster.shutdown(); + fc.delete(new Path(USER_DIR), true); } } /** * Run a glob test on FileContext. 
*/ - private static void testOnFileContext(FSTestWrapperGlobTest test) throws Exception { - Configuration conf = new HdfsConfiguration(); - MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build(); + private void testOnFileContext(FSTestWrapperGlobTest test) throws Exception { try { - FileContext fc = FileContext.getFileContext(conf); + fs.mkdirs(new Path(USER_DIR)); test.run(new FileContextTestWrapper(fc), null, fc); } finally { - cluster.shutdown(); + cleanupDFS(); } } @@ -857,32 +852,33 @@ public class TestGlobPaths { throws Exception { // Test that globbing through a symlink to a directory yields a path // containing that symlink. - wrap.mkdir(new Path("/alpha"), - FsPermission.getDirDefault(), false); - wrap.createSymlink(new Path("/alpha"), new Path("/alphaLink"), false); - wrap.mkdir(new Path("/alphaLink/beta"), + wrap.mkdir(new Path(USER_DIR + "/alpha"), FsPermission.getDirDefault(), + false); + wrap.createSymlink(new Path(USER_DIR + "/alpha"), new Path(USER_DIR + + "/alphaLink"), false); + wrap.mkdir(new Path(USER_DIR + "/alphaLink/beta"), FsPermission.getDirDefault(), false); // Test simple glob - FileStatus[] statuses = - wrap.globStatus(new Path("/alpha/*"), new AcceptAllPathFilter()); - Assert.assertEquals(1, statuses.length); - Assert.assertEquals("/alpha/beta", - statuses[0].getPath().toUri().getPath()); - // Test glob through symlink - statuses = - wrap.globStatus(new Path("/alphaLink/*"), new AcceptAllPathFilter()); - Assert.assertEquals(1, statuses.length); - Assert.assertEquals("/alphaLink/beta", - statuses[0].getPath().toUri().getPath()); - // If the terminal path component in a globbed path is a symlink, - // we don't dereference that link. - wrap.createSymlink(new Path("beta"), new Path("/alphaLink/betaLink"), - false); - statuses = wrap.globStatus(new Path("/alpha/betaLi*"), + FileStatus[] statuses = wrap.globStatus(new Path(USER_DIR + "/alpha/*"), new AcceptAllPathFilter()); Assert.assertEquals(1, statuses.length); - Assert.assertEquals("/alpha/betaLink", - statuses[0].getPath().toUri().getPath()); + Assert.assertEquals(USER_DIR + "/alpha/beta", statuses[0].getPath() + .toUri().getPath()); + // Test glob through symlink + statuses = wrap.globStatus(new Path(USER_DIR + "/alphaLink/*"), + new AcceptAllPathFilter()); + Assert.assertEquals(1, statuses.length); + Assert.assertEquals(USER_DIR + "/alphaLink/beta", statuses[0].getPath() + .toUri().getPath()); + // If the terminal path component in a globbed path is a symlink, + // we don't dereference that link. + wrap.createSymlink(new Path("beta"), new Path(USER_DIR + + "/alphaLink/betaLink"), false); + statuses = wrap.globStatus(new Path(USER_DIR + "/alpha/betaLi*"), + new AcceptAllPathFilter()); + Assert.assertEquals(1, statuses.length); + Assert.assertEquals(USER_DIR + "/alpha/betaLink", statuses[0].getPath() + .toUri().getPath()); // todo: test symlink-to-symlink-to-dir, etc. } } @@ -902,58 +898,64 @@ public class TestGlobPaths { * * Also test globbing dangling symlinks. It should NOT throw any exceptions! 
*/ - private static class TestGlobWithSymlinksToSymlinks - implements FSTestWrapperGlobTest { + private static class TestGlobWithSymlinksToSymlinks implements + FSTestWrapperGlobTest { public void run(FSTestWrapper wrap, FileSystem fs, FileContext fc) throws Exception { // Test that globbing through a symlink to a symlink to a directory // fully resolves - wrap.mkdir(new Path("/alpha"), FsPermission.getDirDefault(), false); - wrap.createSymlink(new Path("/alpha"), new Path("/alphaLink"), false); - wrap.createSymlink(new Path("/alphaLink"), - new Path("/alphaLinkLink"), false); - wrap.mkdir(new Path("/alpha/beta"), FsPermission.getDirDefault(), false); + wrap.mkdir(new Path(USER_DIR + "/alpha"), FsPermission.getDirDefault(), + false); + wrap.createSymlink(new Path(USER_DIR + "/alpha"), new Path(USER_DIR + + "/alphaLink"), false); + wrap.createSymlink(new Path(USER_DIR + "/alphaLink"), new Path(USER_DIR + + "/alphaLinkLink"), false); + wrap.mkdir(new Path(USER_DIR + "/alpha/beta"), + FsPermission.getDirDefault(), false); // Test glob through symlink to a symlink to a directory - FileStatus statuses[] = - wrap.globStatus(new Path("/alphaLinkLink"), new AcceptAllPathFilter()); + FileStatus statuses[] = wrap.globStatus(new Path(USER_DIR + + "/alphaLinkLink"), new AcceptAllPathFilter()); Assert.assertEquals(1, statuses.length); - Assert.assertEquals("/alphaLinkLink", - statuses[0].getPath().toUri().getPath()); - statuses = - wrap.globStatus(new Path("/alphaLinkLink/*"), new AcceptAllPathFilter()); + Assert.assertEquals(USER_DIR + "/alphaLinkLink", statuses[0].getPath() + .toUri().getPath()); + statuses = wrap.globStatus(new Path(USER_DIR + "/alphaLinkLink/*"), + new AcceptAllPathFilter()); Assert.assertEquals(1, statuses.length); - Assert.assertEquals("/alphaLinkLink/beta", - statuses[0].getPath().toUri().getPath()); + Assert.assertEquals(USER_DIR + "/alphaLinkLink/beta", statuses[0] + .getPath().toUri().getPath()); // Test glob of dangling symlink (theta does not actually exist) - wrap.createSymlink(new Path("theta"), new Path("/alpha/kappa"), false); - statuses = wrap.globStatus(new Path("/alpha/kappa/kappa"), - new AcceptAllPathFilter()); + wrap.createSymlink(new Path(USER_DIR + "theta"), new Path(USER_DIR + + "/alpha/kappa"), false); + statuses = wrap.globStatus(new Path(USER_DIR + "/alpha/kappa/kappa"), + new AcceptAllPathFilter()); Assert.assertNull(statuses); // Test glob of symlinks - wrap.createFile("/alpha/beta/gamma"); - wrap.createSymlink(new Path("gamma"), - new Path("/alpha/beta/gammaLink"), false); - wrap.createSymlink(new Path("gammaLink"), - new Path("/alpha/beta/gammaLinkLink"), false); - wrap.createSymlink(new Path("gammaLinkLink"), - new Path("/alpha/beta/gammaLinkLinkLink"), false); - statuses = wrap.globStatus(new Path("/alpha/*/gammaLinkLinkLink"), - new AcceptAllPathFilter()); + wrap.createFile(USER_DIR + "/alpha/beta/gamma"); + wrap.createSymlink(new Path(USER_DIR + "gamma"), new Path(USER_DIR + + "/alpha/beta/gammaLink"), false); + wrap.createSymlink(new Path(USER_DIR + "gammaLink"), new Path(USER_DIR + + "/alpha/beta/gammaLinkLink"), false); + wrap.createSymlink(new Path(USER_DIR + "gammaLinkLink"), new Path( + USER_DIR + "/alpha/beta/gammaLinkLinkLink"), false); + statuses = wrap.globStatus(new Path(USER_DIR + + "/alpha/*/gammaLinkLinkLink"), new AcceptAllPathFilter()); Assert.assertEquals(1, statuses.length); - Assert.assertEquals("/alpha/beta/gammaLinkLinkLink", + Assert.assertEquals(USER_DIR + "/alpha/beta/gammaLinkLinkLink", 
statuses[0].getPath().toUri().getPath()); - statuses = wrap.globStatus(new Path("/alpha/beta/*"), - new AcceptAllPathFilter()); - Assert.assertEquals("/alpha/beta/gamma;/alpha/beta/gammaLink;" + - "/alpha/beta/gammaLinkLink;/alpha/beta/gammaLinkLinkLink", + statuses = wrap.globStatus(new Path(USER_DIR + "/alpha/beta/*"), + new AcceptAllPathFilter()); + Assert.assertEquals(USER_DIR + "/alpha/beta/gamma;" + USER_DIR + + "/alpha/beta/gammaLink;" + USER_DIR + "/alpha/beta/gammaLinkLink;" + + USER_DIR + "/alpha/beta/gammaLinkLinkLink", TestPath.mergeStatuses(statuses)); // Let's create two symlinks that point to each other, and glob on them. - wrap.createSymlink(new Path("tweedledee"), - new Path("/tweedledum"), false); - wrap.createSymlink(new Path("tweedledum"), - new Path("/tweedledee"), false); - statuses = wrap.globStatus(new Path("/tweedledee/unobtainium"), - new AcceptAllPathFilter()); + wrap.createSymlink(new Path(USER_DIR + "tweedledee"), new Path(USER_DIR + + "/tweedledum"), false); + wrap.createSymlink(new Path(USER_DIR + "tweedledum"), new Path(USER_DIR + + "/tweedledee"), false); + statuses = wrap.globStatus( + new Path(USER_DIR + "/tweedledee/unobtainium"), + new AcceptAllPathFilter()); Assert.assertNull(statuses); } } @@ -971,34 +973,39 @@ public class TestGlobPaths { /** * Test globbing symlinks with a custom PathFilter */ - private static class TestGlobSymlinksWithCustomPathFilter - implements FSTestWrapperGlobTest { + private static class TestGlobSymlinksWithCustomPathFilter implements + FSTestWrapperGlobTest { public void run(FSTestWrapper wrap, FileSystem fs, FileContext fc) throws Exception { // Test that globbing through a symlink to a symlink to a directory // fully resolves - wrap.mkdir(new Path("/alpha"), FsPermission.getDirDefault(), false); - wrap.createSymlink(new Path("/alpha"), new Path("/alphaLinkz"), false); - wrap.mkdir(new Path("/alpha/beta"), FsPermission.getDirDefault(), false); - wrap.mkdir(new Path("/alpha/betaz"), FsPermission.getDirDefault(), false); - // Test glob through symlink to a symlink to a directory, with a PathFilter - FileStatus statuses[] = - wrap.globStatus(new Path("/alpha/beta"), new AcceptPathsEndingInZ()); + wrap.mkdir(new Path(USER_DIR + "/alpha"), FsPermission.getDirDefault(), + false); + wrap.createSymlink(new Path(USER_DIR + "/alpha"), new Path(USER_DIR + + "/alphaLinkz"), false); + wrap.mkdir(new Path(USER_DIR + "/alpha/beta"), + FsPermission.getDirDefault(), false); + wrap.mkdir(new Path(USER_DIR + "/alpha/betaz"), + FsPermission.getDirDefault(), false); + // Test glob through symlink to a symlink to a directory, with a + // PathFilter + FileStatus statuses[] = wrap.globStatus( + new Path(USER_DIR + "/alpha/beta"), new AcceptPathsEndingInZ()); Assert.assertNull(statuses); - statuses = - wrap.globStatus(new Path("/alphaLinkz/betaz"), new AcceptPathsEndingInZ()); + statuses = wrap.globStatus(new Path(USER_DIR + "/alphaLinkz/betaz"), + new AcceptPathsEndingInZ()); Assert.assertEquals(1, statuses.length); - Assert.assertEquals("/alphaLinkz/betaz", - statuses[0].getPath().toUri().getPath()); - statuses = - wrap.globStatus(new Path("/*/*"), new AcceptPathsEndingInZ()); - Assert.assertEquals("/alpha/betaz;/alphaLinkz/betaz", - TestPath.mergeStatuses(statuses)); - statuses = - wrap.globStatus(new Path("/*/*"), new AcceptAllPathFilter()); - Assert.assertEquals("/alpha/beta;/alpha/betaz;" + - "/alphaLinkz/beta;/alphaLinkz/betaz", - TestPath.mergeStatuses(statuses)); + Assert.assertEquals(USER_DIR + "/alphaLinkz/betaz", statuses[0].getPath() + 
.toUri().getPath()); + statuses = wrap.globStatus(new Path(USER_DIR + "/*/*"), + new AcceptPathsEndingInZ()); + Assert.assertEquals(USER_DIR + "/alpha/betaz;" + USER_DIR + + "/alphaLinkz/betaz", TestPath.mergeStatuses(statuses)); + statuses = wrap.globStatus(new Path(USER_DIR + "/*/*"), + new AcceptAllPathFilter()); + Assert.assertEquals(USER_DIR + "/alpha/beta;" + USER_DIR + + "/alpha/betaz;" + USER_DIR + "/alphaLinkz/beta;" + USER_DIR + + "/alphaLinkz/betaz", TestPath.mergeStatuses(statuses)); } } @@ -1015,24 +1022,25 @@ public class TestGlobPaths { /** * Test that globStatus fills in the scheme even when it is not provided. */ - private static class TestGlobFillsInScheme - implements FSTestWrapperGlobTest { - public void run(FSTestWrapper wrap, FileSystem fs, FileContext fc) + private static class TestGlobFillsInScheme implements FSTestWrapperGlobTest { + public void run(FSTestWrapper wrap, FileSystem fs, FileContext fc) throws Exception { // Verify that the default scheme is hdfs, when we don't supply one. - wrap.mkdir(new Path("/alpha"), FsPermission.getDirDefault(), false); - wrap.createSymlink(new Path("/alpha"), new Path("/alphaLink"), false); - FileStatus statuses[] = - wrap.globStatus(new Path("/alphaLink"), new AcceptAllPathFilter()); + wrap.mkdir(new Path(USER_DIR + "/alpha"), FsPermission.getDirDefault(), + false); + wrap.createSymlink(new Path(USER_DIR + "/alpha"), new Path(USER_DIR + + "/alphaLink"), false); + FileStatus statuses[] = wrap.globStatus( + new Path(USER_DIR + "/alphaLink"), new AcceptAllPathFilter()); Assert.assertEquals(1, statuses.length); Path path = statuses[0].getPath(); - Assert.assertEquals("/alphaLink", path.toUri().getPath()); + Assert.assertEquals(USER_DIR + "/alphaLink", path.toUri().getPath()); Assert.assertEquals("hdfs", path.toUri().getScheme()); if (fc != null) { // If we're using FileContext, then we can list a file:/// URI. // Since everyone should have the root directory, we list that. - statuses = - wrap.globStatus(new Path("file:///"), new AcceptAllPathFilter()); + statuses = wrap.globStatus(new Path("file:///"), + new AcceptAllPathFilter()); Assert.assertEquals(1, statuses.length); Path filePath = statuses[0].getPath(); Assert.assertEquals("file", filePath.toUri().getScheme()); From 8172215e5601c3bb03fb5c0a0d88768142ea5087 Mon Sep 17 00:00:00 2001 From: Jing Zhao Date: Thu, 15 Aug 2013 17:19:52 +0000 Subject: [PATCH 17/53] HDFS-5080. BootstrapStandby not working with QJM when the existing NN is active. Contributed by Jing Zhao. 
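Note (explanatory, not part of the committed patch): with QJM shared edits, the segment the active NameNode is currently writing is still in progress on the JournalNodes, and getEditLogManifest() previously skipped in-progress segments entirely, so BootstrapStandby could not see the transactions it needed while the other NameNode was active. This patch threads a new inProgressOk flag through the QJM call chain (AsyncLogger, AsyncLoggerSet, IPCLoggerChannel, the QJournalProtocol messages and PB translators, Journal, JournalNodeRpcServer) and down through FSEditLog, FileJournalManager, JournalSet and BootstrapStandby, so a caller that does not intend to tail the logs for reading can still learn about the in-progress segment. A small illustrative sketch of the segment-selection rule is included after the AsyncLogger.java hunk below.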
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1514386 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + .../hdfs/qjournal/client/AsyncLogger.java | 2 +- .../hdfs/qjournal/client/AsyncLoggerSet.java | 6 +- .../qjournal/client/IPCLoggerChannel.java | 5 +- .../qjournal/client/QuorumJournalManager.java | 5 +- .../qjournal/protocol/QJournalProtocol.java | 7 +- ...JournalProtocolServerSideTranslatorPB.java | 3 +- .../QJournalProtocolTranslatorPB.java | 4 +- .../hadoop/hdfs/qjournal/server/Journal.java | 31 +++- .../qjournal/server/JournalNodeRpcServer.java | 5 +- .../hdfs/server/namenode/FSEditLog.java | 16 +- .../server/namenode/FileJournalManager.java | 12 +- .../hdfs/server/namenode/JournalSet.java | 13 +- .../server/namenode/ha/BootstrapStandby.java | 2 +- .../src/main/proto/QJournalProtocol.proto | 1 + .../hdfs/server/namenode/FSImageTestUtil.java | 13 ++ .../namenode/TestFileJournalManager.java | 2 +- .../namenode/ha/TestBootstrapStandby.java | 20 +-- .../ha/TestBootstrapStandbyWithQJM.java | 170 ++++++++++++++++++ 19 files changed, 265 insertions(+), 55 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandbyWithQJM.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index effb3a62a4f..e720915987b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -333,6 +333,9 @@ Release 2.1.1-beta - UNRELEASED HDFS-5093. TestGlobPaths should re-use the MiniDFSCluster to avoid failure on Windows. (Chuan Liu via cnauroth) + HDFS-5080. BootstrapStandby not working with QJM when the existing NN is + active. (jing9) + Release 2.1.0-beta - 2013-08-06 INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLogger.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLogger.java index dda1de1164b..2501e009931 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLogger.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLogger.java @@ -109,7 +109,7 @@ interface AsyncLogger { * Fetch the list of edit logs available on the remote node. */ public ListenableFuture getEditLogManifest( - long fromTxnId, boolean forReading); + long fromTxnId, boolean forReading, boolean inProgressOk); /** * Prepare recovery. See the HDFS-3077 design document for details. 
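The AsyncLogger hunk above only widens the interface. The standalone sketch below models the selection rule that the Journal.java hunk later in this patch implements: in-progress segments are dropped from the manifest unless inProgressOk is set (and forReading forces it off), in which case the single in-progress segment is reported with its end capped at the highest transaction id already written. The Segment class and buildManifest() method are hypothetical stand-ins for RemoteEditLog and Journal#getEditLogManifest, written only to illustrate the rule; they are not code from the patch.

import java.util.ArrayList;
import java.util.List;

public class ManifestRuleSketch {
  // Hypothetical stand-in for RemoteEditLog: endTxId < 0 means the segment is still in progress.
  static final class Segment {
    final long startTxId;
    final long endTxId;
    Segment(long startTxId, long endTxId) { this.startTxId = startTxId; this.endTxId = endTxId; }
    boolean isInProgress() { return endTxId < 0; }
    @Override public String toString() {
      return "[" + startTxId + "-" + (isInProgress() ? "inprogress" : endTxId) + "]";
    }
  }

  // Mirrors the rule added to Journal#getEditLogManifest: finalized segments are always
  // reported; the in-progress segment is skipped unless inProgressOk is true, in which
  // case it is reported with its end capped at the highest txid the JournalNode has written.
  static List<Segment> buildManifest(List<Segment> onDisk, boolean forReading,
      boolean inProgressOk, long highestWrittenTxId) {
    if (forReading) {
      inProgressOk = false;   // same guard as in the Journal.java hunk further below
    }
    List<Segment> manifest = new ArrayList<>();
    for (Segment s : onDisk) {
      if (!s.isInProgress()) {
        manifest.add(s);
      } else if (inProgressOk) {
        manifest.add(new Segment(s.startTxId, highestWrittenTxId));
      }
      // otherwise the in-progress segment is dropped, as it always was before this patch
    }
    return manifest;
  }

  public static void main(String[] args) {
    List<Segment> onDisk = List.of(new Segment(1, 100), new Segment(101, -1));
    // Reading path: the in-progress segment is never exposed.
    System.out.println(buildManifest(onDisk, true, true, 150));   // [[1-100]]
    // BootstrapStandby path (forReading=false, inProgressOk=true): capped at txid 150.
    System.out.println(buildManifest(onDisk, false, true, 150));  // [[1-100], [101-150]]
  }
}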
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java index 3beff863efb..74131936bde 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java @@ -262,14 +262,14 @@ class AsyncLoggerSet { return QuorumCall.create(calls); } - public QuorumCall - getEditLogManifest(long fromTxnId, boolean forReading) { + public QuorumCall getEditLogManifest( + long fromTxnId, boolean forReading, boolean inProgressOk) { Map> calls = Maps.newHashMap(); for (AsyncLogger logger : loggers) { ListenableFuture future = - logger.getEditLogManifest(fromTxnId, forReading); + logger.getEditLogManifest(fromTxnId, forReading, inProgressOk); calls.put(logger, future); } return QuorumCall.create(calls); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java index 9115804a966..4603dbd0207 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java @@ -519,12 +519,13 @@ public class IPCLoggerChannel implements AsyncLogger { @Override public ListenableFuture getEditLogManifest( - final long fromTxnId, final boolean forReading) { + final long fromTxnId, final boolean forReading, + final boolean inProgressOk) { return executor.submit(new Callable() { @Override public RemoteEditLogManifest call() throws IOException { GetEditLogManifestResponseProto ret = getProxy().getEditLogManifest( - journalId, fromTxnId, forReading); + journalId, fromTxnId, forReading, inProgressOk); // Update the http port, since we need this to build URLs to any of the // returned logs. 
httpPort = ret.getHttpPort(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java index 385200176e8..4f1b96b6f42 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java @@ -456,7 +456,7 @@ public class QuorumJournalManager implements JournalManager { long fromTxnId, boolean inProgressOk, boolean forReading) throws IOException { QuorumCall q = - loggers.getEditLogManifest(fromTxnId, forReading); + loggers.getEditLogManifest(fromTxnId, forReading, inProgressOk); Map resps = loggers.waitForWriteQuorum(q, selectInputStreamsTimeoutMs, "selectInputStreams"); @@ -480,8 +480,7 @@ public class QuorumJournalManager implements JournalManager { allStreams.add(elis); } } - JournalSet.chainAndMakeRedundantStreams( - streams, allStreams, fromTxnId, inProgressOk); + JournalSet.chainAndMakeRedundantStreams(streams, allStreams, fromTxnId); } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocol/QJournalProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocol/QJournalProtocol.java index 63d7a755170..15ee76c6e9b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocol/QJournalProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocol/QJournalProtocol.java @@ -125,10 +125,13 @@ public interface QJournalProtocol { * @param sinceTxId the first transaction which the client cares about * @param forReading whether or not the caller intends to read from the edit * logs + * @param inProgressOk whether or not to check the in-progress edit log + * segment * @return a list of edit log segments since the given transaction ID. */ - public GetEditLogManifestResponseProto getEditLogManifest( - String jid, long sinceTxId, boolean forReading) throws IOException; + public GetEditLogManifestResponseProto getEditLogManifest(String jid, + long sinceTxId, boolean forReading, boolean inProgressOk) + throws IOException; /** * Begin the recovery process for a given segment. 
See the HDFS-3077 diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolServerSideTranslatorPB.java index bdebb380a35..50714040268 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolServerSideTranslatorPB.java @@ -203,7 +203,8 @@ public class QJournalProtocolServerSideTranslatorPB implements QJournalProtocolP return impl.getEditLogManifest( request.getJid().getIdentifier(), request.getSinceTxId(), - request.getForReading()); + request.getForReading(), + request.getInProgressOk()); } catch (IOException e) { throw new ServiceException(e); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolTranslatorPB.java index 7b36ff5c025..2df7d94bc5a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolTranslatorPB.java @@ -228,13 +228,15 @@ public class QJournalProtocolTranslatorPB implements ProtocolMetaInterface, @Override public GetEditLogManifestResponseProto getEditLogManifest(String jid, - long sinceTxId, boolean forReading) throws IOException { + long sinceTxId, boolean forReading, boolean inProgressOk) + throws IOException { try { return rpcProxy.getEditLogManifest(NULL_CONTROLLER, GetEditLogManifestRequestProto.newBuilder() .setJid(convertJournalId(jid)) .setSinceTxId(sinceTxId) .setForReading(forReading) + .setInProgressOk(inProgressOk) .build()); } catch (ServiceException e) { throw ProtobufHelper.getRemoteException(e); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java index 4e286b539a5..b68516b6c6a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java @@ -25,10 +25,9 @@ import java.io.InputStream; import java.io.OutputStreamWriter; import java.net.URL; import java.security.PrivilegedExceptionAction; +import java.util.Iterator; import java.util.List; import java.util.concurrent.TimeUnit; -import java.util.regex.Matcher; -import java.util.regex.Pattern; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -36,8 +35,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.qjournal.protocol.JournalNotFormattedException; -import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocol; import org.apache.hadoop.hdfs.qjournal.protocol.JournalOutOfSyncException; +import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocol; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto; import 
org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto; @@ -50,6 +49,7 @@ import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile; import org.apache.hadoop.hdfs.server.namenode.JournalManager; import org.apache.hadoop.hdfs.server.namenode.TransferFsImage; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; +import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog; import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest; import org.apache.hadoop.hdfs.util.AtomicFileOutputStream; import org.apache.hadoop.hdfs.util.BestEffortLongFile; @@ -630,14 +630,31 @@ class Journal implements Closeable { * @see QJournalProtocol#getEditLogManifest(String, long) */ public RemoteEditLogManifest getEditLogManifest(long sinceTxId, - boolean forReading) throws IOException { + boolean forReading, boolean inProgressOk) throws IOException { // No need to checkRequest() here - anyone may ask for the list // of segments. checkFormatted(); - RemoteEditLogManifest manifest = new RemoteEditLogManifest( - fjm.getRemoteEditLogs(sinceTxId, forReading)); - return manifest; + // if this is for reading, ignore the in-progress editlog segment + inProgressOk = forReading ? false : inProgressOk; + List logs = fjm.getRemoteEditLogs(sinceTxId, forReading, + inProgressOk); + + if (inProgressOk) { + RemoteEditLog log = null; + for (Iterator iter = logs.iterator(); iter.hasNext();) { + log = iter.next(); + if (log.isInProgress()) { + iter.remove(); + break; + } + } + if (log != null && log.isInProgress()) { + logs.add(new RemoteEditLog(log.getStartTxId(), getHighestWrittenTxId())); + } + } + + return new RemoteEditLogManifest(logs); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java index d00ba2d145f..79bd333ad34 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java @@ -175,10 +175,11 @@ class JournalNodeRpcServer implements QJournalProtocol { @Override public GetEditLogManifestResponseProto getEditLogManifest(String jid, - long sinceTxId, boolean forReading) throws IOException { + long sinceTxId, boolean forReading, boolean inProgressOk) + throws IOException { RemoteEditLogManifest manifest = jn.getOrCreateJournal(jid) - .getEditLogManifest(sinceTxId, forReading); + .getEditLogManifest(sinceTxId, forReading, inProgressOk); return GetEditLogManifestResponseProto.newBuilder() .setManifest(PBHelper.convert(manifest)) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java index df90a8fc10f..60ffe7ac172 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java @@ -1274,6 +1274,7 @@ public class FSEditLog implements LogsPurgeable { } } + @Override public void selectInputStreams(Collection streams, long fromTxId, boolean inProgressOk, boolean forReading) { 
journalSet.selectInputStreams(streams, fromTxId, inProgressOk, forReading); @@ -1284,18 +1285,27 @@ public class FSEditLog implements LogsPurgeable { return selectInputStreams(fromTxId, toAtLeastTxId, null, true); } + /** Select a list of input streams to load */ + public Collection selectInputStreams( + long fromTxId, long toAtLeastTxId, MetaRecoveryContext recovery, + boolean inProgressOk) throws IOException { + return selectInputStreams(fromTxId, toAtLeastTxId, recovery, inProgressOk, + true); + } + /** - * Select a list of input streams to load. + * Select a list of input streams. * * @param fromTxId first transaction in the selected streams * @param toAtLeast the selected streams must contain this transaction * @param inProgessOk set to true if in-progress streams are OK + * @param forReading whether or not to use the streams to load the edit log */ public synchronized Collection selectInputStreams( long fromTxId, long toAtLeastTxId, MetaRecoveryContext recovery, - boolean inProgressOk) throws IOException { + boolean inProgressOk, boolean forReading) throws IOException { List streams = new ArrayList(); - selectInputStreams(streams, fromTxId, inProgressOk, true); + selectInputStreams(streams, fromTxId, inProgressOk, forReading); try { checkForGaps(streams, fromTxId, toAtLeastTxId, inProgressOk); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java index f745693ceb9..77aca197ab6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java @@ -169,18 +169,26 @@ public class FileJournalManager implements JournalManager { * @param fromTxId the txnid which to start looking * @param forReading whether or not the caller intends to read from the edit * logs + * @param inProgressOk whether or not to include the in-progress edit log + * segment * @return a list of remote edit logs * @throws IOException if edit logs cannot be listed. */ public List getRemoteEditLogs(long firstTxId, - boolean forReading) throws IOException { + boolean forReading, boolean inProgressOk) throws IOException { + // make sure not reading in-progress edit log, i.e., if forReading is true, + // we should ignore the in-progress edit log. 
+ Preconditions.checkArgument(!(forReading && inProgressOk)); + File currentDir = sd.getCurrentDir(); List allLogFiles = matchEditLogs(currentDir); List ret = Lists.newArrayListWithCapacity( allLogFiles.size()); for (EditLogFile elf : allLogFiles) { - if (elf.hasCorruptHeader() || elf.isInProgress()) continue; + if (elf.hasCorruptHeader() || (!inProgressOk && elf.isInProgress())) { + continue; + } if (elf.getFirstTxId() >= firstTxId) { ret.add(new RemoteEditLog(elf.firstTxId, elf.lastTxId)); } else if (elf.getFirstTxId() < firstTxId && firstTxId <= elf.getLastTxId()) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java index 396524dbaf1..1d43cb73527 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hdfs.server.namenode; +import static org.apache.hadoop.util.ExitUtil.terminate; + import java.io.IOException; import java.util.ArrayList; import java.util.Collection; @@ -31,14 +33,10 @@ import java.util.concurrent.CopyOnWriteArrayList; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; - -import static org.apache.hadoop.util.ExitUtil.terminate; - import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog; import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest; -import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.common.collect.ComparisonChain; import com.google.common.collect.ImmutableList; @@ -257,13 +255,12 @@ public class JournalSet implements JournalManager { ". Skipping.", ioe); } } - chainAndMakeRedundantStreams(streams, allStreams, fromTxId, inProgressOk); + chainAndMakeRedundantStreams(streams, allStreams, fromTxId); } public static void chainAndMakeRedundantStreams( Collection outStreams, - PriorityQueue allStreams, - long fromTxId, boolean inProgressOk) { + PriorityQueue allStreams, long fromTxId) { // We want to group together all the streams that start on the same start // transaction ID. To do this, we maintain an accumulator (acc) of all // the streams we've seen at a given start transaction ID. 
When we see a @@ -598,7 +595,7 @@ public class JournalSet implements JournalManager { if (j.getManager() instanceof FileJournalManager) { FileJournalManager fjm = (FileJournalManager)j.getManager(); try { - allLogs.addAll(fjm.getRemoteEditLogs(fromTxId, forReading)); + allLogs.addAll(fjm.getRemoteEditLogs(fromTxId, forReading, false)); } catch (Throwable t) { LOG.warn("Cannot list edit logs in " + fjm, t); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java index e9549ce8f18..41325257e35 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java @@ -226,7 +226,7 @@ public class BootstrapStandby implements Tool, Configurable { try { Collection streams = image.getEditLog().selectInputStreams( - firstTxIdInLogs, curTxIdOnOtherNode, null, true); + firstTxIdInLogs, curTxIdOnOtherNode, null, true, false); for (EditLogInputStream stream : streams) { IOUtils.closeStream(stream); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/QJournalProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/QJournalProtocol.proto index 16c0277b9be..a9e8017e96f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/QJournalProtocol.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/QJournalProtocol.proto @@ -177,6 +177,7 @@ message GetEditLogManifestRequestProto { required uint64 sinceTxId = 2; // Transaction ID // Whether or not the client will be reading from the returned streams. optional bool forReading = 3 [default = true]; + optional bool inProgressOk = 4 [default = false]; } message GetEditLogManifestResponseProto { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java index 6fc8d6e4784..7c2c7e2f98c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java @@ -62,6 +62,7 @@ import org.mockito.Mockito; import com.google.common.base.Joiner; import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableSet; import com.google.common.collect.Lists; import com.google.common.collect.Maps; import com.google.common.collect.Sets; @@ -555,4 +556,16 @@ public abstract class FSImageTestUtil { public static long getNSQuota(FSNamesystem ns) { return ns.dir.rootDir.getNsQuota(); } + + public static void assertNNFilesMatch(MiniDFSCluster cluster) throws Exception { + List curDirs = Lists.newArrayList(); + curDirs.addAll(FSImageTestUtil.getNameNodeCurrentDirs(cluster, 0)); + curDirs.addAll(FSImageTestUtil.getNameNodeCurrentDirs(cluster, 1)); + + // Ignore seen_txid file, since the newly bootstrapped standby + // will have a higher seen_txid than the one it bootstrapped from. 
+ Set ignoredFiles = ImmutableSet.of("seen_txid"); + FSImageTestUtil.assertParallelFilesAreIdentical(curDirs, + ignoredFiles); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java index e3fd99a4b71..44d1058806f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java @@ -479,6 +479,6 @@ public class TestFileJournalManager { private static String getLogsAsString( FileJournalManager fjm, long firstTxId) throws IOException { - return Joiner.on(",").join(fjm.getRemoteEditLogs(firstTxId, true)); + return Joiner.on(",").join(fjm.getRemoteEditLogs(firstTxId, true, false)); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java index d38fdd7982b..678e03866d5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java @@ -24,8 +24,6 @@ import static org.junit.Assert.fail; import java.io.File; import java.io.IOException; import java.net.URI; -import java.util.List; -import java.util.Set; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -45,8 +43,6 @@ import org.junit.Before; import org.junit.Test; import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableSet; -import com.google.common.collect.Lists; public class TestBootstrapStandby { private static final Log LOG = LogFactory.getLog(TestBootstrapStandby.class); @@ -107,7 +103,7 @@ public class TestBootstrapStandby { // Should have copied over the namespace from the active FSImageTestUtil.assertNNHasCheckpoints(cluster, 1, ImmutableList.of(0)); - assertNNFilesMatch(); + FSImageTestUtil.assertNNFilesMatch(cluster); // We should now be able to start the standby successfully. cluster.restartNameNode(1); @@ -138,7 +134,7 @@ public class TestBootstrapStandby { // Should have copied over the namespace from the active FSImageTestUtil.assertNNHasCheckpoints(cluster, 1, ImmutableList.of((int)expectedCheckpointTxId)); - assertNNFilesMatch(); + FSImageTestUtil.assertNNFilesMatch(cluster); // We should now be able to start the standby successfully. cluster.restartNameNode(1); @@ -208,18 +204,6 @@ public class TestBootstrapStandby { cluster.getConfiguration(1)); assertEquals(0, rc); } - - private void assertNNFilesMatch() throws Exception { - List curDirs = Lists.newArrayList(); - curDirs.addAll(FSImageTestUtil.getNameNodeCurrentDirs(cluster, 0)); - curDirs.addAll(FSImageTestUtil.getNameNodeCurrentDirs(cluster, 1)); - - // Ignore seen_txid file, since the newly bootstrapped standby - // will have a higher seen_txid than the one it bootstrapped from. 
- Set ignoredFiles = ImmutableSet.of("seen_txid"); - FSImageTestUtil.assertParallelFilesAreIdentical(curDirs, - ignoredFiles); - } private void removeStandbyNameDirs() { for (URI u : cluster.getNameDirs(1)) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandbyWithQJM.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandbyWithQJM.java new file mode 100644 index 00000000000..e618c9a5ed0 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandbyWithQJM.java @@ -0,0 +1,170 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode.ha; + +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY; +import static org.junit.Assert.assertEquals; + +import java.io.IOException; +import java.net.URI; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.DFSUtil; +import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.MiniDFSNNTopology; +import org.apache.hadoop.hdfs.qjournal.MiniJournalCluster; +import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; +import org.apache.hadoop.hdfs.server.namenode.NameNode; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.google.common.collect.ImmutableList; + +/** + * Test BootstrapStandby when QJM is used for shared edits. 
+ */ +public class TestBootstrapStandbyWithQJM { + + private static final String NAMESERVICE = "ns1"; + private static final String NN1 = "nn1"; + private static final String NN2 = "nn2"; + private static final int NUM_JN = 3; + private static final int NN1_IPC_PORT = 10000; + private static final int NN1_INFO_PORT = 10001; + private static final int NN2_IPC_PORT = 10002; + private static final int NN2_INFO_PORT = 10003; + + private MiniDFSCluster cluster; + private MiniJournalCluster jCluster; + + @Before + public void setup() throws Exception { + // start 3 journal nodes + jCluster = new MiniJournalCluster.Builder(new Configuration()).format(true) + .numJournalNodes(NUM_JN).build(); + URI journalURI = jCluster.getQuorumJournalURI(NAMESERVICE); + + // start cluster with 2 NameNodes + MiniDFSNNTopology topology = new MiniDFSNNTopology() + .addNameservice(new MiniDFSNNTopology.NSConf(NAMESERVICE).addNN( + new MiniDFSNNTopology.NNConf("nn1").setIpcPort(NN1_IPC_PORT) + .setHttpPort(NN1_INFO_PORT)).addNN( + new MiniDFSNNTopology.NNConf("nn2").setIpcPort(NN2_IPC_PORT) + .setHttpPort(NN2_INFO_PORT))); + + Configuration conf = initHAConf(journalURI); + cluster = new MiniDFSCluster.Builder(conf).nnTopology(topology) + .numDataNodes(1).manageNameDfsSharedDirs(false).build(); + cluster.waitActive(); + + Configuration confNN0 = new Configuration(conf); + cluster.shutdown(); + // initialize the journal nodes + confNN0.set(DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY, "nn1"); + NameNode.initializeSharedEdits(confNN0, true); + + // restart the cluster + cluster = new MiniDFSCluster.Builder(conf).format(false) + .nnTopology(topology).numDataNodes(1).manageNameDfsSharedDirs(false) + .build(); + cluster.waitActive(); + + // make nn0 active + cluster.transitionToActive(0); + // do sth to generate in-progress edit log data + DistributedFileSystem dfs = (DistributedFileSystem) + HATestUtil.configureFailoverFs(cluster, conf); + dfs.mkdirs(new Path("/test2")); + dfs.close(); + } + + @After + public void cleanup() throws IOException { + if (cluster != null) { + cluster.shutdown(); + } + if (jCluster != null) { + jCluster.shutdown(); + } + } + + private Configuration initHAConf(URI journalURI) { + Configuration conf = new Configuration(); + conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY, + journalURI.toString()); + + String address1 = "127.0.0.1:" + NN1_IPC_PORT; + String address2 = "127.0.0.1:" + NN2_IPC_PORT; + conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY, + NAMESERVICE, NN1), address1); + conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY, + NAMESERVICE, NN2), address2); + conf.set(DFSConfigKeys.DFS_NAMESERVICES, NAMESERVICE); + conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, NAMESERVICE), + NN1 + "," + NN2); + conf.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." 
+ NAMESERVICE, + ConfiguredFailoverProxyProvider.class.getName()); + conf.set("fs.defaultFS", "hdfs://" + NAMESERVICE); + + return conf; + } + + /** BootstrapStandby when the existing NN is standby */ + @Test + public void testBootstrapStandbyWithStandbyNN() throws Exception { + // make the first NN in standby state + cluster.transitionToStandby(0); + Configuration confNN1 = cluster.getConfiguration(1); + + // shut down nn1 + cluster.shutdownNameNode(1); + + int rc = BootstrapStandby.run(new String[] { "-force" }, confNN1); + assertEquals(0, rc); + + // Should have copied over the namespace from the standby + FSImageTestUtil.assertNNHasCheckpoints(cluster, 1, + ImmutableList.of(0)); + FSImageTestUtil.assertNNFilesMatch(cluster); + } + + /** BootstrapStandby when the existing NN is active */ + @Test + public void testBootstrapStandbyWithActiveNN() throws Exception { + // make the first NN in active state + cluster.transitionToActive(0); + Configuration confNN1 = cluster.getConfiguration(1); + + // shut down nn1 + cluster.shutdownNameNode(1); + + int rc = BootstrapStandby.run(new String[] { "-force" }, confNN1); + assertEquals(0, rc); + + // Should have copied over the namespace from the standby + FSImageTestUtil.assertNNHasCheckpoints(cluster, 1, + ImmutableList.of(0)); + FSImageTestUtil.assertNNFilesMatch(cluster); + } +} From 02b19e0738d9df1e4d38280c5575e1d3ba49f8cb Mon Sep 17 00:00:00 2001 From: Jing Zhao Date: Thu, 15 Aug 2013 18:22:52 +0000 Subject: [PATCH 18/53] HDFS-5076. Add MXBean methods to query NN's transaction information and JournalNode's journal status. Contributed by Jing Zhao. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1514422 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + .../hdfs/qjournal/server/JournalNode.java | 52 ++++++++- .../qjournal/server/JournalNodeMXBean.java | 36 ++++++ .../hdfs/server/namenode/FSNamesystem.java | 10 ++ .../hdfs/server/namenode/NameNodeMXBean.java | 6 + .../server/TestJournalNodeMXBean.java | 107 ++++++++++++++++++ .../server/namenode/TestNameNodeMXBean.java | 5 + 7 files changed, 218 insertions(+), 1 deletion(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeMXBean.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeMXBean.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index e720915987b..44308674028 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -288,6 +288,9 @@ Release 2.1.1-beta - UNRELEASED HDFS-4763 Add script changes/utility for starting NFS gateway (brandonli) + HDFS-5076 Add MXBean methods to query NN's transaction information and + JournalNode's journal status. (jing9) + IMPROVEMENTS HDFS-4513. 
Clarify in the WebHDFS REST API that all JSON respsonses may diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java index 8291b5932eb..4ed4244ac16 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java @@ -18,8 +18,10 @@ package org.apache.hadoop.hdfs.qjournal.server; import java.io.File; +import java.io.FileFilter; import java.io.IOException; import java.net.InetSocketAddress; +import java.util.HashMap; import java.util.Map; import org.apache.commons.logging.Log; @@ -34,11 +36,13 @@ import org.apache.hadoop.hdfs.server.common.StorageErrorReporter; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.metrics2.source.JvmMetrics; +import org.apache.hadoop.metrics2.util.MBeans; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.util.DiskChecker; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; +import org.mortbay.util.ajax.JSON; import com.google.common.base.Preconditions; import com.google.common.collect.Maps; @@ -51,7 +55,7 @@ import com.google.common.collect.Maps; * in the quorum protocol. */ @InterfaceAudience.Private -public class JournalNode implements Tool, Configurable { +public class JournalNode implements Tool, Configurable, JournalNodeMXBean { public static final Log LOG = LogFactory.getLog(JournalNode.class); private Configuration conf; private JournalNodeRpcServer rpcServer; @@ -128,6 +132,8 @@ public class JournalNode implements Tool, Configurable { SecurityUtil.login(conf, DFSConfigKeys.DFS_JOURNALNODE_KEYTAB_FILE_KEY, DFSConfigKeys.DFS_JOURNALNODE_USER_NAME_KEY, socAddr.getHostName()); + registerJNMXBean(); + httpServer = new JournalNodeHttpServer(conf, this); httpServer.start(); @@ -208,6 +214,50 @@ public class JournalNode implements Tool, Configurable { return new File(new File(dir), jid); } + @Override // JournalNodeMXBean + public String getJournalsStatus() { + // jid:{Formatted:True/False} + Map> status = + new HashMap>(); + synchronized (this) { + for (Map.Entry entry : journalsById.entrySet()) { + Map jMap = new HashMap(); + jMap.put("Formatted", Boolean.toString(entry.getValue().isFormatted())); + status.put(entry.getKey(), jMap); + } + } + + // It is possible that some journals have been formatted before, while the + // corresponding journals are not in journalsById yet (because of restarting + // JN, e.g.). For simplicity, let's just assume a journal is formatted if + // there is a directory for it. We can also call analyzeStorage method for + // these directories if necessary. + // Also note that we do not need to check localDir here since + // validateAndCreateJournalDir has been called before we register the + // MXBean. 
+ File[] journalDirs = localDir.listFiles(new FileFilter() { + @Override + public boolean accept(File file) { + return file.isDirectory(); + } + }); + for (File journalDir : journalDirs) { + String jid = journalDir.getName(); + if (!status.containsKey(jid)) { + Map jMap = new HashMap(); + jMap.put("Formatted", "true"); + status.put(jid, jMap); + } + } + return JSON.toString(status); + } + + /** + * Register JournalNodeMXBean + */ + private void registerJNMXBean() { + MBeans.register("JournalNode", "JournalNodeInfo", this); + } private class ErrorReporter implements StorageErrorReporter { @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeMXBean.java new file mode 100644 index 00000000000..4e8d9da50f9 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeMXBean.java @@ -0,0 +1,36 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.qjournal.server; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * This is the JMX management interface for JournalNode information + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public interface JournalNodeMXBean { + + /** + * Get status information (e.g., whether formatted) of JournalNode's journals. 
+ * + * @return A string presenting status for each journal + */ + public String getJournalsStatus(); +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 989f688a0fd..2f230d73509 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -6364,6 +6364,16 @@ public class FSNamesystem implements Namesystem, FSClusterStats, return JSON.toString(jasList); } + @Override // NameNodeMxBean + public String getJournalTransactionInfo() { + Map txnIdMap = new HashMap(); + txnIdMap.put("LastAppliedOrWrittenTxId", + Long.toString(this.getFSImage().getLastAppliedOrWrittenTxId())); + txnIdMap.put("MostRecentCheckpointTxId", + Long.toString(this.getFSImage().getMostRecentCheckpointTxId())); + return JSON.toString(txnIdMap); + } + @Override // NameNodeMXBean public String getNNStarted() { return getStartTime().toString(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java index 50315a4ae67..173d5aea4c7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java @@ -188,6 +188,12 @@ public interface NameNodeMXBean { * @return the name journal status information, as a JSON string. */ public String getNameJournalStatus(); + + /** + * Get information about the transaction ID, including the last applied + * transaction ID and the most recent checkpoint's transaction ID + */ + public String getJournalTransactionInfo(); /** * Gets the NN start time diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeMXBean.java new file mode 100644 index 00000000000..347184870f6 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeMXBean.java @@ -0,0 +1,107 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hdfs.qjournal.server; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; + +import java.io.IOException; +import java.lang.management.ManagementFactory; +import java.util.HashMap; +import java.util.Map; + +import javax.management.MBeanServer; +import javax.management.ObjectName; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.qjournal.MiniJournalCluster; +import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.mortbay.util.ajax.JSON; + +/** + * Test {@link JournalNodeMXBean} + */ +public class TestJournalNodeMXBean { + + private static final String NAMESERVICE = "ns1"; + private static final int NUM_JN = 1; + + private MiniJournalCluster jCluster; + private JournalNode jn; + + @Before + public void setup() throws IOException { + // start 1 journal node + jCluster = new MiniJournalCluster.Builder(new Configuration()).format(true) + .numJournalNodes(NUM_JN).build(); + jn = jCluster.getJournalNode(0); + } + + @After + public void cleanup() throws IOException { + if (jCluster != null) { + jCluster.shutdown(); + } + } + + @Test + public void testJournalNodeMXBean() throws Exception { + // we have not formatted the journals yet, and the journal status in jmx + // should be empty since journal objects are created lazily + MBeanServer mbs = ManagementFactory.getPlatformMBeanServer(); + ObjectName mxbeanName = new ObjectName( + "Hadoop:service=JournalNode,name=JournalNodeInfo"); + + // getJournalsStatus + String journalStatus = (String) mbs.getAttribute(mxbeanName, + "JournalsStatus"); + assertEquals(jn.getJournalsStatus(), journalStatus); + assertFalse(journalStatus.contains(NAMESERVICE)); + + // format the journal ns1 + final NamespaceInfo FAKE_NSINFO = new NamespaceInfo(12345, "mycluster", + "my-bp", 0L); + jn.getOrCreateJournal(NAMESERVICE).format(FAKE_NSINFO); + + // check again after format + // getJournalsStatus + journalStatus = (String) mbs.getAttribute(mxbeanName, "JournalsStatus"); + assertEquals(jn.getJournalsStatus(), journalStatus); + Map> jMap = new HashMap>(); + Map infoMap = new HashMap(); + infoMap.put("Formatted", "true"); + jMap.put(NAMESERVICE, infoMap); + assertEquals(JSON.toString(jMap), journalStatus); + + // restart journal node without formatting + jCluster = new MiniJournalCluster.Builder(new Configuration()).format(false) + .numJournalNodes(NUM_JN).build(); + jn = jCluster.getJournalNode(0); + // re-check + journalStatus = (String) mbs.getAttribute(mxbeanName, "JournalsStatus"); + assertEquals(jn.getJournalsStatus(), journalStatus); + jMap = new HashMap>(); + infoMap = new HashMap(); + infoMap.put("Formatted", "true"); + jMap.put(NAMESERVICE, infoMap); + assertEquals(JSON.toString(jMap), journalStatus); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java index 227d2cef402..8d188d7b651 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java @@ -120,6 +120,11 @@ public class TestNameNodeMXBean { String nameJournalStatus = (String) (mbs.getAttribute(mxbeanName, "NameJournalStatus")); assertEquals("Bad value for 
NameJournalStatus", fsn.getNameJournalStatus(), nameJournalStatus); + // get attribute JournalTransactionInfo + String journalTxnInfo = (String) mbs.getAttribute(mxbeanName, + "JournalTransactionInfo"); + assertEquals("Bad value for NameTxnIds", fsn.getJournalTransactionInfo(), + journalTxnInfo); // get attribute "NNStarted" String nnStarted = (String) mbs.getAttribute(mxbeanName, "NNStarted"); assertEquals("Bad value for NNStarted", fsn.getNNStarted(), nnStarted); From d62bd71d650fbe143b222235083be80efb1b63ef Mon Sep 17 00:00:00 2001 From: Siddharth Seth Date: Thu, 15 Aug 2013 18:44:52 +0000 Subject: [PATCH 19/53] Update CHANGES.txt to move YARN-1045 and MAPREDUCE-5352 to the correct version. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1514432 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 6 +++--- hadoop-yarn-project/CHANGES.txt | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 066264c9ea5..933e9fcf9c2 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -186,9 +186,6 @@ Release 2.1.1-beta - UNRELEASED OPTIMIZATIONS - MAPREDUCE-5352. Optimize node local splits generated by - CombineFileInputFormat. (sseth) - MAPREDUCE-5446. TestJobHistoryEvents and TestJobHistoryParsing have race conditions (jlowe via kihwal) @@ -395,6 +392,9 @@ Release 2.1.0-beta - 2013-08-06 MAPREDUCE-5268. Improve history server startup performance (Karthik Kambatla via jlowe) + MAPREDUCE-5352. Optimize node local splits generated by + CombineFileInputFormat. (sseth) + BUG FIXES MAPREDUCE-4671. AM does not tell the RM about container requests which are diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 51b03ef53cc..b12b10d21ff 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -43,8 +43,6 @@ Release 2.1.1-beta - UNRELEASED YARN-589. Expose a REST API for monitoring the fair scheduler (Sandy Ryza). - YARN-1045. Improve toString implementation for PBImpls. (Jian He via sseth) - OPTIMIZATIONS BUG FIXES @@ -524,6 +522,8 @@ Release 2.1.0-beta - 2013-08-06 YARN-1046. Disable mem monitoring by default in MiniYARNCluster. (Karthik Kambatla via Sandy Ryza) + YARN-1045. Improve toString implementation for PBImpls. (Jian He via sseth) + OPTIMIZATIONS YARN-512. Log aggregation root directory check is more expensive than it From 0e47ebb32f62b9312b44ccbec97e302674cdee80 Mon Sep 17 00:00:00 2001 From: Kihwal Lee Date: Thu, 15 Aug 2013 19:22:09 +0000 Subject: [PATCH 20/53] HADOOP-9868. Server must not advertise kerberos realm. Contributed by Daryn Sharp. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1514448 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 2 ++ .../src/main/java/org/apache/hadoop/security/SaslRpcServer.java | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 3a5736a1f53..43a89d897dd 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -394,6 +394,8 @@ Release 2.1.1-beta - UNRELEASED HADOOP-9381. Document dfs cp -f option. (Keegan Witt, suresh via suresh) + HADOOP-9868. Server must not advertise kerberos realm. 
(daryn via kihwal) + Release 2.1.0-beta - 2013-08-06 INCOMPATIBLE CHANGES diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcServer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcServer.java index fffedc1a11e..9408028ffa2 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcServer.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcServer.java @@ -104,7 +104,7 @@ public class SaslRpcServer { if (LOG.isDebugEnabled()) LOG.debug("Kerberos principal name is " + fullName); // don't use KerberosName because we don't want auth_to_local - String[] parts = fullName.split("[/@]", 2); + String[] parts = fullName.split("[/@]", 3); protocol = parts[0]; // should verify service host is present here rather than in create() // but lazy tests are using a UGI that isn't a SPN... From a37d2fc89d79b73049fa7678b1b5165aa4de423a Mon Sep 17 00:00:00 2001 From: Arun Murthy Date: Thu, 15 Aug 2013 20:29:43 +0000 Subject: [PATCH 21/53] Preparing for hadoop-2.1.0-beta rc2. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1514469 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 2 +- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 +- hadoop-mapreduce-project/CHANGES.txt | 2 +- hadoop-yarn-project/CHANGES.txt | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 43a89d897dd..9518bf278af 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -396,7 +396,7 @@ Release 2.1.1-beta - UNRELEASED HADOOP-9868. Server must not advertise kerberos realm. (daryn via kihwal) -Release 2.1.0-beta - 2013-08-06 +Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 44308674028..fbd7e61eb66 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -339,7 +339,7 @@ Release 2.1.1-beta - UNRELEASED HDFS-5080. BootstrapStandby not working with QJM when the existing NN is active. (jing9) -Release 2.1.0-beta - 2013-08-06 +Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 933e9fcf9c2..613a38a52f5 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -220,7 +220,7 @@ Release 2.1.1-beta - UNRELEASED MAPREDUCE-5454. TestDFSIO fails intermittently on JDK7 (Karthik Kambatla via Sandy Ryza) -Release 2.1.0-beta - 2013-08-06 +Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index b12b10d21ff..9d10e2a6efe 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -70,7 +70,7 @@ Release 2.1.1-beta - UNRELEASED YARN-337. RM handles killed application tracking URL poorly (jlowe) -Release 2.1.0-beta - 2013-08-06 +Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES From 2fc7e14e392f188958b9867a5d2dd563dfcc378a Mon Sep 17 00:00:00 2001 From: Chris Nauroth Date: Thu, 15 Aug 2013 20:43:46 +0000 Subject: [PATCH 22/53] HDFS-5099. Namenode#copyEditLogSegmentsToSharedDir should close EditLogInputStreams upon finishing. Contributed by Chuan Liu. 
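The patch that follows wraps the segment-copy loop in Namenode#copyEditLogSegmentsToSharedDir in try/finally so the EditLogInputStreams returned by selectInputStreams are closed even when copying fails partway through. A minimal sketch of that close-on-all-paths pattern in isolation; StreamSource, open() and copyAll() below are illustrative stand-ins for this note, not Hadoop APIs:

import java.io.Closeable;
import java.io.IOException;
import java.util.Collection;

class CopyWithCleanupSketch {
  /** Illustrative stand-in for a source of edit log streams. */
  interface StreamSource {
    Collection<? extends Closeable> open(long fromTxId) throws IOException;
  }

  /** Illustrative stand-in for the copy loop; may throw midway. */
  static void copyAll(Collection<? extends Closeable> streams) throws IOException {
    // copy ops from each stream to the shared edits dir ...
  }

  static void copyEditsSafely(StreamSource source, long checkpointTxId)
      throws IOException {
    Collection<? extends Closeable> streams = null;
    try {
      streams = source.open(checkpointTxId + 1);
      copyAll(streams);
    } finally {
      // Close every stream on both the success and the failure path.
      if (streams != null) {
        for (Closeable s : streams) {
          try {
            s.close();
          } catch (IOException ignored) {
            // best-effort close during cleanup
          }
        }
      }
    }
  }
}

The actual change below keeps the same structure but delegates the cleanup to FSEditLog.closeAllStreams.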
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1514481 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + .../hadoop/hdfs/server/namenode/NameNode.java | 64 +++++++++++-------- 2 files changed, 39 insertions(+), 28 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index fbd7e61eb66..02beab5c609 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -339,6 +339,9 @@ Release 2.1.1-beta - UNRELEASED HDFS-5080. BootstrapStandby not working with QJM when the existing NN is active. (jing9) + HDFS-5099. Namenode#copyEditLogSegmentsToSharedDir should close + EditLogInputStreams upon finishing. (Chuan Liu via cnauroth) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java index a933585523a..b8a51390c11 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java @@ -956,41 +956,49 @@ public class NameNode implements NameNodeStatusMXBean { FSEditLog sourceEditLog = fsns.getFSImage().editLog; long fromTxId = fsns.getFSImage().getMostRecentCheckpointTxId(); - Collection streams = sourceEditLog.selectInputStreams( - fromTxId+1, 0); - - // Set the nextTxid to the CheckpointTxId+1 - newSharedEditLog.setNextTxId(fromTxId + 1); - // Copy all edits after last CheckpointTxId to shared edits dir - for (EditLogInputStream stream : streams) { - LOG.debug("Beginning to copy stream " + stream + " to shared edits"); - FSEditLogOp op; - boolean segmentOpen = false; - while ((op = stream.readOp()) != null) { - if (LOG.isTraceEnabled()) { - LOG.trace("copying op: " + op); - } - if (!segmentOpen) { - newSharedEditLog.startLogSegment(op.txid, false); - segmentOpen = true; - } - - newSharedEditLog.logEdit(op); + Collection streams = null; + try { + streams = sourceEditLog.selectInputStreams(fromTxId + 1, 0); - if (op.opCode == FSEditLogOpCodes.OP_END_LOG_SEGMENT) { + // Set the nextTxid to the CheckpointTxId+1 + newSharedEditLog.setNextTxId(fromTxId + 1); + + // Copy all edits after last CheckpointTxId to shared edits dir + for (EditLogInputStream stream : streams) { + LOG.debug("Beginning to copy stream " + stream + " to shared edits"); + FSEditLogOp op; + boolean segmentOpen = false; + while ((op = stream.readOp()) != null) { + if (LOG.isTraceEnabled()) { + LOG.trace("copying op: " + op); + } + if (!segmentOpen) { + newSharedEditLog.startLogSegment(op.txid, false); + segmentOpen = true; + } + + newSharedEditLog.logEdit(op); + + if (op.opCode == FSEditLogOpCodes.OP_END_LOG_SEGMENT) { + newSharedEditLog.logSync(); + newSharedEditLog.endCurrentLogSegment(false); + LOG.debug("ending log segment because of END_LOG_SEGMENT op in " + + stream); + segmentOpen = false; + } + } + + if (segmentOpen) { + LOG.debug("ending log segment because of end of stream in " + stream); newSharedEditLog.logSync(); newSharedEditLog.endCurrentLogSegment(false); - LOG.debug("ending log segment because of END_LOG_SEGMENT op in " + stream); segmentOpen = false; } } - - if (segmentOpen) { - LOG.debug("ending log segment because of end of stream in " + stream); - newSharedEditLog.logSync(); - 
newSharedEditLog.endCurrentLogSegment(false); - segmentOpen = false; + } finally { + if (streams != null) { + FSEditLog.closeAllStreams(streams); } } } From 10ec8a248ecbe37e52f81b13b939174eb43eda1f Mon Sep 17 00:00:00 2001 From: Konstantin Shvachko Date: Thu, 15 Aug 2013 21:21:10 +0000 Subject: [PATCH 23/53] HDFS-2994. If lease soft limit is recovered successfully the append can fail. Contributed by Tao Luo. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1514500 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 ++ .../hdfs/server/namenode/FSNamesystem.java | 9 +++- .../apache/hadoop/hdfs/TestFileAppend.java | 44 +++++++++++++++++++ 3 files changed, 54 insertions(+), 2 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 02beab5c609..081c080868f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -342,6 +342,9 @@ Release 2.1.1-beta - UNRELEASED HDFS-5099. Namenode#copyEditLogSegmentsToSharedDir should close EditLogInputStreams upon finishing. (Chuan Liu via cnauroth) + HDFS-2994. If lease soft limit is recovered successfully + the append can fail. (Tao Luo via shv) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 2f230d73509..f15fe4fe388 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -2153,10 +2153,15 @@ public class FSNamesystem implements Namesystem, FSClusterStats, throw new FileNotFoundException("failed to append to non-existent file " + src + " on client " + clientMachine); } - final INodeFile myFile = INodeFile.valueOf(inode, src, true); + INodeFile myFile = INodeFile.valueOf(inode, src, true); // Opening an existing file for write - may need to recover lease. recoverLeaseInternal(myFile, src, holder, clientMachine, false); - + + // recoverLeaseInternal may create a new InodeFile via + // finalizeINodeFileUnderConstruction so we need to refresh + // the referenced file. + myFile = INodeFile.valueOf(dir.getINode(src), src, true); + final DatanodeDescriptor clientNode = blockManager.getDatanodeManager().getDatanodeByHost(clientMachine); return prepareFileForWrite(src, myFile, holder, clientMachine, clientNode, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java index 54ff9036b91..e4015944692 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import java.io.File; @@ -337,4 +338,47 @@ public class TestFileAppend{ cluster.shutdown(); } } + + /** Tests appending after soft-limit expires. 
*/ + @Test + public void testAppendAfterSoftLimit() + throws IOException, InterruptedException { + Configuration conf = new HdfsConfiguration(); + conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1); + conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true); + //Set small soft-limit for lease + final long softLimit = 1L; + final long hardLimit = 9999999L; + + MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1) + .build(); + cluster.setLeasePeriod(softLimit, hardLimit); + cluster.waitActive(); + + FileSystem fs = cluster.getFileSystem(); + FileSystem fs2 = new DistributedFileSystem(); + fs2.initialize(fs.getUri(), conf); + + final Path testPath = new Path("/testAppendAfterSoftLimit"); + final byte[] fileContents = AppendTestUtil.initBuffer(32); + + // create a new file without closing + FSDataOutputStream out = fs.create(testPath); + out.write(fileContents); + + //Wait for > soft-limit + Thread.sleep(250); + + try { + FSDataOutputStream appendStream2 = fs2.append(testPath); + appendStream2.write(fileContents); + appendStream2.close(); + assertEquals(fileContents.length, fs.getFileStatus(testPath).getLen()); + } finally { + fs.close(); + fs2.close(); + cluster.shutdown(); + } + } + } From 8df7e7deecad2b8131d67a1916b1ec4c9f7bc633 Mon Sep 17 00:00:00 2001 From: Colin McCabe Date: Thu, 15 Aug 2013 23:05:41 +0000 Subject: [PATCH 24/53] HADOOP-9865. FileContext#globStatus has a regression with respect to relative path. (Contributed by Chaun Lin) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1514531 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 3 + .../java/org/apache/hadoop/fs/Globber.java | 8 +-- .../org/apache/hadoop/fs/TestGlobPaths.java | 55 ++++++++++++++----- 3 files changed, 47 insertions(+), 19 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 9518bf278af..e18d4584299 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -337,6 +337,9 @@ Release 2.3.0 - UNRELEASED HADOOP-9875. TestDoAsEffectiveUser can fail on JDK 7. (Aaron T. Myers via Colin Patrick McCabe) + HADOOP-9865. FileContext#globStatus has a regression with respect to + relative path. 
(Chuan Lin via Colin Patrick McCabe) + Release 2.1.1-beta - UNRELEASED diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java index ad28478aeb8..378311a71a2 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java @@ -99,24 +99,24 @@ class Globber { } private String schemeFromPath(Path path) throws IOException { - String scheme = pathPattern.toUri().getScheme(); + String scheme = path.toUri().getScheme(); if (scheme == null) { if (fs != null) { scheme = fs.getUri().getScheme(); } else { - scheme = fc.getFSofPath(path).getUri().getScheme(); + scheme = fc.getDefaultFileSystem().getUri().getScheme(); } } return scheme; } private String authorityFromPath(Path path) throws IOException { - String authority = pathPattern.toUri().getAuthority(); + String authority = path.toUri().getAuthority(); if (authority == null) { if (fs != null) { authority = fs.getUri().getAuthority(); } else { - authority = fc.getFSofPath(path).getUri().getAuthority(); + authority = fc.getDefaultFileSystem().getUri().getAuthority(); } } return authority ; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java index b712be10f0f..820b00bb0b0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java @@ -622,21 +622,7 @@ public class TestGlobPaths { cleanupDFS(); } } - - @Test - public void pTestRelativePath() throws IOException { - try { - String [] files = new String[] {"a", "abc", "abc.p", "bacd"}; - Path[] matchedPath = prepareTesting("a*", files); - assertEquals(matchedPath.length, 3); - assertEquals(matchedPath[0], new Path(USER_DIR, path[0])); - assertEquals(matchedPath[1], new Path(USER_DIR, path[1])); - assertEquals(matchedPath[2], new Path(USER_DIR, path[2])); - } finally { - cleanupDFS(); - } - } - + /* Test {xx,yy} */ @Test public void pTestCurlyBracket() throws IOException { @@ -1061,4 +1047,43 @@ public class TestGlobPaths { public void testGlobFillsInSchemeOnFC() throws Exception { testOnFileContext(new TestGlobFillsInScheme()); } + + /** + * Test that globStatus works with relative paths. 
+ **/ + private static class TestRelativePath implements FSTestWrapperGlobTest { + public void run(FSTestWrapper wrap, FileSystem fs, FileContext fc) + throws Exception { + String[] files = new String[] { "a", "abc", "abc.p", "bacd" }; + + Path[] path = new Path[files.length]; + for(int i=0; i < files.length; i++) { + path[i] = wrap.makeQualified(new Path(files[i])); + wrap.mkdir(path[i], FsPermission.getDirDefault(), true); + } + + Path patternPath = new Path("a*"); + Path[] globResults = FileUtil.stat2Paths(wrap.globStatus(patternPath, + new AcceptAllPathFilter()), + patternPath); + + for(int i=0; i < globResults.length; i++) { + globResults[i] = wrap.makeQualified(globResults[i]); + } + + assertEquals(globResults.length, 3); + assertEquals(USER_DIR + "/a;" + USER_DIR + "/abc;" + USER_DIR + "/abc.p", + TestPath.mergeStatuses(globResults)); + } + } + + @Test + public void testRelativePathOnFS() throws Exception { + testOnFileSystem(new TestRelativePath()); + } + + @Test + public void testRelativePathOnFC() throws Exception { + testOnFileContext(new TestRelativePath()); + } } From 99064ec9058df09e554d379950b0e40bf900f9a2 Mon Sep 17 00:00:00 2001 From: Chris Nauroth Date: Fri, 16 Aug 2013 04:17:08 +0000 Subject: [PATCH 25/53] HDFS-5100. TestNamenodeRetryCache fails on Windows due to incorrect cleanup. Contributed by Chuan Liu. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1514573 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ .../hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 081c080868f..3ea6bbb88b4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -345,6 +345,9 @@ Release 2.1.1-beta - UNRELEASED HDFS-2994. If lease soft limit is recovered successfully the append can fail. (Tao Luo via shv) + HDFS-5100. TestNamenodeRetryCache fails on Windows due to incorrect cleanup. + (Chuan Liu via cnauroth) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java index 03ee9fd5d1a..54dda2fe8ba 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java @@ -96,7 +96,7 @@ public class TestNamenodeRetryCache { * @throws AccessControlException */ @After public void cleanup() throws IOException { - namesystem.delete("/", true); + cluster.shutdown(); } public static void incrementCallId() { From d9de6a928df055647fa3c6138f3b9142a4f6c0b0 Mon Sep 17 00:00:00 2001 From: Chris Nauroth Date: Fri, 16 Aug 2013 04:38:03 +0000 Subject: [PATCH 26/53] HDFS-5103. TestDirectoryScanner fails on Windows. Contributed by Chuan Liu. 
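The patch that follows replaces per-test deletion of "/" in @After with a full MiniDFSCluster shutdown, so files still held open by the running NameNode do not block cleanup on Windows. A small sketch of that test lifecycle, assuming a fresh single-DataNode cluster per test; the class name is illustrative only:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.After;
import org.junit.Before;

public class ClusterPerTestSketch {
  private MiniDFSCluster cluster;

  @Before
  public void setup() throws IOException {
    // One small cluster per test keeps state isolated between test methods.
    cluster = new MiniDFSCluster.Builder(new Configuration())
        .numDataNodes(1).build();
    cluster.waitActive();
  }

  @After
  public void cleanup() {
    if (cluster != null) {
      // Shutting the cluster down releases storage directories and ports,
      // which is more robust than deleting paths from a live NameNode.
      cluster.shutdown();
      cluster = null;
    }
  }
}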
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1514576 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 ++ .../hadoop/hdfs/server/datanode/TestDirectoryScanner.java | 6 ++---- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 3ea6bbb88b4..44959ef4ffb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -348,6 +348,8 @@ Release 2.1.1-beta - UNRELEASED HDFS-5100. TestNamenodeRetryCache fails on Windows due to incorrect cleanup. (Chuan Liu via cnauroth) + HDFS-5103. TestDirectoryScanner fails on Windows. (Chuan Liu via cnauroth) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java index 579ffe82f13..c5decf2eaa6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java @@ -394,12 +394,12 @@ public class TestDirectoryScanner { @Override public String getBasePath() { - return "/base"; + return (new File("/base")).getAbsolutePath(); } @Override public String getPath(String bpid) throws IOException { - return "/base/current/" + bpid; + return (new File("/base/current/" + bpid)).getAbsolutePath(); } @Override @@ -416,8 +416,6 @@ public class TestDirectoryScanner { void testScanInfoObject(long blockId, File blockFile, File metaFile) throws Exception { - assertEquals("/base/current/" + BPID_1 + "/finalized", - TEST_VOLUME.getFinalizedDir(BPID_1).getAbsolutePath()); DirectoryScanner.ScanInfo scanInfo = new DirectoryScanner.ScanInfo(blockId, blockFile, metaFile, TEST_VOLUME); assertEquals(blockId, scanInfo.getBlockId()); From 1836aceff9cc729665fa42f77111814e0f6d307b Mon Sep 17 00:00:00 2001 From: Konstantin Boudnik Date: Fri, 16 Aug 2013 05:36:53 +0000 Subject: [PATCH 27/53] Moving HDFS-5004 into 2.3.0 release section git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1514583 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 44959ef4ffb..35b5fff516b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -120,9 +120,6 @@ Trunk (Unreleased) HDFS-4904. Remove JournalService. (Arpit Agarwal via cnauroth) - HDFS-5004. Add additional JMX bean for NameNode status data - (Trevor Lorimer via cos) - OPTIMIZATIONS BUG FIXES @@ -259,6 +256,9 @@ Release 2.3.0 - UNRELEASED HDFS-4817. Make HDFS advisory caching configurable on a per-file basis. (Colin Patrick McCabe) + HDFS-5004. Add additional JMX bean for NameNode status data + (Trevor Lorimer via cos) + HDFS-5068. Convert NNThroughputBenchmark to a Tool to allow generic options. (shv) From cae55de2cd1f9ea068f3410c8bdea14cf55738cb Mon Sep 17 00:00:00 2001 From: Sanford Ryza Date: Fri, 16 Aug 2013 08:11:04 +0000 Subject: [PATCH 28/53] MAPREDUCE-5462. In map-side sort, swap entire meta entries instead of indexes for better cache performance. 
(Sandy Ryza) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1514608 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 3 ++ .../org/apache/hadoop/mapred/MapTask.java | 51 ++++++++----------- 2 files changed, 23 insertions(+), 31 deletions(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 613a38a52f5..a4123719666 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -189,6 +189,9 @@ Release 2.1.1-beta - UNRELEASED MAPREDUCE-5446. TestJobHistoryEvents and TestJobHistoryParsing have race conditions (jlowe via kihwal) + MAPREDUCE-5462. In map-side sort, swap entire meta entries instead of + indexes for better cache performance. (Sandy Ryza) + BUG FIXES MAPREDUCE-5385. Fixed a bug with JobContext getCacheFiles API. (Omkar Vinit diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java index e9d3ed78863..99f9eac81f7 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java @@ -884,10 +884,10 @@ public class MapTask extends Task { byte[] kvbuffer; // main output buffer private final byte[] b0 = new byte[0]; - private static final int INDEX = 0; // index offset in acct - private static final int VALSTART = 1; // val offset in acct - private static final int KEYSTART = 2; // key offset in acct - private static final int PARTITION = 3; // partition offset in acct + private static final int VALSTART = 0; // val offset in acct + private static final int KEYSTART = 1; // key offset in acct + private static final int PARTITION = 2; // partition offset in acct + private static final int VALLEN = 3; // length of value private static final int NMETA = 4; // num meta ints private static final int METASIZE = NMETA * 4; // size in bytes @@ -1151,10 +1151,10 @@ public class MapTask extends Task { distanceTo(keystart, valend, bufvoid)); // write accounting info - kvmeta.put(kvindex + INDEX, kvindex); kvmeta.put(kvindex + PARTITION, partition); kvmeta.put(kvindex + KEYSTART, keystart); kvmeta.put(kvindex + VALSTART, valstart); + kvmeta.put(kvindex + VALLEN, distanceTo(valstart, valend)); // advance kvindex kvindex = (kvindex - NMETA + kvmeta.capacity()) % kvmeta.capacity(); } catch (MapBufferTooSmallException e) { @@ -1224,17 +1224,11 @@ public class MapTask extends Task { } /** - * For the given meta position, return the dereferenced position in the - * integer array. Each meta block contains several integers describing - * record data in its serialized form, but the INDEX is not necessarily - * related to the proximate metadata. The index value at the referenced int - * position is the start offset of the associated metadata block. So the - * metadata INDEX at metapos may point to the metadata described by the - * metadata block at metapos + k, which contains information about that - * serialized record. + * For the given meta position, return the offset into the int-sized + * kvmeta buffer. 
*/ int offsetFor(int metapos) { - return kvmeta.get(metapos * NMETA + INDEX); + return metapos * NMETA; } /** @@ -1260,16 +1254,17 @@ public class MapTask extends Task { kvmeta.get(kvj + VALSTART) - kvmeta.get(kvj + KEYSTART)); } + final byte META_BUFFER_TMP[] = new byte[METASIZE]; /** - * Swap logical indices st i, j MOD offset capacity. + * Swap metadata for items i, j * @see IndexedSortable#swap */ public void swap(final int mi, final int mj) { - final int kvi = (mi % maxRec) * NMETA + INDEX; - final int kvj = (mj % maxRec) * NMETA + INDEX; - int tmp = kvmeta.get(kvi); - kvmeta.put(kvi, kvmeta.get(kvj)); - kvmeta.put(kvj, tmp); + int iOff = (mi % maxRec) * METASIZE; + int jOff = (mj % maxRec) * METASIZE; + System.arraycopy(kvbuffer, iOff, META_BUFFER_TMP, 0, METASIZE); + System.arraycopy(kvbuffer, jOff, kvbuffer, iOff, METASIZE); + System.arraycopy(META_BUFFER_TMP, 0, kvbuffer, jOff, METASIZE); } /** @@ -1601,9 +1596,9 @@ public class MapTask extends Task { while (spindex < mend && kvmeta.get(offsetFor(spindex % maxRec) + PARTITION) == i) { final int kvoff = offsetFor(spindex % maxRec); - key.reset(kvbuffer, kvmeta.get(kvoff + KEYSTART), - (kvmeta.get(kvoff + VALSTART) - - kvmeta.get(kvoff + KEYSTART))); + int keystart = kvmeta.get(kvoff + KEYSTART); + int valstart = kvmeta.get(kvoff + VALSTART); + key.reset(kvbuffer, keystart, valstart - keystart); getVBytesForOffset(kvoff, value); writer.append(key, value); ++spindex; @@ -1729,14 +1724,8 @@ public class MapTask extends Task { private void getVBytesForOffset(int kvoff, InMemValBytes vbytes) { // get the keystart for the next serialized value to be the end // of this value. If this is the last value in the buffer, use bufend - final int nextindex = kvoff == kvend - ? bufend - : kvmeta.get( - (kvoff - NMETA + kvmeta.capacity() + KEYSTART) % kvmeta.capacity()); - // calculate the length of the value - int vallen = (nextindex >= kvmeta.get(kvoff + VALSTART)) - ? nextindex - kvmeta.get(kvoff + VALSTART) - : (bufvoid - kvmeta.get(kvoff + VALSTART)) + nextindex; + final int vallen = kvmeta.get(kvoff + VALLEN); + assert vallen >= 0; vbytes.reset(kvbuffer, kvmeta.get(kvoff + VALSTART), vallen); } From 45694cce8e2abed07511c385bce9c7c387faf8f5 Mon Sep 17 00:00:00 2001 From: Jason Darrell Lowe Date: Fri, 16 Aug 2013 16:13:55 +0000 Subject: [PATCH 29/53] Update CHANGES.txt to reflect merge of MR-1981 to branch-2.1-beta git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1514768 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index a4123719666..7125f28e65c 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -162,9 +162,6 @@ Release 2.3.0 - UNRELEASED OPTIMIZATIONS - MAPREDUCE-1981. Improve getSplits performance by using listLocatedStatus - (Hairong Kuang and Jason Lowe via jlowe) - BUG FIXES MAPREDUCE-5316. job -list-attempt-ids command does not handle illegal @@ -192,6 +189,9 @@ Release 2.1.1-beta - UNRELEASED MAPREDUCE-5462. In map-side sort, swap entire meta entries instead of indexes for better cache performance. (Sandy Ryza) + MAPREDUCE-1981. Improve getSplits performance by using listLocatedStatus + (Hairong Kuang and Jason Lowe via jlowe) + BUG FIXES MAPREDUCE-5385. Fixed a bug with JobContext getCacheFiles API. 
(Omkar Vinit From 8d21926c2613062149d07d238022f993af4c9c03 Mon Sep 17 00:00:00 2001 From: Jing Zhao Date: Fri, 16 Aug 2013 17:14:34 +0000 Subject: [PATCH 30/53] HDFS-5102. Snapshot names should not be allowed to contain slash characters. Contributed by Jing Zhao. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1514797 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 ++ .../hdfs/server/namenode/FSDirectory.java | 4 +++ .../namenode/snapshot/TestSnapshot.java | 33 +++++++++++++++++ .../namenode/snapshot/TestSnapshotRename.java | 36 +++++++++++++++++++ 4 files changed, 76 insertions(+) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 35b5fff516b..00e206d00a1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -350,6 +350,9 @@ Release 2.1.1-beta - UNRELEASED HDFS-5103. TestDirectoryScanner fails on Windows. (Chuan Liu via cnauroth) + HDFS-5102. Snapshot names should not be allowed to contain slash characters. + (jing9) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index 9523a50a47d..532a2bfb218 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -2093,6 +2093,10 @@ public class FSDirectory implements Closeable { /** Verify if the snapshot name is legal. */ void verifySnapshotName(String snapshotName, String path) throws PathComponentTooLongException { + if (snapshotName.contains(Path.SEPARATOR)) { + throw new HadoopIllegalArgumentException( + "Snapshot name cannot contain \"" + Path.SEPARATOR + "\""); + } final byte[] bytes = DFSUtil.string2Bytes(snapshotName); verifyINodeName(bytes); verifyMaxComponentLength(bytes, path, 0); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java index c84af965b79..27228bd0482 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java @@ -44,6 +44,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; import org.apache.hadoop.hdfs.server.namenode.FSDirectory; import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; @@ -54,6 +55,7 @@ import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper.TestDi import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper.TestDirectoryTree.Node; import org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewer; import org.apache.hadoop.hdfs.tools.offlineImageViewer.XmlImageVisitor; +import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.test.GenericTestUtils; import 
org.apache.hadoop.util.Time; import org.apache.log4j.Level; @@ -341,6 +343,37 @@ public class TestSnapshot { assertEquals(oldStatus.getAccessTime(), snapshotStatus.getAccessTime()); } + /** + * Test creating a snapshot with illegal name + */ + @Test + public void testCreateSnapshotWithIllegalName() throws Exception { + final Path dir = new Path("/dir"); + hdfs.mkdirs(dir); + + final String name1 = HdfsConstants.DOT_SNAPSHOT_DIR; + try { + hdfs.createSnapshot(dir, name1); + fail("Exception expected when an illegal name is given"); + } catch (RemoteException e) { + String errorMsg = "\"" + HdfsConstants.DOT_SNAPSHOT_DIR + + "\" is a reserved name."; + GenericTestUtils.assertExceptionContains(errorMsg, e); + } + + String errorMsg = "Snapshot name cannot contain \"" + Path.SEPARATOR + "\""; + final String[] badNames = new String[] { "foo" + Path.SEPARATOR, + Path.SEPARATOR + "foo", Path.SEPARATOR, "foo" + Path.SEPARATOR + "bar" }; + for (String badName : badNames) { + try { + hdfs.createSnapshot(dir, badName); + fail("Exception expected when an illegal name is given"); + } catch (RemoteException e) { + GenericTestUtils.assertExceptionContains(errorMsg, e); + } + } + } + /** * Creating snapshots for a directory that is not snapshottable must fail. */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotRename.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotRename.java index ab1752a166d..386563bea1f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotRename.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotRename.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.namenode.snapshot; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; import java.util.List; @@ -29,11 +30,14 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.SnapshotException; import org.apache.hadoop.hdfs.server.namenode.FSDirectory; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.DirectoryDiff; import org.apache.hadoop.hdfs.util.ReadOnlyList; +import org.apache.hadoop.ipc.RemoteException; +import org.apache.hadoop.test.GenericTestUtils; import org.junit.After; import org.junit.Before; import org.junit.Rule; @@ -190,4 +194,36 @@ public class TestSnapshotRename { exception.expectMessage(error); hdfs.renameSnapshot(sub1, "s1", "s2"); } + + /** + * Test renaming a snapshot with illegal name + */ + @Test + public void testRenameWithIllegalName() throws Exception { + DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed); + // Create snapshots for sub1 + SnapshotTestHelper.createSnapshot(hdfs, sub1, "s1"); + + final String name1 = HdfsConstants.DOT_SNAPSHOT_DIR; + try { + hdfs.renameSnapshot(sub1, "s1", name1); + fail("Exception expected when an illegal name is given for rename"); + } catch (RemoteException e) { + String errorMsg = "\"" + HdfsConstants.DOT_SNAPSHOT_DIR + + "\" is a reserved name."; + 
GenericTestUtils.assertExceptionContains(errorMsg, e); + } + + String errorMsg = "Snapshot name cannot contain \"" + Path.SEPARATOR + "\""; + final String[] badNames = new String[] { "foo" + Path.SEPARATOR, + Path.SEPARATOR + "foo", Path.SEPARATOR, "foo" + Path.SEPARATOR + "bar" }; + for (String badName : badNames) { + try { + hdfs.renameSnapshot(sub1, "s1", badName); + fail("Exception expected when an illegal name is given"); + } catch (RemoteException e) { + GenericTestUtils.assertExceptionContains(errorMsg, e); + } + } + } } From 218ea7380c69e1a2cd73482fba523fee33644288 Mon Sep 17 00:00:00 2001 From: Arpit Agarwal Date: Fri, 16 Aug 2013 19:04:43 +0000 Subject: [PATCH 31/53] HDFS-5105. TestFsck fails on Windows. (Contributed by Chuan Liu) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1514852 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 + .../hadoop/hdfs/server/namenode/TestFsck.java | 42 ++++++++++++------- 2 files changed, 29 insertions(+), 15 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 00e206d00a1..3df060e5fd6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -353,6 +353,8 @@ Release 2.1.1-beta - UNRELEASED HDFS-5102. Snapshot names should not be allowed to contain slash characters. (jing9) + HDFS-5105. TestFsck fails on Windows. (Chuan Liu via arp) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java index 931351d4ac7..754e56966d3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java @@ -153,8 +153,8 @@ public class TestFsck { String outStr = runFsck(conf, 0, true, "/"); verifyAuditLogs(); assertEquals(aTime, fs.getFileStatus(file).getAccessTime()); - assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS)); System.out.println(outStr); + assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS)); if (fs != null) {try{fs.close();} catch(Exception e){}} cluster.shutdown(); @@ -194,18 +194,30 @@ public class TestFsck { // Turn off the logs Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger(); logger.setLevel(Level.OFF); - - // Audit log should contain one getfileinfo and one fsck - BufferedReader reader = new BufferedReader(new FileReader(auditLogFile)); - String line = reader.readLine(); - assertNotNull(line); - assertTrue("Expected getfileinfo event not found in audit log", - getfileinfoPattern.matcher(line).matches()); - line = reader.readLine(); - assertNotNull(line); - assertTrue("Expected fsck event not found in audit log", - fsckPattern.matcher(line).matches()); - assertNull("Unexpected event in audit log", reader.readLine()); + + BufferedReader reader = null; + try { + // Audit log should contain one getfileinfo and one fsck + reader = new BufferedReader(new FileReader(auditLogFile)); + String line = reader.readLine(); + assertNotNull(line); + assertTrue("Expected getfileinfo event not found in audit log", + getfileinfoPattern.matcher(line).matches()); + line = reader.readLine(); + assertNotNull(line); + assertTrue("Expected fsck event not found in audit log", fsckPattern + .matcher(line).matches()); 
+ assertNull("Unexpected event in audit log", reader.readLine()); + } finally { + // Close the reader and remove the appender to release the audit log file + // handle after verifying the content of the file. + if (reader != null) { + reader.close(); + } + if (logger != null) { + logger.removeAllAppenders(); + } + } } @Test @@ -963,9 +975,9 @@ public class TestFsck { String outStr = runFsck(conf, 0, true, "/"); verifyAuditLogs(); assertEquals(aTime, fc.getFileStatus(symlink).getAccessTime()); - assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS)); - assertTrue(outStr.contains("Total symlinks:\t\t1\n")); System.out.println(outStr); + assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS)); + assertTrue(outStr.contains("Total symlinks:\t\t1")); util.cleanup(fs, fileName); } finally { if (fs != null) {try{fs.close();} catch(Exception e){}} From 9ba95136e2e06e1a4ca94a87ba89eae8050a9522 Mon Sep 17 00:00:00 2001 From: Chris Nauroth Date: Fri, 16 Aug 2013 22:28:41 +0000 Subject: [PATCH 32/53] HDFS-5106. TestDatanodeBlockScanner fails on Windows due to incorrect path format. Contributed by Chuan Liu. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1514911 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ .../java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java | 3 ++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 3df060e5fd6..f397a980a50 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -355,6 +355,9 @@ Release 2.1.1-beta - UNRELEASED HDFS-5105. TestFsck fails on Windows. (Chuan Liu via arp) + HDFS-5106. TestDatanodeBlockScanner fails on Windows due to incorrect path + format. (Chuan Liu via cnauroth) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java index 30cb035ff90..eb28a14e5f3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java @@ -440,7 +440,8 @@ public class TestDatanodeBlockScanner { } } - private static final String BASE_PATH = "/data/current/finalized"; + private static final String BASE_PATH = (new File("/data/current/finalized")) + .getAbsolutePath(); @Test public void testReplicaInfoParsing() throws Exception { From 52f0259502de42e433588c299339bf5cd4ba1f8e Mon Sep 17 00:00:00 2001 From: Jing Zhao Date: Fri, 16 Aug 2013 22:35:19 +0000 Subject: [PATCH 33/53] HADOOP-9880. SASL changes from HADOOP-9421 breaks Secure HA NN. Contributed by Daryn Sharp. 
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1514913 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 3 +++ .../java/org/apache/hadoop/ipc/Server.java | 10 ++++++++- .../apache/hadoop/security/SaslRpcServer.java | 1 - .../DelegationTokenSecretManager.java | 22 +++++++++++++++++++ 4 files changed, 34 insertions(+), 2 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index e18d4584299..92d0df40f5d 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -399,6 +399,9 @@ Release 2.1.1-beta - UNRELEASED HADOOP-9868. Server must not advertise kerberos realm. (daryn via kihwal) + HADOOP-9880. SASL changes from HADOOP-9421 breaks Secure HA NN. (daryn via + jing9) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java index 1533b3d00c6..de43646a204 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java @@ -1311,7 +1311,15 @@ public abstract class Server { Throwable cause = e; while (cause != null) { if (cause instanceof InvalidToken) { - sendToClient = (InvalidToken) cause; + // FIXME: hadoop method signatures are restricting the SASL + // callbacks to only returning InvalidToken, but some services + // need to throw other exceptions (ex. NN + StandyException), + // so for now we'll tunnel the real exceptions via an + // InvalidToken's cause which normally is not set + if (cause.getCause() != null) { + cause = cause.getCause(); + } + sendToClient = (IOException) cause; break; } cause = cause.getCause(); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcServer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcServer.java index 9408028ffa2..2390dfcd658 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcServer.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcServer.java @@ -127,7 +127,6 @@ public class SaslRpcServer { final CallbackHandler callback; switch (authMethod) { case TOKEN: { - secretManager.checkAvailableForRead(); callback = new SaslDigestCallbackHandler(secretManager, connection); break; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java index 98fb76216ce..17e2ccc61ef 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java @@ -81,6 +81,28 @@ public class DelegationTokenSecretManager return new DelegationTokenIdentifier(); } + @Override + public synchronized byte[] retrievePassword( + DelegationTokenIdentifier identifier) throws InvalidToken { + try { + // this check introduces inconsistency in the authentication to a + // HA standby NN. 
non-token auths are allowed into the namespace which + // decides whether to throw a StandbyException. tokens are a bit + // different in that a standby may be behind and thus not yet know + // of all tokens issued by the active NN. the following check does + // not allow ANY token auth, however it should allow known tokens in + checkAvailableForRead(); + } catch (StandbyException se) { + // FIXME: this is a hack to get around changing method signatures by + // tunneling a non-InvalidToken exception as the cause which the + // RPC server will unwrap before returning to the client + InvalidToken wrappedStandby = new InvalidToken("StandbyException"); + wrappedStandby.initCause(se); + throw wrappedStandby; + } + return super.retrievePassword(identifier); + } + @Override //SecretManager public void checkAvailableForRead() throws StandbyException { namesystem.checkOperation(OperationCategory.READ); From 1ad3fe46332586cea73c47ba06342f91359db561 Mon Sep 17 00:00:00 2001 From: Vinod Kumar Vavilapalli Date: Fri, 16 Aug 2013 23:00:54 +0000 Subject: [PATCH 34/53] YARN-107. Fixed ResourceManager and clients to better handle forceKillApplication on non-running and finished applications. Contributed by Xuan Gong. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1514918 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 4 +++ .../yarn/client/cli/ApplicationCLI.java | 12 +++++-- .../hadoop/yarn/client/cli/TestYarnCLI.java | 35 +++++++++++++++++++ .../resourcemanager/ClientRMService.java | 5 ++- .../resourcemanager/TestClientRMService.java | 22 ++++++++++++ 5 files changed, 73 insertions(+), 5 deletions(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 9d10e2a6efe..e276d19489e 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -70,6 +70,10 @@ Release 2.1.1-beta - UNRELEASED YARN-337. RM handles killed application tracking URL poorly (jlowe) + YARN-107. Fixed ResourceManager and clients to better handle + forceKillApplication on non-running and finished applications. 
(Xuan Gong + via vinodkv) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java index fa22b29ddb9..16e55a6a72d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java @@ -35,6 +35,7 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.util.ToolRunner; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationReport; +import org.apache.hadoop.yarn.api.records.YarnApplicationState; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.util.ConverterUtils; @@ -164,8 +165,15 @@ public class ApplicationCLI extends YarnCLI { private void killApplication(String applicationId) throws YarnException, IOException { ApplicationId appId = ConverterUtils.toApplicationId(applicationId); - sysout.println("Killing application " + applicationId); - client.killApplication(appId); + ApplicationReport appReport = client.getApplicationReport(appId); + if (appReport.getYarnApplicationState() == YarnApplicationState.FINISHED + || appReport.getYarnApplicationState() == YarnApplicationState.KILLED + || appReport.getYarnApplicationState() == YarnApplicationState.FAILED) { + sysout.println("Application " + applicationId + " has already finished "); + } else { + sysout.println("Killing application " + applicationId); + client.killApplication(appId); + } } /** diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java index 4bc405d67c4..8be8b68e491 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java @@ -26,6 +26,7 @@ import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; +import static org.mockito.Mockito.doThrow; import java.io.ByteArrayOutputStream; import java.io.PrintStream; @@ -320,10 +321,44 @@ public class TestYarnCLI { public void testKillApplication() throws Exception { ApplicationCLI cli = createAndGetAppCLI(); ApplicationId applicationId = ApplicationId.newInstance(1234, 5); + + ApplicationReport newApplicationReport2 = ApplicationReport.newInstance( + applicationId, ApplicationAttemptId.newInstance(applicationId, 1), + "user", "queue", "appname", "host", 124, null, + YarnApplicationState.FINISHED, "diagnostics", "url", 0, 0, + FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53789f, "YARN", null); + when(client.getApplicationReport(any(ApplicationId.class))).thenReturn( + newApplicationReport2); int result = cli.run(new String[] { "-kill", applicationId.toString() }); assertEquals(0, result); + verify(client, times(0)).killApplication(any(ApplicationId.class)); + verify(sysOut).println( + "Application " + applicationId + " has already finished "); + + 
ApplicationReport newApplicationReport = ApplicationReport.newInstance( + applicationId, ApplicationAttemptId.newInstance(applicationId, 1), + "user", "queue", "appname", "host", 124, null, + YarnApplicationState.RUNNING, "diagnostics", "url", 0, 0, + FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53789f, "YARN", null); + when(client.getApplicationReport(any(ApplicationId.class))).thenReturn( + newApplicationReport); + result = cli.run(new String[] { "-kill", applicationId.toString() }); + assertEquals(0, result); verify(client).killApplication(any(ApplicationId.class)); verify(sysOut).println("Killing application application_1234_0005"); + + doThrow(new ApplicationNotFoundException("Application with id '" + + applicationId + "' doesn't exist in RM.")).when(client) + .getApplicationReport(applicationId); + cli = createAndGetAppCLI(); + try { + cli.run(new String[] { "-kill", applicationId.toString() }); + Assert.fail(); + } catch (Exception ex) { + Assert.assertTrue(ex instanceof ApplicationNotFoundException); + Assert.assertEquals("Application with id '" + applicationId + + "' doesn't exist in RM.", ex.getMessage()); + } } @Test diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java index 1f7a8477d6e..97f0ef8e0b1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java @@ -353,9 +353,8 @@ public class ClientRMService extends AbstractService implements RMAuditLogger.logFailure(callerUGI.getUserName(), AuditConstants.KILL_APP_REQUEST, "UNKNOWN", "ClientRMService", "Trying to kill an absent application", applicationId); - throw RPCUtil - .getRemoteException("Trying to kill an absent application " - + applicationId); + throw new ApplicationNotFoundException("Trying to kill an absent" + + " application " + applicationId); } if (!checkAccess(callerUGI, application.getUser(), diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java index 4817f45e0eb..ff3c3aadda1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java @@ -51,6 +51,7 @@ import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoResponse; +import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest; import 
org.apache.hadoop.yarn.api.protocolrecords.RenewDelegationTokenRequest; import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest; import org.apache.hadoop.yarn.api.records.ApplicationAccessType; @@ -197,6 +198,27 @@ public class TestClientRMService { } } + @Test + public void testForceKillApplication() throws YarnException { + RMContext rmContext = mock(RMContext.class); + when(rmContext.getRMApps()).thenReturn( + new ConcurrentHashMap()); + ClientRMService rmService = new ClientRMService(rmContext, null, null, + null, null); + ApplicationId applicationId = + BuilderUtils.newApplicationId(System.currentTimeMillis(), 0); + KillApplicationRequest request = + KillApplicationRequest.newInstance(applicationId); + try { + rmService.forceKillApplication(request); + Assert.fail(); + } catch (ApplicationNotFoundException ex) { + Assert.assertEquals(ex.getMessage(), + "Trying to kill an absent " + + "application " + request.getApplicationId()); + } + } + @Test public void testGetQueueInfo() throws Exception { YarnScheduler yarnScheduler = mock(YarnScheduler.class); From 214d4377fc151297c85b09273dfe8fdddae40d3d Mon Sep 17 00:00:00 2001 From: Brandon Li Date: Sat, 17 Aug 2013 21:16:50 +0000 Subject: [PATCH 35/53] HDFS-5104 Support dotdot name in NFS LOOKUP operation. Contributed by Brandon Li git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1515042 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 + .../java/org/apache/hadoop/hdfs/DFSUtil.java | 14 ++- .../hdfs/server/namenode/FSDirectory.java | 13 +++ .../hdfs/server/namenode/TestINodeFile.java | 87 +++++++++++++------ 4 files changed, 87 insertions(+), 29 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index f397a980a50..38da72197ab 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -291,6 +291,8 @@ Release 2.1.1-beta - UNRELEASED HDFS-5076 Add MXBean methods to query NN's transaction information and JournalNode's journal status. (jing9) + HDFS-5104 Support dotdot name in NFS LOOKUP operation (brandonli) + IMPROVEMENTS HDFS-4513. Clarify in the WebHDFS REST API that all JSON respsonses may diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java index fd1fe04e0f8..e3b61abc0bd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java @@ -70,6 +70,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolTranslatorPB; +import org.apache.hadoop.hdfs.server.namenode.FSDirectory; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.ipc.RPC; @@ -204,13 +205,20 @@ public class DFSUtil { String[] components = StringUtils.split(src, '/'); for (int i = 0; i < components.length; i++) { String element = components[i]; - if (element.equals("..") || - element.equals(".") || + if (element.equals(".") || (element.indexOf(":") >= 0) || (element.indexOf("/") >= 0)) { return false; } - + // ".." 
is allowed in path starting with /.reserved/.inodes + if (element.equals("..")) { + if (components.length > 4 + && components[1].equals(FSDirectory.DOT_RESERVED_STRING) + && components[2].equals(FSDirectory.DOT_INODES_STRING)) { + continue; + } + return false; + } // The string may start or end with a /, but not have // "//" in the middle. if (element.isEmpty() && i != components.length - 1 && diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index 532a2bfb218..51642a8b23a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -2730,6 +2730,19 @@ public class FSDirectory implements Closeable { throw new FileNotFoundException( "File for given inode path does not exist: " + src); } + + // Handle single ".." for NFS lookup support. + if ((pathComponents.length > 4) + && DFSUtil.bytes2String(pathComponents[4]).equals("..")) { + INode parent = inode.getParent(); + if (parent == null || parent.getId() == INodeId.ROOT_INODE_ID) { + // inode is root, or its parent is root. + return Path.SEPARATOR; + } else { + return parent.getFullPathName(); + } + } + StringBuilder path = id == INodeId.ROOT_INODE_ID ? new StringBuilder() : new StringBuilder(inode.getFullPathName()); for (int i = 4; i < pathComponents.length; i++) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java index 4d1a9a3f9b7..aa12a231fc3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java @@ -45,6 +45,7 @@ import org.apache.hadoop.fs.PathIsNotDirectoryException; import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.PermissionStatus; +import org.apache.hadoop.hdfs.DFSClient; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.DFSUtil; @@ -901,31 +902,65 @@ public class TestINodeFile { @Test public void testInodeReplacement() throws Exception { final Configuration conf = new Configuration(); - final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf). 
- numDataNodes(1).build(); - cluster.waitActive(); - final DistributedFileSystem hdfs = cluster.getFileSystem(); - final FSDirectory fsdir = cluster.getNamesystem().getFSDirectory(); - - final Path dir = new Path("/dir"); - hdfs.mkdirs(dir); - INode dirNode = fsdir.getINode(dir.toString()); - INode dirNodeFromNode = fsdir.getInode(dirNode.getId()); - assertSame(dirNode, dirNodeFromNode); - - // set quota to dir, which leads to node replacement - hdfs.setQuota(dir, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1); - dirNode = fsdir.getINode(dir.toString()); - assertTrue(dirNode instanceof INodeDirectoryWithQuota); - // the inode in inodeMap should also be replaced - dirNodeFromNode = fsdir.getInode(dirNode.getId()); - assertSame(dirNode, dirNodeFromNode); - - hdfs.setQuota(dir, -1, -1); - dirNode = fsdir.getINode(dir.toString()); - assertTrue(dirNode instanceof INodeDirectory); - // the inode in inodeMap should also be replaced - dirNodeFromNode = fsdir.getInode(dirNode.getId()); - assertSame(dirNode, dirNodeFromNode); + MiniDFSCluster cluster = null; + try { + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); + cluster.waitActive(); + final DistributedFileSystem hdfs = cluster.getFileSystem(); + final FSDirectory fsdir = cluster.getNamesystem().getFSDirectory(); + + final Path dir = new Path("/dir"); + hdfs.mkdirs(dir); + INode dirNode = fsdir.getINode(dir.toString()); + INode dirNodeFromNode = fsdir.getInode(dirNode.getId()); + assertSame(dirNode, dirNodeFromNode); + + // set quota to dir, which leads to node replacement + hdfs.setQuota(dir, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1); + dirNode = fsdir.getINode(dir.toString()); + assertTrue(dirNode instanceof INodeDirectoryWithQuota); + // the inode in inodeMap should also be replaced + dirNodeFromNode = fsdir.getInode(dirNode.getId()); + assertSame(dirNode, dirNodeFromNode); + + hdfs.setQuota(dir, -1, -1); + dirNode = fsdir.getINode(dir.toString()); + assertTrue(dirNode instanceof INodeDirectory); + // the inode in inodeMap should also be replaced + dirNodeFromNode = fsdir.getInode(dirNode.getId()); + assertSame(dirNode, dirNodeFromNode); + } finally { + cluster.shutdown(); + } + } + + @Test + public void testDotdotInodePath() throws Exception { + final Configuration conf = new Configuration(); + MiniDFSCluster cluster = null; + try { + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); + cluster.waitActive(); + final DistributedFileSystem hdfs = cluster.getFileSystem(); + final FSDirectory fsdir = cluster.getNamesystem().getFSDirectory(); + + final Path dir = new Path("/dir"); + hdfs.mkdirs(dir); + long dirId = fsdir.getINode(dir.toString()).getId(); + long parentId = fsdir.getINode("/").getId(); + String testPath = "/.reserved/.inodes/" + dirId + "/.."; + + DFSClient client = new DFSClient(NameNode.getAddress(conf), conf); + HdfsFileStatus status = client.getFileInfo(testPath); + assertTrue(parentId == status.getFileId()); + + // Test root's parent is still root + testPath = "/.reserved/.inodes/" + parentId + "/.."; + status = client.getFileInfo(testPath); + assertTrue(parentId == status.getFileId()); + + } finally { + cluster.shutdown(); + } } } From b7fb6fd6c45b0f8f78f6534fc169317f5702b72a Mon Sep 17 00:00:00 2001 From: Brandon Li Date: Sun, 18 Aug 2013 16:49:27 +0000 Subject: [PATCH 36/53] HDFS-5107 Fix array copy error in Readdir and Readdirplus responses. 
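A small self-contained sketch of the copy-direction bug the HDFS-5107 patch below removes, with hypothetical names standing in for the NFS response types: System.arraycopy takes (src, srcPos, dest, destPos, length), so copying from the freshly allocated array into the caller's array leaves the stored entries null and clobbers the input, whereas the fix simply keeps an unmodifiable list view of the given entries.

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class DefensiveCopySketch {
  static class Entry {
    final String name;
    Entry(String n) { name = n; }
  }

  // Buggy shape: the new array (all nulls) is the source, the caller's array the destination.
  static Entry[] brokenCopy(Entry[] entries) {
    Entry[] stored = new Entry[entries.length];
    System.arraycopy(stored, 0, entries, 0, entries.length); // wrong direction
    return stored; // still all nulls
  }

  // Fixed shape, mirroring the patch: an unmodifiable list view over the given array.
  static List<Entry> safeCopy(Entry[] entries) {
    return Collections.unmodifiableList(Arrays.asList(entries));
  }

  public static void main(String[] args) {
    Entry[] in = { new Entry("a"), new Entry("b") };
    System.out.println(brokenCopy(in)[0]); // null
    System.out.println(safeCopy(new Entry[] { new Entry("a") }).get(0).name); // a
  }
}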
Contributed by Brandon Li git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1515166 13f79535-47bb-0310-9956-ffa450edef68 --- .../nfs/nfs3/response/READDIR3Response.java | 20 +++++++++---------- .../nfs3/response/READDIRPLUS3Response.java | 18 ++++++++--------- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ 3 files changed, 22 insertions(+), 19 deletions(-) diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/READDIR3Response.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/READDIR3Response.java index fa54c5459fb..9f8d6760b5b 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/READDIR3Response.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/READDIR3Response.java @@ -17,12 +17,14 @@ */ package org.apache.hadoop.nfs.nfs3.response; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes; import org.apache.hadoop.nfs.nfs3.Nfs3Status; import org.apache.hadoop.oncrpc.XDR; -import com.google.common.collect.ObjectArrays; - /** * READDIR3 Response */ @@ -56,12 +58,11 @@ public class READDIR3Response extends NFS3Response { } public static class DirList3 { - final Entry3 entries[]; + final List entries; final boolean eof; public DirList3(Entry3[] entries, boolean eof) { - this.entries = ObjectArrays.newArray(entries, entries.length); - System.arraycopy(this.entries, 0, entries, 0, entries.length); + this.entries = Collections.unmodifiableList(Arrays.asList(entries)); this.eof = eof; } } @@ -102,12 +103,11 @@ public class READDIR3Response extends NFS3Response { if (getStatus() == Nfs3Status.NFS3_OK) { xdr.writeLongAsHyper(cookieVerf); - Entry3[] f = dirList.entries; - for (int i = 0; i < f.length; i++) { + for (Entry3 e : dirList.entries) { xdr.writeBoolean(true); // Value follows - xdr.writeLongAsHyper(f[i].getFileId()); - xdr.writeString(f[i].getName()); - xdr.writeLongAsHyper(f[i].getCookie()); + xdr.writeLongAsHyper(e.getFileId()); + xdr.writeString(e.getName()); + xdr.writeLongAsHyper(e.getCookie()); } xdr.writeBoolean(false); diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/READDIRPLUS3Response.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/READDIRPLUS3Response.java index 77794cf48a6..6b41cb27f7a 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/READDIRPLUS3Response.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/READDIRPLUS3Response.java @@ -17,13 +17,15 @@ */ package org.apache.hadoop.nfs.nfs3.response; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + import org.apache.hadoop.nfs.nfs3.FileHandle; import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes; import org.apache.hadoop.nfs.nfs3.Nfs3Status; import org.apache.hadoop.oncrpc.XDR; -import com.google.common.collect.ObjectArrays; - /** * READDIRPLUS3 Response */ @@ -60,16 +62,15 @@ public class READDIRPLUS3Response extends NFS3Response { } public static class DirListPlus3 { - EntryPlus3 entries[]; + List entries; boolean eof; public DirListPlus3(EntryPlus3[] entries, boolean eof) { - this.entries = ObjectArrays.newArray(entries, entries.length); - System.arraycopy(this.entries, 0, entries, 0, entries.length); + this.entries = 
Collections.unmodifiableList(Arrays.asList(entries)); this.eof = eof; } - EntryPlus3[] getEntries() { + List getEntries() { return entries; } @@ -101,10 +102,9 @@ public class READDIRPLUS3Response extends NFS3Response { if (getStatus() == Nfs3Status.NFS3_OK) { out.writeLongAsHyper(cookieVerf); - EntryPlus3[] f = dirListPlus.getEntries(); - for (int i = 0; i < f.length; i++) { + for (EntryPlus3 f : dirListPlus.getEntries()) { out.writeBoolean(true); // next - f[i].seralize(out); + f.seralize(out); } out.writeBoolean(false); diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 38da72197ab..242b2b256a4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -293,6 +293,9 @@ Release 2.1.1-beta - UNRELEASED HDFS-5104 Support dotdot name in NFS LOOKUP operation (brandonli) + HDFS-5107 Fix array copy error in Readdir and Readdirplus responses + (brandonli) + IMPROVEMENTS HDFS-4513. Clarify in the WebHDFS REST API that all JSON respsonses may From 79a184505d5ed71125a92f9c236fcf93b13f954e Mon Sep 17 00:00:00 2001 From: Vinod Kumar Vavilapalli Date: Mon, 19 Aug 2013 01:53:34 +0000 Subject: [PATCH 37/53] YARN-643. Fixed ResourceManager to remove all tokens consistently on app finish. Contributed by Xuan Gong. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1515256 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 3 ++ .../rmapp/attempt/RMAppAttemptImpl.java | 30 ++++++++-------- .../attempt/TestRMAppAttemptTransitions.java | 35 ++++++++++++++++--- .../security/TestAMRMTokens.java | 19 ++++++++++ 4 files changed, 68 insertions(+), 19 deletions(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index e276d19489e..dc178d7143a 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -74,6 +74,9 @@ Release 2.1.1-beta - UNRELEASED forceKillApplication on non-running and finished applications. (Xuan Gong via vinodkv) + YARN-643. Fixed ResourceManager to remove all tokens consistently on app + finish. (Xuan Gong via vinodkv) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java index e287c203728..1543110db03 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java @@ -761,6 +761,8 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable { rejectedEvent.getApplicationAttemptId().getApplicationId(), message) ); + + appAttempt.removeTokens(appAttempt); } } @@ -847,7 +849,6 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable { @Override public void transition(RMAppAttemptImpl appAttempt, RMAppAttemptEvent event) { - ApplicationAttemptId appAttemptId = appAttempt.getAppAttemptId(); // Tell the AMS. 
Unregister from the ApplicationMasterService @@ -894,9 +895,7 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable { appAttempt.eventHandler.handle(new AppRemovedSchedulerEvent(appAttemptId, finalAttemptState)); - // Remove the AppAttempt from the AMRMTokenSecretManager - appAttempt.rmContext.getAMRMTokenSecretManager() - .applicationMasterFinished(appAttemptId); + appAttempt.removeTokens(appAttempt); } } @@ -1015,7 +1014,6 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable { " exitCode: " + status.getExitStatus() + " due to: " + status.getDiagnostics() + "." + "Failing this attempt."); - // Tell the app, scheduler super.transition(appAttempt, finishEvent); } @@ -1042,12 +1040,6 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable { appAttempt.rmContext.getAMFinishingMonitor().unregister( appAttempt.getAppAttemptId()); - // Unregister from the ClientToAMTokenSecretManager - if (UserGroupInformation.isSecurityEnabled()) { - appAttempt.rmContext.getClientToAMTokenSecretManager() - .unRegisterApplication(appAttempt.getAppAttemptId()); - } - if(!appAttempt.submissionContext.getUnmanagedAM()) { // Tell the launcher to cleanup. appAttempt.eventHandler.handle(new AMLauncherEvent( @@ -1116,10 +1108,6 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable { appAttempt.rmContext.getAMLivelinessMonitor().unregister(appAttemptId); - // Remove the AppAttempt from the AMRMTokenSecretManager - appAttempt.rmContext.getAMRMTokenSecretManager() - .applicationMasterFinished(appAttemptId); - appAttempt.progress = 1.0f; RMAppAttemptUnregistrationEvent unregisterEvent @@ -1267,4 +1255,16 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable { + " MasterContainer: " + masterContainer); store.storeApplicationAttempt(this); } + + private void removeTokens(RMAppAttemptImpl appAttempt) { + // Unregister from the ClientToAMTokenSecretManager + if (UserGroupInformation.isSecurityEnabled()) { + appAttempt.rmContext.getClientToAMTokenSecretManager() + .unRegisterApplication(appAttempt.getAppAttemptId()); + } + + // Remove the AppAttempt from the AMRMTokenSecretManager + appAttempt.rmContext.getAMRMTokenSecretManager() + .applicationMasterFinished(appAttempt.getAppAttemptId()); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java index d61b5c9e6f6..5261d077d5c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java @@ -28,6 +28,7 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; +import static org.mockito.Mockito.spy; import java.util.Collections; import java.util.List; @@ -35,6 +36,7 @@ import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import 
org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.yarn.MockApps; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; @@ -102,6 +104,11 @@ public class TestRMAppAttemptTransitions { private RMApp application; private RMAppAttempt applicationAttempt; + + private Configuration conf = new Configuration(); + private AMRMTokenSecretManager amRMTokenManager = spy(new AMRMTokenSecretManager(conf)); + private ClientToAMTokenSecretManagerInRM clientToAMTokenManager = + spy(new ClientToAMTokenSecretManagerInRM()); private final class TestApplicationAttemptEventDispatcher implements EventHandler { @@ -163,14 +170,13 @@ public class TestRMAppAttemptTransitions { mock(ContainerAllocationExpirer.class); amLivelinessMonitor = mock(AMLivelinessMonitor.class); amFinishingMonitor = mock(AMLivelinessMonitor.class); - Configuration conf = new Configuration(); rmContext = new RMContextImpl(rmDispatcher, containerAllocationExpirer, amLivelinessMonitor, amFinishingMonitor, - null, new AMRMTokenSecretManager(conf), + null, amRMTokenManager, new RMContainerTokenSecretManager(conf), new NMTokenSecretManagerInRM(conf), - new ClientToAMTokenSecretManagerInRM()); + clientToAMTokenManager); RMStateStore store = mock(RMStateStore.class); ((RMContextImpl) rmContext).setStateStore(store); @@ -261,7 +267,11 @@ public class TestRMAppAttemptTransitions { assertEquals(0.0, (double)applicationAttempt.getProgress(), 0.0001); assertEquals(0, applicationAttempt.getRanNodes().size()); assertNull(applicationAttempt.getFinalApplicationStatus()); - + if (UserGroupInformation.isSecurityEnabled()) { + verify(clientToAMTokenManager).registerApplication( + applicationAttempt.getAppAttemptId()); + } + assertNotNull(applicationAttempt.getAMRMToken()); // Check events verify(masterService). 
registerAppAttempt(applicationAttempt.getAppAttemptId()); @@ -288,6 +298,7 @@ public class TestRMAppAttemptTransitions { // this works for unmanaged and managed AM's because this is actually doing // verify(application).handle(anyObject()); verify(application).handle(any(RMAppRejectedEvent.class)); + verifyTokenCount(applicationAttempt.getAppAttemptId(), 1); } /** @@ -303,6 +314,7 @@ public class TestRMAppAttemptTransitions { assertEquals(0.0, (double)applicationAttempt.getProgress(), 0.0001); assertEquals(0, applicationAttempt.getRanNodes().size()); assertNull(applicationAttempt.getFinalApplicationStatus()); + verifyTokenCount(applicationAttempt.getAppAttemptId(), 1); } /** @@ -377,6 +389,8 @@ public class TestRMAppAttemptTransitions { // Check events verify(application, times(2)).handle(any(RMAppFailedAttemptEvent.class)); + + verifyTokenCount(applicationAttempt.getAppAttemptId(), 1); } /** @@ -422,6 +436,7 @@ public class TestRMAppAttemptTransitions { applicationAttempt.getTrackingUrl()); assertEquals(container, applicationAttempt.getMasterContainer()); assertEquals(finalStatus, applicationAttempt.getFinalApplicationStatus()); + verifyTokenCount(applicationAttempt.getAppAttemptId(), 0); } /** @@ -442,6 +457,7 @@ public class TestRMAppAttemptTransitions { .getJustFinishedContainers().size()); assertEquals(container, applicationAttempt.getMasterContainer()); assertEquals(finalStatus, applicationAttempt.getFinalApplicationStatus()); + verifyTokenCount(applicationAttempt.getAppAttemptId(), 1); } @@ -592,6 +608,7 @@ public class TestRMAppAttemptTransitions { applicationAttempt.getAppAttemptId(), RMAppAttemptEventType.KILL)); testAppAttemptKilledState(null, EMPTY_DIAGNOSTICS); + verifyTokenCount(applicationAttempt.getAppAttemptId(), 1); } @Test @@ -666,6 +683,7 @@ public class TestRMAppAttemptTransitions { applicationAttempt.getAppAttemptId(), cs)); assertEquals(RMAppAttemptState.FAILED, applicationAttempt.getAppAttemptState()); + verifyTokenCount(applicationAttempt.getAppAttemptId(), 1); } @Test @@ -709,6 +727,7 @@ public class TestRMAppAttemptTransitions { applicationAttempt.getAppAttemptId().getApplicationId()); assertEquals(rmAppPageUrl, applicationAttempt.getOriginalTrackingUrl()); assertEquals(rmAppPageUrl, applicationAttempt.getTrackingUrl()); + verifyTokenCount(applicationAttempt.getAppAttemptId(), 1); } @Test(timeout=10000) @@ -725,6 +744,7 @@ public class TestRMAppAttemptTransitions { applicationAttempt.getAppAttemptId().getApplicationId()); assertEquals(rmAppPageUrl, applicationAttempt.getOriginalTrackingUrl()); assertEquals(rmAppPageUrl, applicationAttempt.getTrackingUrl()); + verifyTokenCount(applicationAttempt.getAppAttemptId(), 1); } @Test(timeout=20000) @@ -742,6 +762,7 @@ public class TestRMAppAttemptTransitions { applicationAttempt.getAppAttemptId().getApplicationId()); assertEquals(rmAppPageUrl, applicationAttempt.getOriginalTrackingUrl()); assertEquals(rmAppPageUrl, applicationAttempt.getTrackingUrl()); + verifyTokenCount(applicationAttempt.getAppAttemptId(), 1); } @Test @@ -848,4 +869,10 @@ public class TestRMAppAttemptTransitions { diagnostics, 0); } + private void verifyTokenCount(ApplicationAttemptId appAttemptId, int count) { + verify(amRMTokenManager, times(count)).applicationMasterFinished(appAttemptId); + if (UserGroupInformation.isSecurityEnabled()) { + verify(clientToAMTokenManager, times(count)).unRegisterApplication(appAttemptId); + } + } } diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestAMRMTokens.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestAMRMTokens.java index b0c04886945..aa894c5f6a9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestAMRMTokens.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestAMRMTokens.java @@ -38,6 +38,8 @@ import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest; import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest; import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ContainerState; +import org.apache.hadoop.yarn.api.records.ContainerStatus; import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; import org.apache.hadoop.yarn.ipc.YarnRPC; import org.apache.hadoop.yarn.server.resourcemanager.MockNM; @@ -46,6 +48,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.TestAMAuthorization.MockRMW import org.apache.hadoop.yarn.server.resourcemanager.TestAMAuthorization.MyContainerManager; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptContainerFinishedEvent; +import org.apache.hadoop.yarn.server.utils.BuilderUtils; import org.apache.hadoop.yarn.util.Records; import org.junit.Assert; import org.junit.Test; @@ -80,6 +84,7 @@ public class TestAMRMTokens { * * @throws Exception */ + @SuppressWarnings("unchecked") @Test public void testTokenExpiry() throws Exception { @@ -134,6 +139,20 @@ public class TestAMRMTokens { finishAMRequest.setTrackingUrl("url"); rmClient.finishApplicationMaster(finishAMRequest); + // Send RMAppAttemptEventType.CONTAINER_FINISHED to transit RMAppAttempt + // from Finishing state to Finished State. Both AMRMToken and + // ClientToAMToken will be removed. + ContainerStatus containerStatus = + BuilderUtils.newContainerStatus(attempt.getMasterContainer().getId(), + ContainerState.COMPLETE, + "AM Container Finished", 0); + rm.getRMContext() + .getDispatcher() + .getEventHandler() + .handle( + new RMAppAttemptContainerFinishedEvent(applicationAttemptId, + containerStatus)); + // Now simulate trying to allocate. RPC call itself should throw auth // exception. rpc.stopProxy(rmClient, conf); // To avoid using cached client From be0317e0211e9b107dd25e5f492cdbc0493ec5e0 Mon Sep 17 00:00:00 2001 From: Tsz-wo Sze Date: Mon, 19 Aug 2013 06:44:38 +0000 Subject: [PATCH 38/53] Add .classpath, .project and .settings to svn:ignore. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1515283 13f79535-47bb-0310-9956-ffa450edef68 From c9b89de0eacf15f21faa3a7ba30d4773f571c9a4 Mon Sep 17 00:00:00 2001 From: Brandon Li Date: Mon, 19 Aug 2013 21:54:51 +0000 Subject: [PATCH 39/53] HDFS-5110 Change FSDataOutputStream to HdfsDataOutputStream for opened streams to fix type cast error. 
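As a rough sketch of why the HDFS-5110 change below keeps the stream at the more specific type instead of downcasting, with stand-in classes rather than Hadoop's FSDataOutputStream and HdfsDataOutputStream: holding only the base type forces a cast at every hsync-style call and fails at runtime if construction ever produces a plain base stream, while declaring the specific type lets the compiler guarantee the extra operation exists.

public class KeepSpecificTypeSketch {
  // Stand-ins for a generic output stream and a subclass with an extra sync capability.
  static class BaseStream {
    void write(int b) { /* no-op for the sketch */ }
  }

  static class SyncableStream extends BaseStream {
    void hsyncWithLength() { System.out.println("length synced"); }
  }

  // Risky shape: downcast on use; a ClassCastException only shows up at runtime.
  static void flushRisky(BaseStream out) {
    ((SyncableStream) out).hsyncWithLength();
  }

  // Safer shape, as in the patch: the signature names the type that actually has the operation.
  static void flushSafe(SyncableStream out) {
    out.hsyncWithLength();
  }

  public static void main(String[] args) {
    SyncableStream out = new SyncableStream();
    flushRisky(out);
    flushSafe(out);
  }
}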
Contributed by Brandon Li git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1515624 13f79535-47bb-0310-9956-ffa450edef68 --- .../org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java | 9 ++++----- .../org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java | 7 ++++--- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ 3 files changed, 11 insertions(+), 8 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java index 301fedc508c..e13bebcc6f9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java @@ -32,7 +32,6 @@ import java.util.concurrent.locks.ReentrantLock; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.FSDataInputStream; -import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.hdfs.DFSClient; import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag; @@ -70,7 +69,7 @@ class OpenFileCtx { // The stream write-back status. True means one thread is doing write back. private boolean asyncStatus; - private final FSDataOutputStream fos; + private final HdfsDataOutputStream fos; private final Nfs3FileAttributes latestAttr; private long nextOffset; @@ -114,7 +113,7 @@ class OpenFileCtx { return nonSequentialWriteInMemory; } - OpenFileCtx(FSDataOutputStream fos, Nfs3FileAttributes latestAttr, + OpenFileCtx(HdfsDataOutputStream fos, Nfs3FileAttributes latestAttr, String dumpFilePath) { this.fos = fos; this.latestAttr = latestAttr; @@ -438,7 +437,7 @@ class OpenFileCtx { FSDataInputStream fis = null; try { // Sync file data and length to avoid partial read failure - ((HdfsDataOutputStream) fos).hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH)); + fos.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH)); fis = new FSDataInputStream(dfsClient.open(path)); readCount = fis.read(offset, readbuffer, 0, count); @@ -527,7 +526,7 @@ class OpenFileCtx { int ret = COMMIT_WAIT; try { // Sync file data and length - ((HdfsDataOutputStream) fos).hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH)); + fos.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH)); // Nothing to do for metadata since attr related change is pass-through ret = COMMIT_FINISHED; } catch (IOException e) { diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java index 8db8b1bb734..e96b537d1f2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java @@ -29,7 +29,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CreateFlag; import org.apache.hadoop.fs.FSDataInputStream; -import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; import org.apache.hadoop.fs.Options; import org.apache.hadoop.fs.FileSystem.Statistics; import org.apache.hadoop.fs.FileUtil; @@ -629,7 +629,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { return new 
CREATE3Response(Nfs3Status.NFS3ERR_INVAL); } - FSDataOutputStream fos = null; + HdfsDataOutputStream fos = null; String dirFileIdPath = Nfs3Utils.getFileIdPath(dirHandle); WccAttr preOpDirAttr = null; Nfs3FileAttributes postOpObjAttr = null; @@ -652,7 +652,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { EnumSet flag = (createMode != Nfs3Constant.CREATE_EXCLUSIVE) ? EnumSet .of(CreateFlag.CREATE, CreateFlag.OVERWRITE) : EnumSet .of(CreateFlag.CREATE); - fos = new FSDataOutputStream(dfsClient.create(fileIdPath, permission, + + fos = new HdfsDataOutputStream(dfsClient.create(fileIdPath, permission, flag, false, replication, blockSize, null, bufferSize, null), statistics); diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 242b2b256a4..7d8dc365f86 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -296,6 +296,9 @@ Release 2.1.1-beta - UNRELEASED HDFS-5107 Fix array copy error in Readdir and Readdirplus responses (brandonli) + HDFS-5110 Change FSDataOutputStream to HdfsDataOutputStream for opened + streams to fix type cast error. (brandonli) + IMPROVEMENTS HDFS-4513. Clarify in the WebHDFS REST API that all JSON respsonses may From 8f343e684c4672212ad206c412603e2a5b0ee733 Mon Sep 17 00:00:00 2001 From: Vinod Kumar Vavilapalli Date: Mon, 19 Aug 2013 22:04:18 +0000 Subject: [PATCH 40/53] YARN-1006. Fixed broken rendering in the Nodes list web page on the RM web UI. Contributed by Xuan Gong. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1515629 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 3 +++ .../yarn/server/resourcemanager/webapp/NodesPage.java | 6 +++--- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index dc178d7143a..6101901f12a 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -77,6 +77,9 @@ Release 2.1.1-beta - UNRELEASED YARN-643. Fixed ResourceManager to remove all tokens consistently on app finish. (Xuan Gong via vinodkv) + YARN-1006. Fixed broken rendering in the Nodes list web page on the RM web + UI. 
(Xuan Gong via vinodkv) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java index 493fbad2cf6..87720e05960 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java @@ -155,10 +155,10 @@ class NodesPage extends RmView { private String nodesTableInit() { StringBuilder b = tableInit().append(", aoColumnDefs: ["); - b.append("{'bSearchable': false, 'aTargets': [ 7 ]}"); + b.append("{'bSearchable': false, 'aTargets': [ 6 ]}"); b.append(", {'sType': 'title-numeric', 'bSearchable': false, " + - "'aTargets': [ 8, 9 ] }"); - b.append(", {'sType': 'title-numeric', 'aTargets': [ 5 ]}"); + "'aTargets': [ 7, 8 ] }"); + b.append(", {'sType': 'title-numeric', 'aTargets': [ 4 ]}"); b.append("]}"); return b.toString(); } From 23abbd8f649150d1c73834aea36de8ed53b3023c Mon Sep 17 00:00:00 2001 From: Alejandro Abdelnur Date: Mon, 19 Aug 2013 22:52:29 +0000 Subject: [PATCH 41/53] HADOOP-9860. Remove class HackedKeytab and HackedKeytabEncoder from hadoop-minikdc once jira DIRSERVER-1882 solved. (ywskycn via tucu) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1515652 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 3 + hadoop-common-project/hadoop-minikdc/pom.xml | 2 +- .../kerberos/shared/keytab/HackedKeytab.java | 42 ------ .../shared/keytab/HackedKeytabEncoder.java | 121 ------------------ .../org/apache/hadoop/minikdc/MiniKdc.java | 6 +- .../apache/hadoop/minikdc/TestMiniKdc.java | 4 +- 6 files changed, 9 insertions(+), 169 deletions(-) delete mode 100644 hadoop-common-project/hadoop-minikdc/src/main/java/org/apache/directory/server/kerberos/shared/keytab/HackedKeytab.java delete mode 100644 hadoop-common-project/hadoop-minikdc/src/main/java/org/apache/directory/server/kerberos/shared/keytab/HackedKeytabEncoder.java diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 92d0df40f5d..db23e0640df 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -316,6 +316,9 @@ Release 2.3.0 - UNRELEASED HADOOP-9848. Create a MiniKDC for use with security testing. (ywskycn via tucu) + HADOOP-9860. Remove class HackedKeytab and HackedKeytabEncoder from + hadoop-minikdc once jira DIRSERVER-1882 solved. (ywskycn via tucu) + OPTIMIZATIONS HADOOP-9748. 
Reduce blocking on UGI.ensureInitialized (daryn) diff --git a/hadoop-common-project/hadoop-minikdc/pom.xml b/hadoop-common-project/hadoop-minikdc/pom.xml index 554e4a6d17e..f3e663c2b96 100644 --- a/hadoop-common-project/hadoop-minikdc/pom.xml +++ b/hadoop-common-project/hadoop-minikdc/pom.xml @@ -38,7 +38,7 @@ org.apache.directory.server apacheds-all - 2.0.0-M14 + 2.0.0-M15 compile diff --git a/hadoop-common-project/hadoop-minikdc/src/main/java/org/apache/directory/server/kerberos/shared/keytab/HackedKeytab.java b/hadoop-common-project/hadoop-minikdc/src/main/java/org/apache/directory/server/kerberos/shared/keytab/HackedKeytab.java deleted file mode 100644 index cf4680a1fa1..00000000000 --- a/hadoop-common-project/hadoop-minikdc/src/main/java/org/apache/directory/server/kerberos/shared/keytab/HackedKeytab.java +++ /dev/null @@ -1,42 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.directory.server.kerberos.shared.keytab; - -import java.io.File; -import java.io.IOException; -import java.nio.ByteBuffer; - -//This is a hack for ApacheDS 2.0.0-M14 to be able to create -//keytab files with more than one principal. -//It needs to be in this package because the KeytabEncoder class is package -// private. -//This class can be removed once jira DIRSERVER-1882 -// (https://issues.apache.org/jira/browse/DIRSERVER-1882) solved -public class HackedKeytab extends Keytab { - - private byte[] keytabVersion = VERSION_52; - - public void write( File file, int principalCount ) throws IOException - { - HackedKeytabEncoder writer = new HackedKeytabEncoder(); - ByteBuffer buffer = writer.write( keytabVersion, getEntries(), - principalCount ); - writeFile( buffer, file ); - } - -} \ No newline at end of file diff --git a/hadoop-common-project/hadoop-minikdc/src/main/java/org/apache/directory/server/kerberos/shared/keytab/HackedKeytabEncoder.java b/hadoop-common-project/hadoop-minikdc/src/main/java/org/apache/directory/server/kerberos/shared/keytab/HackedKeytabEncoder.java deleted file mode 100644 index 0e04d155f7a..00000000000 --- a/hadoop-common-project/hadoop-minikdc/src/main/java/org/apache/directory/server/kerberos/shared/keytab/HackedKeytabEncoder.java +++ /dev/null @@ -1,121 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.directory.server.kerberos.shared.keytab; - -import org.apache.directory.shared.kerberos.components.EncryptionKey; - -import java.nio.ByteBuffer; -import java.util.Iterator; -import java.util.List; - -//This is a hack for ApacheDS 2.0.0-M14 to be able to create -//keytab files with more than one principal. -//It needs to be in this package because the KeytabEncoder class is package -// private. -//This class can be removed once jira DIRSERVER-1882 -// (https://issues.apache.org/jira/browse/DIRSERVER-1882) solved -class HackedKeytabEncoder extends KeytabEncoder { - - ByteBuffer write( byte[] keytabVersion, List entries, - int principalCount ) - { - ByteBuffer buffer = ByteBuffer.allocate( 512 * principalCount); - putKeytabVersion(buffer, keytabVersion); - putKeytabEntries( buffer, entries ); - buffer.flip(); - return buffer; - } - - private void putKeytabVersion( ByteBuffer buffer, byte[] version ) - { - buffer.put( version ); - } - - private void putKeytabEntries( ByteBuffer buffer, List entries ) - { - Iterator iterator = entries.iterator(); - - while ( iterator.hasNext() ) - { - ByteBuffer entryBuffer = putKeytabEntry( iterator.next() ); - int size = entryBuffer.position(); - - entryBuffer.flip(); - - buffer.putInt( size ); - buffer.put( entryBuffer ); - } - } - - private ByteBuffer putKeytabEntry( KeytabEntry entry ) - { - ByteBuffer buffer = ByteBuffer.allocate( 100 ); - - putPrincipalName( buffer, entry.getPrincipalName() ); - - buffer.putInt( ( int ) entry.getPrincipalType() ); - - buffer.putInt( ( int ) ( entry.getTimeStamp().getTime() / 1000 ) ); - - buffer.put( entry.getKeyVersion() ); - - putKeyBlock( buffer, entry.getKey() ); - - return buffer; - } - - private void putPrincipalName( ByteBuffer buffer, String principalName ) - { - String[] split = principalName.split("@"); - String nameComponent = split[0]; - String realm = split[1]; - - String[] nameComponents = nameComponent.split( "/" ); - - // increment for v1 - buffer.putShort( ( short ) nameComponents.length ); - - putCountedString( buffer, realm ); - // write components - - for ( int ii = 0; ii < nameComponents.length; ii++ ) - { - putCountedString( buffer, nameComponents[ii] ); - } - } - - private void putKeyBlock( ByteBuffer buffer, EncryptionKey key ) - { - buffer.putShort( ( short ) key.getKeyType().getValue() ); - putCountedBytes( buffer, key.getKeyValue() ); - } - - private void putCountedString( ByteBuffer buffer, String string ) - { - byte[] data = string.getBytes(); - buffer.putShort( ( short ) data.length ); - buffer.put( data ); - } - - private void putCountedBytes( ByteBuffer buffer, byte[] data ) - { - buffer.putShort( ( short ) data.length ); - buffer.put( data ); - } - -} \ No newline at end of file diff --git a/hadoop-common-project/hadoop-minikdc/src/main/java/org/apache/hadoop/minikdc/MiniKdc.java b/hadoop-common-project/hadoop-minikdc/src/main/java/org/apache/hadoop/minikdc/MiniKdc.java index d328cd31ed3..c8aa78a9f34 100644 --- a/hadoop-common-project/hadoop-minikdc/src/main/java/org/apache/hadoop/minikdc/MiniKdc.java +++ 
b/hadoop-common-project/hadoop-minikdc/src/main/java/org/apache/hadoop/minikdc/MiniKdc.java @@ -37,7 +37,7 @@ import org.apache.directory.server.core.partition.impl.btree.jdbm.JdbmPartition; import org.apache.directory.server.core.partition.ldif.LdifPartition; import org.apache.directory.server.kerberos.kdc.KdcServer; import org.apache.directory.server.kerberos.shared.crypto.encryption.KerberosKeyFactory; -import org.apache.directory.server.kerberos.shared.keytab.HackedKeytab; +import org.apache.directory.server.kerberos.shared.keytab.Keytab; import org.apache.directory.server.kerberos.shared.keytab.KeytabEntry; import org.apache.directory.server.protocol.shared.transport.TcpTransport; import org.apache.directory.server.protocol.shared.transport.UdpTransport; @@ -514,7 +514,7 @@ public class MiniKdc { public void createPrincipal(File keytabFile, String ... principals) throws Exception { String generatedPassword = UUID.randomUUID().toString(); - HackedKeytab keytab = new HackedKeytab(); + Keytab keytab = new Keytab(); List entries = new ArrayList(); for (String principal : principals) { createPrincipal(principal, generatedPassword); @@ -529,6 +529,6 @@ public class MiniKdc { } } keytab.setEntries(entries); - keytab.write(keytabFile, principals.length); + keytab.write(keytabFile); } } \ No newline at end of file diff --git a/hadoop-common-project/hadoop-minikdc/src/test/java/org/apache/hadoop/minikdc/TestMiniKdc.java b/hadoop-common-project/hadoop-minikdc/src/test/java/org/apache/hadoop/minikdc/TestMiniKdc.java index ff41519ae49..c1fc56daecd 100644 --- a/hadoop-common-project/hadoop-minikdc/src/test/java/org/apache/hadoop/minikdc/TestMiniKdc.java +++ b/hadoop-common-project/hadoop-minikdc/src/test/java/org/apache/hadoop/minikdc/TestMiniKdc.java @@ -137,7 +137,7 @@ public class TestMiniKdc extends KerberosSecurityTestcase { subject.getPrincipals().iterator().next().getClass()); Assert.assertEquals(principal + "@" + kdc.getRealm(), subject.getPrincipals().iterator().next().getName()); - loginContext.login(); + loginContext.logout(); //server login subject = new Subject(false, principals, new HashSet(), @@ -151,7 +151,7 @@ public class TestMiniKdc extends KerberosSecurityTestcase { subject.getPrincipals().iterator().next().getClass()); Assert.assertEquals(principal + "@" + kdc.getRealm(), subject.getPrincipals().iterator().next().getName()); - loginContext.login(); + loginContext.logout(); } finally { if (loginContext != null) { From ffdedf6b8be667ae5f71a79abde683c56db4326a Mon Sep 17 00:00:00 2001 From: Alejandro Abdelnur Date: Mon, 19 Aug 2013 23:02:24 +0000 Subject: [PATCH 42/53] HADOOP-9866. convert hadoop-auth testcases requiring kerberos to use minikdc. 
(ywskycn via tucu) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1515657 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-auth/pom.xml | 61 +--------- .../authentication/KerberosTestUtils.java | 23 ++-- .../client/AuthenticatorTestCase.java | 26 ++-- .../client/TestAuthenticatedURL.java | 47 ++++---- .../client/TestKerberosAuthenticator.java | 60 +++++++--- .../client/TestPseudoAuthenticator.java | 61 ++++++---- .../TestAltKerberosAuthenticationHandler.java | 17 ++- .../server/TestAuthenticationFilter.java | 113 ++++++++++-------- .../server/TestAuthenticationToken.java | 72 ++++++----- .../TestKerberosAuthenticationHandler.java | 76 +++++++----- .../TestPseudoAuthenticationHandler.java | 29 +++-- .../authentication/util/TestKerberosName.java | 19 ++- .../authentication/util/TestKerberosUtil.java | 13 +- .../authentication/util/TestSigner.java | 36 +++--- .../hadoop-auth/src/test/resources/krb5.conf | 28 ----- .../hadoop-common/CHANGES.txt | 3 + 16 files changed, 356 insertions(+), 328 deletions(-) delete mode 100644 hadoop-common-project/hadoop-auth/src/test/resources/krb5.conf diff --git a/hadoop-common-project/hadoop-auth/pom.xml b/hadoop-common-project/hadoop-auth/pom.xml index 9819b3fe084..e2beb0d49e7 100644 --- a/hadoop-common-project/hadoop-auth/pom.xml +++ b/hadoop-common-project/hadoop-auth/pom.xml @@ -33,7 +33,6 @@ yyyyMMdd - LOCALHOST @@ -83,38 +82,15 @@ slf4j-log4j12 runtime + + org.apache.hadoop + hadoop-minikdc + test + - - - ${basedir}/src/test/resources - true - - krb5.conf - - - - - org.apache.maven.plugins - maven-surefire-plugin - - always - 600 - - ${project.build.directory}/test-classes/krb5.conf - ${kerberos.realm} - - - **/${test.exclude}.java - ${test.exclude.pattern} - **/TestKerberosAuth*.java - **/TestAltKerberosAuth*.java - **/Test*$*.java - - - org.apache.maven.plugins maven-source-plugin @@ -134,33 +110,6 @@ - - testKerberos - - false - - - - - org.apache.maven.plugins - maven-surefire-plugin - - always - 600 - - ${project.build.directory}/test-classes/krb5.conf - ${kerberos.realm} - - - **/${test.exclude}.java - ${test.exclude.pattern} - **/Test*$*.java - - - - - - docs diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/KerberosTestUtils.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/KerberosTestUtils.java index ea0f17f04cf..7629a302791 100644 --- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/KerberosTestUtils.java +++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/KerberosTestUtils.java @@ -13,7 +13,6 @@ */ package org.apache.hadoop.security.authentication; - import javax.security.auth.Subject; import javax.security.auth.kerberos.KerberosPrincipal; import javax.security.auth.login.AppConfigurationEntry; @@ -26,6 +25,7 @@ import java.io.File; import java.security.Principal; import java.security.PrivilegedActionException; import java.security.PrivilegedExceptionAction; +import java.util.UUID; import java.util.HashMap; import java.util.HashSet; import java.util.Map; @@ -36,32 +36,23 @@ import java.util.concurrent.Callable; * Test helper class for Java Kerberos setup. 
*/ public class KerberosTestUtils { - private static final String PREFIX = "hadoop-auth.test."; - - public static final String REALM = PREFIX + "kerberos.realm"; - - public static final String CLIENT_PRINCIPAL = PREFIX + "kerberos.client.principal"; - - public static final String SERVER_PRINCIPAL = PREFIX + "kerberos.server.principal"; - - public static final String KEYTAB_FILE = PREFIX + "kerberos.keytab.file"; + private static String keytabFile = new File(System.getProperty("test.dir", "target"), + UUID.randomUUID().toString()).toString(); public static String getRealm() { - return System.getProperty(REALM, "LOCALHOST"); + return "EXAMPLE.COM"; } public static String getClientPrincipal() { - return System.getProperty(CLIENT_PRINCIPAL, "client") + "@" + getRealm(); + return "client@EXAMPLE.COM"; } public static String getServerPrincipal() { - return System.getProperty(SERVER_PRINCIPAL, "HTTP/localhost") + "@" + getRealm(); + return "HTTP/localhost@EXAMPLE.COM"; } public static String getKeytabFile() { - String keytabFile = - new File(System.getProperty("user.home"), System.getProperty("user.name") + ".keytab").toString(); - return System.getProperty(KEYTAB_FILE, keytabFile); + return keytabFile; } private static class KerberosConfiguration extends Configuration { diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/AuthenticatorTestCase.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/AuthenticatorTestCase.java index 6059d8caf83..ba7b43343d6 100644 --- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/AuthenticatorTestCase.java +++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/AuthenticatorTestCase.java @@ -2,9 +2,9 @@ * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
@@ -13,10 +13,7 @@ */ package org.apache.hadoop.security.authentication.client; -import junit.framework.Assert; import org.apache.hadoop.security.authentication.server.AuthenticationFilter; -import junit.framework.TestCase; -import org.mockito.Mockito; import org.mortbay.jetty.Server; import org.mortbay.jetty.servlet.Context; import org.mortbay.jetty.servlet.FilterHolder; @@ -27,19 +24,20 @@ import javax.servlet.ServletException; import javax.servlet.http.HttpServlet; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; -import java.io.BufferedReader; import java.io.IOException; import java.io.InputStream; -import java.io.InputStreamReader; import java.io.OutputStream; import java.io.OutputStreamWriter; +import java.io.BufferedReader; +import java.io.InputStreamReader; import java.io.Writer; import java.net.HttpURLConnection; import java.net.ServerSocket; import java.net.URL; import java.util.Properties; +import org.junit.Assert; -public abstract class AuthenticatorTestCase extends TestCase { +public class AuthenticatorTestCase { private Server server; private String host = null; private int port = -1; @@ -151,18 +149,18 @@ public abstract class AuthenticatorTestCase extends TestCase { writer.write(POST); writer.close(); } - assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode()); + Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode()); if (doPost) { BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream())); String echo = reader.readLine(); - assertEquals(POST, echo); - assertNull(reader.readLine()); + Assert.assertEquals(POST, echo); + Assert.assertNull(reader.readLine()); } aUrl = new AuthenticatedURL(); conn = aUrl.openConnection(url, token); conn.connect(); - assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode()); - assertEquals(tokenStr, token.toString()); + Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode()); + Assert.assertEquals(tokenStr, token.toString()); } finally { stop(); } diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestAuthenticatedURL.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestAuthenticatedURL.java index 02ab92fac97..5be0b382f2f 100644 --- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestAuthenticatedURL.java +++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestAuthenticatedURL.java @@ -13,8 +13,8 @@ */ package org.apache.hadoop.security.authentication.client; -import junit.framework.Assert; -import junit.framework.TestCase; +import org.junit.Assert; +import org.junit.Test; import org.mockito.Mockito; import java.net.HttpURLConnection; @@ -24,46 +24,48 @@ import java.util.HashMap; import java.util.List; import java.util.Map; -public class TestAuthenticatedURL extends TestCase { +public class TestAuthenticatedURL { + @Test public void testToken() throws Exception { AuthenticatedURL.Token token = new AuthenticatedURL.Token(); - assertFalse(token.isSet()); + Assert.assertFalse(token.isSet()); token = new AuthenticatedURL.Token("foo"); - assertTrue(token.isSet()); - assertEquals("foo", token.toString()); + Assert.assertTrue(token.isSet()); + Assert.assertEquals("foo", token.toString()); AuthenticatedURL.Token token1 = new AuthenticatedURL.Token(); AuthenticatedURL.Token token2 = new AuthenticatedURL.Token(); - 
assertEquals(token1.hashCode(), token2.hashCode()); - assertTrue(token1.equals(token2)); + Assert.assertEquals(token1.hashCode(), token2.hashCode()); + Assert.assertTrue(token1.equals(token2)); token1 = new AuthenticatedURL.Token(); token2 = new AuthenticatedURL.Token("foo"); - assertNotSame(token1.hashCode(), token2.hashCode()); - assertFalse(token1.equals(token2)); + Assert.assertNotSame(token1.hashCode(), token2.hashCode()); + Assert.assertFalse(token1.equals(token2)); token1 = new AuthenticatedURL.Token("foo"); token2 = new AuthenticatedURL.Token(); - assertNotSame(token1.hashCode(), token2.hashCode()); - assertFalse(token1.equals(token2)); + Assert.assertNotSame(token1.hashCode(), token2.hashCode()); + Assert.assertFalse(token1.equals(token2)); token1 = new AuthenticatedURL.Token("foo"); token2 = new AuthenticatedURL.Token("foo"); - assertEquals(token1.hashCode(), token2.hashCode()); - assertTrue(token1.equals(token2)); + Assert.assertEquals(token1.hashCode(), token2.hashCode()); + Assert.assertTrue(token1.equals(token2)); token1 = new AuthenticatedURL.Token("bar"); token2 = new AuthenticatedURL.Token("foo"); - assertNotSame(token1.hashCode(), token2.hashCode()); - assertFalse(token1.equals(token2)); + Assert.assertNotSame(token1.hashCode(), token2.hashCode()); + Assert.assertFalse(token1.equals(token2)); token1 = new AuthenticatedURL.Token("foo"); token2 = new AuthenticatedURL.Token("bar"); - assertNotSame(token1.hashCode(), token2.hashCode()); - assertFalse(token1.equals(token2)); + Assert.assertNotSame(token1.hashCode(), token2.hashCode()); + Assert.assertFalse(token1.equals(token2)); } + @Test public void testInjectToken() throws Exception { HttpURLConnection conn = Mockito.mock(HttpURLConnection.class); AuthenticatedURL.Token token = new AuthenticatedURL.Token(); @@ -72,6 +74,7 @@ public class TestAuthenticatedURL extends TestCase { Mockito.verify(conn).addRequestProperty(Mockito.eq("Cookie"), Mockito.anyString()); } + @Test public void testExtractTokenOK() throws Exception { HttpURLConnection conn = Mockito.mock(HttpURLConnection.class); @@ -87,9 +90,10 @@ public class TestAuthenticatedURL extends TestCase { AuthenticatedURL.Token token = new AuthenticatedURL.Token(); AuthenticatedURL.extractToken(conn, token); - assertEquals(tokenStr, token.toString()); + Assert.assertEquals(tokenStr, token.toString()); } + @Test public void testExtractTokenFail() throws Exception { HttpURLConnection conn = Mockito.mock(HttpURLConnection.class); @@ -106,15 +110,16 @@ public class TestAuthenticatedURL extends TestCase { token.set("bar"); try { AuthenticatedURL.extractToken(conn, token); - fail(); + Assert.fail(); } catch (AuthenticationException ex) { // Expected Assert.assertFalse(token.isSet()); } catch (Exception ex) { - fail(); + Assert.fail(); } } + @Test public void testConnectionConfigurator() throws Exception { HttpURLConnection conn = Mockito.mock(HttpURLConnection.class); Mockito.when(conn.getResponseCode()). 
diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestKerberosAuthenticator.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestKerberosAuthenticator.java index 93d1d027a29..fd4b57258b9 100644 --- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestKerberosAuthenticator.java +++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestKerberosAuthenticator.java @@ -13,17 +13,33 @@ */ package org.apache.hadoop.security.authentication.client; +import org.apache.hadoop.minikdc.KerberosSecurityTestcase; import org.apache.hadoop.security.authentication.KerberosTestUtils; import org.apache.hadoop.security.authentication.server.AuthenticationFilter; import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler; import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import java.io.File; import java.net.HttpURLConnection; import java.net.URL; import java.util.Properties; import java.util.concurrent.Callable; -public class TestKerberosAuthenticator extends AuthenticatorTestCase { +public class TestKerberosAuthenticator extends KerberosSecurityTestcase { + + @Before + public void setup() throws Exception { + // create keytab + File keytabFile = new File(KerberosTestUtils.getKeytabFile()); + String clientPrincipal = KerberosTestUtils.getClientPrincipal(); + String serverPrincipal = KerberosTestUtils.getServerPrincipal(); + clientPrincipal = clientPrincipal.substring(0, clientPrincipal.lastIndexOf("@")); + serverPrincipal = serverPrincipal.substring(0, serverPrincipal.lastIndexOf("@")); + getKdc().createPrincipal(keytabFile, clientPrincipal, serverPrincipal); + } private Properties getAuthenticationHandlerConfiguration() { Properties props = new Properties(); @@ -35,57 +51,67 @@ public class TestKerberosAuthenticator extends AuthenticatorTestCase { return props; } + @Test(timeout=60000) public void testFallbacktoPseudoAuthenticator() throws Exception { + AuthenticatorTestCase auth = new AuthenticatorTestCase(); Properties props = new Properties(); props.setProperty(AuthenticationFilter.AUTH_TYPE, "simple"); props.setProperty(PseudoAuthenticationHandler.ANONYMOUS_ALLOWED, "false"); - setAuthenticationHandlerConfig(props); - _testAuthentication(new KerberosAuthenticator(), false); + auth.setAuthenticationHandlerConfig(props); + auth._testAuthentication(new KerberosAuthenticator(), false); } + @Test(timeout=60000) public void testFallbacktoPseudoAuthenticatorAnonymous() throws Exception { + AuthenticatorTestCase auth = new AuthenticatorTestCase(); Properties props = new Properties(); props.setProperty(AuthenticationFilter.AUTH_TYPE, "simple"); props.setProperty(PseudoAuthenticationHandler.ANONYMOUS_ALLOWED, "true"); - setAuthenticationHandlerConfig(props); - _testAuthentication(new KerberosAuthenticator(), false); + auth.setAuthenticationHandlerConfig(props); + auth._testAuthentication(new KerberosAuthenticator(), false); } + @Test(timeout=60000) public void testNotAuthenticated() throws Exception { - setAuthenticationHandlerConfig(getAuthenticationHandlerConfiguration()); - start(); + AuthenticatorTestCase auth = new AuthenticatorTestCase(); + auth.setAuthenticationHandlerConfig(getAuthenticationHandlerConfiguration()); + auth.start(); try { - URL url = new 
URL(getBaseURL()); + URL url = new URL(auth.getBaseURL()); HttpURLConnection conn = (HttpURLConnection) url.openConnection(); conn.connect(); - assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED, conn.getResponseCode()); - assertTrue(conn.getHeaderField(KerberosAuthenticator.WWW_AUTHENTICATE) != null); + Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED, conn.getResponseCode()); + Assert.assertTrue(conn.getHeaderField(KerberosAuthenticator.WWW_AUTHENTICATE) != null); } finally { - stop(); + auth.stop(); } } - + @Test(timeout=60000) public void testAuthentication() throws Exception { - setAuthenticationHandlerConfig(getAuthenticationHandlerConfiguration()); + final AuthenticatorTestCase auth = new AuthenticatorTestCase(); + auth.setAuthenticationHandlerConfig( + getAuthenticationHandlerConfiguration()); KerberosTestUtils.doAsClient(new Callable() { @Override public Void call() throws Exception { - _testAuthentication(new KerberosAuthenticator(), false); + auth._testAuthentication(new KerberosAuthenticator(), false); return null; } }); } + @Test(timeout=60000) public void testAuthenticationPost() throws Exception { - setAuthenticationHandlerConfig(getAuthenticationHandlerConfiguration()); + final AuthenticatorTestCase auth = new AuthenticatorTestCase(); + auth.setAuthenticationHandlerConfig( + getAuthenticationHandlerConfiguration()); KerberosTestUtils.doAsClient(new Callable() { @Override public Void call() throws Exception { - _testAuthentication(new KerberosAuthenticator(), true); + auth._testAuthentication(new KerberosAuthenticator(), true); return null; } }); } - } diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestPseudoAuthenticator.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestPseudoAuthenticator.java index 807052e8484..20ec587ac8f 100644 --- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestPseudoAuthenticator.java +++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestPseudoAuthenticator.java @@ -15,12 +15,14 @@ package org.apache.hadoop.security.authentication.client; import org.apache.hadoop.security.authentication.server.AuthenticationFilter; import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler; +import org.junit.Assert; +import org.junit.Test; import java.net.HttpURLConnection; import java.net.URL; import java.util.Properties; -public class TestPseudoAuthenticator extends AuthenticatorTestCase { +public class TestPseudoAuthenticator { private Properties getAuthenticationHandlerConfiguration(boolean anonymousAllowed) { Properties props = new Properties(); @@ -29,55 +31,74 @@ public class TestPseudoAuthenticator extends AuthenticatorTestCase { return props; } + @Test public void testGetUserName() throws Exception { PseudoAuthenticator authenticator = new PseudoAuthenticator(); - assertEquals(System.getProperty("user.name"), authenticator.getUserName()); + Assert.assertEquals(System.getProperty("user.name"), authenticator.getUserName()); } + @Test public void testAnonymousAllowed() throws Exception { - setAuthenticationHandlerConfig(getAuthenticationHandlerConfiguration(true)); - start(); + AuthenticatorTestCase auth = new AuthenticatorTestCase(); + auth.setAuthenticationHandlerConfig( + getAuthenticationHandlerConfiguration(true)); + auth.start(); try { - URL url = new URL(getBaseURL()); + URL url = new 
URL(auth.getBaseURL()); HttpURLConnection conn = (HttpURLConnection) url.openConnection(); conn.connect(); - assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode()); + Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode()); } finally { - stop(); + auth.stop(); } } + @Test public void testAnonymousDisallowed() throws Exception { - setAuthenticationHandlerConfig(getAuthenticationHandlerConfiguration(false)); - start(); + AuthenticatorTestCase auth = new AuthenticatorTestCase(); + auth.setAuthenticationHandlerConfig( + getAuthenticationHandlerConfiguration(false)); + auth.start(); try { - URL url = new URL(getBaseURL()); + URL url = new URL(auth.getBaseURL()); HttpURLConnection conn = (HttpURLConnection) url.openConnection(); conn.connect(); - assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED, conn.getResponseCode()); + Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED, conn.getResponseCode()); } finally { - stop(); + auth.stop(); } } + @Test public void testAuthenticationAnonymousAllowed() throws Exception { - setAuthenticationHandlerConfig(getAuthenticationHandlerConfiguration(true)); - _testAuthentication(new PseudoAuthenticator(), false); + AuthenticatorTestCase auth = new AuthenticatorTestCase(); + auth.setAuthenticationHandlerConfig( + getAuthenticationHandlerConfiguration(true)); + auth._testAuthentication(new PseudoAuthenticator(), false); } + @Test public void testAuthenticationAnonymousDisallowed() throws Exception { - setAuthenticationHandlerConfig(getAuthenticationHandlerConfiguration(false)); - _testAuthentication(new PseudoAuthenticator(), false); + AuthenticatorTestCase auth = new AuthenticatorTestCase(); + auth.setAuthenticationHandlerConfig( + getAuthenticationHandlerConfiguration(false)); + auth._testAuthentication(new PseudoAuthenticator(), false); } + @Test public void testAuthenticationAnonymousAllowedWithPost() throws Exception { - setAuthenticationHandlerConfig(getAuthenticationHandlerConfiguration(true)); - _testAuthentication(new PseudoAuthenticator(), true); + AuthenticatorTestCase auth = new AuthenticatorTestCase(); + auth.setAuthenticationHandlerConfig( + getAuthenticationHandlerConfiguration(true)); + auth._testAuthentication(new PseudoAuthenticator(), true); } + @Test public void testAuthenticationAnonymousDisallowedWithPost() throws Exception { - setAuthenticationHandlerConfig(getAuthenticationHandlerConfiguration(false)); - _testAuthentication(new PseudoAuthenticator(), true); + AuthenticatorTestCase auth = new AuthenticatorTestCase(); + auth.setAuthenticationHandlerConfig( + getAuthenticationHandlerConfiguration(false)); + auth._testAuthentication(new PseudoAuthenticator(), true); } } diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAltKerberosAuthenticationHandler.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAltKerberosAuthenticationHandler.java index c2d43ebb3ca..3b838033090 100644 --- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAltKerberosAuthenticationHandler.java +++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAltKerberosAuthenticationHandler.java @@ -18,6 +18,8 @@ import java.util.Properties; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import org.apache.hadoop.security.authentication.client.AuthenticationException; +import 
org.junit.Assert; +import org.junit.Test; import org.mockito.Mockito; public class TestAltKerberosAuthenticationHandler @@ -45,6 +47,7 @@ public class TestAltKerberosAuthenticationHandler return AltKerberosAuthenticationHandler.TYPE; } + @Test(timeout=60000) public void testAlternateAuthenticationAsBrowser() throws Exception { HttpServletRequest request = Mockito.mock(HttpServletRequest.class); HttpServletResponse response = Mockito.mock(HttpServletResponse.class); @@ -54,11 +57,12 @@ public class TestAltKerberosAuthenticationHandler Mockito.when(request.getHeader("User-Agent")).thenReturn("Some Browser"); AuthenticationToken token = handler.authenticate(request, response); - assertEquals("A", token.getUserName()); - assertEquals("B", token.getName()); - assertEquals(getExpectedType(), token.getType()); + Assert.assertEquals("A", token.getUserName()); + Assert.assertEquals("B", token.getName()); + Assert.assertEquals(getExpectedType(), token.getType()); } + @Test(timeout=60000) public void testNonDefaultNonBrowserUserAgentAsBrowser() throws Exception { HttpServletRequest request = Mockito.mock(HttpServletRequest.class); HttpServletResponse response = Mockito.mock(HttpServletResponse.class); @@ -81,11 +85,12 @@ public class TestAltKerberosAuthenticationHandler Mockito.when(request.getHeader("User-Agent")).thenReturn("blah"); // Should use alt authentication AuthenticationToken token = handler.authenticate(request, response); - assertEquals("A", token.getUserName()); - assertEquals("B", token.getName()); - assertEquals(getExpectedType(), token.getType()); + Assert.assertEquals("A", token.getUserName()); + Assert.assertEquals("B", token.getName()); + Assert.assertEquals(getExpectedType(), token.getType()); } + @Test(timeout=60000) public void testNonDefaultNonBrowserUserAgentAsNonBrowser() throws Exception { if (handler != null) { handler.destroy(); diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationFilter.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationFilter.java index 1c31e54ba52..6820151210c 100644 --- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationFilter.java +++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationFilter.java @@ -16,7 +16,8 @@ package org.apache.hadoop.security.authentication.server; import org.apache.hadoop.security.authentication.client.AuthenticatedURL; import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.apache.hadoop.security.authentication.util.Signer; -import junit.framework.TestCase; +import org.junit.Assert; +import org.junit.Test; import org.mockito.Mockito; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; @@ -34,8 +35,9 @@ import java.util.Arrays; import java.util.Properties; import java.util.Vector; -public class TestAuthenticationFilter extends TestCase { +public class TestAuthenticationFilter { + @Test public void testGetConfiguration() throws Exception { AuthenticationFilter filter = new AuthenticationFilter(); FilterConfig config = Mockito.mock(FilterConfig.class); @@ -43,27 +45,28 @@ public class TestAuthenticationFilter extends TestCase { Mockito.when(config.getInitParameter("a")).thenReturn("A"); Mockito.when(config.getInitParameterNames()).thenReturn(new Vector(Arrays.asList("a")).elements()); 
Properties props = filter.getConfiguration("", config); - assertEquals("A", props.getProperty("a")); + Assert.assertEquals("A", props.getProperty("a")); config = Mockito.mock(FilterConfig.class); Mockito.when(config.getInitParameter(AuthenticationFilter.CONFIG_PREFIX)).thenReturn("foo"); Mockito.when(config.getInitParameter("foo.a")).thenReturn("A"); Mockito.when(config.getInitParameterNames()).thenReturn(new Vector(Arrays.asList("foo.a")).elements()); props = filter.getConfiguration("foo.", config); - assertEquals("A", props.getProperty("a")); + Assert.assertEquals("A", props.getProperty("a")); } + @Test public void testInitEmpty() throws Exception { AuthenticationFilter filter = new AuthenticationFilter(); try { FilterConfig config = Mockito.mock(FilterConfig.class); Mockito.when(config.getInitParameterNames()).thenReturn(new Vector().elements()); filter.init(config); - fail(); + Assert.fail(); } catch (ServletException ex) { // Expected } catch (Exception ex) { - fail(); + Assert.fail(); } finally { filter.destroy(); } @@ -126,6 +129,7 @@ public class TestAuthenticationFilter extends TestCase { } } + @Test public void testInit() throws Exception { // minimal configuration & simple auth handler (Pseudo) @@ -138,11 +142,11 @@ public class TestAuthenticationFilter extends TestCase { new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE, AuthenticationFilter.AUTH_TOKEN_VALIDITY)).elements()); filter.init(config); - assertEquals(PseudoAuthenticationHandler.class, filter.getAuthenticationHandler().getClass()); - assertTrue(filter.isRandomSecret()); - assertNull(filter.getCookieDomain()); - assertNull(filter.getCookiePath()); - assertEquals(1000, filter.getValidity()); + Assert.assertEquals(PseudoAuthenticationHandler.class, filter.getAuthenticationHandler().getClass()); + Assert.assertTrue(filter.isRandomSecret()); + Assert.assertNull(filter.getCookieDomain()); + Assert.assertNull(filter.getCookiePath()); + Assert.assertEquals(1000, filter.getValidity()); } finally { filter.destroy(); } @@ -157,7 +161,7 @@ public class TestAuthenticationFilter extends TestCase { new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE, AuthenticationFilter.SIGNATURE_SECRET)).elements()); filter.init(config); - assertFalse(filter.isRandomSecret()); + Assert.assertFalse(filter.isRandomSecret()); } finally { filter.destroy(); } @@ -174,13 +178,12 @@ public class TestAuthenticationFilter extends TestCase { AuthenticationFilter.COOKIE_DOMAIN, AuthenticationFilter.COOKIE_PATH)).elements()); filter.init(config); - assertEquals(".foo.com", filter.getCookieDomain()); - assertEquals("/bar", filter.getCookiePath()); + Assert.assertEquals(".foo.com", filter.getCookieDomain()); + Assert.assertEquals("/bar", filter.getCookiePath()); } finally { filter.destroy(); } - // authentication handler lifecycle, and custom impl DummyAuthenticationHandler.reset(); filter = new AuthenticationFilter(); @@ -195,10 +198,10 @@ public class TestAuthenticationFilter extends TestCase { Arrays.asList(AuthenticationFilter.AUTH_TYPE, "management.operation.return")).elements()); filter.init(config); - assertTrue(DummyAuthenticationHandler.init); + Assert.assertTrue(DummyAuthenticationHandler.init); } finally { filter.destroy(); - assertTrue(DummyAuthenticationHandler.destroy); + Assert.assertTrue(DummyAuthenticationHandler.destroy); } // kerberos auth handler @@ -212,11 +215,12 @@ public class TestAuthenticationFilter extends TestCase { } catch (ServletException ex) { // Expected } finally { - assertEquals(KerberosAuthenticationHandler.class, 
filter.getAuthenticationHandler().getClass()); + Assert.assertEquals(KerberosAuthenticationHandler.class, filter.getAuthenticationHandler().getClass()); filter.destroy(); } } + @Test public void testGetRequestURL() throws Exception { AuthenticationFilter filter = new AuthenticationFilter(); try { @@ -235,12 +239,13 @@ public class TestAuthenticationFilter extends TestCase { Mockito.when(request.getRequestURL()).thenReturn(new StringBuffer("http://foo:8080/bar")); Mockito.when(request.getQueryString()).thenReturn("a=A&b=B"); - assertEquals("http://foo:8080/bar?a=A&b=B", filter.getRequestURL(request)); + Assert.assertEquals("http://foo:8080/bar?a=A&b=B", filter.getRequestURL(request)); } finally { filter.destroy(); } } + @Test public void testGetToken() throws Exception { AuthenticationFilter filter = new AuthenticationFilter(); try { @@ -268,12 +273,13 @@ public class TestAuthenticationFilter extends TestCase { AuthenticationToken newToken = filter.getToken(request); - assertEquals(token.toString(), newToken.toString()); + Assert.assertEquals(token.toString(), newToken.toString()); } finally { filter.destroy(); } } + @Test public void testGetTokenExpired() throws Exception { AuthenticationFilter filter = new AuthenticationFilter(); try { @@ -300,17 +306,18 @@ public class TestAuthenticationFilter extends TestCase { try { filter.getToken(request); - fail(); + Assert.fail(); } catch (AuthenticationException ex) { // Expected } catch (Exception ex) { - fail(); + Assert.fail(); } } finally { filter.destroy(); } } + @Test public void testGetTokenInvalidType() throws Exception { AuthenticationFilter filter = new AuthenticationFilter(); try { @@ -338,17 +345,18 @@ public class TestAuthenticationFilter extends TestCase { try { filter.getToken(request); - fail(); + Assert.fail(); } catch (AuthenticationException ex) { // Expected } catch (Exception ex) { - fail(); + Assert.fail(); } } finally { filter.destroy(); } } + @Test public void testDoFilterNotAuthenticated() throws Exception { AuthenticationFilter filter = new AuthenticationFilter(); try { @@ -374,7 +382,7 @@ public class TestAuthenticationFilter extends TestCase { new Answer() { @Override public Object answer(InvocationOnMock invocation) throws Throwable { - fail(); + Assert.fail(); return null; } } @@ -468,27 +476,27 @@ public class TestAuthenticationFilter extends TestCase { Mockito.verify(response, Mockito.never()). 
addCookie(Mockito.any(Cookie.class)); } else { - assertNotNull(setCookie[0]); - assertEquals(AuthenticatedURL.AUTH_COOKIE, setCookie[0].getName()); - assertTrue(setCookie[0].getValue().contains("u=")); - assertTrue(setCookie[0].getValue().contains("p=")); - assertTrue(setCookie[0].getValue().contains("t=")); - assertTrue(setCookie[0].getValue().contains("e=")); - assertTrue(setCookie[0].getValue().contains("s=")); - assertTrue(calledDoFilter[0]); + Assert.assertNotNull(setCookie[0]); + Assert.assertEquals(AuthenticatedURL.AUTH_COOKIE, setCookie[0].getName()); + Assert.assertTrue(setCookie[0].getValue().contains("u=")); + Assert.assertTrue(setCookie[0].getValue().contains("p=")); + Assert.assertTrue(setCookie[0].getValue().contains("t=")); + Assert.assertTrue(setCookie[0].getValue().contains("e=")); + Assert.assertTrue(setCookie[0].getValue().contains("s=")); + Assert.assertTrue(calledDoFilter[0]); Signer signer = new Signer("secret".getBytes()); String value = signer.verifyAndExtract(setCookie[0].getValue()); AuthenticationToken token = AuthenticationToken.parse(value); - assertEquals(System.currentTimeMillis() + 1000 * 1000, + Assert.assertEquals(System.currentTimeMillis() + 1000 * 1000, token.getExpires(), 100); if (withDomainPath) { - assertEquals(".foo.com", setCookie[0].getDomain()); - assertEquals("/bar", setCookie[0].getPath()); + Assert.assertEquals(".foo.com", setCookie[0].getDomain()); + Assert.assertEquals("/bar", setCookie[0].getPath()); } else { - assertNull(setCookie[0].getDomain()); - assertNull(setCookie[0].getPath()); + Assert.assertNull(setCookie[0].getDomain()); + Assert.assertNull(setCookie[0].getPath()); } } } finally { @@ -496,22 +504,27 @@ public class TestAuthenticationFilter extends TestCase { } } + @Test public void testDoFilterAuthentication() throws Exception { _testDoFilterAuthentication(false, false, false); } + @Test public void testDoFilterAuthenticationImmediateExpiration() throws Exception { _testDoFilterAuthentication(false, false, true); } + @Test public void testDoFilterAuthenticationWithInvalidToken() throws Exception { _testDoFilterAuthentication(false, true, false); } + @Test public void testDoFilterAuthenticationWithDomainPath() throws Exception { _testDoFilterAuthentication(true, false, false); } + @Test public void testDoFilterAuthenticated() throws Exception { AuthenticationFilter filter = new AuthenticationFilter(); try { @@ -547,8 +560,8 @@ public class TestAuthenticationFilter extends TestCase { public Object answer(InvocationOnMock invocation) throws Throwable { Object[] args = invocation.getArguments(); HttpServletRequest request = (HttpServletRequest) args[0]; - assertEquals("u", request.getRemoteUser()); - assertEquals("p", request.getUserPrincipal().getName()); + Assert.assertEquals("u", request.getRemoteUser()); + Assert.assertEquals("p", request.getUserPrincipal().getName()); return null; } } @@ -561,6 +574,7 @@ public class TestAuthenticationFilter extends TestCase { } } + @Test public void testDoFilterAuthenticatedExpired() throws Exception { AuthenticationFilter filter = new AuthenticationFilter(); try { @@ -594,7 +608,7 @@ public class TestAuthenticationFilter extends TestCase { new Answer() { @Override public Object answer(InvocationOnMock invocation) throws Throwable { - fail(); + Assert.fail(); return null; } } @@ -616,15 +630,15 @@ public class TestAuthenticationFilter extends TestCase { Mockito.verify(response).sendError(Mockito.eq(HttpServletResponse.SC_UNAUTHORIZED), Mockito.anyString()); - assertNotNull(setCookie[0]); - 
assertEquals(AuthenticatedURL.AUTH_COOKIE, setCookie[0].getName()); - assertEquals("", setCookie[0].getValue()); + Assert.assertNotNull(setCookie[0]); + Assert.assertEquals(AuthenticatedURL.AUTH_COOKIE, setCookie[0].getName()); + Assert.assertEquals("", setCookie[0].getValue()); } finally { filter.destroy(); } } - + @Test public void testDoFilterAuthenticatedInvalidType() throws Exception { AuthenticationFilter filter = new AuthenticationFilter(); try { @@ -658,7 +672,7 @@ public class TestAuthenticationFilter extends TestCase { new Answer() { @Override public Object answer(InvocationOnMock invocation) throws Throwable { - fail(); + Assert.fail(); return null; } } @@ -680,14 +694,15 @@ public class TestAuthenticationFilter extends TestCase { Mockito.verify(response).sendError(Mockito.eq(HttpServletResponse.SC_UNAUTHORIZED), Mockito.anyString()); - assertNotNull(setCookie[0]); - assertEquals(AuthenticatedURL.AUTH_COOKIE, setCookie[0].getName()); - assertEquals("", setCookie[0].getValue()); + Assert.assertNotNull(setCookie[0]); + Assert.assertEquals(AuthenticatedURL.AUTH_COOKIE, setCookie[0].getName()); + Assert.assertEquals("", setCookie[0].getValue()); } finally { filter.destroy(); } } + @Test public void testManagementOperation() throws Exception { AuthenticationFilter filter = new AuthenticationFilter(); try { diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationToken.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationToken.java index bb5251f8339..c17c71033a7 100644 --- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationToken.java +++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationToken.java @@ -14,98 +14,104 @@ package org.apache.hadoop.security.authentication.server; import org.apache.hadoop.security.authentication.client.AuthenticationException; -import junit.framework.TestCase; +import org.junit.Assert; +import org.junit.Test; -public class TestAuthenticationToken extends TestCase { +public class TestAuthenticationToken { + @Test public void testAnonymous() { - assertNotNull(AuthenticationToken.ANONYMOUS); - assertEquals(null, AuthenticationToken.ANONYMOUS.getUserName()); - assertEquals(null, AuthenticationToken.ANONYMOUS.getName()); - assertEquals(null, AuthenticationToken.ANONYMOUS.getType()); - assertEquals(-1, AuthenticationToken.ANONYMOUS.getExpires()); - assertFalse(AuthenticationToken.ANONYMOUS.isExpired()); + Assert.assertNotNull(AuthenticationToken.ANONYMOUS); + Assert.assertEquals(null, AuthenticationToken.ANONYMOUS.getUserName()); + Assert.assertEquals(null, AuthenticationToken.ANONYMOUS.getName()); + Assert.assertEquals(null, AuthenticationToken.ANONYMOUS.getType()); + Assert.assertEquals(-1, AuthenticationToken.ANONYMOUS.getExpires()); + Assert.assertFalse(AuthenticationToken.ANONYMOUS.isExpired()); } + @Test public void testConstructor() throws Exception { try { new AuthenticationToken(null, "p", "t"); - fail(); + Assert.fail(); } catch (IllegalArgumentException ex) { // Expected } catch (Throwable ex) { - fail(); + Assert.fail(); } try { new AuthenticationToken("", "p", "t"); - fail(); + Assert.fail(); } catch (IllegalArgumentException ex) { // Expected } catch (Throwable ex) { - fail(); + Assert.fail(); } try { new AuthenticationToken("u", null, "t"); - fail(); + Assert.fail(); } catch 
(IllegalArgumentException ex) { // Expected } catch (Throwable ex) { - fail(); + Assert.fail(); } try { new AuthenticationToken("u", "", "t"); - fail(); + Assert.fail(); } catch (IllegalArgumentException ex) { // Expected } catch (Throwable ex) { - fail(); + Assert.fail(); } try { new AuthenticationToken("u", "p", null); - fail(); + Assert.fail(); } catch (IllegalArgumentException ex) { // Expected } catch (Throwable ex) { - fail(); + Assert.fail(); } try { new AuthenticationToken("u", "p", ""); - fail(); + Assert.fail(); } catch (IllegalArgumentException ex) { // Expected } catch (Throwable ex) { - fail(); + Assert.fail(); } new AuthenticationToken("u", "p", "t"); } + @Test public void testGetters() throws Exception { long expires = System.currentTimeMillis() + 50; AuthenticationToken token = new AuthenticationToken("u", "p", "t"); token.setExpires(expires); - assertEquals("u", token.getUserName()); - assertEquals("p", token.getName()); - assertEquals("t", token.getType()); - assertEquals(expires, token.getExpires()); - assertFalse(token.isExpired()); + Assert.assertEquals("u", token.getUserName()); + Assert.assertEquals("p", token.getName()); + Assert.assertEquals("t", token.getType()); + Assert.assertEquals(expires, token.getExpires()); + Assert.assertFalse(token.isExpired()); Thread.sleep(70); // +20 msec fuzz for timer granularity. - assertTrue(token.isExpired()); + Assert.assertTrue(token.isExpired()); } + @Test public void testToStringAndParse() throws Exception { long expires = System.currentTimeMillis() + 50; AuthenticationToken token = new AuthenticationToken("u", "p", "t"); token.setExpires(expires); String str = token.toString(); token = AuthenticationToken.parse(str); - assertEquals("p", token.getName()); - assertEquals("t", token.getType()); - assertEquals(expires, token.getExpires()); - assertFalse(token.isExpired()); + Assert.assertEquals("p", token.getName()); + Assert.assertEquals("t", token.getType()); + Assert.assertEquals(expires, token.getExpires()); + Assert.assertFalse(token.isExpired()); Thread.sleep(70); // +20 msec fuzz for timer granularity. 
- assertTrue(token.isExpired()); + Assert.assertTrue(token.isExpired()); } + @Test public void testParseInvalid() throws Exception { long expires = System.currentTimeMillis() + 50; AuthenticationToken token = new AuthenticationToken("u", "p", "t"); @@ -114,11 +120,11 @@ public class TestAuthenticationToken extends TestCase { str = str.substring(0, str.indexOf("e=")); try { AuthenticationToken.parse(str); - fail(); + Assert.fail(); } catch (AuthenticationException ex) { // Expected } catch (Exception ex) { - fail(); + Assert.fail(); } } } diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestKerberosAuthenticationHandler.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestKerberosAuthenticationHandler.java index d198e58431d..ab793b7c61d 100644 --- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestKerberosAuthenticationHandler.java +++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestKerberosAuthenticationHandler.java @@ -13,25 +13,31 @@ */ package org.apache.hadoop.security.authentication.server; +import org.apache.hadoop.minikdc.KerberosSecurityTestcase; import org.apache.hadoop.security.authentication.KerberosTestUtils; import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.apache.hadoop.security.authentication.client.KerberosAuthenticator; -import junit.framework.TestCase; import org.apache.commons.codec.binary.Base64; import org.apache.hadoop.security.authentication.util.KerberosName; import org.apache.hadoop.security.authentication.util.KerberosUtil; import org.ietf.jgss.GSSContext; import org.ietf.jgss.GSSManager; import org.ietf.jgss.GSSName; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; import org.mockito.Mockito; import org.ietf.jgss.Oid; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; +import java.io.File; import java.util.Properties; import java.util.concurrent.Callable; -public class TestKerberosAuthenticationHandler extends TestCase { +public class TestKerberosAuthenticationHandler + extends KerberosSecurityTestcase { protected KerberosAuthenticationHandler handler; @@ -54,9 +60,16 @@ public class TestKerberosAuthenticationHandler extends TestCase { return props; } - @Override - protected void setUp() throws Exception { - super.setUp(); + @Before + public void setup() throws Exception { + // create keytab + File keytabFile = new File(KerberosTestUtils.getKeytabFile()); + String clientPrincipal = KerberosTestUtils.getClientPrincipal(); + String serverPrincipal = KerberosTestUtils.getServerPrincipal(); + clientPrincipal = clientPrincipal.substring(0, clientPrincipal.lastIndexOf("@")); + serverPrincipal = serverPrincipal.substring(0, serverPrincipal.lastIndexOf("@")); + getKdc().createPrincipal(keytabFile, clientPrincipal, serverPrincipal); + // handler handler = getNewAuthenticationHandler(); Properties props = getDefaultProperties(); try { @@ -67,18 +80,10 @@ public class TestKerberosAuthenticationHandler extends TestCase { } } - @Override - protected void tearDown() throws Exception { - if (handler != null) { - handler.destroy(); - handler = null; - } - super.tearDown(); - } - + @Test(timeout=60000) public void testNameRules() throws Exception { KerberosName kn = new KerberosName(KerberosTestUtils.getServerPrincipal()); - 
assertEquals(KerberosTestUtils.getRealm(), kn.getRealm()); + Assert.assertEquals(KerberosTestUtils.getRealm(), kn.getRealm()); //destroy handler created in setUp() handler.destroy(); @@ -93,30 +98,32 @@ public class TestKerberosAuthenticationHandler extends TestCase { } catch (Exception ex) { } kn = new KerberosName("bar@BAR"); - assertEquals("bar", kn.getShortName()); + Assert.assertEquals("bar", kn.getShortName()); kn = new KerberosName("bar@FOO"); try { kn.getShortName(); - fail(); + Assert.fail(); } catch (Exception ex) { } } - + + @Test(timeout=60000) public void testInit() throws Exception { - assertEquals(KerberosTestUtils.getServerPrincipal(), handler.getPrincipal()); - assertEquals(KerberosTestUtils.getKeytabFile(), handler.getKeytab()); + Assert.assertEquals(KerberosTestUtils.getServerPrincipal(), handler.getPrincipal()); + Assert.assertEquals(KerberosTestUtils.getKeytabFile(), handler.getKeytab()); } + @Test(timeout=60000) public void testType() throws Exception { - assertEquals(getExpectedType(), handler.getType()); + Assert.assertEquals(getExpectedType(), handler.getType()); } public void testRequestWithoutAuthorization() throws Exception { HttpServletRequest request = Mockito.mock(HttpServletRequest.class); HttpServletResponse response = Mockito.mock(HttpServletResponse.class); - assertNull(handler.authenticate(request, response)); + Assert.assertNull(handler.authenticate(request, response)); Mockito.verify(response).setHeader(KerberosAuthenticator.WWW_AUTHENTICATE, KerberosAuthenticator.NEGOTIATE); Mockito.verify(response).setStatus(HttpServletResponse.SC_UNAUTHORIZED); } @@ -126,11 +133,12 @@ public class TestKerberosAuthenticationHandler extends TestCase { HttpServletResponse response = Mockito.mock(HttpServletResponse.class); Mockito.when(request.getHeader(KerberosAuthenticator.AUTHORIZATION)).thenReturn("invalid"); - assertNull(handler.authenticate(request, response)); + Assert.assertNull(handler.authenticate(request, response)); Mockito.verify(response).setHeader(KerberosAuthenticator.WWW_AUTHENTICATE, KerberosAuthenticator.NEGOTIATE); Mockito.verify(response).setStatus(HttpServletResponse.SC_UNAUTHORIZED); } + @Test(timeout=60000) public void testRequestWithIncompleteAuthorization() throws Exception { HttpServletRequest request = Mockito.mock(HttpServletRequest.class); HttpServletResponse response = Mockito.mock(HttpServletResponse.class); @@ -139,15 +147,14 @@ public class TestKerberosAuthenticationHandler extends TestCase { .thenReturn(KerberosAuthenticator.NEGOTIATE); try { handler.authenticate(request, response); - fail(); + Assert.fail(); } catch (AuthenticationException ex) { // Expected } catch (Exception ex) { - fail(); + Assert.fail(); } } - public void testRequestWithAuthorization() throws Exception { String token = KerberosTestUtils.doAsClient(new Callable() { @Override @@ -191,9 +198,9 @@ public class TestKerberosAuthenticationHandler extends TestCase { Mockito.matches(KerberosAuthenticator.NEGOTIATE + " .*")); Mockito.verify(response).setStatus(HttpServletResponse.SC_OK); - assertEquals(KerberosTestUtils.getClientPrincipal(), authToken.getName()); - assertTrue(KerberosTestUtils.getClientPrincipal().startsWith(authToken.getUserName())); - assertEquals(getExpectedType(), authToken.getType()); + Assert.assertEquals(KerberosTestUtils.getClientPrincipal(), authToken.getName()); + Assert.assertTrue(KerberosTestUtils.getClientPrincipal().startsWith(authToken.getUserName())); + Assert.assertEquals(getExpectedType(), authToken.getType()); } else { 
Mockito.verify(response).setHeader(Mockito.eq(KerberosAuthenticator.WWW_AUTHENTICATE), Mockito.matches(KerberosAuthenticator.NEGOTIATE + " .*")); @@ -213,12 +220,19 @@ public class TestKerberosAuthenticationHandler extends TestCase { try { handler.authenticate(request, response); - fail(); + Assert.fail(); } catch (AuthenticationException ex) { // Expected } catch (Exception ex) { - fail(); + Assert.fail(); } } + @After + public void tearDown() throws Exception { + if (handler != null) { + handler.destroy(); + handler = null; + } + } } diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestPseudoAuthenticationHandler.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestPseudoAuthenticationHandler.java index dbc2c368336..da7eda7bc8e 100644 --- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestPseudoAuthenticationHandler.java +++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestPseudoAuthenticationHandler.java @@ -14,33 +14,37 @@ package org.apache.hadoop.security.authentication.server; import org.apache.hadoop.security.authentication.client.AuthenticationException; -import junit.framework.TestCase; import org.apache.hadoop.security.authentication.client.PseudoAuthenticator; +import org.junit.Assert; +import org.junit.Test; import org.mockito.Mockito; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import java.util.Properties; -public class TestPseudoAuthenticationHandler extends TestCase { +public class TestPseudoAuthenticationHandler { + @Test public void testInit() throws Exception { PseudoAuthenticationHandler handler = new PseudoAuthenticationHandler(); try { Properties props = new Properties(); props.setProperty(PseudoAuthenticationHandler.ANONYMOUS_ALLOWED, "false"); handler.init(props); - assertEquals(false, handler.getAcceptAnonymous()); + Assert.assertEquals(false, handler.getAcceptAnonymous()); } finally { handler.destroy(); } } + @Test public void testType() throws Exception { PseudoAuthenticationHandler handler = new PseudoAuthenticationHandler(); - assertEquals(PseudoAuthenticationHandler.TYPE, handler.getType()); + Assert.assertEquals(PseudoAuthenticationHandler.TYPE, handler.getType()); } + @Test public void testAnonymousOn() throws Exception { PseudoAuthenticationHandler handler = new PseudoAuthenticationHandler(); try { @@ -53,12 +57,13 @@ public class TestPseudoAuthenticationHandler extends TestCase { AuthenticationToken token = handler.authenticate(request, response); - assertEquals(AuthenticationToken.ANONYMOUS, token); + Assert.assertEquals(AuthenticationToken.ANONYMOUS, token); } finally { handler.destroy(); } } + @Test public void testAnonymousOff() throws Exception { PseudoAuthenticationHandler handler = new PseudoAuthenticationHandler(); try { @@ -70,11 +75,11 @@ public class TestPseudoAuthenticationHandler extends TestCase { HttpServletResponse response = Mockito.mock(HttpServletResponse.class); handler.authenticate(request, response); - fail(); + Assert.fail(); } catch (AuthenticationException ex) { // Expected } catch (Exception ex) { - fail(); + Assert.fail(); } finally { handler.destroy(); } @@ -93,19 +98,21 @@ public class TestPseudoAuthenticationHandler extends TestCase { AuthenticationToken token = handler.authenticate(request, response); - assertNotNull(token); - assertEquals("user", 
token.getUserName()); - assertEquals("user", token.getName()); - assertEquals(PseudoAuthenticationHandler.TYPE, token.getType()); + Assert.assertNotNull(token); + Assert.assertEquals("user", token.getUserName()); + Assert.assertEquals("user", token.getName()); + Assert.assertEquals(PseudoAuthenticationHandler.TYPE, token.getType()); } finally { handler.destroy(); } } + @Test public void testUserNameAnonymousOff() throws Exception { _testUserName(false); } + @Test public void testUserNameAnonymousOn() throws Exception { _testUserName(true); } diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestKerberosName.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestKerberosName.java index b6c0b0fb2ec..e82a0a6c182 100644 --- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestKerberosName.java +++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestKerberosName.java @@ -21,14 +21,19 @@ package org.apache.hadoop.security.authentication.util; import java.io.IOException; import org.apache.hadoop.security.authentication.KerberosTestUtils; +import org.junit.After; import org.junit.Before; import org.junit.Test; -import static org.junit.Assert.*; + +import org.junit.Assert; public class TestKerberosName { @Before public void setUp() throws Exception { + System.setProperty("java.security.krb5.realm", KerberosTestUtils.getRealm()); + System.setProperty("java.security.krb5.kdc", "localhost:88"); + String rules = "RULE:[1:$1@$0](.*@YAHOO\\.COM)s/@.*//\n" + "RULE:[2:$1](johndoe)s/^.*$/guest/\n" + @@ -44,7 +49,7 @@ public class TestKerberosName { KerberosName nm = new KerberosName(from); String simple = nm.getShortName(); System.out.println("to " + simple); - assertEquals("short name incorrect", to, simple); + Assert.assertEquals("short name incorrect", to, simple); } @Test @@ -61,7 +66,7 @@ public class TestKerberosName { System.out.println("Checking " + name + " to ensure it is bad."); try { new KerberosName(name); - fail("didn't get exception for " + name); + Assert.fail("didn't get exception for " + name); } catch (IllegalArgumentException iae) { // PASS } @@ -72,7 +77,7 @@ public class TestKerberosName { KerberosName nm = new KerberosName(from); try { nm.getShortName(); - fail("didn't get exception for " + from); + Assert.fail("didn't get exception for " + from); } catch (IOException ie) { // PASS } @@ -85,4 +90,10 @@ public class TestKerberosName { checkBadTranslation("foo@ACME.COM"); checkBadTranslation("root/joe@FOO.COM"); } + + @After + public void clear() { + System.clearProperty("java.security.krb5.realm"); + System.clearProperty("java.security.krb5.kdc"); + } } diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestKerberosUtil.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestKerberosUtil.java index 4c91e2bf13a..7da78aa20e0 100644 --- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestKerberosUtil.java +++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestKerberosUtil.java @@ -16,11 +16,10 @@ */ package org.apache.hadoop.security.authentication.util; -import static org.junit.Assert.*; +import org.junit.Assert; import java.io.IOException; -import 
org.apache.hadoop.security.authentication.util.KerberosUtil; import org.junit.Test; public class TestKerberosUtil { @@ -32,23 +31,23 @@ public class TestKerberosUtil { String testHost = "FooBar"; // send null hostname - assertEquals("When no hostname is sent", + Assert.assertEquals("When no hostname is sent", service + "/" + localHostname.toLowerCase(), KerberosUtil.getServicePrincipal(service, null)); // send empty hostname - assertEquals("When empty hostname is sent", + Assert.assertEquals("When empty hostname is sent", service + "/" + localHostname.toLowerCase(), KerberosUtil.getServicePrincipal(service, "")); // send 0.0.0.0 hostname - assertEquals("When 0.0.0.0 hostname is sent", + Assert.assertEquals("When 0.0.0.0 hostname is sent", service + "/" + localHostname.toLowerCase(), KerberosUtil.getServicePrincipal(service, "0.0.0.0")); // send uppercase hostname - assertEquals("When uppercase hostname is sent", + Assert.assertEquals("When uppercase hostname is sent", service + "/" + testHost.toLowerCase(), KerberosUtil.getServicePrincipal(service, testHost)); // send lowercase hostname - assertEquals("When lowercase hostname is sent", + Assert.assertEquals("When lowercase hostname is sent", service + "/" + testHost.toLowerCase(), KerberosUtil.getServicePrincipal(service, testHost.toLowerCase())); } diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestSigner.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestSigner.java index 9b3d1a2a2a6..e7cd0e1a8ed 100644 --- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestSigner.java +++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestSigner.java @@ -13,68 +13,75 @@ */ package org.apache.hadoop.security.authentication.util; -import junit.framework.TestCase; +import org.junit.Assert; +import org.junit.Test; -public class TestSigner extends TestCase { +public class TestSigner { + @Test public void testNoSecret() throws Exception { try { new Signer(null); - fail(); + Assert.fail(); } catch (IllegalArgumentException ex) { } } + @Test public void testNullAndEmptyString() throws Exception { Signer signer = new Signer("secret".getBytes()); try { signer.sign(null); - fail(); + Assert.fail(); } catch (IllegalArgumentException ex) { // Expected } catch (Throwable ex) { - fail(); + Assert.fail(); } try { signer.sign(""); - fail(); + Assert.fail(); } catch (IllegalArgumentException ex) { // Expected } catch (Throwable ex) { - fail(); + Assert.fail(); } } + @Test public void testSignature() throws Exception { Signer signer = new Signer("secret".getBytes()); String s1 = signer.sign("ok"); String s2 = signer.sign("ok"); String s3 = signer.sign("wrong"); - assertEquals(s1, s2); - assertNotSame(s1, s3); + Assert.assertEquals(s1, s2); + Assert.assertNotSame(s1, s3); } + @Test public void testVerify() throws Exception { Signer signer = new Signer("secret".getBytes()); String t = "test"; String s = signer.sign(t); String e = signer.verifyAndExtract(s); - assertEquals(t, e); + Assert.assertEquals(t, e); } + @Test public void testInvalidSignedText() throws Exception { Signer signer = new Signer("secret".getBytes()); try { signer.verifyAndExtract("test"); - fail(); + Assert.fail(); } catch (SignerException ex) { // Expected } catch (Throwable ex) { - fail(); + Assert.fail(); } } + @Test public void testTampering() throws Exception { Signer signer = new 
Signer("secret".getBytes()); String t = "test"; @@ -82,12 +89,11 @@ public class TestSigner extends TestCase { s += "x"; try { signer.verifyAndExtract(s); - fail(); + Assert.fail(); } catch (SignerException ex) { // Expected } catch (Throwable ex) { - fail(); + Assert.fail(); } } - } diff --git a/hadoop-common-project/hadoop-auth/src/test/resources/krb5.conf b/hadoop-common-project/hadoop-auth/src/test/resources/krb5.conf deleted file mode 100644 index c9f956705fa..00000000000 --- a/hadoop-common-project/hadoop-auth/src/test/resources/krb5.conf +++ /dev/null @@ -1,28 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -[libdefaults] - default_realm = ${kerberos.realm} - udp_preference_limit = 1 - extra_addresses = 127.0.0.1 -[realms] - ${kerberos.realm} = { - admin_server = localhost:88 - kdc = localhost:88 - } -[domain_realm] - localhost = ${kerberos.realm} diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index db23e0640df..c14c615fa35 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -319,6 +319,9 @@ Release 2.3.0 - UNRELEASED HADOOP-9860. Remove class HackedKeytab and HackedKeytabEncoder from hadoop-minikdc once jira DIRSERVER-1882 solved. (ywskycn via tucu) + HADOOP-9866. convert hadoop-auth testcases requiring kerberos to + use minikdc. (ywskycn via tucu) + OPTIMIZATIONS HADOOP-9748. Reduce blocking on UGI.ensureInitialized (daryn) From 172d0cef6909779f6f167fa62439f0553d2f69b1 Mon Sep 17 00:00:00 2001 From: Steve Loughran Date: Mon, 19 Aug 2013 23:49:27 +0000 Subject: [PATCH 43/53] HADOOP-9487 Deprecation warnings in Configuration should go to their own log or otherwise be suppressible git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1515672 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++ .../hadoop-common/src/main/conf/log4j.properties | 10 +++++++++- .../java/org/apache/hadoop/conf/Configuration.java | 9 ++++++++- 3 files changed, 20 insertions(+), 2 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index c14c615fa35..ca7be334c23 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -322,6 +322,9 @@ Release 2.3.0 - UNRELEASED HADOOP-9866. convert hadoop-auth testcases requiring kerberos to use minikdc. (ywskycn via tucu) + HADOOP-9487 Deprecation warnings in Configuration should go to their + own log or otherwise be suppressible (Chu Tong via stevel) + OPTIMIZATIONS HADOOP-9748. 
Reduce blocking on UGI.ensureInitialized (daryn) diff --git a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties index aef773a5f79..d436db9df3b 100644 --- a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties +++ b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties @@ -130,6 +130,13 @@ log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd +# +# hadoop configuration logging +# + +# Uncomment the following line to turn off configuration deprecation warnings. +# log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN + # # hdfs audit logging # @@ -231,4 +238,5 @@ log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n #log4j.appender.HSAUDIT.File=${hadoop.log.dir}/hs-audit.log #log4j.appender.HSAUDIT.layout=org.apache.log4j.PatternLayout #log4j.appender.HSAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n -#log4j.appender.HSAUDIT.DatePattern=.yyyy-MM-dd \ No newline at end of file +#log4j.appender.HSAUDIT.DatePattern=.yyyy-MM-dd + diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java index 9bc7472da8a..71d5ce4320a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java @@ -153,6 +153,10 @@ import com.google.common.base.Preconditions; * will be resolved to another property in this Configuration, while * ${user.name} would then ordinarily be resolved to the value * of the System property with that name. + * By default, warnings will be given to any deprecated configuration + * parameters and these are suppressible by configuring + * log4j.logger.org.apache.hadoop.conf.Configuration.deprecation in + * log4j.properties file. */ @InterfaceAudience.Public @InterfaceStability.Stable @@ -161,6 +165,9 @@ public class Configuration implements Iterable>, private static final Log LOG = LogFactory.getLog(Configuration.class); + private static final Log LOG_DEPRECATION = + LogFactory.getLog("org.apache.hadoop.conf.Configuration.deprecation"); + private boolean quietmode = true; private static class Resource { @@ -836,7 +843,7 @@ public class Configuration implements Iterable>, private void warnOnceIfDeprecated(String name) { DeprecatedKeyInfo keyInfo = deprecatedKeyMap.get(name); if (keyInfo != null && !keyInfo.accessed) { - LOG.warn(keyInfo.getWarningMessage(name)); + LOG_DEPRECATION.info(keyInfo.getWarningMessage(name)); } } From f6a1f4d1e0c60bceccc82d546b85abb25f1bcd88 Mon Sep 17 00:00:00 2001 From: Sanford Ryza Date: Tue, 20 Aug 2013 06:24:22 +0000 Subject: [PATCH 44/53] HADOOP-9879. 
Move the version info of zookeeper dependencies to hadoop-project/pom (Karthik Kambatla via Sandy Ryza) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1515711 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++ hadoop-common-project/hadoop-common/pom.xml | 2 -- hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml | 1 - hadoop-hdfs-project/hadoop-hdfs/pom.xml | 1 - hadoop-project/pom.xml | 7 +++++++ 5 files changed, 10 insertions(+), 4 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index ca7be334c23..e32580fe7a3 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -378,6 +378,9 @@ Release 2.1.1-beta - UNRELEASED HADOOP-9802. Support Snappy codec on Windows. (cnauroth) + HADOOP-9879. Move the version info of zookeeper dependencies to + hadoop-project/pom (Karthik Kambatla via Sandy Ryza) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml index a7462ea5a3b..af5a7f4ee95 100644 --- a/hadoop-common-project/hadoop-common/pom.xml +++ b/hadoop-common-project/hadoop-common/pom.xml @@ -217,7 +217,6 @@ org.apache.zookeeper zookeeper - 3.4.2 jline @@ -245,7 +244,6 @@ org.apache.zookeeper zookeeper - 3.4.2 test-jar test diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml index cb1906d9c83..945bb658385 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml @@ -77,7 +77,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.zookeeper zookeeper - 3.4.2 test-jar test diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml index f0e3ac882b5..59abffa0f96 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml @@ -63,7 +63,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.zookeeper zookeeper - 3.4.2 test-jar test diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index e1bb833c630..8dee23a467e 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -663,6 +663,13 @@ + + org.apache.zookeeper + zookeeper + 3.4.2 + test-jar + test + org.apache.bookkeeper bookkeeper-server From f7ca7ec4c9eb3ec65f0d365b41ce1b5e8923eb8a Mon Sep 17 00:00:00 2001 From: Jason Darrell Lowe Date: Tue, 20 Aug 2013 16:41:16 +0000 Subject: [PATCH 45/53] MAPREDUCE-5001. LocalJobRunner has race condition resulting in job failures. Contributed by Sandy Ryza git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1515863 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 6 ++++++ .../java/org/apache/hadoop/mapreduce/Cluster.java | 14 +++++++++++++- 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 7125f28e65c..00848a2ddc2 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -223,6 +223,9 @@ Release 2.1.1-beta - UNRELEASED MAPREDUCE-5454. TestDFSIO fails intermittently on JDK7 (Karthik Kambatla via Sandy Ryza) + MAPREDUCE-5001. LocalJobRunner has race condition resulting in job + failures (Sandy Ryza via jlowe) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES @@ -1298,6 +1301,9 @@ Release 0.23.10 - UNRELEASED MAPREDUCE-5440. 
TestCopyCommitter Fails on JDK7 (Robert Parker via jlowe) + MAPREDUCE-5001. LocalJobRunner has race condition resulting in job + failures (Sandy Ryza via jlowe) + Release 0.23.9 - 2013-07-08 INCOMPATIBLE CHANGES diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java index e93f2736044..2fcc0461c39 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java @@ -18,6 +18,7 @@ package org.apache.hadoop.mapreduce; +import java.io.FileNotFoundException; import java.io.IOException; import java.net.InetSocketAddress; import java.security.PrivilegedExceptionAction; @@ -181,7 +182,18 @@ public class Cluster { public Job getJob(JobID jobId) throws IOException, InterruptedException { JobStatus status = client.getJobStatus(jobId); if (status != null) { - return Job.getInstance(this, status, new JobConf(status.getJobFile())); + JobConf conf; + try { + conf = new JobConf(status.getJobFile()); + } catch (RuntimeException ex) { + // If job file doesn't exist it means we can't find the job + if (ex.getCause() instanceof FileNotFoundException) { + return null; + } else { + throw ex; + } + } + return Job.getInstance(this, status, conf); } return null; } From 3015429368e139bf54a697b93b692caa3629164b Mon Sep 17 00:00:00 2001 From: Arpit Agarwal Date: Tue, 20 Aug 2013 17:18:53 +0000 Subject: [PATCH 46/53] HDFS-2933. Improve DataNode Web UI Index Page. 
(Vivek Ganesan via Arpit Agarwal) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1515890 13f79535-47bb-0310-9956-ffa450edef68 --- .../server/datanode/DatanodeJspHelper.java | 28 +++++++++ .../main/webapps/datanode/dataNodeHome.jsp | 58 +++++++++++++++++++ .../src/main/webapps/datanode/index.html | 35 +++++++++++ .../src/main/webapps/hdfs/index.html | 4 +- 4 files changed, 123 insertions(+), 2 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/dataNodeHome.jsp create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/index.html diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java index 80732f0d304..639468bbc75 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java @@ -19,7 +19,9 @@ package org.apache.hadoop.hdfs.server.datanode; import java.io.File; import java.io.IOException; +import java.net.InetAddress; import java.net.InetSocketAddress; +import java.net.URI; import java.net.URL; import java.net.URLEncoder; import java.security.PrivilegedExceptionAction; @@ -27,6 +29,7 @@ import java.text.SimpleDateFormat; import java.util.Date; import java.util.List; +import javax.servlet.ServletContext; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import javax.servlet.jsp.JspWriter; @@ -36,6 +39,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSClient; import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.DirectoryListing; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; @@ -43,6 +47,9 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager; import org.apache.hadoop.hdfs.server.common.JspHelper; +import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; +import org.apache.hadoop.hdfs.server.namenode.NameNode; +import org.apache.hadoop.hdfs.server.namenode.NameNodeHttpServer; import org.apache.hadoop.http.HtmlQuoting; import org.apache.hadoop.http.HttpConfig; import org.apache.hadoop.net.NetUtils; @@ -50,6 +57,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.ServletUtil; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.VersionInfo; @InterfaceAudience.Private public class DatanodeJspHelper { @@ -712,4 +720,24 @@ public class DatanodeJspHelper { final String nnAddr = request.getParameter(JspHelper.NAMENODE_ADDRESS); return getDFSClient(ugi, nnAddr, conf); } + + /** Return a table containing version information. */ + public static String getVersionTable(ServletContext context) { + StringBuilder sb = new StringBuilder(); + final DataNode dataNode = (DataNode) context.getAttribute("datanode"); + sb.append("
"); + sb.append("\n" + "\n \n \n
Version:"); + sb.append(VersionInfo.getVersion() + ", " + VersionInfo.getRevision()); + sb.append("
Compiled:" + + VersionInfo.getDate()); + sb.append(" by " + VersionInfo.getUser() + " from " + + VersionInfo.getBranch()); + if (dataNode != null) { + sb.append("
Cluster ID:" + + dataNode.getClusterId()); + } + sb.append("
"); + return sb.toString(); + } + } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/dataNodeHome.jsp b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/dataNodeHome.jsp new file mode 100644 index 00000000000..56a4f50dd1b --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/dataNodeHome.jsp @@ -0,0 +1,58 @@ +<% +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +%> +<%@page import="org.apache.hadoop.hdfs.tools.GetConf"%> +<%@page import="org.apache.hadoop.hdfs.server.datanode.DatanodeJspHelper"%> +<%@page import="org.apache.hadoop.hdfs.server.datanode.DataNode"%> +<%@ page + contentType="text/html; charset=UTF-8" + import="org.apache.hadoop.util.ServletUtil" +%> +<%! + //for java.io.Serializable + private static final long serialVersionUID = 1L; +%> +<% + DataNode dataNode = (DataNode)getServletContext().getAttribute("datanode"); + String state = dataNode.isDatanodeUp()?"active":"inactive"; + String dataNodeLabel = dataNode.getDisplayName(); +%> + + + + + +Hadoop DataNode <%=dataNodeLabel%> + + +

DataNode '<%=dataNodeLabel%>' (<%=state%>)

+<%= DatanodeJspHelper.getVersionTable(getServletContext()) %> +
+DataNode Logs +
+View/Set Log Level +
+Metrics +
+Configuration +
+Block Scanner Report +<% +out.println(ServletUtil.htmlFooter()); +%> diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/index.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/index.html new file mode 100644 index 00000000000..eaaa2228e3d --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/index.html @@ -0,0 +1,35 @@ + + + + +Hadoop Administration + + + + +

Hadoop Administration

+ + + + + + diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/index.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/index.html index 648da4ae2a8..7fc136b0c85 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/index.html +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/index.html @@ -1,5 +1,3 @@ - - + + Hadoop Administration From 00afcdfd3904e704c1b277e912d2eea26ef27b98 Mon Sep 17 00:00:00 2001 From: Arpit Agarwal Date: Tue, 20 Aug 2013 17:26:24 +0000 Subject: [PATCH 47/53] HDFS-2933. Update CHANGES.txt git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1515892 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ 1 file changed, 3 insertions(+) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 7d8dc365f86..15e7a1b367e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -316,6 +316,9 @@ Release 2.1.1-beta - UNRELEASED HDFS-5047. Supress logging of full stack trace of quota and lease exceptions. (Robert Parker via kihwal) + HDFS-2933. Improve DataNode Web UI Index Page. (Vivek Ganesan via + Arpit Agarwal) + OPTIMIZATIONS BUG FIXES From 9718fd4c7245851bc678ee05e66ba6a98138a9cc Mon Sep 17 00:00:00 2001 From: Jing Zhao Date: Tue, 20 Aug 2013 17:33:07 +0000 Subject: [PATCH 48/53] HDFS-5111. Remove duplicated error message for snapshot commands when processing invalid arguments. Contributed by Jing Zhao. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1515895 13f79535-47bb-0310-9956-ffa450edef68 --- .../java/org/apache/hadoop/fs/shell/SnapshotCommands.java | 6 +++--- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/SnapshotCommands.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/SnapshotCommands.java index ed687c14a61..570e442c282 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/SnapshotCommands.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/SnapshotCommands.java @@ -79,7 +79,7 @@ class SnapshotCommands extends FsCommand { protected void processArguments(LinkedList items) throws IOException { super.processArguments(items); - if (exitCode != 0) { // check for error collecting paths + if (numErrors != 0) { // check for error collecting paths return; } assert(items.size() == 1); @@ -119,7 +119,7 @@ class SnapshotCommands extends FsCommand { protected void processArguments(LinkedList items) throws IOException { super.processArguments(items); - if (exitCode != 0) { // check for error collecting paths + if (numErrors != 0) { // check for error collecting paths return; } assert (items.size() == 1); @@ -160,7 +160,7 @@ class SnapshotCommands extends FsCommand { protected void processArguments(LinkedList items) throws IOException { super.processArguments(items); - if (exitCode != 0) { // check for error collecting paths + if (numErrors != 0) { // check for error collecting paths return; } Preconditions.checkArgument(items.size() == 1); diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 15e7a1b367e..b2b285ecb05 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -319,6 +319,9 @@ Release 2.1.1-beta 
- UNRELEASED HDFS-2933. Improve DataNode Web UI Index Page. (Vivek Ganesan via Arpit Agarwal) + HDFS-5111. Remove duplicated error message for snapshot commands when + processing invalid arguments. (jing9) + OPTIMIZATIONS BUG FIXES From b7a6c5ebb48275b3512bdbf201e0e8873b6d77b6 Mon Sep 17 00:00:00 2001 From: Andrew Wang Date: Tue, 20 Aug 2013 20:19:07 +0000 Subject: [PATCH 49/53] HADOOP-9877. Fix listing of snapshot directories in globStatus. (Binglin Chang via Andrew Wang) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1515955 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 2 + .../java/org/apache/hadoop/fs/Globber.java | 56 ++++++++++++++++--- .../fs/FileContextMainOperationsBaseTest.java | 15 +++++ .../hadoop/fs/TestFsShellReturnCode.java | 20 +++++++ .../fs/TestHDFSFileContextMainOperations.java | 11 ++++ 5 files changed, 96 insertions(+), 8 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index e32580fe7a3..02ff55b5ab3 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -349,6 +349,8 @@ Release 2.3.0 - UNRELEASED HADOOP-9865. FileContext#globStatus has a regression with respect to relative path. (Chuan Lin via Colin Patrick McCabe) + HADOOP-9877. Fix listing of snapshot directories in globStatus. + (Binglin Chang via Andrew Wang) Release 2.1.1-beta - UNRELEASED diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java index 378311a71a2..57ad45e81d4 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java @@ -62,6 +62,18 @@ class Globber { } } + private FileStatus getFileLinkStatus(Path path) { + try { + if (fs != null) { + return fs.getFileLinkStatus(path); + } else { + return fc.getFileLinkStatus(path); + } + } catch (IOException e) { + return null; + } + } + private FileStatus[] listStatus(Path path) { try { if (fs != null) { @@ -122,6 +134,18 @@ class Globber { return authority ; } + /** + * The glob filter builds a regexp per path component. If the component + * does not contain a shell metachar, then it falls back to appending the + * raw string to the list of built up paths. This raw path needs to have + * the quoting removed. Ie. convert all occurrences of "\X" to "X" + * @param name of the path component + * @return the unquoted path component + */ + private static String unquotePathComponent(String name) { + return name.replaceAll("\\\\(.)", "$1"); + } + public FileStatus[] glob() throws IOException { // First we get the scheme and authority of the pattern that was passed // in. @@ -176,14 +200,30 @@ class Globber { resolvedCandidate.isDirectory() == false) { continue; } - FileStatus[] children = listStatus(candidate.getPath()); - for (FileStatus child : children) { - // Set the child path based on the parent path. - // This keeps the symlinks in our path. - child.setPath(new Path(candidate.getPath(), - child.getPath().getName())); - if (globFilter.accept(child.getPath())) { - newCandidates.add(child); + // For components without pattern, we get its FileStatus directly + // using getFileLinkStatus for two reasons: + // 1. It should be faster to only get FileStatus needed rather than + // get all children. + // 2. 
Some special filesystem directories (e.g. HDFS snapshot + // directories) are not returned by listStatus, but do exist if + // checked explicitly via getFileLinkStatus. + if (globFilter.hasPattern()) { + FileStatus[] children = listStatus(candidate.getPath()); + for (FileStatus child : children) { + // Set the child path based on the parent path. + // This keeps the symlinks in our path. + child.setPath(new Path(candidate.getPath(), + child.getPath().getName())); + if (globFilter.accept(child.getPath())) { + newCandidates.add(child); + } + } + } else { + Path p = new Path(candidate.getPath(), unquotePathComponent(component)); + FileStatus s = getFileLinkStatus(p); + if (s != null) { + s.setPath(p); + newCandidates.add(s); } } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java index 354f7aabfd6..877a491bf9b 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java @@ -30,6 +30,7 @@ import org.apache.hadoop.fs.Options.Rename; import org.apache.hadoop.fs.permission.FsPermission; import org.junit.After; import org.junit.Assert; +import org.junit.Assume; import org.junit.Before; import org.junit.Test; @@ -632,6 +633,20 @@ public abstract class FileContextMainOperationsBaseTest { filteredPaths)); } + protected Path getHiddenPathForTest() { + return null; + } + + @Test + public void testGlobStatusFilterWithHiddenPathTrivialFilter() + throws Exception { + Path hidden = getHiddenPathForTest(); + Assume.assumeNotNull(hidden); + FileStatus[] filteredPaths = fc.util().globStatus(hidden, DEFAULT_FILTER); + Assert.assertNotNull(filteredPaths); + Assert.assertEquals(1, filteredPaths.length); + } + @Test public void testWriteReadAndDeleteEmptyFile() throws Exception { writeReadAndDelete(0); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellReturnCode.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellReturnCode.java index dcc19df3d4e..2fff29e38d4 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellReturnCode.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellReturnCode.java @@ -517,6 +517,26 @@ public class TestFsShellReturnCode { } return stat; } + + @Override + public FileStatus getFileLinkStatus(Path p) throws IOException { + String f = makeQualified(p).toString(); + FileStatus stat = super.getFileLinkStatus(p); + + stat.getPermission(); + if (owners.containsKey(f)) { + stat.setOwner("STUB-"+owners.get(f)); + } else { + stat.setOwner("REAL-"+stat.getOwner()); + } + if (groups.containsKey(f)) { + stat.setGroup("STUB-"+groups.get(f)); + } else { + stat.setGroup("REAL-"+stat.getGroup()); + } + return stat; + } + } static class MyFsShell extends FsShell { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java index 6388bdd9e7a..8f5f14db614 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java @@ -59,6 +59,9 @@ public class TestHDFSFileContextMainOperations extends defaultWorkingDirectory = fc.makeQualified( new Path("/user/" + UserGroupInformation.getCurrentUser().getShortUserName())); fc.mkdir(defaultWorkingDirectory, FileContext.DEFAULT_PERM, true); + // Make defaultWorkingDirectory snapshottable to enable + // testGlobStatusFilterWithHiddenPathTrivialFilter + cluster.getFileSystem().allowSnapshot(defaultWorkingDirectory); } private static void restartCluster() throws IOException, LoginException { @@ -73,6 +76,9 @@ public class TestHDFSFileContextMainOperations extends defaultWorkingDirectory = fc.makeQualified( new Path("/user/" + UserGroupInformation.getCurrentUser().getShortUserName())); fc.mkdir(defaultWorkingDirectory, FileContext.DEFAULT_PERM, true); + // Make defaultWorkingDirectory snapshottable to enable + // testGlobStatusFilterWithHiddenPathTrivialFilter + cluster.getFileSystem().allowSnapshot(defaultWorkingDirectory); } @AfterClass @@ -92,6 +98,11 @@ public class TestHDFSFileContextMainOperations extends super.tearDown(); } + @Override + protected Path getHiddenPathForTest() { + return new Path(defaultWorkingDirectory, ".snapshot"); + } + @Override protected Path getDefaultWorkingDirectory() { return defaultWorkingDirectory; From 2cd6064195da817d2c34b64f19d4c6d630efbc4a Mon Sep 17 00:00:00 2001 From: Jonathan Turner Eagles Date: Tue, 20 Aug 2013 21:53:38 +0000 Subject: [PATCH 50/53] HADOOP-9686. Easy access to final parameters in Configuration (Jason Lowe via jeagles) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1515984 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 3 +++ .../org/apache/hadoop/conf/Configuration.java | 9 +++++++++ .../apache/hadoop/conf/TestConfiguration.java | 18 +++++++++++++++++- 3 files changed, 29 insertions(+), 1 deletion(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 02ff55b5ab3..5ccfc1d56ef 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -2074,6 +2074,9 @@ Release 0.23.10 - UNRELEASED IMPROVEMENTS + HADOOP-9686. Easy access to final parameters in Configuration (Jason Lowe + via jeagles) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java index 71d5ce4320a..8a4cc00f56a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java @@ -1918,6 +1918,15 @@ public class Configuration implements Iterable>, } } + /** + * Get the set of parameters marked final. + * + * @return final parameter set. 
+ */ + public Set getFinalParameters() { + return new HashSet(finalParameters); + } + protected synchronized Properties getProps() { if (properties == null) { properties = new Properties(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java index 3bb211c54eb..87ebb61f49e 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java @@ -1272,7 +1272,23 @@ public class TestConfiguration extends TestCase { Class clazz = config.getClassByNameOrNull("java.lang.Object"); assertNotNull(clazz); } - + + public void testGetFinalParameters() throws Exception { + out=new BufferedWriter(new FileWriter(CONFIG)); + startConfig(); + declareProperty("my.var", "x", "x", true); + endConfig(); + Path fileResource = new Path(CONFIG); + Configuration conf = new Configuration(); + Set finalParameters = conf.getFinalParameters(); + assertFalse("my.var already exists", finalParameters.contains("my.var")); + conf.addResource(fileResource); + assertEquals("my.var is undefined", "x", conf.get("my.var")); + assertFalse("finalparams not copied", finalParameters.contains("my.var")); + finalParameters = conf.getFinalParameters(); + assertTrue("my.var is not final", finalParameters.contains("my.var")); + } + public static void main(String[] argv) throws Exception { junit.textui.TestRunner.main(new String[]{ TestConfiguration.class.getName() From 5e68bc4cd62337e91e4eafe4d8c3c4692c0e30ea Mon Sep 17 00:00:00 2001 From: Chris Nauroth Date: Tue, 20 Aug 2013 22:15:22 +0000 Subject: [PATCH 51/53] HDFS-4594. WebHDFS open sets Content-Length header to what is specified by length parameter rather than how much data is actually returned. Contributed by Chris Nauroth. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1515989 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + .../web/resources/DatanodeWebHdfsMethods.java | 5 +- .../web/TestWebHdfsFileSystemContract.java | 102 ++++++++++++++++++ 3 files changed, 108 insertions(+), 2 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index b2b285ecb05..6a6e094a526 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -372,6 +372,9 @@ Release 2.1.1-beta - UNRELEASED HDFS-5106. TestDatanodeBlockScanner fails on Windows due to incorrect path format. (Chuan Liu via cnauroth) + HDFS-4594. WebHDFS open sets Content-Length header to what is specified by + length parameter rather than how much data is actually returned. 
(cnauroth) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java index 262b66f9bcf..973d0916b90 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java @@ -410,8 +410,9 @@ public class DatanodeWebHdfsMethods { throw ioe; } - final long n = length.getValue() != null? length.getValue() - : in.getVisibleLength() - offset.getValue(); + final long n = length.getValue() != null ? + Math.min(length.getValue(), in.getVisibleLength() - offset.getValue()) : + in.getVisibleLength() - offset.getValue(); return Response.ok(new OpenEntity(in, n, dfsclient)).type( MediaType.APPLICATION_OCTET_STREAM).build(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java index 343aa775d0b..4181ce60376 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java @@ -21,6 +21,7 @@ package org.apache.hadoop.hdfs.web; import java.io.BufferedReader; import java.io.FileNotFoundException; import java.io.IOException; +import java.io.InputStream; import java.io.InputStreamReader; import java.net.HttpURLConnection; import java.net.URL; @@ -45,8 +46,11 @@ import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.web.resources.DoAsParam; import org.apache.hadoop.hdfs.web.resources.GetOpParam; import org.apache.hadoop.hdfs.web.resources.HttpOpParam; +import org.apache.hadoop.hdfs.web.resources.LengthParam; import org.apache.hadoop.hdfs.web.resources.NamenodeRpcAddressParam; +import org.apache.hadoop.hdfs.web.resources.OffsetParam; import org.apache.hadoop.hdfs.web.resources.PutOpParam; +import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; import org.junit.Assert; @@ -288,6 +292,104 @@ public class TestWebHdfsFileSystemContract extends FileSystemContractBaseTest { } } + /** + * Test get with length parameter greater than actual file length. + */ + public void testLengthParamLongerThanFile() throws IOException { + WebHdfsFileSystem webhdfs = (WebHdfsFileSystem)fs; + Path dir = new Path("/test"); + assertTrue(webhdfs.mkdirs(dir)); + + // Create a file with some content. + Path testFile = new Path("/test/testLengthParamLongerThanFile"); + String content = "testLengthParamLongerThanFile"; + FSDataOutputStream testFileOut = webhdfs.create(testFile); + try { + testFileOut.write(content.getBytes("US-ASCII")); + } finally { + IOUtils.closeStream(testFileOut); + } + + // Open the file, but request length longer than actual file length by 1. 
+ HttpOpParam.Op op = GetOpParam.Op.OPEN; + URL url = webhdfs.toUrl(op, testFile, new LengthParam(Long.valueOf( + content.length() + 1))); + HttpURLConnection conn = null; + InputStream is = null; + try { + conn = (HttpURLConnection)url.openConnection(); + conn.setRequestMethod(op.getType().toString()); + conn.setDoOutput(op.getDoOutput()); + conn.setInstanceFollowRedirects(true); + + // Expect OK response and Content-Length header equal to actual length. + assertEquals(HttpServletResponse.SC_OK, conn.getResponseCode()); + assertEquals(String.valueOf(content.length()), conn.getHeaderField( + "Content-Length")); + + // Check content matches. + byte[] respBody = new byte[content.length()]; + is = conn.getInputStream(); + IOUtils.readFully(is, respBody, 0, content.length()); + assertEquals(content, new String(respBody, "US-ASCII")); + } finally { + IOUtils.closeStream(is); + if (conn != null) { + conn.disconnect(); + } + } + } + + /** + * Test get with offset and length parameters that combine to request a length + * greater than actual file length. + */ + public void testOffsetPlusLengthParamsLongerThanFile() throws IOException { + WebHdfsFileSystem webhdfs = (WebHdfsFileSystem)fs; + Path dir = new Path("/test"); + assertTrue(webhdfs.mkdirs(dir)); + + // Create a file with some content. + Path testFile = new Path("/test/testOffsetPlusLengthParamsLongerThanFile"); + String content = "testOffsetPlusLengthParamsLongerThanFile"; + FSDataOutputStream testFileOut = webhdfs.create(testFile); + try { + testFileOut.write(content.getBytes("US-ASCII")); + } finally { + IOUtils.closeStream(testFileOut); + } + + // Open the file, but request offset starting at 1 and length equal to file + // length. Considering the offset, this is longer than the actual content. + HttpOpParam.Op op = GetOpParam.Op.OPEN; + URL url = webhdfs.toUrl(op, testFile, new LengthParam(Long.valueOf( + content.length())), new OffsetParam(1L)); + HttpURLConnection conn = null; + InputStream is = null; + try { + conn = (HttpURLConnection)url.openConnection(); + conn.setRequestMethod(op.getType().toString()); + conn.setDoOutput(op.getDoOutput()); + conn.setInstanceFollowRedirects(true); + + // Expect OK response and Content-Length header equal to actual length. + assertEquals(HttpServletResponse.SC_OK, conn.getResponseCode()); + assertEquals(String.valueOf(content.length() - 1), conn.getHeaderField( + "Content-Length")); + + // Check content matches. + byte[] respBody = new byte[content.length() - 1]; + is = conn.getInputStream(); + IOUtils.readFully(is, respBody, 0, content.length() - 1); + assertEquals(content.substring(1), new String(respBody, "US-ASCII")); + } finally { + IOUtils.closeStream(is); + if (conn != null) { + conn.disconnect(); + } + } + } + public void testResponseCode() throws IOException { final WebHdfsFileSystem webhdfs = (WebHdfsFileSystem)fs; final Path root = new Path("/"); From a6ef93307eda6ff5c8bed5cfd72bb06b037644ce Mon Sep 17 00:00:00 2001 From: Arpit Gupta Date: Tue, 20 Aug 2013 23:44:47 +0000 Subject: [PATCH 52/53] HADOOP-9886. Turn warning message in RetryInvocationHandler to debug. 
Contributed by Arpit Gupta git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1516034 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 2 ++ .../java/org/apache/hadoop/io/retry/RetryInvocationHandler.java | 2 -- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 5ccfc1d56ef..3d2f59b8cee 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -382,6 +382,8 @@ Release 2.1.1-beta - UNRELEASED HADOOP-9879. Move the version info of zookeeper dependencies to hadoop-project/pom (Karthik Kambatla via Sandy Ryza) + + HADOOP-9886. Turn warning message in RetryInvocationHandler to debug (arpit) OPTIMIZATIONS diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java index 4bf000ee85c..974bac91eb4 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java @@ -136,8 +136,6 @@ public class RetryInvocationHandler implements RpcInvocationHandler { msg += ". Trying to fail over " + formatSleepMessage(action.delayMillis); if (LOG.isDebugEnabled()) { LOG.debug(msg, e); - } else { - LOG.warn(msg); } } else { if(LOG.isDebugEnabled()) { From 782191f1ba27e0ff0acf3c6cf8a88df00274d308 Mon Sep 17 00:00:00 2001 From: Luke Lu Date: Wed, 21 Aug 2013 10:12:13 +0000 Subject: [PATCH 53/53] HADOOP-9784. Add a builder for HttpServer. (Junping Du via llu) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1516128 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 2 + .../org/apache/hadoop/http/HttpServer.java | 114 +++++++++++++++++- .../hadoop/http/HttpServerFunctionalTest.java | 16 ++- .../apache/hadoop/http/TestHttpServer.java | 9 +- .../org/apache/hadoop/log/TestLogLevel.java | 5 +- .../server/JournalNodeHttpServer.java | 21 ++-- .../hadoop/hdfs/server/datanode/DataNode.java | 14 ++- .../server/namenode/NameNodeHttpServer.java | 112 +++++++++-------- .../server/namenode/SecondaryNameNode.java | 22 ++-- .../namenode/TestEditLogFileInputStream.java | 3 +- .../hadoop/mapred/TestJobEndNotifier.java | 3 +- .../yarn/server/webproxy/WebAppProxy.java | 5 +- 12 files changed, 223 insertions(+), 103 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 3d2f59b8cee..a2d1304ead7 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -291,6 +291,8 @@ Release 2.3.0 - UNRELEASED IMPROVEMENTS + HADOOP-9784. Add a builder for HttpServer. (Junping Du via llu) + HADOOP 9871. Fix intermittent findbugs warnings in DefaultMetricsSystem. 
(Junping Du via llu) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java index b3d6d4ac68a..50582065473 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java @@ -47,6 +47,7 @@ import javax.servlet.http.HttpServletResponse; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.ConfServlet; @@ -119,18 +120,117 @@ public class HttpServer implements FilterContainer { protected final Map defaultContexts = new HashMap(); protected final List filterNames = new ArrayList(); - private static final int MAX_RETRIES = 10; static final String STATE_DESCRIPTION_ALIVE = " - alive"; static final String STATE_DESCRIPTION_NOT_LIVE = " - not live"; private final boolean listenerStartedExternally; + /** + * Class to construct instances of HTTP server with specific options. + */ + public static class Builder { + String name; + String bindAddress; + Integer port; + Boolean findPort; + Configuration conf; + Connector connector; + String[] pathSpecs; + AccessControlList adminsAcl; + boolean securityEnabled = false; + String usernameConfKey = null; + String keytabConfKey = null; + + public Builder setName(String name){ + this.name = name; + return this; + } + + public Builder setBindAddress(String bindAddress){ + this.bindAddress = bindAddress; + return this; + } + + public Builder setPort(int port) { + this.port = port; + return this; + } + + public Builder setFindPort(boolean findPort) { + this.findPort = findPort; + return this; + } + + public Builder setConf(Configuration conf) { + this.conf = conf; + return this; + } + + public Builder setConnector(Connector connector) { + this.connector = connector; + return this; + } + + public Builder setPathSpec(String[] pathSpec) { + this.pathSpecs = pathSpec; + return this; + } + + public Builder setACL(AccessControlList acl) { + this.adminsAcl = acl; + return this; + } + + public Builder setSecurityEnabled(boolean securityEnabled) { + this.securityEnabled = securityEnabled; + return this; + } + + public Builder setUsernameConfKey(String usernameConfKey) { + this.usernameConfKey = usernameConfKey; + return this; + } + + public Builder setKeytabConfKey(String keytabConfKey) { + this.keytabConfKey = keytabConfKey; + return this; + } + + public HttpServer build() throws IOException { + if (this.name == null) { + throw new HadoopIllegalArgumentException("name is not set"); + } + if (this.bindAddress == null) { + throw new HadoopIllegalArgumentException("bindAddress is not set"); + } + if (this.port == null) { + throw new HadoopIllegalArgumentException("port is not set"); + } + if (this.findPort == null) { + throw new HadoopIllegalArgumentException("findPort is not set"); + } + + if (this.conf == null) { + conf = new Configuration(); + } + + HttpServer server = new HttpServer(this.name, this.bindAddress, this.port, + this.findPort, this.conf, this.adminsAcl, this.connector, this.pathSpecs); + if (this.securityEnabled) { + server.initSpnego(this.conf, this.usernameConfKey, this.keytabConfKey); + } + return server; + } + } + /** Same as this(name, bindAddress, 
port, findPort, null); */ + @Deprecated public HttpServer(String name, String bindAddress, int port, boolean findPort ) throws IOException { this(name, bindAddress, port, findPort, new Configuration()); } - + + @Deprecated public HttpServer(String name, String bindAddress, int port, boolean findPort, Configuration conf, Connector connector) throws IOException { this(name, bindAddress, port, findPort, conf, null, connector, null); @@ -150,6 +250,7 @@ public class HttpServer implements FilterContainer { * @param pathSpecs Path specifications that this httpserver will be serving. * These will be added to any filters. */ + @Deprecated public HttpServer(String name, String bindAddress, int port, boolean findPort, Configuration conf, String[] pathSpecs) throws IOException { this(name, bindAddress, port, findPort, conf, null, null, pathSpecs); @@ -164,11 +265,13 @@ public class HttpServer implements FilterContainer { * increment by 1 until it finds a free port. * @param conf Configuration */ + @Deprecated public HttpServer(String name, String bindAddress, int port, boolean findPort, Configuration conf) throws IOException { this(name, bindAddress, port, findPort, conf, null, null, null); } + @Deprecated public HttpServer(String name, String bindAddress, int port, boolean findPort, Configuration conf, AccessControlList adminsAcl) throws IOException { @@ -186,6 +289,7 @@ public class HttpServer implements FilterContainer { * @param conf Configuration * @param adminsAcl {@link AccessControlList} of the admins */ + @Deprecated public HttpServer(String name, String bindAddress, int port, boolean findPort, Configuration conf, AccessControlList adminsAcl, Connector connector) throws IOException { @@ -529,7 +633,7 @@ public class HttpServer implements FilterContainer { /** * Define a filter for a context and set up default url mappings. */ - protected void defineFilter(Context ctx, String name, + public void defineFilter(Context ctx, String name, String classname, Map parameters, String[] urls) { FilterHolder holder = new FilterHolder(); @@ -569,6 +673,10 @@ public class HttpServer implements FilterContainer { public Object getAttribute(String name) { return webAppContext.getAttribute(name); } + + public WebAppContext getWebAppContext(){ + return this.webAppContext; + } /** * Get the pathname to the webapps files. 
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/HttpServerFunctionalTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/HttpServerFunctionalTest.java index 6dee7eb7134..52d569d6e6d 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/HttpServerFunctionalTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/HttpServerFunctionalTest.java @@ -116,7 +116,8 @@ public class HttpServerFunctionalTest extends Assert { public static HttpServer createServer(String host, int port) throws IOException { prepareTestWebapp(); - return new HttpServer(TEST, host, port, true); + return new HttpServer.Builder().setName(TEST).setBindAddress(host) + .setPort(port).setFindPort(true).build(); } /** @@ -126,7 +127,8 @@ public class HttpServerFunctionalTest extends Assert { * @throws IOException if it could not be created */ public static HttpServer createServer(String webapp) throws IOException { - return new HttpServer(webapp, "0.0.0.0", 0, true); + return new HttpServer.Builder().setName(webapp).setBindAddress("0.0.0.0") + .setPort(0).setFindPort(true).build(); } /** * Create an HttpServer instance for the given webapp @@ -137,13 +139,16 @@ public class HttpServerFunctionalTest extends Assert { */ public static HttpServer createServer(String webapp, Configuration conf) throws IOException { - return new HttpServer(webapp, "0.0.0.0", 0, true, conf); + return new HttpServer.Builder().setName(webapp).setBindAddress("0.0.0.0") + .setPort(0).setFindPort(true).setConf(conf).build(); } public static HttpServer createServer(String webapp, Configuration conf, AccessControlList adminsAcl) throws IOException { - return new HttpServer(webapp, "0.0.0.0", 0, true, conf, adminsAcl); + return new HttpServer.Builder().setName(webapp).setBindAddress("0.0.0.0") + .setPort(0).setFindPort(true).setConf(conf).setACL(adminsAcl).build(); } + /** * Create an HttpServer instance for the given webapp * @param webapp the webapp to work with @@ -154,7 +159,8 @@ public class HttpServerFunctionalTest extends Assert { */ public static HttpServer createServer(String webapp, Configuration conf, String[] pathSpecs) throws IOException { - return new HttpServer(webapp, "0.0.0.0", 0, true, conf, pathSpecs); + return new HttpServer.Builder().setName(webapp).setBindAddress("0.0.0.0") + .setPort(0).setFindPort(true).setConf(conf).setPathSpec(pathSpecs).build(); } /** diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java index 079bc370209..9dfaf3ec2a4 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java @@ -121,7 +121,6 @@ public class TestHttpServer extends HttpServerFunctionalTest { @SuppressWarnings("serial") public static class LongHeaderServlet extends HttpServlet { - @SuppressWarnings("unchecked") @Override public void doGet(HttpServletRequest request, HttpServletResponse response @@ -362,7 +361,8 @@ public class TestHttpServer extends HttpServerFunctionalTest { MyGroupsProvider.mapping.put("userA", Arrays.asList("groupA")); MyGroupsProvider.mapping.put("userB", Arrays.asList("groupB")); - HttpServer myServer = new HttpServer("test", "0.0.0.0", 0, true, conf); + HttpServer myServer = new 
HttpServer.Builder().setName("test") + .setBindAddress("0.0.0.0").setPort(0).setFindPort(true).build(); myServer.setAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE, conf); myServer.start(); int port = myServer.getPort(); @@ -403,8 +403,9 @@ public class TestHttpServer extends HttpServerFunctionalTest { MyGroupsProvider.mapping.put("userD", Arrays.asList("groupD")); MyGroupsProvider.mapping.put("userE", Arrays.asList("groupE")); - HttpServer myServer = new HttpServer("test", "0.0.0.0", 0, true, conf, - new AccessControlList("userA,userB groupC,groupD")); + HttpServer myServer = new HttpServer.Builder().setName("test") + .setBindAddress("0.0.0.0").setPort(0).setFindPort(true).setConf(conf) + .setACL(new AccessControlList("userA,userB groupC,groupD")).build(); myServer.setAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE, conf); myServer.start(); int port = myServer.getPort(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogLevel.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogLevel.java index f2443c04d90..c5a0d0bc04c 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogLevel.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogLevel.java @@ -42,7 +42,10 @@ public class TestLogLevel extends TestCase { log.error("log.error1"); assertTrue(!Level.ERROR.equals(log.getEffectiveLevel())); - HttpServer server = new HttpServer("..", "localhost", 22222, true); + HttpServer server = new HttpServer.Builder().setName("..") + .setBindAddress("localhost").setPort(22222).setFindPort(true) + .build(); + server.start(); int port = server.getPort(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeHttpServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeHttpServer.java index d5758a2698a..6c26dd75fc0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeHttpServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeHttpServer.java @@ -36,8 +36,8 @@ import org.apache.hadoop.hdfs.server.common.JspHelper; import org.apache.hadoop.http.HttpServer; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.authorize.AccessControlList; -import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.SecurityUtil; +import org.apache.hadoop.security.UserGroupInformation; /** * Encapsulates the HTTP server started by the Journal Service. 
@@ -69,16 +69,15 @@ public class JournalNodeHttpServer { bindAddr.getHostName())); int tmpInfoPort = bindAddr.getPort(); - httpServer = new HttpServer("journal", bindAddr.getHostName(), - tmpInfoPort, tmpInfoPort == 0, conf, new AccessControlList(conf - .get(DFS_ADMIN, " "))) { - { - if (UserGroupInformation.isSecurityEnabled()) { - initSpnego(conf, DFS_JOURNALNODE_INTERNAL_SPNEGO_USER_NAME_KEY, - DFSUtil.getSpnegoKeytabKey(conf, DFS_JOURNALNODE_KEYTAB_FILE_KEY)); - } - } - }; + httpServer = new HttpServer.Builder().setName("journal") + .setBindAddress(bindAddr.getHostName()).setPort(tmpInfoPort) + .setFindPort(tmpInfoPort == 0).setConf(conf).setACL( + new AccessControlList(conf.get(DFS_ADMIN, " "))) + .setSecurityEnabled(UserGroupInformation.isSecurityEnabled()) + .setUsernameConfKey( + DFS_JOURNALNODE_INTERNAL_SPNEGO_USER_NAME_KEY) + .setKeytabConfKey(DFSUtil.getSpnegoKeytabKey(conf, + DFS_JOURNALNODE_KEYTAB_FILE_KEY)).build(); httpServer.setAttribute(JN_ATTRIBUTE_KEY, localJournalNode); httpServer.setAttribute(JspHelper.CURRENT_CONF, conf); httpServer.addInternalServlet("getJournal", "/getJournal", diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java index 9a057830eaa..b86a5caebd1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java @@ -385,11 +385,15 @@ public class DataNode extends Configured String infoHost = infoSocAddr.getHostName(); int tmpInfoPort = infoSocAddr.getPort(); this.infoServer = (secureResources == null) - ? new HttpServer("datanode", infoHost, tmpInfoPort, tmpInfoPort == 0, - conf, new AccessControlList(conf.get(DFS_ADMIN, " "))) - : new HttpServer("datanode", infoHost, tmpInfoPort, tmpInfoPort == 0, - conf, new AccessControlList(conf.get(DFS_ADMIN, " ")), - secureResources.getListener()); + ? 
new HttpServer.Builder().setName("datanode") + .setBindAddress(infoHost).setPort(tmpInfoPort) + .setFindPort(tmpInfoPort == 0).setConf(conf) + .setACL(new AccessControlList(conf.get(DFS_ADMIN, " "))).build() + : new HttpServer.Builder().setName("datanode") + .setBindAddress(infoHost).setPort(tmpInfoPort) + .setFindPort(tmpInfoPort == 0).setConf(conf) + .setACL(new AccessControlList(conf.get(DFS_ADMIN, " "))) + .setConnector(secureResources.getListener()).build(); LOG.info("Opened info server at " + infoHost + ":" + tmpInfoPort); if (conf.getBoolean(DFS_HTTPS_ENABLE_KEY, false)) { boolean needClientAuth = conf.getBoolean(DFS_CLIENT_HTTPS_NEED_AUTH_KEY, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java index 93726b2a0b0..b645c9a0d47 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hdfs.server.namenode; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ADMIN; + import java.io.IOException; import java.net.InetSocketAddress; import java.util.HashMap; @@ -70,66 +71,31 @@ public class NameNodeHttpServer { public void start() throws IOException { final String infoHost = bindAddress.getHostName(); int infoPort = bindAddress.getPort(); + httpServer = new HttpServer.Builder().setName("hdfs") + .setBindAddress(infoHost).setPort(infoPort) + .setFindPort(infoPort == 0).setConf(conf).setACL( + new AccessControlList(conf.get(DFS_ADMIN, " "))) + .setSecurityEnabled(UserGroupInformation.isSecurityEnabled()) + .setUsernameConfKey( + DFSConfigKeys.DFS_NAMENODE_INTERNAL_SPNEGO_USER_NAME_KEY) + .setKeytabConfKey(DFSUtil.getSpnegoKeytabKey(conf, + DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY)).build(); + if (WebHdfsFileSystem.isEnabled(conf, HttpServer.LOG)) { + //add SPNEGO authentication filter for webhdfs + final String name = "SPNEGO"; + final String classname = AuthFilter.class.getName(); + final String pathSpec = WebHdfsFileSystem.PATH_PREFIX + "/*"; + Map params = getAuthFilterParams(conf); + httpServer.defineFilter(httpServer.getWebAppContext(), name, classname, params, + new String[]{pathSpec}); + HttpServer.LOG.info("Added filter '" + name + "' (class=" + classname + ")"); - httpServer = new HttpServer("hdfs", infoHost, infoPort, - infoPort == 0, conf, - new AccessControlList(conf.get(DFS_ADMIN, " "))) { - { - // Add SPNEGO support to NameNode - if (UserGroupInformation.isSecurityEnabled()) { - initSpnego(conf, - DFSConfigKeys.DFS_NAMENODE_INTERNAL_SPNEGO_USER_NAME_KEY, - DFSUtil.getSpnegoKeytabKey(conf, - DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY)); - } - if (WebHdfsFileSystem.isEnabled(conf, LOG)) { - //add SPNEGO authentication filter for webhdfs - final String name = "SPNEGO"; - final String classname = AuthFilter.class.getName(); - final String pathSpec = WebHdfsFileSystem.PATH_PREFIX + "/*"; - Map params = getAuthFilterParams(conf); - defineFilter(webAppContext, name, classname, params, - new String[]{pathSpec}); - LOG.info("Added filter '" + name + "' (class=" + classname + ")"); - - // add webhdfs packages - addJerseyResourcePackage( - NamenodeWebHdfsMethods.class.getPackage().getName() - + ";" + Param.class.getPackage().getName(), pathSpec); - } + // add webhdfs packages + 
httpServer.addJerseyResourcePackage( + NamenodeWebHdfsMethods.class.getPackage().getName() + + ";" + Param.class.getPackage().getName(), pathSpec); } - private Map getAuthFilterParams(Configuration conf) - throws IOException { - Map params = new HashMap(); - String principalInConf = conf - .get(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY); - if (principalInConf != null && !principalInConf.isEmpty()) { - params - .put( - DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, - SecurityUtil.getServerPrincipal(principalInConf, - bindAddress.getHostName())); - } else if (UserGroupInformation.isSecurityEnabled()) { - LOG.error("WebHDFS and security are enabled, but configuration property '" + - DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY + - "' is not set."); - } - String httpKeytab = conf.get(DFSUtil.getSpnegoKeytabKey(conf, - DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY)); - if (httpKeytab != null && !httpKeytab.isEmpty()) { - params.put( - DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY, - httpKeytab); - } else if (UserGroupInformation.isSecurityEnabled()) { - LOG.error("WebHDFS and security are enabled, but configuration property '" + - DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY + - "' is not set."); - } - return params; - } - }; - boolean certSSL = conf.getBoolean(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY, false); if (certSSL) { boolean needClientAuth = conf.getBoolean("dfs.https.need.client.auth", false); @@ -153,6 +119,38 @@ public class NameNodeHttpServer { httpServer.start(); httpAddress = new InetSocketAddress(bindAddress.getAddress(), httpServer.getPort()); } + + private Map getAuthFilterParams(Configuration conf) + throws IOException { + Map params = new HashMap(); + String principalInConf = conf + .get(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY); + if (principalInConf != null && !principalInConf.isEmpty()) { + params + .put( + DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, + SecurityUtil.getServerPrincipal(principalInConf, + bindAddress.getHostName())); + } else if (UserGroupInformation.isSecurityEnabled()) { + HttpServer.LOG.error( + "WebHDFS and security are enabled, but configuration property '" + + DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY + + "' is not set."); + } + String httpKeytab = conf.get(DFSUtil.getSpnegoKeytabKey(conf, + DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY)); + if (httpKeytab != null && !httpKeytab.isEmpty()) { + params.put( + DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY, + httpKeytab); + } else if (UserGroupInformation.isSecurityEnabled()) { + HttpServer.LOG.error( + "WebHDFS and security are enabled, but configuration property '" + + DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY + + "' is not set."); + } + return params; + } public void stop() throws Exception { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java index 47f7222b1b6..844c77f1cfa 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java @@ -256,19 +256,15 @@ public class SecondaryNameNode implements Runnable { // initialize the webserver for uploading files. 
int tmpInfoPort = infoSocAddr.getPort(); - infoServer = new HttpServer("secondary", infoBindAddress, tmpInfoPort, - tmpInfoPort == 0, conf, - new AccessControlList(conf.get(DFS_ADMIN, " "))) { - { - if (UserGroupInformation.isSecurityEnabled()) { - initSpnego( - conf, - DFSConfigKeys.DFS_SECONDARY_NAMENODE_INTERNAL_SPNEGO_USER_NAME_KEY, - DFSUtil.getSpnegoKeytabKey(conf, - DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY)); - } - } - }; + infoServer = new HttpServer.Builder().setName("secondary") + .setBindAddress(infoBindAddress).setPort(tmpInfoPort) + .setFindPort(tmpInfoPort == 0).setConf(conf).setACL( + new AccessControlList(conf.get(DFS_ADMIN, " "))) + .setSecurityEnabled(UserGroupInformation.isSecurityEnabled()) + .setUsernameConfKey( + DFSConfigKeys.DFS_SECONDARY_NAMENODE_INTERNAL_SPNEGO_USER_NAME_KEY) + .setKeytabConfKey(DFSUtil.getSpnegoKeytabKey(conf, + DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY)).build(); infoServer.setAttribute("secondary.name.node", this); infoServer.setAttribute("name.system.image", checkpointImage); infoServer.setAttribute(JspHelper.CURRENT_CONF, conf); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileInputStream.java index ff0dff7cc31..c3497064c8a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileInputStream.java @@ -42,7 +42,8 @@ public class TestEditLogFileInputStream { @Test public void testReadURL() throws Exception { // Start a simple web server which hosts the log data. 
- HttpServer server = new HttpServer("test", "0.0.0.0", 0, true); + HttpServer server = new HttpServer.Builder().setName("test") + .setBindAddress("0.0.0.0").setPort(0).setFindPort(true).build(); server.start(); try { server.addServlet("fakeLog", "/fakeLog", FakeLogServlet.class); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestJobEndNotifier.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestJobEndNotifier.java index 84905dadf5e..9e7ffc18003 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestJobEndNotifier.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestJobEndNotifier.java @@ -102,7 +102,8 @@ public class TestJobEndNotifier extends TestCase { public void setUp() throws Exception { new File(System.getProperty("build.webapps", "build/webapps") + "/test" ).mkdirs(); - server = new HttpServer("test", "0.0.0.0", 0, true); + server = new HttpServer.Builder().setName("test") + .setBindAddress("0.0.0.0").setPort(0).setFindPort(true).build(); server.addServlet("delay", "/delay", DelayServlet.class); server.addServlet("jobend", "/jobend", JobEndServlet.class); server.addServlet("fail", "/fail", FailServlet.class); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxy.java index 6b7e42fd0fc..76568d326b9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxy.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxy.java @@ -87,8 +87,9 @@ public class WebAppProxy extends AbstractService { @Override protected void serviceStart() throws Exception { try { - proxyServer = new HttpServer("proxy", bindAddress, port, - port == 0, getConfig(), acl); + proxyServer = new HttpServer.Builder().setName("proxy") + .setBindAddress(bindAddress).setPort(port).setFindPort(port == 0) + .setConf(getConfig()).setACL(acl).build(); proxyServer.addServlet(ProxyUriUtils.PROXY_SERVLET_NAME, ProxyUriUtils.PROXY_PATH_SPEC, WebAppProxyServlet.class); proxyServer.setAttribute(FETCHER_ATTRIBUTE, fetcher);
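For reference, a minimal sketch (not part of the patch itself) of how the new HttpServer.Builder introduced by HADOOP-9784 is meant to be wired up, mirroring the call sites converted above. The webapp name "test" and the admin ACL string are placeholders: the name must correspond to a webapps/<name> resource on the classpath, which the test helpers arrange via the build.webapps property.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.HttpServer;
import org.apache.hadoop.security.authorize.AccessControlList;

public class HttpServerBuilderExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();

    // name, bindAddress, port and findPort are mandatory; build() throws
    // HadoopIllegalArgumentException if any of them is unset. setConf() is
    // optional -- build() falls back to a fresh Configuration when absent.
    HttpServer server = new HttpServer.Builder()
        .setName("test")            // expects a webapps/test resource on the classpath
        .setBindAddress("0.0.0.0")
        .setPort(0)                 // the tests pair port 0 with findPort=true
        .setFindPort(true)
        .setConf(conf)
        .setACL(new AccessControlList("admin "))  // hypothetical admin ACL string
        .build();

    server.start();
    System.out.println("HTTP server listening on port " + server.getPort());
    server.stop();
  }
}

Funnelling every option through build(), with the old constructors marked @Deprecated, puts setup such as the SPNEGO initialization in one place via setSecurityEnabled()/setUsernameConfKey()/setKeytabConfKey(), instead of the anonymous-subclass initializer blocks that JournalNodeHttpServer, NameNodeHttpServer and SecondaryNameNode used before this patch.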