From 67a74198b164d3d86f500e35855707e03d7f8fe3 Mon Sep 17 00:00:00 2001 From: Arun Murthy Date: Wed, 28 Mar 2012 07:37:06 +0000 Subject: [PATCH 1/6] Changed version in trunk to 3.0.0-SNAPSHOT. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1306200 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-assemblies/pom.xml | 4 ++-- hadoop-client/pom.xml | 4 ++-- hadoop-common-project/hadoop-annotations/pom.xml | 4 ++-- hadoop-common-project/hadoop-auth-examples/pom.xml | 4 ++-- hadoop-common-project/hadoop-auth/pom.xml | 4 ++-- hadoop-common-project/hadoop-common/pom.xml | 4 ++-- .../hadoop-common/src/main/resources/core-default.xml | 2 +- hadoop-common-project/pom.xml | 4 ++-- hadoop-dist/pom.xml | 4 ++-- hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml | 4 ++-- hadoop-hdfs-project/hadoop-hdfs/pom.xml | 4 ++-- .../hadoop-hdfs/src/contrib/bkjournal/pom.xml | 4 ++-- .../hadoop-hdfs/src/test/aop/build/aop.xml | 2 +- hadoop-hdfs-project/pom.xml | 4 ++-- hadoop-mapreduce-project/build.xml | 2 +- .../hadoop-mapreduce-client-app/pom.xml | 6 +++--- .../hadoop-mapreduce-client-common/pom.xml | 4 ++-- .../hadoop-mapreduce-client-core/pom.xml | 4 ++-- .../hadoop-mapreduce-client-hs/pom.xml | 4 ++-- .../hadoop-mapreduce-client-jobclient/pom.xml | 4 ++-- .../hadoop-mapreduce-client-shuffle/pom.xml | 4 ++-- hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml | 4 ++-- .../hadoop-mapreduce-examples/pom.xml | 4 ++-- .../hadoop-yarn/hadoop-yarn-api/pom.xml | 4 ++-- .../hadoop-yarn-applications-distributedshell/pom.xml | 4 ++-- .../hadoop-yarn/hadoop-yarn-applications/pom.xml | 4 ++-- .../hadoop-yarn/hadoop-yarn-common/pom.xml | 4 ++-- .../hadoop-yarn-server/hadoop-yarn-server-common/pom.xml | 4 ++-- .../hadoop-yarn-server-nodemanager/pom.xml | 4 ++-- .../hadoop-yarn-server-resourcemanager/pom.xml | 4 ++-- .../hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml | 4 ++-- .../hadoop-yarn-server-web-proxy/pom.xml | 4 ++-- .../hadoop-yarn/hadoop-yarn-server/pom.xml | 4 ++-- .../hadoop-yarn/hadoop-yarn-site/pom.xml | 4 ++-- hadoop-mapreduce-project/hadoop-yarn/pom.xml | 4 ++-- .../ivy/hadoop-mapred-instrumented-template.xml | 2 +- hadoop-mapreduce-project/ivy/hadoop-mapred-template.xml | 2 +- hadoop-mapreduce-project/ivy/libraries.properties | 8 ++++---- hadoop-mapreduce-project/pom.xml | 4 ++-- hadoop-mapreduce-project/src/test/aop/build/aop.xml | 2 +- hadoop-minicluster/pom.xml | 4 ++-- hadoop-project-dist/pom.xml | 4 ++-- hadoop-project/pom.xml | 4 ++-- hadoop-tools/hadoop-archives/pom.xml | 4 ++-- hadoop-tools/hadoop-distcp/pom.xml | 4 ++-- hadoop-tools/hadoop-extras/pom.xml | 4 ++-- hadoop-tools/hadoop-rumen/pom.xml | 4 ++-- hadoop-tools/hadoop-streaming/pom.xml | 4 ++-- hadoop-tools/hadoop-tools-dist/pom.xml | 4 ++-- hadoop-tools/pom.xml | 4 ++-- pom.xml | 2 +- 51 files changed, 98 insertions(+), 98 deletions(-) diff --git a/hadoop-assemblies/pom.xml b/hadoop-assemblies/pom.xml index c33fbce5b6e..560ea78ed2e 100644 --- a/hadoop-assemblies/pom.xml +++ b/hadoop-assemblies/pom.xml @@ -20,12 +20,12 @@ org.apache.hadoop hadoop-project - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT ../hadoop-project org.apache.hadoop hadoop-assemblies - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT Apache Hadoop Assemblies Apache Hadoop Assemblies diff --git a/hadoop-client/pom.xml b/hadoop-client/pom.xml index 6eecfc07ae2..fced787fe49 100644 --- a/hadoop-client/pom.xml +++ b/hadoop-client/pom.xml @@ -18,12 +18,12 @@ org.apache.hadoop hadoop-project-dist - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT ../hadoop-project-dist org.apache.hadoop hadoop-client 
- 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT jar Apache Hadoop Client diff --git a/hadoop-common-project/hadoop-annotations/pom.xml b/hadoop-common-project/hadoop-annotations/pom.xml index bb9de9c5494..e9a86474871 100644 --- a/hadoop-common-project/hadoop-annotations/pom.xml +++ b/hadoop-common-project/hadoop-annotations/pom.xml @@ -17,12 +17,12 @@ org.apache.hadoop hadoop-project - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT ../../hadoop-project org.apache.hadoop hadoop-annotations - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT Apache Hadoop Annotations Apache Hadoop Annotations jar diff --git a/hadoop-common-project/hadoop-auth-examples/pom.xml b/hadoop-common-project/hadoop-auth-examples/pom.xml index 67f113ba2c4..ceeb769bfa3 100644 --- a/hadoop-common-project/hadoop-auth-examples/pom.xml +++ b/hadoop-common-project/hadoop-auth-examples/pom.xml @@ -17,12 +17,12 @@ org.apache.hadoop hadoop-project - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT ../../hadoop-project org.apache.hadoop hadoop-auth-examples - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT war Apache Hadoop Auth Examples diff --git a/hadoop-common-project/hadoop-auth/pom.xml b/hadoop-common-project/hadoop-auth/pom.xml index 9bcf629f032..b140199f91b 100644 --- a/hadoop-common-project/hadoop-auth/pom.xml +++ b/hadoop-common-project/hadoop-auth/pom.xml @@ -17,12 +17,12 @@ org.apache.hadoop hadoop-project - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT ../../hadoop-project org.apache.hadoop hadoop-auth - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT jar Apache Hadoop Auth diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml index fd18b607a25..41305460501 100644 --- a/hadoop-common-project/hadoop-common/pom.xml +++ b/hadoop-common-project/hadoop-common/pom.xml @@ -17,12 +17,12 @@ org.apache.hadoop hadoop-project-dist - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT ../../hadoop-project-dist org.apache.hadoop hadoop-common - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT Apache Hadoop Common Apache Hadoop Common jar diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml index d0a1d3b67a6..1202002ed5f 100644 --- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml +++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml @@ -28,7 +28,7 @@ hadoop.common.configuration.version - 0.24.0 + 3.0.0 version of this configuration file diff --git a/hadoop-common-project/pom.xml b/hadoop-common-project/pom.xml index ac196188a7a..57112b7cb58 100644 --- a/hadoop-common-project/pom.xml +++ b/hadoop-common-project/pom.xml @@ -17,12 +17,12 @@ org.apache.hadoop hadoop-project - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT ../hadoop-project org.apache.hadoop hadoop-common-project - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT Apache Hadoop Common Project Apache Hadoop Common Project pom diff --git a/hadoop-dist/pom.xml b/hadoop-dist/pom.xml index de47700c993..1c8bf83aca9 100644 --- a/hadoop-dist/pom.xml +++ b/hadoop-dist/pom.xml @@ -17,12 +17,12 @@ org.apache.hadoop hadoop-project - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT ../hadoop-project org.apache.hadoop hadoop-dist - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT Apache Hadoop Distribution Apache Hadoop Distribution jar diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml index c1325e495d8..ae55c6056c3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml @@ -19,12 +19,12 @@ org.apache.hadoop hadoop-project - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT 
../../hadoop-project org.apache.hadoop hadoop-hdfs-httpfs - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT war Apache Hadoop HttpFS diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml index 13103752d3e..b87c59748b3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml @@ -17,12 +17,12 @@ org.apache.hadoop hadoop-project-dist - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT ../../hadoop-project-dist org.apache.hadoop hadoop-hdfs - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT Apache Hadoop HDFS Apache Hadoop HDFS jar diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml index 1fba8466ed2..aab4d5f4ab2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml @@ -17,13 +17,13 @@ org.apache.hadoop hadoop-project - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT ../../../../../hadoop-project org.apache.hadoop.contrib hadoop-hdfs-bkjournal - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT Apache Hadoop HDFS BookKeeper Journal Apache Hadoop HDFS BookKeeper Journal jar diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/build/aop.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/build/aop.xml index f3944837a7c..d838c61c90e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/build/aop.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/build/aop.xml @@ -21,7 +21,7 @@ - + diff --git a/hadoop-hdfs-project/pom.xml b/hadoop-hdfs-project/pom.xml index 299d6f86348..1d39a2c4a91 100644 --- a/hadoop-hdfs-project/pom.xml +++ b/hadoop-hdfs-project/pom.xml @@ -17,12 +17,12 @@ org.apache.hadoop hadoop-project - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT ../hadoop-project org.apache.hadoop hadoop-hdfs-project - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT Apache Hadoop HDFS Project Apache Hadoop HDFS Project pom diff --git a/hadoop-mapreduce-project/build.xml b/hadoop-mapreduce-project/build.xml index 22964d38e0c..b2d44ec87b7 100644 --- a/hadoop-mapreduce-project/build.xml +++ b/hadoop-mapreduce-project/build.xml @@ -32,7 +32,7 @@ - + diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml index 88032cad0a5..2059d280380 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml @@ -16,12 +16,12 @@ hadoop-mapreduce-client org.apache.hadoop - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT 4.0.0 org.apache.hadoop hadoop-mapreduce-client-app - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT hadoop-mapreduce-client-app @@ -112,7 +112,7 @@ - diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml index e33e589c9e2..1dd455877ea 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml @@ -16,12 +16,12 @@ hadoop-mapreduce-client org.apache.hadoop - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT 4.0.0 org.apache.hadoop hadoop-mapreduce-client-common - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT hadoop-mapreduce-client-common diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml index ca194cc239f..cfb8ce4bd7e 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml @@ -16,12 +16,12 @@ hadoop-mapreduce-client org.apache.hadoop - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT 4.0.0 org.apache.hadoop hadoop-mapreduce-client-core - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT hadoop-mapreduce-client-core diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml index 863cb58c1ba..9fa93b4c4f6 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml @@ -16,12 +16,12 @@ hadoop-mapreduce-client org.apache.hadoop - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT 4.0.0 org.apache.hadoop hadoop-mapreduce-client-hs - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT hadoop-mapreduce-client-hs diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml index b12c09aae18..4df2edaa5c4 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml @@ -16,12 +16,12 @@ hadoop-mapreduce-client org.apache.hadoop - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT 4.0.0 org.apache.hadoop hadoop-mapreduce-client-jobclient - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT hadoop-mapreduce-client-jobclient diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/pom.xml index 3af3129fafa..07f436e7617 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/pom.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/pom.xml @@ -16,12 +16,12 @@ hadoop-mapreduce-client org.apache.hadoop - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT 4.0.0 org.apache.hadoop hadoop-mapreduce-client-shuffle - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT hadoop-mapreduce-client-shuffle diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml index 63113611b9e..ad08306ba77 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml @@ -17,12 +17,12 @@ org.apache.hadoop hadoop-project - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT ../../hadoop-project org.apache.hadoop hadoop-mapreduce-client - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT hadoop-mapreduce-client pom diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml index ac365b20106..83424a80de2 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml @@ -17,12 +17,12 @@ org.apache.hadoop hadoop-project - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT ../../hadoop-project org.apache.hadoop hadoop-mapreduce-examples - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT Apache Hadoop MapReduce Examples Apache Hadoop MapReduce Examples jar diff --git 
a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/pom.xml b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/pom.xml index 4302f815cd2..462ecc361ff 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/pom.xml +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/pom.xml @@ -16,12 +16,12 @@ hadoop-yarn org.apache.hadoop - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT 4.0.0 org.apache.hadoop hadoop-yarn-api - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT hadoop-yarn-api diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml index a21cd11ca46..37a5e99702c 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml @@ -16,12 +16,12 @@ hadoop-yarn-applications org.apache.hadoop - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT 4.0.0 org.apache.hadoop hadoop-yarn-applications-distributedshell - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT hadoop-yarn-applications-distributedshell diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-applications/pom.xml b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-applications/pom.xml index ecd886bb96d..fd51584ab7f 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-applications/pom.xml +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-applications/pom.xml @@ -16,12 +16,12 @@ hadoop-yarn org.apache.hadoop - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT 4.0.0 org.apache.hadoop hadoop-yarn-applications - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT hadoop-yarn-applications pom diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/pom.xml b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/pom.xml index 89d8566538e..1acf220a876 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/pom.xml +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/pom.xml @@ -16,12 +16,12 @@ hadoop-yarn org.apache.hadoop - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT 4.0.0 org.apache.hadoop hadoop-yarn-common - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT hadoop-yarn-common diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml index 53c5afb501a..2dd4277a080 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml @@ -16,12 +16,12 @@ hadoop-yarn-server org.apache.hadoop - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT 4.0.0 org.apache.hadoop hadoop-yarn-server-common - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT hadoop-yarn-server-common diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml index 1272dde76fa..6032aabd6a4 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml @@ -16,12 +16,12 @@ hadoop-yarn-server org.apache.hadoop - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT 4.0.0 org.apache.hadoop hadoop-yarn-server-nodemanager - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT 
hadoop-yarn-server-nodemanager diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml index b65a5246962..fd7b767faa8 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml @@ -16,12 +16,12 @@ hadoop-yarn-server org.apache.hadoop - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT 4.0.0 org.apache.hadoop hadoop-yarn-server-resourcemanager - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT hadoop-yarn-server-resourcemanager diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml index fe95cdf530e..87c5d7e174a 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml @@ -16,11 +16,11 @@ hadoop-yarn-server org.apache.hadoop - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT org.apache.hadoop hadoop-yarn-server-tests - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT hadoop-yarn-server-tests diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/pom.xml b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/pom.xml index 6e657bd9abc..7be3676c5c4 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/pom.xml +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/pom.xml @@ -16,12 +16,12 @@ hadoop-yarn-server org.apache.hadoop - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT 4.0.0 org.apache.hadoop hadoop-yarn-server-web-proxy - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT hadoop-yarn-server-web-proxy diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/pom.xml b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/pom.xml index 3d82949e286..2de4e331e5b 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/pom.xml +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/pom.xml @@ -16,12 +16,12 @@ hadoop-yarn org.apache.hadoop - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT 4.0.0 org.apache.hadoop hadoop-yarn-server - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT hadoop-yarn-server pom diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/pom.xml b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/pom.xml index 887950e3741..5cc90dbddfd 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/pom.xml +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/pom.xml @@ -16,12 +16,12 @@ hadoop-yarn org.apache.hadoop - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT 4.0.0 org.apache.hadoop hadoop-yarn-site - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT hadoop-yarn-site diff --git a/hadoop-mapreduce-project/hadoop-yarn/pom.xml b/hadoop-mapreduce-project/hadoop-yarn/pom.xml index 6b5f6e17c18..0f76b24480c 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/pom.xml +++ b/hadoop-mapreduce-project/hadoop-yarn/pom.xml @@ -17,12 +17,12 @@ org.apache.hadoop hadoop-project - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT ../../hadoop-project org.apache.hadoop hadoop-yarn - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT pom hadoop-yarn diff --git a/hadoop-mapreduce-project/ivy/hadoop-mapred-instrumented-template.xml 
b/hadoop-mapreduce-project/ivy/hadoop-mapred-instrumented-template.xml index 701a00537c2..142f11b2ee3 100644 --- a/hadoop-mapreduce-project/ivy/hadoop-mapred-instrumented-template.xml +++ b/hadoop-mapreduce-project/ivy/hadoop-mapred-instrumented-template.xml @@ -28,7 +28,7 @@ org.apache.hadoop hadoop-common - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT diff --git a/hadoop-mapreduce-project/ivy/hadoop-mapred-template.xml b/hadoop-mapreduce-project/ivy/hadoop-mapred-template.xml index 23813cab3cf..7bfb31d3a82 100644 --- a/hadoop-mapreduce-project/ivy/hadoop-mapred-template.xml +++ b/hadoop-mapreduce-project/ivy/hadoop-mapred-template.xml @@ -28,7 +28,7 @@ org.apache.hadoop hadoop-common - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT diff --git a/hadoop-mapreduce-project/ivy/libraries.properties b/hadoop-mapreduce-project/ivy/libraries.properties index 33a3fdc479e..527900b2281 100644 --- a/hadoop-mapreduce-project/ivy/libraries.properties +++ b/hadoop-mapreduce-project/ivy/libraries.properties @@ -41,8 +41,8 @@ ftplet-api.version=1.0.0 ftpserver-core.version=1.0.0 ftpserver-deprecated.version=1.0.0-M2 -hadoop-common.version=0.24.0-SNAPSHOT -hadoop-hdfs.version=0.24.0-SNAPSHOT +hadoop-common.version=3.0.0-SNAPSHOT +hadoop-hdfs.version=3.0.0-SNAPSHOT hsqldb.version=1.8.0.10 @@ -82,5 +82,5 @@ xmlenc.version=0.52 xerces.version=1.4.4 jackson.version=1.8.8 -yarn.version=0.24.0-SNAPSHOT -hadoop-mapreduce.version=0.24.0-SNAPSHOT +yarn.version=3.0.0-SNAPSHOT +hadoop-mapreduce.version=3.0.0-SNAPSHOT diff --git a/hadoop-mapreduce-project/pom.xml b/hadoop-mapreduce-project/pom.xml index a4f321679ca..859eb26a6d0 100644 --- a/hadoop-mapreduce-project/pom.xml +++ b/hadoop-mapreduce-project/pom.xml @@ -18,12 +18,12 @@ org.apache.hadoop hadoop-project - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT ../hadoop-project org.apache.hadoop hadoop-mapreduce - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT pom hadoop-mapreduce http://hadoop.apache.org/mapreduce/ diff --git a/hadoop-mapreduce-project/src/test/aop/build/aop.xml b/hadoop-mapreduce-project/src/test/aop/build/aop.xml index 9029e629a18..390a0953f1a 100644 --- a/hadoop-mapreduce-project/src/test/aop/build/aop.xml +++ b/hadoop-mapreduce-project/src/test/aop/build/aop.xml @@ -21,7 +21,7 @@ - + diff --git a/hadoop-minicluster/pom.xml b/hadoop-minicluster/pom.xml index 793a98db9cc..c9e54d3dba7 100644 --- a/hadoop-minicluster/pom.xml +++ b/hadoop-minicluster/pom.xml @@ -18,12 +18,12 @@ org.apache.hadoop hadoop-project - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT ../hadoop-project org.apache.hadoop hadoop-minicluster - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT jar Apache Hadoop Mini-Cluster diff --git a/hadoop-project-dist/pom.xml b/hadoop-project-dist/pom.xml index 41ba7564a27..1c7042516b8 100644 --- a/hadoop-project-dist/pom.xml +++ b/hadoop-project-dist/pom.xml @@ -17,12 +17,12 @@ org.apache.hadoop hadoop-project - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT ../hadoop-project org.apache.hadoop hadoop-project-dist - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT Apache Hadoop Project Dist POM Apache Hadoop Project Dist POM pom diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index dee063166f6..b37b0bad952 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -17,11 +17,11 @@ org.apache.hadoop hadoop-main - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT org.apache.hadoop hadoop-project - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT Apache Hadoop Project POM Apache Hadoop Project POM pom diff --git a/hadoop-tools/hadoop-archives/pom.xml b/hadoop-tools/hadoop-archives/pom.xml index 73f5201004d..1560cb08835 100644 --- a/hadoop-tools/hadoop-archives/pom.xml 
+++ b/hadoop-tools/hadoop-archives/pom.xml @@ -17,12 +17,12 @@ org.apache.hadoop hadoop-project - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT ../../hadoop-project org.apache.hadoop hadoop-archives - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT Apache Hadoop Archives Apache Hadoop Archives jar diff --git a/hadoop-tools/hadoop-distcp/pom.xml b/hadoop-tools/hadoop-distcp/pom.xml index 125a118edb4..46e0c1a6ebe 100644 --- a/hadoop-tools/hadoop-distcp/pom.xml +++ b/hadoop-tools/hadoop-distcp/pom.xml @@ -17,12 +17,12 @@ org.apache.hadoop hadoop-project - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT ../../hadoop-project org.apache.hadoop hadoop-distcp - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT Apache Hadoop Distributed Copy Apache Hadoop Distributed Copy jar diff --git a/hadoop-tools/hadoop-extras/pom.xml b/hadoop-tools/hadoop-extras/pom.xml index aadb2dbd6e3..da8bd155ba9 100644 --- a/hadoop-tools/hadoop-extras/pom.xml +++ b/hadoop-tools/hadoop-extras/pom.xml @@ -17,12 +17,12 @@ org.apache.hadoop hadoop-project - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT ../../hadoop-project org.apache.hadoop hadoop-extras - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT Apache Hadoop Extras Apache Hadoop Extras jar diff --git a/hadoop-tools/hadoop-rumen/pom.xml b/hadoop-tools/hadoop-rumen/pom.xml index 00bfa937df4..0c3d2257382 100644 --- a/hadoop-tools/hadoop-rumen/pom.xml +++ b/hadoop-tools/hadoop-rumen/pom.xml @@ -17,12 +17,12 @@ org.apache.hadoop hadoop-project - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT ../../hadoop-project org.apache.hadoop hadoop-rumen - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT Apache Hadoop Rumen Apache Hadoop Rumen jar diff --git a/hadoop-tools/hadoop-streaming/pom.xml b/hadoop-tools/hadoop-streaming/pom.xml index 85d9ba65538..c19872e6303 100644 --- a/hadoop-tools/hadoop-streaming/pom.xml +++ b/hadoop-tools/hadoop-streaming/pom.xml @@ -17,12 +17,12 @@ org.apache.hadoop hadoop-project - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT ../../hadoop-project org.apache.hadoop hadoop-streaming - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT Apache Hadoop MapReduce Streaming Apache Hadoop MapReduce Streaming jar diff --git a/hadoop-tools/hadoop-tools-dist/pom.xml b/hadoop-tools/hadoop-tools-dist/pom.xml index 3395e4f2d64..46d1c195305 100644 --- a/hadoop-tools/hadoop-tools-dist/pom.xml +++ b/hadoop-tools/hadoop-tools-dist/pom.xml @@ -17,12 +17,12 @@ org.apache.hadoop hadoop-project-dist - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT ../../hadoop-project-dist org.apache.hadoop hadoop-tools-dist - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT Apache Hadoop Tools Dist Apache Hadoop Tools Dist jar diff --git a/hadoop-tools/pom.xml b/hadoop-tools/pom.xml index a4e99aa77b5..dfa9049a80e 100644 --- a/hadoop-tools/pom.xml +++ b/hadoop-tools/pom.xml @@ -17,12 +17,12 @@ org.apache.hadoop hadoop-project - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT ../hadoop-project org.apache.hadoop hadoop-tools - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT Apache Hadoop Tools Apache Hadoop Tools pom diff --git a/pom.xml b/pom.xml index cac707e7c7f..d2eae64d4ed 100644 --- a/pom.xml +++ b/pom.xml @@ -16,7 +16,7 @@ 4.0.0 org.apache.hadoop hadoop-main - 0.24.0-SNAPSHOT + 3.0.0-SNAPSHOT Apache Hadoop Main Apache Hadoop Main pom From 1427468a72244e97984c6e9b3eb3e8d9c2d0c025 Mon Sep 17 00:00:00 2001 From: Aaron Myers Date: Wed, 28 Mar 2012 17:58:23 +0000 Subject: [PATCH 2/6] HDFS-3156. TestDFSHAAdmin is failing post HADOOP-8202. Contributed by Aaron T. Myers. 
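Background for the hunk below: HADOOP-8202 changed RPC.stopProxy() to close proxies through the Closeable interface, and DFSHAAdmin tears down its HAServiceProtocol proxies that way, so a plain Mockito.mock(HAServiceProtocol.class) no longer stands in for a real proxy. That appears to be why the test broke and why this patch switches to MockitoUtil.mockProtocol. A minimal sketch of such a helper, assuming Mockito's MockSettings.extraInterfaces API; the real org.apache.hadoop.test.MockitoUtil may differ in detail:

    import java.io.Closeable;

    import org.mockito.Mockito;

    public final class MockitoUtil {
      private MockitoUtil() {}

      /**
       * Mock an RPC protocol interface. The mock additionally implements
       * Closeable so that RPC.stopProxy() can close it like a real proxy.
       */
      public static <T> T mockProtocol(Class<T> clazz) {
        return Mockito.mock(clazz,
            Mockito.withSettings().extraInterfaces(Closeable.class));
      }
    }

In the test hunk below it is a drop-in replacement for Mockito.mock: mockProtocol = MockitoUtil.mockProtocol(HAServiceProtocol.class).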
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1306517 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 ++ .../test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java | 3 ++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 71a2ebda08d..de78357d7c6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -360,6 +360,8 @@ Release 0.23.3 - UNRELEASED HDFS-3132. Fix findbugs warning on HDFS trunk. (todd) + HDFS-3156. TestDFSHAAdmin is failing post HADOOP-8202. (atm) + BREAKDOWN OF HDFS-1623 SUBTASKS HDFS-2179. Add fencing framework and mechanisms for NameNode HA. (todd) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java index 89fde370c0c..79793e2440d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java @@ -36,6 +36,7 @@ import org.apache.hadoop.ha.HAServiceTarget; import org.apache.hadoop.ha.HealthCheckFailedException; import org.apache.hadoop.ha.NodeFencer; +import org.apache.hadoop.test.MockitoUtil; import org.junit.Before; import org.junit.Test; @@ -79,7 +80,7 @@ private HdfsConfiguration getHAConf() { @Before public void setup() throws IOException { - mockProtocol = Mockito.mock(HAServiceProtocol.class); + mockProtocol = MockitoUtil.mockProtocol(HAServiceProtocol.class); tool = new DFSHAAdmin() { @Override From 991eddd04375d778a370e14f6f41e8a52919dccc Mon Sep 17 00:00:00 2001 From: Aaron Myers Date: Wed, 28 Mar 2012 18:55:33 +0000 Subject: [PATCH 3/6] Change "0.23.3" to "2.0.0" in CHANGES.txt files. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1306538 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 2 +- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 +- hadoop-mapreduce-project/CHANGES.txt | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index efb2a756c14..1dfd0898296 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -113,7 +113,7 @@ Trunk (unreleased changes) HADOOP-7761. Improve the performance of raw comparisons. (todd) -Release 0.23.3 - UNRELEASED +Release 2.0.0 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index de78357d7c6..a3bb31e4d6a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -109,7 +109,7 @@ Trunk (unreleased changes) HDFS-3116. Typo in fetchdt error message. (AOE Takashi via atm) -Release 0.23.3 - UNRELEASED +Release 2.0.0 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index d79d3ae1390..d0c538e8884 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -96,7 +96,7 @@ Trunk (unreleased changes) MAPREDUCE-1740. 
NPE in getMatchingLevelForNodes when node locations are variable depth (ahmed via tucu) [IMPORTANT: this is dead code in trunk] -Release 0.23.3 - UNRELEASED +Release 2.0.0 - UNRELEASED INCOMPATIBLE CHANGES From aa8cb2287f5068e7985442ef293f0336fd40c3de Mon Sep 17 00:00:00 2001 From: Aaron Myers Date: Wed, 28 Mar 2012 19:02:16 +0000 Subject: [PATCH 4/6] HDFS-3143. TestGetBlocks.testGetBlocks is failing. Contributed by Arpit Gupta. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1306542 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 ++ .../src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index a3bb31e4d6a..eaa595d7c07 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -362,6 +362,8 @@ Release 2.0.0 - UNRELEASED HDFS-3156. TestDFSHAAdmin is failing post HADOOP-8202. (atm) + HDFS-3143. TestGetBlocks.testGetBlocks is failing. (Arpit Gupta via atm) + BREAKDOWN OF HDFS-1623 SUBTASKS HDFS-2179. Add fencing framework and mechanisms for NameNode HA. (todd) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java index b0878d1eb8c..c27895f1f95 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java @@ -135,7 +135,7 @@ private void getBlocksWithException(NamenodeProtocol namenode, namenode.getBlocks(new DatanodeInfo(), 2); } catch(RemoteException e) { getException = true; - assertTrue(e.getMessage().contains("IllegalArgumentException")); + assertTrue(e.getClassName().contains("HadoopIllegalArgumentException")); } assertTrue(getException); } From 0475795066ad89fc3ac4bfbe0dbe061555f3fbf7 Mon Sep 17 00:00:00 2001 From: Eli Collins Date: Wed, 28 Mar 2012 19:33:22 +0000 Subject: [PATCH 5/6] HDFS-3139. Minor Datanode logging improvement. Contributed by Eli Collins git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1306549 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 ++ .../hadoop/hdfs/protocol/DatanodeID.java | 16 +++++++-------- .../hadoop/hdfs/protocol/DatanodeInfo.java | 20 +++++++++---------- .../blockmanagement/DatanodeDescriptor.java | 19 ++++++++---------- .../hadoop/hdfs/server/datanode/DataNode.java | 14 ++++++------- .../datanode/SecureDataNodeStarter.java | 20 +++++++++++-------- .../hadoop/hdfs/TestDFSAddressConfig.java | 6 +++--- 7 files changed, 49 insertions(+), 48 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index eaa595d7c07..10100b875a7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -873,6 +873,8 @@ Release 0.23.1 - 2012-02-17 HDFS-2868. Expose xceiver counts via the DataNode MXBean. (harsh) + HDFS-3139. Minor Datanode logging improvement. (eli) + OPTIMIZATIONS HDFS-2130. Switch default checksum to CRC32C. 
(todd) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java index 21e549d26a1..9c837d291f4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java @@ -28,20 +28,20 @@ import org.apache.hadoop.io.WritableComparable; /** - * DatanodeID is composed of the data node - * name (hostname:portNumber) and the data storage ID, - * which it currently represents. - * + * This class represents the primary identifier for a Datanode. + * Datanodes are identified by how they can be contacted (hostname + * and ports) and their storage ID, a unique number that associates + * the Datanodes blocks with a particular Datanode. */ @InterfaceAudience.Private @InterfaceStability.Evolving public class DatanodeID implements WritableComparable { public static final DatanodeID[] EMPTY_ARRAY = {}; - public String name; /// hostname:portNumber - public String storageID; /// unique per cluster storageID - protected int infoPort; /// the port where the infoserver is running - public int ipcPort; /// the port where the ipc server is running + public String name; // hostname:port (data transfer port) + public String storageID; // unique per cluster storageID + protected int infoPort; // info server port + public int ipcPort; // ipc server port /** Equivalent to DatanodeID(""). */ public DatanodeID() {this("");} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java index 80b2d28d802..2065ae1d1eb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java @@ -37,9 +37,9 @@ import org.apache.hadoop.util.StringUtils; /** - * DatanodeInfo represents the status of a DataNode. - * This object is used for communication in the - * Datanode Protocol and the Client Protocol. + * This class extends the primary identifier of a Datanode with ephemeral + * state, eg usage information, current administrative state, and the + * network location that is communicated to clients. */ @InterfaceAudience.Private @InterfaceStability.Evolving @@ -52,12 +52,10 @@ public class DatanodeInfo extends DatanodeID implements Node { protected int xceiverCount; protected String location = NetworkTopology.DEFAULT_RACK; - /** HostName as supplied by the datanode during registration as its - * name. Namenode uses datanode IP address as the name. 
- */ + // The FQDN of the IP associated with the Datanode's hostname protected String hostName = null; - // administrative states of a datanode + // Datanode administrative states public enum AdminStates { NORMAL("In Service"), DECOMMISSION_INPROGRESS("Decommission In Progress"), @@ -241,12 +239,14 @@ public String getDatanodeReport() { long nonDFSUsed = getNonDfsUsed(); float usedPercent = getDfsUsedPercent(); float remainingPercent = getRemainingPercent(); - String hostName = NetUtils.getHostNameOfIP(name); + String lookupName = NetUtils.getHostNameOfIP(name); buffer.append("Name: "+ name); - if(hostName != null) - buffer.append(" (" + hostName + ")"); + if (lookupName != null) { + buffer.append(" (" + lookupName + ")"); + } buffer.append("\n"); + buffer.append("Hostname: " + getHostName() + "\n"); if (!NetworkTopology.DEFAULT_RACK.equals(location)) { buffer.append("Rack: "+location+"\n"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java index 984456f142d..f01cd0e3f68 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java @@ -34,16 +34,13 @@ import org.apache.hadoop.io.Text; import org.apache.hadoop.io.WritableUtils; -/************************************************** - * DatanodeDescriptor tracks stats on a given DataNode, such as - * available storage capacity, last update time, etc., and maintains a - * set of blocks stored on the datanode. - * - * This data structure is internal to the namenode. It is *not* sent - * over-the-wire to the Client or the Datanodes. Neither is it stored - * persistently in the fsImage. - **************************************************/ +/** + * This class extends the DatanodeInfo class with ephemeral information (eg + * health, capacity, what blocks are associated with the Datanode) that is + * private to the Namenode, ie this class is not exposed to clients. + */ @InterfaceAudience.Private +@InterfaceStability.Evolving public class DatanodeDescriptor extends DatanodeInfo { // Stores status of decommissioning. @@ -586,14 +583,14 @@ public void updateRegInfo(DatanodeID nodeReg) { } /** - * @return Blanacer bandwidth in bytes per second for this datanode. + * @return balancer bandwidth in bytes per second for this datanode */ public long getBalancerBandwidth() { return this.bandwidth; } /** - * @param bandwidth Blanacer bandwidth in bytes per second for this datanode. 
+ * @param bandwidth balancer bandwidth in bytes per second for this datanode */ public void setBalancerBandwidth(long bandwidth) { this.bandwidth = bandwidth; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java index f7dd2a5ac3a..586084b0c9c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java @@ -330,9 +330,7 @@ conf, new AccessControlList(conf.get(DFS_ADMIN, " "))) : new HttpServer("datanode", infoHost, tmpInfoPort, tmpInfoPort == 0, conf, new AccessControlList(conf.get(DFS_ADMIN, " ")), secureResources.getListener()); - if(LOG.isDebugEnabled()) { - LOG.debug("Datanode listening on " + infoHost + ":" + tmpInfoPort); - } + LOG.info("Opened info server at " + infoHost + ":" + tmpInfoPort); if (conf.getBoolean(DFS_HTTPS_ENABLE_KEY, false)) { boolean needClientAuth = conf.getBoolean(DFS_CLIENT_HTTPS_NEED_AUTH_KEY, DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT); @@ -398,7 +396,8 @@ private void initIpcServer(Configuration conf) throws IOException { .newReflectiveBlockingService(interDatanodeProtocolXlator); DFSUtil.addPBProtocol(conf, InterDatanodeProtocolPB.class, service, ipcServer); - + LOG.info("Opened IPC server at " + ipcServer.getListenerAddress()); + // set service-level authorization security policy if (conf.getBoolean( CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) { @@ -486,14 +485,14 @@ private synchronized void shutdownDirectoryScanner() { } private void initDataXceiver(Configuration conf) throws IOException { - InetSocketAddress socAddr = DataNode.getStreamingAddr(conf); + InetSocketAddress streamingAddr = DataNode.getStreamingAddr(conf); // find free port or use privileged port provided ServerSocket ss; if(secureResources == null) { ss = (dnConf.socketWriteTimeout > 0) ? 
ServerSocketChannel.open().socket() : new ServerSocket(); - Server.bind(ss, socAddr, 0); + Server.bind(ss, streamingAddr, 0); } else { ss = secureResources.getStreamingSocket(); } @@ -502,8 +501,7 @@ private void initDataXceiver(Configuration conf) throws IOException { int tmpPort = ss.getLocalPort(); selfAddr = new InetSocketAddress(ss.getInetAddress().getHostAddress(), tmpPort); - LOG.info("Opened info server at " + tmpPort); - + LOG.info("Opened streaming server at " + selfAddr); this.threadGroup = new ThreadGroup("dataXceiverServer"); this.dataXceiverServer = new Daemon(threadGroup, new DataXceiverServer(ss, conf, this)); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java index c6744f9317c..f7da29b4c9d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java @@ -69,18 +69,19 @@ public void init(DaemonContext context) throws Exception { args = context.getArguments(); // Obtain secure port for data streaming to datanode - InetSocketAddress socAddr = DataNode.getStreamingAddr(conf); + InetSocketAddress streamingAddr = DataNode.getStreamingAddr(conf); int socketWriteTimeout = conf.getInt(DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY, HdfsServerConstants.WRITE_TIMEOUT); ServerSocket ss = (socketWriteTimeout > 0) ? ServerSocketChannel.open().socket() : new ServerSocket(); - ss.bind(socAddr, 0); + ss.bind(streamingAddr, 0); // Check that we got the port we need - if(ss.getLocalPort() != socAddr.getPort()) + if (ss.getLocalPort() != streamingAddr.getPort()) { throw new RuntimeException("Unable to bind on specified streaming port in secure " + - "context. Needed " + socAddr.getPort() + ", got " + ss.getLocalPort()); + "context. Needed " + streamingAddr.getPort() + ", got " + ss.getLocalPort()); + } // Obtain secure listener for web server SelectChannelConnector listener = @@ -90,15 +91,18 @@ public void init(DaemonContext context) throws Exception { listener.setPort(infoSocAddr.getPort()); // Open listener here in order to bind to port as root listener.open(); - if(listener.getPort() != infoSocAddr.getPort()) + if (listener.getPort() != infoSocAddr.getPort()) { throw new RuntimeException("Unable to bind on specified info port in secure " + - "context. Needed " + socAddr.getPort() + ", got " + ss.getLocalPort()); + "context. 
Needed " + streamingAddr.getPort() + ", got " + ss.getLocalPort()); + } System.err.println("Successfully obtained privileged resources (streaming port = " + ss + " ) (http listener port = " + listener.getConnection() +")"); - if(ss.getLocalPort() >= 1023 || listener.getPort() >= 1023) + if (ss.getLocalPort() >= 1023 || listener.getPort() >= 1023) { throw new RuntimeException("Cannot start secure datanode with unprivileged ports"); - + } + System.err.println("Opened streaming server at " + streamingAddr); + System.err.println("Opened info server at " + infoSocAddr); resources = new SecureResources(ss, listener); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java index 0f0caa673b5..4d614b8d18e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java @@ -52,7 +52,7 @@ public void testDFSAddressConfig() throws IOException { String selfSocketAddr = dn.getSelfAddr().toString(); System.out.println("DN Self Socket Addr == " + selfSocketAddr); - assertTrue(selfSocketAddr.startsWith("/127.0.0.1:")); + assertTrue(selfSocketAddr.contains("/127.0.0.1:")); /*------------------------------------------------------------------------- * Shut down the datanodes, reconfigure, and bring them back up. @@ -78,7 +78,7 @@ public void testDFSAddressConfig() throws IOException { selfSocketAddr = dn.getSelfAddr().toString(); System.out.println("DN Self Socket Addr == " + selfSocketAddr); // assert that default self socket address is 127.0.0.1 - assertTrue(selfSocketAddr.startsWith("/127.0.0.1:")); + assertTrue(selfSocketAddr.contains("/127.0.0.1:")); /*------------------------------------------------------------------------- * Shut down the datanodes, reconfigure, and bring them back up. @@ -103,7 +103,7 @@ public void testDFSAddressConfig() throws IOException { selfSocketAddr = dn.getSelfAddr().toString(); System.out.println("DN Self Socket Addr == " + selfSocketAddr); // assert that default self socket address is 0.0.0.0 - assertTrue(selfSocketAddr.startsWith("/0.0.0.0:")); + assertTrue(selfSocketAddr.contains("/0.0.0.0:")); cluster.shutdown(); } From 99a68a14237b4cd1936ba5e9468d25d35dad594c Mon Sep 17 00:00:00 2001 From: Tsz-wo Sze Date: Wed, 28 Mar 2012 20:37:34 +0000 Subject: [PATCH 6/6] HDFS-3155. Clean up FSDataset implemenation related code. 
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1306582 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 + .../hdfs/server/datanode/BlockReceiver.java | 7 +- .../hdfs/server/datanode/BlockSender.java | 17 ++- .../hdfs/server/datanode/DataStorage.java | 4 +- .../hdfs/server/datanode/DatanodeUtil.java | 2 +- .../hdfs/server/datanode/FSDataset.java | 4 +- .../server/datanode/ReplicaUnderRecovery.java | 10 +- .../apache/hadoop/hdfs/MiniDFSCluster.java | 7 +- .../hadoop/hdfs/TestLeaseRecovery2.java | 8 +- .../org/apache/hadoop/hdfs/TestPipelines.java | 17 +-- .../hdfs/server/datanode/DataNodeAdapter.java | 111 ------------------ .../server/datanode/DataNodeTestUtils.java | 90 +++++++++++++- .../server/datanode/SimulatedFSDataset.java | 1 - .../hdfs/server/datanode/TestBlockReport.java | 25 ++-- .../namenode/ha/HAStressTestHarness.java | 6 +- .../hdfs/server/namenode/ha/HATestUtil.java | 4 +- .../server/namenode/ha/TestDNFencing.java | 10 +- .../namenode/ha/TestPipelinesFailover.java | 11 +- .../server/namenode/ha/TestStandbyIsHot.java | 6 +- 19 files changed, 150 insertions(+), 192 deletions(-) delete mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeAdapter.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 10100b875a7..7a2dea9583e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -277,6 +277,8 @@ Release 2.0.0 - UNRELEASED HDFS-3129. NetworkTopology: add test that getLeaf should check for invalid topologies (Colin Patrick McCabe via eli) + HDFS-3155. Clean up FSDataset implemenation related code. (szetszwo) + OPTIMIZATIONS HDFS-3024. Improve performance of stringification in addStoredBlock (todd) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java index 1449b88f8fe..72591e018ee 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java @@ -358,9 +358,8 @@ private void verifyChunks( byte[] dataBuf, int dataOff, int len, * This does not verify the original checksums, under the assumption * that they have already been validated. 
*/ - private void translateChunks( byte[] dataBuf, int dataOff, int len, - byte[] checksumBuf, int checksumOff ) - throws IOException { + private void translateChunks( byte[] dataBuf, int dataOff, int len, + byte[] checksumBuf, int checksumOff ) { if (len == 0) return; int numChunks = (len - 1)/bytesPerChecksum + 1; @@ -702,7 +701,7 @@ private int receivePacket(long offsetInBlock, long seqno, return lastPacketInBlock?-1:len; } - private void dropOsCacheBehindWriter(long offsetInBlock) throws IOException { + private void dropOsCacheBehindWriter(long offsetInBlock) { try { if (outFd != null && offsetInBlock > lastCacheDropOffset + CACHE_DROP_LAG_BYTES) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java index 53ee5b7c06b..6a830dbbf98 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java @@ -111,10 +111,6 @@ class BlockSender implements java.io.Closeable { /** the block to read from */ private final ExtendedBlock block; - /** the replica to read from */ - private final Replica replica; - /** The visible length of a replica. */ - private final long replicaVisibleLength; /** Stream to read block data from */ private InputStream blockIn; /** updated while using transferTo() */ @@ -189,17 +185,18 @@ class BlockSender implements java.io.Closeable { this.readaheadLength = datanode.getDnConf().readaheadLength; this.shouldDropCacheBehindRead = datanode.getDnConf().dropCacheBehindReads; + final Replica replica; + final long replicaVisibleLength; synchronized(datanode.data) { - this.replica = getReplica(block, datanode); - this.replicaVisibleLength = replica.getVisibleLength(); + replica = getReplica(block, datanode); + replicaVisibleLength = replica.getVisibleLength(); } // if there is a write in progress ChunkChecksum chunkChecksum = null; if (replica instanceof ReplicaBeingWritten) { - long minEndOffset = startOffset + length; - waitForMinLength((ReplicaBeingWritten)replica, minEndOffset); - ReplicaInPipeline rip = (ReplicaInPipeline) replica; - chunkChecksum = rip.getLastChecksumAndDataLen(); + final ReplicaBeingWritten rbw = (ReplicaBeingWritten)replica; + waitForMinLength(rbw, startOffset + length); + chunkChecksum = rbw.getLastChecksumAndDataLen(); } if (replica.getGenerationStamp() < block.getGenerationStamp()) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java index 64349d86c40..16244c725bd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java @@ -760,8 +760,8 @@ private static String convertMetatadataFileName(String oldFileName) { /** * Add bpStorage into bpStorageMap */ - private void addBlockPoolStorage(String bpID, BlockPoolSliceStorage bpStorage) - throws IOException { + private void addBlockPoolStorage(String bpID, BlockPoolSliceStorage bpStorage + ) { if (!this.bpStorageMap.containsKey(bpID)) { this.bpStorageMap.put(bpID, bpStorage); } diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeUtil.java index c59929edd6c..e3eaa6126ea 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeUtil.java @@ -32,7 +32,7 @@ class DatanodeUtil { static final String UNLINK_BLOCK_SUFFIX = ".unlinked"; - private final static String DISK_ERROR = "Possible disk error on file creation: "; + private static final String DISK_ERROR = "Possible disk error: "; /** Get the cause of an I/O exception if caused by a possible disk error * @param ioe an I/O exception diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java index 3a4a4b612ea..f8699630f28 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java @@ -1800,7 +1800,7 @@ private synchronized FinalizedReplica finalizeReplica(String bpid, ReplicaInfo replicaInfo) throws IOException { FinalizedReplica newReplicaInfo = null; if (replicaInfo.getState() == ReplicaState.RUR && - ((ReplicaUnderRecovery)replicaInfo).getOrignalReplicaState() == + ((ReplicaUnderRecovery)replicaInfo).getOriginalReplica().getState() == ReplicaState.FINALIZED) { newReplicaInfo = (FinalizedReplica) ((ReplicaUnderRecovery)replicaInfo).getOriginalReplica(); @@ -2036,7 +2036,7 @@ public void invalidate(String bpid, Block invalidBlks[]) throws IOException { ReplicaState replicaState = dinfo.getState(); if (replicaState == ReplicaState.FINALIZED || (replicaState == ReplicaState.RUR && - ((ReplicaUnderRecovery)dinfo).getOrignalReplicaState() == + ((ReplicaUnderRecovery)dinfo).getOriginalReplica().getState() == ReplicaState.FINALIZED)) { v.clearPath(bpid, parent); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java index 2e15e6fce54..d5bbf04227a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java @@ -86,14 +86,6 @@ void setRecoveryID(long recoveryId) { ReplicaInfo getOriginalReplica() { return original; } - - /** - * Get the original replica's state - * @return the original replica's state - */ - ReplicaState getOrignalReplicaState() { - return original.getState(); - } @Override //ReplicaInfo boolean isUnlinked() { @@ -170,6 +162,6 @@ public String toString() { ReplicaRecoveryInfo createInfo() { return new ReplicaRecoveryInfo(original.getBlockId(), original.getBytesOnDisk(), original.getGenerationStamp(), - getOrignalReplicaState()); + original.getState()); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java index 6ab3f0ce66c..6717a01dabc 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java @@ -80,7 +80,6 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.datanode.DataNode; -import org.apache.hadoop.hdfs.server.datanode.DataNodeAdapter; import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; import org.apache.hadoop.hdfs.server.datanode.DataStorage; import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset; @@ -1664,7 +1663,7 @@ public void transitionToStandby(int nnIndex) throws IOException, public void triggerBlockReports() throws IOException { for (DataNode dn : getDataNodes()) { - DataNodeAdapter.triggerBlockReport(dn); + DataNodeTestUtils.triggerBlockReport(dn); } } @@ -1672,14 +1671,14 @@ public void triggerBlockReports() public void triggerDeletionReports() throws IOException { for (DataNode dn : getDataNodes()) { - DataNodeAdapter.triggerDeletionReport(dn); + DataNodeTestUtils.triggerDeletionReport(dn); } } public void triggerHeartbeats() throws IOException { for (DataNode dn : getDataNodes()) { - DataNodeAdapter.triggerHeartbeat(dn); + DataNodeTestUtils.triggerHeartbeat(dn); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java index 0222e185797..a374e50d496 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java @@ -18,8 +18,8 @@ package org.apache.hadoop.hdfs; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import java.io.IOException; @@ -41,7 +41,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.datanode.DataNode; -import org.apache.hadoop.hdfs.server.datanode.DataNodeAdapter; +import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.LeaseManager; import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; @@ -454,7 +454,7 @@ public void hardLeaseRecoveryRestartHelper(boolean doRename) // Make sure the DNs don't send a heartbeat for a while, so the blocks // won't actually get completed during lease recovery. for (DataNode dn : cluster.getDataNodes()) { - DataNodeAdapter.setHeartbeatsDisabledForTests(dn, true); + DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true); } // set the hard limit to be 1 second @@ -474,7 +474,7 @@ public void hardLeaseRecoveryRestartHelper(boolean doRename) // Let the DNs send heartbeats again. 
for (DataNode dn : cluster.getDataNodes()) { - DataNodeAdapter.setHeartbeatsDisabledForTests(dn, false); + DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, false); } cluster.waitActive(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPipelines.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPipelines.java index 1dc0b1ebd42..0d2ebc96af6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPipelines.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPipelines.java @@ -17,6 +17,13 @@ */ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.util.List; +import java.util.Random; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.impl.Log4JLogger; @@ -26,22 +33,16 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.datanode.DataNode; -import org.apache.hadoop.hdfs.server.datanode.DataNodeAdapter; +import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; import org.apache.hadoop.hdfs.server.datanode.Replica; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.test.GenericTestUtils; import org.apache.log4j.Level; import org.junit.After; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; import org.junit.Before; import org.junit.Test; -import java.io.IOException; -import java.util.List; -import java.util.Random; - public class TestPipelines { public static final Log LOG = LogFactory.getLog(TestPipelines.class); @@ -105,7 +106,7 @@ public void pipeline_01() throws IOException { String bpid = cluster.getNamesystem().getBlockPoolId(); for (DataNode dn : cluster.getDataNodes()) { - Replica r = DataNodeAdapter.fetchReplicaInfo(dn, bpid, lb.get(0) + Replica r = DataNodeTestUtils.fetchReplicaInfo(dn, bpid, lb.get(0) .getBlock().getBlockId()); assertTrue("Replica on DN " + dn + " shouldn't be null", r != null); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeAdapter.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeAdapter.java deleted file mode 100644 index 6ab878c5617..00000000000 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeAdapter.java +++ /dev/null @@ -1,111 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hdfs.server.datanode; - -import java.io.IOException; - -import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB; -import org.apache.hadoop.hdfs.server.namenode.NameNode; -import org.mockito.Mockito; - -import com.google.common.base.Preconditions; - -/** - * WARNING!! This is TEST ONLY class: it never has to be used - * for ANY development purposes. - * - * This is a utility class to expose DataNode functionality for - * unit and functional tests. - */ -public class DataNodeAdapter { - /** - * Fetch a copy of ReplicaInfo from a datanode by block id - * @param dn datanode to retrieve a replicainfo object from - * @param bpid Block pool Id - * @param blkId id of the replica's block - * @return copy of ReplicaInfo object @link{FSDataset#fetchReplicaInfo} - */ - public static ReplicaInfo fetchReplicaInfo (final DataNode dn, - final String bpid, - final long blkId) { - return ((FSDataset)dn.data).fetchReplicaInfo(bpid, blkId); - } - - public static void setHeartbeatsDisabledForTests(DataNode dn, - boolean heartbeatsDisabledForTests) { - dn.setHeartbeatsDisabledForTests(heartbeatsDisabledForTests); - } - - public static void triggerDeletionReport(DataNode dn) throws IOException { - for (BPOfferService bpos : dn.getAllBpOs()) { - bpos.triggerDeletionReportForTests(); - } - } - - public static void triggerHeartbeat(DataNode dn) throws IOException { - for (BPOfferService bpos : dn.getAllBpOs()) { - bpos.triggerHeartbeatForTests(); - } - } - - public static void triggerBlockReport(DataNode dn) throws IOException { - for (BPOfferService bpos : dn.getAllBpOs()) { - bpos.triggerBlockReportForTests(); - } - } - - public static long getPendingAsyncDeletions(DataNode dn) { - FSDataset fsd = (FSDataset)dn.getFSDataset(); - return fsd.asyncDiskService.countPendingDeletions(); - } - - /** - * Insert a Mockito spy object between the given DataNode and - * the given NameNode. This can be used to delay or wait for - * RPC calls on the datanode->NN path. 
- */ - public static DatanodeProtocolClientSideTranslatorPB spyOnBposToNN( - DataNode dn, NameNode nn) { - String bpid = nn.getNamesystem().getBlockPoolId(); - - BPOfferService bpos = null; - for (BPOfferService thisBpos : dn.getAllBpOs()) { - if (thisBpos.getBlockPoolId().equals(bpid)) { - bpos = thisBpos; - break; - } - } - Preconditions.checkArgument(bpos != null, - "No such bpid: %s", bpid); - - BPServiceActor bpsa = null; - for (BPServiceActor thisBpsa : bpos.getBPServiceActors()) { - if (thisBpsa.getNNSocketAddress().equals(nn.getServiceRpcAddress())) { - bpsa = thisBpsa; - break; - } - } - Preconditions.checkArgument(bpsa != null, - "No service actor to NN at %s", nn.getServiceRpcAddress()); - - DatanodeProtocolClientSideTranslatorPB origNN = bpsa.getNameNodeProxy(); - DatanodeProtocolClientSideTranslatorPB spy = Mockito.spy(origNN); - bpsa.setNameNode(spy); - return spy; - } -} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java index 1a871dd35e3..726c5d3ce3e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java @@ -24,8 +24,13 @@ import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; +import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB; import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; +import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; +import org.mockito.Mockito; + +import com.google.common.base.Preconditions; /** * Utility class for accessing package-private DataNode information during tests. @@ -42,6 +47,64 @@ public class DataNodeTestUtils { return dn.getDNRegistrationForBP(bpid); } + public static void setHeartbeatsDisabledForTests(DataNode dn, + boolean heartbeatsDisabledForTests) { + dn.setHeartbeatsDisabledForTests(heartbeatsDisabledForTests); + } + + public static void triggerDeletionReport(DataNode dn) throws IOException { + for (BPOfferService bpos : dn.getAllBpOs()) { + bpos.triggerDeletionReportForTests(); + } + } + + public static void triggerHeartbeat(DataNode dn) throws IOException { + for (BPOfferService bpos : dn.getAllBpOs()) { + bpos.triggerHeartbeatForTests(); + } + } + + public static void triggerBlockReport(DataNode dn) throws IOException { + for (BPOfferService bpos : dn.getAllBpOs()) { + bpos.triggerBlockReportForTests(); + } + } + + /** + * Insert a Mockito spy object between the given DataNode and + * the given NameNode. This can be used to delay or wait for + * RPC calls on the datanode->NN path. 
+ */ + public static DatanodeProtocolClientSideTranslatorPB spyOnBposToNN( + DataNode dn, NameNode nn) { + String bpid = nn.getNamesystem().getBlockPoolId(); + + BPOfferService bpos = null; + for (BPOfferService thisBpos : dn.getAllBpOs()) { + if (thisBpos.getBlockPoolId().equals(bpid)) { + bpos = thisBpos; + break; + } + } + Preconditions.checkArgument(bpos != null, + "No such bpid: %s", bpid); + + BPServiceActor bpsa = null; + for (BPServiceActor thisBpsa : bpos.getBPServiceActors()) { + if (thisBpsa.getNNSocketAddress().equals(nn.getServiceRpcAddress())) { + bpsa = thisBpsa; + break; + } + } + Preconditions.checkArgument(bpsa != null, + "No service actor to NN at %s", nn.getServiceRpcAddress()); + + DatanodeProtocolClientSideTranslatorPB origNN = bpsa.getNameNodeProxy(); + DatanodeProtocolClientSideTranslatorPB spy = Mockito.spy(origNN); + bpsa.setNameNode(spy); + return spy; + } + /** * This method is used for testing. * Examples are adding and deleting blocks directly. @@ -53,18 +116,37 @@ public static FsDatasetSpi getFSDataset(DataNode dn) { return dn.getFSDataset(); } + public static FSDataset getFsDatasetImpl(DataNode dn) { + return (FSDataset)dn.getFSDataset(); + } + public static File getFile(DataNode dn, String bpid, long bid) { - return ((FSDataset)dn.getFSDataset()).getFile(bpid, bid); + return getFsDatasetImpl(dn).getFile(bpid, bid); } public static File getBlockFile(DataNode dn, String bpid, Block b ) throws IOException { - return ((FSDataset)dn.getFSDataset()).getBlockFile(bpid, b); + return getFsDatasetImpl(dn).getBlockFile(bpid, b); } public static boolean unlinkBlock(DataNode dn, ExtendedBlock block, int numLinks ) throws IOException { - ReplicaInfo info = ((FSDataset)dn.getFSDataset()).getReplicaInfo(block); - return info.unlinkBlock(numLinks); + return getFsDatasetImpl(dn).getReplicaInfo(block).unlinkBlock(numLinks); + } + + public static long getPendingAsyncDeletions(DataNode dn) { + return getFsDatasetImpl(dn).asyncDiskService.countPendingDeletions(); + } + + /** + * Fetch a copy of ReplicaInfo from a datanode by block id + * @param dn datanode to retrieve a ReplicaInfo object from + * @param bpid Block pool Id + * @param blkId id of the replica's block + * @return copy of ReplicaInfo object {@link FSDataset#fetchReplicaInfo} + */ + public static ReplicaInfo fetchReplicaInfo(final DataNode dn, + final String bpid, final long blkId) { + return getFsDatasetImpl(dn).fetchReplicaInfo(bpid, blkId); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java index a12ac722a77..8d9ee07ea02 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java @@ -38,7 +38,6 @@ import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; -import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolumeSet; import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi; import org.apache.hadoop.hdfs.server.datanode.fsdataset.LengthInputStream; diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java index 38c631381e2..985900030ed 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java @@ -17,6 +17,17 @@ */ package org.apache.hadoop.hdfs.server.datanode; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.File; +import java.io.FilenameFilter; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Random; +import java.util.concurrent.CountDownLatch; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.impl.Log4JLogger; @@ -40,27 +51,17 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; -import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport; +import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.GenericTestUtils.DelayAnswer; import org.apache.log4j.Level; import org.junit.After; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; import org.mockito.invocation.InvocationOnMock; -import java.io.File; -import java.io.FilenameFilter; -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.Random; -import java.util.concurrent.CountDownLatch; - /** * This test simulates a variety of situations when blocks are being * intentionally corrupted, unexpectedly modified, and so on before a block @@ -561,7 +562,7 @@ protected Object passThrough(InvocationOnMock invocation) // from this node. 
DataNode dn = cluster.getDataNodes().get(0); DatanodeProtocolClientSideTranslatorPB spy = - DataNodeAdapter.spyOnBposToNN(dn, nn); + DataNodeTestUtils.spyOnBposToNN(dn, nn); Mockito.doAnswer(delayer) .when(spy).blockReport( diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HAStressTestHarness.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HAStressTestHarness.java index 39667eddf17..81c45f37894 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HAStressTestHarness.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HAStressTestHarness.java @@ -27,7 +27,7 @@ import org.apache.hadoop.hdfs.MiniDFSNNTopology; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil; import org.apache.hadoop.hdfs.server.datanode.DataNode; -import org.apache.hadoop.hdfs.server.datanode.DataNodeAdapter; +import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.test.MultithreadedTestUtil.RepeatingTestThread; import org.apache.hadoop.test.MultithreadedTestUtil.TestContext; @@ -83,8 +83,8 @@ public void addReplicationTriggerThread(final int interval) { @Override public void doAnAction() throws Exception { for (DataNode dn : cluster.getDataNodes()) { - DataNodeAdapter.triggerDeletionReport(dn); - DataNodeAdapter.triggerHeartbeat(dn); + DataNodeTestUtils.triggerDeletionReport(dn); + DataNodeTestUtils.triggerHeartbeat(dn); } for (int i = 0; i < 2; i++) { NameNode nn = cluster.getNameNode(i); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java index bf919cea7f8..7224b430d12 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java @@ -37,7 +37,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.datanode.DataNode; -import org.apache.hadoop.hdfs.server.datanode.DataNodeAdapter; +import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.test.GenericTestUtils; @@ -96,7 +96,7 @@ static void waitForDNDeletions(final MiniDFSCluster cluster) @Override public Boolean get() { for (DataNode dn : cluster.getDataNodes()) { - if (DataNodeAdapter.getPendingAsyncDeletions(dn) > 0) { + if (DataNodeTestUtils.getPendingAsyncDeletions(dn) > 0) { return false; } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java index ea769c057e1..5e657ded489 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.namenode.ha; -import static org.junit.Assert.*; +import static 
org.junit.Assert.assertEquals; import java.io.IOException; import java.io.PrintWriter; @@ -47,7 +47,6 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; import org.apache.hadoop.hdfs.server.datanode.DataNode; -import org.apache.hadoop.hdfs.server.datanode.DataNodeAdapter; import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; import org.apache.hadoop.hdfs.server.namenode.FSInodeInfo; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; @@ -61,7 +60,6 @@ import org.apache.log4j.Level; import org.junit.After; import org.junit.Before; -import org.junit.Ignore; import org.junit.Test; import org.mockito.Mockito; import org.mockito.invocation.InvocationOnMock; @@ -72,9 +70,7 @@ public class TestDNFencing { - protected static final Log LOG = LogFactory.getLog( - TestDNFencing.class); - private static final String TEST_FILE_DATA = "hello highly available world"; + protected static final Log LOG = LogFactory.getLog(TestDNFencing.class); private static final String TEST_FILE = "/testStandbyIsHot"; private static final Path TEST_FILE_PATH = new Path(TEST_FILE); private static final int SMALL_BLOCK = 1024; @@ -497,7 +493,7 @@ protected Object passThrough(InvocationOnMock invocation) DataNode dn = cluster.getDataNodes().get(0); DatanodeProtocolClientSideTranslatorPB spy = - DataNodeAdapter.spyOnBposToNN(dn, nn2); + DataNodeTestUtils.spyOnBposToNN(dn, nn2); Mockito.doAnswer(delayer) .when(spy).blockReport( diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java index c9bae53a28a..815be593599 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java @@ -17,7 +17,10 @@ */ package org.apache.hadoop.hdfs.server.namenode.ha; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; import java.io.IOException; import java.security.PrivilegedExceptionAction; @@ -45,7 +48,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; import org.apache.hadoop.hdfs.server.datanode.DataNode; -import org.apache.hadoop.hdfs.server.datanode.DataNodeAdapter; +import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.io.IOUtils; @@ -54,9 +57,7 @@ import org.apache.hadoop.test.GenericTestUtils.DelayAnswer; import org.apache.hadoop.test.MultithreadedTestUtil.RepeatingTestThread; import org.apache.hadoop.test.MultithreadedTestUtil.TestContext; - import org.apache.log4j.Level; - import org.junit.Test; import org.mockito.Mockito; @@ -297,7 +298,7 @@ public void testFailoverRightBeforeCommitSynchronization() throws Exception { // active. 
DataNode primaryDN = cluster.getDataNode(expectedPrimary.getIpcPort()); DatanodeProtocolClientSideTranslatorPB nnSpy = - DataNodeAdapter.spyOnBposToNN(primaryDN, nn0); + DataNodeTestUtils.spyOnBposToNN(primaryDN, nn0); // Delay the commitBlockSynchronization call DelayAnswer delayer = new DelayAnswer(LOG); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyIsHot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyIsHot.java index ce5814b0dd0..ddfd573b4c4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyIsHot.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyIsHot.java @@ -35,14 +35,14 @@ import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.HAUtil; import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.hdfs.MiniDFSNNTopology; import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties; +import org.apache.hadoop.hdfs.MiniDFSNNTopology; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil; import org.apache.hadoop.hdfs.server.datanode.DataNode; -import org.apache.hadoop.hdfs.server.datanode.DataNodeAdapter; +import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; @@ -225,7 +225,7 @@ public Boolean get() { LOG.info("Got " + numReplicas + " locs: " + locs); if (numReplicas > expectedReplicas) { for (DataNode dn : cluster.getDataNodes()) { - DataNodeAdapter.triggerDeletionReport(dn); + DataNodeTestUtils.triggerDeletionReport(dn); } } return numReplicas == expectedReplicas;
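
Usage note: the call sites this patch updates (TestBlockReport, TestDNFencing, TestPipelinesFailover) all drive the relocated helper the same way: install the spy with DataNodeTestUtils.spyOnBposToNN, stub the RPC of interest with a GenericTestUtils.DelayAnswer, then trigger the DataNode and release the call. The following is a minimal illustrative sketch, not part of the patch itself; the class name is invented, and the three-argument blockReport matchers are inferred from the DatanodeRegistration and StorageBlockReport imports added to TestBlockReport above, so treat them as assumptions.

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
import org.mockito.Mockito;

// Hypothetical test scaffold; only spyOnBposToNN, triggerBlockReport and
// DelayAnswer come from code touched by this patch.
public class BlockReportDelaySketch {
  private static final Log LOG = LogFactory.getLog(BlockReportDelaySketch.class);

  static void delayOneBlockReport(MiniDFSCluster cluster) throws Exception {
    DataNode dn = cluster.getDataNodes().get(0);
    NameNode nn = cluster.getNameNode();

    // Splice a Mockito spy into this DN's RPC path to the NN.
    DatanodeProtocolClientSideTranslatorPB spy =
        DataNodeTestUtils.spyOnBposToNN(dn, nn);

    // Park the next blockReport() until the test releases it. The matcher
    // types are an assumption based on the imports this patch adds.
    DelayAnswer delayer = new DelayAnswer(LOG);
    Mockito.doAnswer(delayer).when(spy).blockReport(
        Mockito.<DatanodeRegistration>anyObject(),
        Mockito.anyString(),
        Mockito.<StorageBlockReport[]>anyObject());

    DataNodeTestUtils.triggerBlockReport(dn); // queue a report on the DN
    delayer.waitForCall();                    // the report is now held mid-RPC
    delayer.proceed();                        // let it through to the NN
    delayer.waitForResult();                  // wait for the NN's response
  }
}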