From 83fd3160dae6faaf64a499b0227d3f021b345f01 Mon Sep 17 00:00:00 2001 From: Tsz-wo Sze Date: Fri, 10 Feb 2012 19:15:15 +0000 Subject: [PATCH 01/12] HDFS-2931. Switch DataNode's BlockVolumeChoosingPolicy to private-audience. Contributed by harsh git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1242891 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ .../hdfs/server/datanode/BlockVolumeChoosingPolicy.java | 8 +++++--- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 6a70de043a4..c9377e27e2c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -216,6 +216,9 @@ Release 0.23.2 - UNRELEASED IMPROVEMENTS + HDFS-2931. Switch DataNode's BlockVolumeChoosingPolicy to private-audience. + (harsh via szetszwo) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockVolumeChoosingPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockVolumeChoosingPolicy.java index 49e0f464d91..31cf30a925d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockVolumeChoosingPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockVolumeChoosingPolicy.java @@ -28,10 +28,12 @@ import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterfa * BlockVolumeChoosingPolicy allows a DataNode to * specify what policy is to be used while choosing * a volume for a block request. - * + * + * Note: This is an evolving i/f and is only for + * advanced use. + * ***************************************************/ -@InterfaceAudience.Public -@InterfaceStability.Evolving +@InterfaceAudience.Private public interface BlockVolumeChoosingPolicy { /** From 56b6f4a8fc2fa9f72617922f18eaa6a1005d10ec Mon Sep 17 00:00:00 2001 From: Vinod Kumar Vavilapalli Date: Fri, 10 Feb 2012 19:55:15 +0000 Subject: [PATCH 02/12] Cleaning up hadoop-mapreduce-project/CHANGES.txt a bit. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1242902 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 83811654e0b..36ab06d98c1 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -3,15 +3,18 @@ Hadoop MapReduce Change Log Trunk (unreleased changes) INCOMPATIBLE CHANGES + MAPREDUCE-3545. Remove Avro RPC. (suresh) NEW FEATURES + MAPREDUCE-778. Rumen Anonymizer. (Amar Kamat and Chris Douglas via amarrk) MAPREDUCE-2669. Add new examples for Mean, Median, and Standard Deviation. (Plamen Jeliazkov via shv) IMPROVEMENTS + MAPREDUCE-3481. [Gridmix] Improve Gridmix STRESS mode. (amarrk) MAPREDUCE-3597. [Rumen] Rumen should provide APIs to access all the @@ -51,10 +54,6 @@ Trunk (unreleased changes) MAPREDUCE-2944. Improve checking of input for JobClient.displayTasks() (XieXianshan via harsh) BUG FIXES - MAPREDUCE-3770. Zombie.getJobConf() results into NPE. (amarrk) - - MAPREDUCE-3804. yarn webapp interface vulnerable to cross scripting attacks - (Dave Thompson via bobby) MAPREDUCE-3194. 
"mapred mradmin" command is broken in mrv2 (Jason Lowe via bobby) @@ -98,12 +97,10 @@ Release 0.23.2 - UNRELEASED OPTIMIZATIONS BUG FIXES + MAPREDUCE-3680. FifoScheduler web service rest API can print out invalid JSON. (B Anil Kumar via tgraves) - MAPREDUCE-3840. JobEndNotifier doesn't use the proxyToUse during connecting - (Ravi Prakash via bobby) - Release 0.23.1 - 2012-02-08 INCOMPATIBLE CHANGES @@ -744,6 +741,9 @@ Release 0.23.1 - 2012-02-08 MAPREDUCE-3808. Fixed an NPE in FileOutputCommitter for jobs with maps but no reduces. (Robert Joseph Evans via vinodkv) + MAPREDUCE-3804. yarn webapp interface vulnerable to cross scripting attacks + (Dave Thompson via bobby) + MAPREDUCE-3354. Changed scripts so that jobhistory server is started by bin/mapred instead of bin/yarn. (Jonathan Eagles via acmurthy) @@ -795,6 +795,11 @@ Release 0.23.1 - 2012-02-08 MAPREDUCE-3828. Ensure that urls in single-node mode are correct. (sseth via acmurthy) + MAPREDUCE-3770. Zombie.getJobConf() results into NPE. (amarrk) + + MAPREDUCE-3840. JobEndNotifier doesn't use the proxyToUse during connecting + (Ravi Prakash via bobby) + Release 0.23.0 - 2011-11-01 INCOMPATIBLE CHANGES From 0bf146b0dd078c2262f63ae303818f878a442e87 Mon Sep 17 00:00:00 2001 From: Thomas Graves Date: Fri, 10 Feb 2012 23:24:08 +0000 Subject: [PATCH 03/12] MAPREDUCE-3843. Job summary log file found missing on the RM host (Anupam Seth via tgraves) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1242976 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 3 ++ .../bin/mr-jobhistory-daemon.sh | 8 +++- .../src/site/apt/ClusterSetup.apt.vm | 44 +++++++++---------- 3 files changed, 31 insertions(+), 24 deletions(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 36ab06d98c1..a50ab73c6ef 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -800,6 +800,9 @@ Release 0.23.1 - 2012-02-08 MAPREDUCE-3840. JobEndNotifier doesn't use the proxyToUse during connecting (Ravi Prakash via bobby) + MAPREDUCE-3843. Job summary log file found missing on the RM host + (Anupam Seth via tgraves) + Release 0.23.0 - 2011-11-01 INCOMPATIBLE CHANGES diff --git a/hadoop-mapreduce-project/bin/mr-jobhistory-daemon.sh b/hadoop-mapreduce-project/bin/mr-jobhistory-daemon.sh index 6fc3ee7e863..2272ae9564d 100644 --- a/hadoop-mapreduce-project/bin/mr-jobhistory-daemon.sh +++ b/hadoop-mapreduce-project/bin/mr-jobhistory-daemon.sh @@ -20,6 +20,9 @@ # # Environment Variables # +# HADOOP_LOGFILE Hadoop log file. +# HADOOP_ROOT_LOGGER Hadoop root logger. +# HADOOP_JHS_LOGGER Hadoop JobSummary logger. # YARN_CONF_DIR Alternate conf dir. Default is ${YARN_HOME}/conf. # YARN_LOG_DIR Where log files are stored. PWD by default. 
# YARN_MASTER host:path where hadoop code should be rsync'd from @@ -86,8 +89,9 @@ if [ "$YARN_PID_DIR" = "" ]; then fi # some variables -export YARN_LOGFILE=yarn-$YARN_IDENT_STRING-$command-$HOSTNAME.log -export YARN_ROOT_LOGGER=${YARN_ROOT_LOGGER:-INFO,DRFA} +export HADOOP_LOGFILE=yarn-$YARN_IDENT_STRING-$command-$HOSTNAME.log +export HADOOP_ROOT_LOGGER=${HADOOP_ROOT_LOGGER:-INFO,DRFA} +export HADOOP_JHS_LOGGER=${HADOOP_JHS_LOGGER:-INFO,JSA} log=$YARN_LOG_DIR/yarn-$YARN_IDENT_STRING-$command-$HOSTNAME.out pid=$YARN_PID_DIR/yarn-$YARN_IDENT_STRING-$command.pid diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ClusterSetup.apt.vm b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ClusterSetup.apt.vm index 67db4b13aea..eca68a234bf 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ClusterSetup.apt.vm +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ClusterSetup.apt.vm @@ -437,32 +437,32 @@ Hadoop MapReduce Next Generation - Cluster Setup Format a new distributed filesystem: ---- - $ $HADOOP_PREFIX_HOME/bin/hdfs namenode -format + $ $HADOOP_PREFIX/bin/hdfs namenode -format ---- Start the HDFS with the following command, run on the designated NameNode: ---- - $ $HADOOP_PREFIX_HOME/bin/hdfs start namenode --config $HADOOP_CONF_DIR + $ $HADOOP_PREFIX/sbin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script hdfs start namenode ---- Run a script to start DataNodes on all slaves: ---- - $ $HADOOP_PREFIX_HOME/bin/hdfs start datanode --config $HADOOP_CONF_DIR + $ $HADOOP_PREFIX/sbin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script hdfs start datanode ---- Start the YARN with the following command, run on the designated ResourceManager: ---- - $ $YARN_HOME/bin/yarn start resourcemanager --config $HADOOP_CONF_DIR + $ $YARN_HOME/sbin/yarn-daemon.sh --config $HADOOP_CONF_DIR start resourcemanager ---- Run a script to start NodeManagers on all slaves: ---- - $ $YARN_HOME/bin/yarn start nodemanager --config $HADOOP_CONF_DIR + $ $YARN_HOME/sbin/yarn-daemon.sh --config $HADOOP_CONF_DIR start nodemanager ---- Start a standalone WebAppProxy server. If multiple servers @@ -476,7 +476,7 @@ Hadoop MapReduce Next Generation - Cluster Setup designated server: ---- - $ $YARN_HOME/bin/mapred start historyserver --config $YARN_CONF_DIR + $ $HADOOP_PREFIX/sbin/mr-jobhistory-daemon.sh start historyserver --config $HADOOP_CONF_DIR ---- * Hadoop Shutdown @@ -485,26 +485,26 @@ Hadoop MapReduce Next Generation - Cluster Setup NameNode: ---- - $ $HADOOP_PREFIX_HOME/bin/hdfs stop namenode --config $HADOOP_CONF_DIR + $ $HADOOP_PREFIX/sbin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script hdfs stop namenode ---- Run a script to stop DataNodes on all slaves: ---- - $ $HADOOP_PREFIX_HOME/bin/hdfs stop datanode --config $HADOOP_CONF_DIR + $ $HADOOP_PREFIX/sbin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script hdfs stop datanode ---- Stop the ResourceManager with the following command, run on the designated ResourceManager: ---- - $ $YARN_HOME/bin/yarn stop resourcemanager --config $HADOOP_CONF_DIR + $ $YARN_HOME/sbin/yarn-daemon.sh --config $HADOOP_CONF_DIR stop resourcemanager ---- Run a script to stop NodeManagers on all slaves: ---- - $ $YARN_HOME/bin/yarn stop nodemanager --config $HADOOP_CONF_DIR + $ $YARN_HOME/sbin/yarn-daemon.sh --config $HADOOP_CONF_DIR stop nodemanager ---- Stop the WebAppProxy server. 
If multiple servers are used with load @@ -519,7 +519,7 @@ Hadoop MapReduce Next Generation - Cluster Setup designated server: ---- - $ $YARN_HOME/bin/mapred stop historyserver --config $YARN_CONF_DIR + $ $HADOOP_PREFIX/sbin/mr-jobhistory-daemon.sh stop historyserver --config $HADOOP_CONF_DIR ---- @@ -978,34 +978,34 @@ KVNO Timestamp Principal Format a new distributed filesystem as : ---- -[hdfs]$ $HADOOP_PREFIX_HOME/bin/hdfs namenode -format +[hdfs]$ $HADOOP_PREFIX/bin/hdfs namenode -format ---- Start the HDFS with the following command, run on the designated NameNode as : ---- -[hdfs]$ $HADOOP_PREFIX_HOME/bin/hdfs start namenode --config $HADOOP_CONF_DIR +[hdfs]$ $HADOOP_PREFIX/sbin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script hdfs start namenode ---- Run a script to start DataNodes on all slaves as with a special environment variable <<>> set to : ---- -[root]$ HADOOP_SECURE_DN_USER=hdfs $HADOOP_PREFIX_HOME/bin/hdfs start datanode --config $HADOOP_CONF_DIR +[root]$ HADOOP_SECURE_DN_USER=hdfs $HADOOP_PREFIX/sbin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script hdfs start datanode ---- Start the YARN with the following command, run on the designated ResourceManager as : ---- -[yarn]$ $YARN_HOME/bin/yarn start resourcemanager --config $HADOOP_CONF_DIR +[yarn]$ $YARN_HOME/sbin/yarn-daemon.sh --config $HADOOP_CONF_DIR start resourcemanager ---- Run a script to start NodeManagers on all slaves as : ---- -[yarn]$ $YARN_HOME/bin/yarn start nodemanager --config $HADOOP_CONF_DIR +[yarn]$ $YARN_HOME/sbin/yarn-daemon.sh --config $HADOOP_CONF_DIR start nodemanager ---- Start a standalone WebAppProxy server. Run on the WebAppProxy @@ -1020,7 +1020,7 @@ KVNO Timestamp Principal designated server as : ---- -[mapred]$ $YARN_HOME/bin/mapred start historyserver --config $YARN_CONF_DIR +[mapred]$ $HADOOP_PREFIX/sbin/mr-jobhistory-daemon.sh start historyserver --config $HADOOP_CONF_DIR ---- * Hadoop Shutdown @@ -1029,26 +1029,26 @@ KVNO Timestamp Principal as : ---- -[hdfs]$ $HADOOP_PREFIX_HOME/bin/hdfs stop namenode --config $HADOOP_CONF_DIR +[hdfs]$ $HADOOP_PREFIX/sbin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script hdfs stop namenode ---- Run a script to stop DataNodes on all slaves as : ---- -[root]$ $HADOOP_PREFIX_HOME/bin/hdfs stop datanode --config $HADOOP_CONF_DIR +[root]$ $HADOOP_PREFIX/sbin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script hdfs stop datanode ---- Stop the ResourceManager with the following command, run on the designated ResourceManager as : ---- -[yarn]$ $YARN_HOME/bin/yarn stop resourcemanager --config $HADOOP_CONF_DIR +[yarn]$ $YARN_HOME/sbin/yarn-daemon.sh --config $HADOOP_CONF_DIR stop resourcemanager ---- Run a script to stop NodeManagers on all slaves as : ---- -[yarn]$ $YARN_HOME/bin/yarn stop nodemanager --config $HADOOP_CONF_DIR +[yarn]$ $YARN_HOME/sbin/yarn-daemon.sh --config $HADOOP_CONF_DIR stop nodemanager ---- Stop the WebAppProxy server. 
Run on the WebAppProxy server as @@ -1063,7 +1063,7 @@ KVNO Timestamp Principal designated server as : ---- -[mapred]$ $YARN_HOME/bin/mapred stop historyserver --config $YARN_CONF_DIR +[mapred]$ $HADOOP_PREFIX/sbin/mr-jobhistory-daemon.sh stop historyserver --config $HADOOP_CONF_DIR ---- * {Web Interfaces} From 27035f532d7f4aca3e650c0c6a581ea2873367fe Mon Sep 17 00:00:00 2001 From: Alejandro Abdelnur Date: Sat, 11 Feb 2012 00:03:00 +0000 Subject: [PATCH 04/12] HADOOP-8051 HttpFS documentation it is not wired to the generated site (tucu) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1242987 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 2 ++ .../hadoop-hdfs-httpfs/src/site/site.xml | 25 ++++++++----------- hadoop-project/src/site/site.xml | 1 + 3 files changed, 13 insertions(+), 15 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 319931b45b8..1d4c4948f8c 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -174,6 +174,8 @@ Release 0.23.2 - UNRELEASED HADOOP-8035 Hadoop Maven site is inefficient and runs phases redundantly (abayer via tucu) + HADOOP-8051 HttpFS documentation it is not wired to the generated site (tucu) + Release 0.23.1 - 2012-02-08 INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/site.xml b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/site.xml index d6424ebc2f9..01b35e0ae15 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/site.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/site.xml @@ -14,21 +14,16 @@ --> - + + org.apache.maven.skins + maven-stylus-skin + 1.2 + - -   - - - - org.apache.maven.skins - maven-stylus-skin - 1.2 - - - - - - + + + + + diff --git a/hadoop-project/src/site/site.xml b/hadoop-project/src/site/site.xml index ce5a4e7320a..27f9b7b9834 100644 --- a/hadoop-project/src/site/site.xml +++ b/hadoop-project/src/site/site.xml @@ -55,6 +55,7 @@ + From 8be9441b9b13bea6e23c2cbcf638162c93052740 Mon Sep 17 00:00:00 2001 From: Todd Lipcon Date: Sat, 11 Feb 2012 01:20:59 +0000 Subject: [PATCH 05/12] HDFS-2878. Fix TestBlockRecovery and move it back into main test directory. Contributed by Todd Lipcon. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1242995 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + .../hdfs/server/datanode/BPOfferService.java | 3 +- .../hadoop/hdfs/server/datanode/DataNode.java | 15 +++- .../server/datanode/TestBlockRecovery.java | 68 +++++++++++++++---- 4 files changed, 72 insertions(+), 17 deletions(-) rename hadoop-hdfs-project/hadoop-hdfs/src/test/{unit => java}/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java (89%) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index c9377e27e2c..9a5bbcbe045 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -129,6 +129,9 @@ Trunk (unreleased changes) HDFS-2486. Remove unnecessary priority level checks in UnderReplicatedBlocks. (Uma Maheswara Rao G via szetszwo) + HDFS-2878. Fix TestBlockRecovery and move it back into main test directory. + (todd) + OPTIMIZATIONS HDFS-2477. Optimize computing the diff between a block report and the namenode state. 
(Tomasz Nykiel via hairong) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java index 111fbee2852..5b1ed7c5a5f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java @@ -229,8 +229,7 @@ class BPOfferService implements Runnable { private void connectToNNAndHandshake() throws IOException { // get NN proxy - bpNamenode = new DatanodeProtocolClientSideTranslatorPB(nnAddr, - dn.getConf()); + bpNamenode = dn.connectToNN(nnAddr); // First phase of the handshake with NN - get the namespace // info. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java index b7a91696b64..031a57eaa20 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java @@ -1015,6 +1015,14 @@ public class DataNode extends Configured SocketChannel.open().socket() : new Socket(); } + /** + * Connect to the NN. This is separated out for easier testing. + */ + DatanodeProtocolClientSideTranslatorPB connectToNN( + InetSocketAddress nnAddr) throws IOException { + return new DatanodeProtocolClientSideTranslatorPB(nnAddr, conf); + } + public static InterDatanodeProtocol createInterDataNodeProtocolProxy( DatanodeID datanodeid, final Configuration conf, final int socketTimeout) throws IOException { @@ -1982,8 +1990,10 @@ public class DataNode extends Configured public DatanodeProtocolClientSideTranslatorPB getBPNamenode(String bpid) throws IOException { BPOfferService bpos = blockPoolManager.get(bpid); - if(bpos == null || bpos.bpNamenode == null) { - throw new IOException("cannot find a namnode proxy for bpid=" + bpid); + if (bpos == null) { + throw new IOException("No block pool offer service for bpid=" + bpid); + } else if (bpos.bpNamenode == null) { + throw new IOException("cannot find a namenode proxy for bpid=" + bpid); } return bpos.bpNamenode; } @@ -2325,5 +2335,4 @@ public class DataNode extends Configured boolean shouldRun() { return shouldRun; } - } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/unit/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java similarity index 89% rename from hadoop-hdfs-project/hadoop-hdfs/src/test/unit/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java rename to hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java index 7dc5e86e688..cb4244132b8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/unit/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java @@ -22,6 +22,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.impl.Log4JLogger; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeys; import 
org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdfs.DFSConfigKeys; @@ -31,6 +32,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException; +import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.DataNode.BlockRecord; @@ -39,23 +41,30 @@ import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; +import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo; import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock; +import org.apache.hadoop.hdfs.server.protocol.StorageReport; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Daemon; +import org.apache.hadoop.util.DataChecksum; import org.apache.log4j.Level; import org.junit.After; import org.junit.Assert; import org.junit.Before; import org.junit.Test; +import org.mockito.Mockito; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; import static org.junit.Assert.fail; import static org.mockito.Mockito.*; import java.io.File; import java.io.IOException; +import java.net.InetSocketAddress; import java.util.ArrayList; import java.util.Collection; import java.util.List; @@ -72,6 +81,8 @@ public class TestBlockRecovery { private final static long RECOVERY_ID = 3000L; private final static String CLUSTER_ID = "testClusterID"; private final static String POOL_ID = "BP-TEST"; + private final static InetSocketAddress NN_ADDR = new InetSocketAddress( + "localhost", 5020); private final static long BLOCK_ID = 1000L; private final static long GEN_STAMP = 2000L; private final static long BLOCK_LEN = 3000L; @@ -80,9 +91,6 @@ public class TestBlockRecovery { private final static ExtendedBlock block = new ExtendedBlock(POOL_ID, BLOCK_ID, BLOCK_LEN, GEN_STAMP); - private final NamespaceInfo nsifno = - new NamespaceInfo(1,CLUSTER_ID, POOL_ID, 2, 3); - static { ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL); ((Log4JLogger)LOG).getLogger().setLevel(Level.ALL); @@ -99,21 +107,54 @@ public class TestBlockRecovery { conf.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, "0.0.0.0:0"); conf.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, "0.0.0.0:0"); conf.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, "0.0.0.0:0"); - FileSystem.setDefaultUri(conf, "hdfs://localhost:5020"); + conf.setInt(CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0); + FileSystem.setDefaultUri(conf, + "hdfs://" + NN_ADDR.getHostName() + ":" + NN_ADDR.getPort()); ArrayList dirs = new ArrayList(); File dataDir = new File(DATA_DIR); FileUtil.fullyDelete(dataDir); dataDir.mkdirs(); dirs.add(dataDir); - DatanodeProtocol namenode = mock(DatanodeProtocol.class); + final DatanodeProtocolClientSideTranslatorPB namenode = + 
mock(DatanodeProtocolClientSideTranslatorPB.class); + + Mockito.doAnswer(new Answer() { + @Override + public DatanodeRegistration answer(InvocationOnMock invocation) + throws Throwable { + return (DatanodeRegistration) invocation.getArguments()[0]; + } + }).when(namenode).registerDatanode( + Mockito.any(DatanodeRegistration.class), + Mockito.any(DatanodeStorage[].class)); + when(namenode.versionRequest()).thenReturn(new NamespaceInfo (1, CLUSTER_ID, POOL_ID, 1L, 1)); - when(namenode.sendHeartbeat(any(DatanodeRegistration.class), anyLong(), - anyLong(), anyLong(), anyLong(), anyInt(), anyInt(), anyInt())) + + when(namenode.sendHeartbeat( + Mockito.any(DatanodeRegistration.class), + Mockito.any(StorageReport[].class), + Mockito.anyInt(), + Mockito.anyInt(), + Mockito.anyInt())) .thenReturn(new DatanodeCommand[0]); - dn = new DataNode(conf, dirs, null); - - DataNodeTestUtils.setBPNamenodeByIndex(dn, nsifno, POOL_ID, namenode); + + dn = new DataNode(conf, dirs, null) { + @Override + DatanodeProtocolClientSideTranslatorPB connectToNN( + InetSocketAddress nnAddr) throws IOException { + Assert.assertEquals(NN_ADDR, nnAddr); + return namenode; + } + }; + dn.runDatanodeDaemon(); + while (!dn.isDatanodeFullyStarted()) { + try { + Thread.sleep(50); + } catch (InterruptedException e) { + fail("Interrupted starting DN"); + } + } } /** @@ -355,9 +396,11 @@ public class TestBlockRecovery { private Collection initRecoveringBlocks() throws IOException { Collection blocks = new ArrayList(1); + DatanodeInfo mockOtherDN = new DatanodeInfo( + new DatanodeID("127.0.0.1", "storage-1234", 0, 0)); DatanodeInfo[] locs = new DatanodeInfo[] { new DatanodeInfo(dn.getDNRegistrationForBP(block.getBlockPoolId())), - mock(DatanodeInfo.class) }; + mockOtherDN }; RecoveringBlock rBlock = new RecoveringBlock(block, locs, RECOVERY_ID); blocks.add(rBlock); return blocks; @@ -495,7 +538,8 @@ public class TestBlockRecovery { ReplicaInPipelineInterface replicaInfo = dn.data.createRbw(block); BlockWriteStreams streams = null; try { - streams = replicaInfo.createStreams(true, 0, 0); + streams = replicaInfo.createStreams(true, + DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_CRC32, 512)); streams.checksumOut.write('a'); dn.data.initReplicaRecovery(new RecoveringBlock(block, null, RECOVERY_ID+1)); try { From 6bec4035bd481eeb29f78eba2790c35455b5d840 Mon Sep 17 00:00:00 2001 From: Harsh J Date: Sat, 11 Feb 2012 14:10:04 +0000 Subject: [PATCH 06/12] HADOOP-8055. Hadoop tarball distribution lacks a core-site.xml (harsh) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1243065 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 2 ++ .../hadoop-common/src/main/conf/core-site.xml | 20 +++++++++++++++++++ 2 files changed, 22 insertions(+) create mode 100644 hadoop-common-project/hadoop-common/src/main/conf/core-site.xml diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 1d4c4948f8c..c3bd7b98c87 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -375,6 +375,8 @@ Release 0.23.1 - 2012-02-08 HADOOP-8013. ViewFileSystem does not honor setVerifyChecksum (Daryn Sharp via bobby) + HADOOP-8055. 
Hadoop tarball distribution lacks a core-site.xml (harsh) + Release 0.23.0 - 2011-11-01 INCOMPATIBLE CHANGES diff --git a/hadoop-common-project/hadoop-common/src/main/conf/core-site.xml b/hadoop-common-project/hadoop-common/src/main/conf/core-site.xml new file mode 100644 index 00000000000..d2ddf893e49 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/conf/core-site.xml @@ -0,0 +1,20 @@ + + + + + + + + From 726f6e32ac570dbaeb42fa2747dca01877dfc7cb Mon Sep 17 00:00:00 2001 From: Harsh J Date: Sat, 11 Feb 2012 16:57:07 +0000 Subject: [PATCH 07/12] Amend HADOOP-8055. Placed changes log in a released section, corrected to 0.23.2. (harsh) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1243096 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index c3bd7b98c87..e0dbe146d0d 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -176,6 +176,8 @@ Release 0.23.2 - UNRELEASED HADOOP-8051 HttpFS documentation it is not wired to the generated site (tucu) + HADOOP-8055. Hadoop tarball distribution lacks a core-site.xml (harsh) + Release 0.23.1 - 2012-02-08 INCOMPATIBLE CHANGES @@ -375,8 +377,6 @@ Release 0.23.1 - 2012-02-08 HADOOP-8013. ViewFileSystem does not honor setVerifyChecksum (Daryn Sharp via bobby) - HADOOP-8055. Hadoop tarball distribution lacks a core-site.xml (harsh) - Release 0.23.0 - 2011-11-01 INCOMPATIBLE CHANGES From 8b730c867f976aa250409389b77bbd1da30fc613 Mon Sep 17 00:00:00 2001 From: Harsh J Date: Sat, 11 Feb 2012 17:13:30 +0000 Subject: [PATCH 08/12] HDFS-2869. Fix an error in the webhdfs docs for the mkdir op (harsh) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1243104 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 ++ .../src/main/docs/src/documentation/content/xdocs/webhdfs.xml | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 9a5bbcbe045..90c1490c32b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -230,6 +230,8 @@ Release 0.23.2 - UNRELEASED HDFS-2764. TestBackupNode is racy. (atm) + HDFS-2869. Fix an error in the webhdfs docs for the mkdir op (harsh) + Release 0.23.1 - 2012-02-08 INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/webhdfs.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/webhdfs.xml index 32a26f0c4a0..43764ca2758 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/webhdfs.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/webhdfs.xml @@ -349,7 +349,7 @@ Hello, webhdfs user!
  • Submit a HTTP PUT request. -curl -i -X PUT "http://<HOST>:<PORT>/<PATH>?op=MKDIRS[&permission=<OCTAL>]" + curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=MKDIRS[&permission=<OCTAL>]" The client receives a response with a boolean JSON object: From 00ee0585abce44690e9784b21a1fe31ce30e06e6 Mon Sep 17 00:00:00 2001 From: Matthew Foley Date: Sun, 12 Feb 2012 07:47:35 +0000 Subject: [PATCH 09/12] HADOOP-8052. Hadoop Metrics2 should emit Float.MAX_VALUE (instead of Double.MAX_VALUE) to avoid making Ganglia's gmetad core. Contributed by Varun Kapoor. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1243207 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 6 +++++- .../apache/hadoop/metrics2/util/SampleStat.java | 16 ++++++++++++---- .../hadoop/metrics2/util/TestSampleStat.java | 8 ++++---- 3 files changed, 21 insertions(+), 9 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index e0dbe146d0d..dc1dcda4015 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -176,7 +176,11 @@ Release 0.23.2 - UNRELEASED HADOOP-8051 HttpFS documentation it is not wired to the generated site (tucu) - HADOOP-8055. Hadoop tarball distribution lacks a core-site.xml (harsh) + HADOOP-8055. Hadoop tarball distribution lacks a core-site.xml (harsh) + + HADOOP-8052. Hadoop Metrics2 should emit Float.MAX_VALUE (instead of + Double.MAX_VALUE) to avoid making Ganglia's gmetad core. (Varun Kapoor + via mattf) Release 0.23.1 - 2012-02-08 diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/SampleStat.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/SampleStat.java index f154269698a..589062a691c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/SampleStat.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/SampleStat.java @@ -143,8 +143,16 @@ public class SampleStat { @SuppressWarnings("PublicInnerClass") public static class MinMax { - private double min = Double.MAX_VALUE; - private double max = Double.MIN_VALUE; + // Float.MAX_VALUE is used rather than Double.MAX_VALUE, even though the + // min and max variables are of type double. + // Float.MAX_VALUE is big enough, and using Double.MAX_VALUE makes + // Ganglia core due to buffer overflow. + // The same reasoning applies to the MIN_VALUE counterparts. 
+ static final double DEFAULT_MIN_VALUE = Float.MAX_VALUE; + static final double DEFAULT_MAX_VALUE = Float.MIN_VALUE; + + private double min = DEFAULT_MIN_VALUE; + private double max = DEFAULT_MAX_VALUE; public void add(double value) { if (value > max) max = value; @@ -155,8 +163,8 @@ public class SampleStat { public double max() { return max; } public void reset() { - min = Double.MAX_VALUE; - max = Double.MIN_VALUE; + min = DEFAULT_MIN_VALUE; + max = DEFAULT_MAX_VALUE; } public void reset(MinMax other) { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/util/TestSampleStat.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/util/TestSampleStat.java index 36ca6bb1664..0fb0ad8ace9 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/util/TestSampleStat.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/util/TestSampleStat.java @@ -36,8 +36,8 @@ public class TestSampleStat { assertEquals("mean", 0.0, stat.mean(), EPSILON); assertEquals("variance", 0.0, stat.variance(), EPSILON); assertEquals("stddev", 0.0, stat.stddev(), EPSILON); - assertEquals("min", Double.MAX_VALUE, stat.min(), EPSILON); - assertEquals("max", Double.MIN_VALUE, stat.max(), EPSILON); + assertEquals("min", SampleStat.MinMax.DEFAULT_MIN_VALUE, stat.min(), EPSILON); + assertEquals("max", SampleStat.MinMax.DEFAULT_MAX_VALUE, stat.max(), EPSILON); stat.add(3); assertEquals("num samples", 1L, stat.numSamples()); @@ -60,8 +60,8 @@ public class TestSampleStat { assertEquals("mean", 0.0, stat.mean(), EPSILON); assertEquals("variance", 0.0, stat.variance(), EPSILON); assertEquals("stddev", 0.0, stat.stddev(), EPSILON); - assertEquals("min", Double.MAX_VALUE, stat.min(), EPSILON); - assertEquals("max", Double.MIN_VALUE, stat.max(), EPSILON); + assertEquals("min", SampleStat.MinMax.DEFAULT_MIN_VALUE, stat.min(), EPSILON); + assertEquals("max", SampleStat.MinMax.DEFAULT_MAX_VALUE, stat.max(), EPSILON); } } From 5c46ba4d4813f6e6c594a3258a2844cc8c99cd42 Mon Sep 17 00:00:00 2001 From: Mahadev Konar Date: Mon, 13 Feb 2012 05:37:11 +0000 Subject: [PATCH 10/12] MAPREDUCE-3852. Test TestLinuxResourceCalculatorPlugin failing. (Thomas Graves via mahadev) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1243418 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 3 +++ hadoop-project/pom.xml | 19 +++++++++++++++++++ 2 files changed, 22 insertions(+) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index a50ab73c6ef..d9f76e09d6a 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -101,6 +101,9 @@ Release 0.23.2 - UNRELEASED MAPREDUCE-3680. FifoScheduler web service rest API can print out invalid JSON. (B Anil Kumar via tgraves) + MAPREDUCE-3852. Test TestLinuxResourceCalculatorPlugin failing. (Thomas + Graves via mahadev) + Release 0.23.1 - 2012-02-08 INCOMPATIBLE CHANGES diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index 0abdc7b12a4..e4bd5044bfe 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -733,6 +733,25 @@ + + org.apache.maven.plugins + maven-antrun-plugin + + + create-testdirs + validate + + run + + + + + + + + + + org.apache.maven.plugins maven-surefire-plugin From 52004aa8e909bbc04413779e5110c34567973338 Mon Sep 17 00:00:00 2001 From: Tsz-wo Sze Date: Mon, 13 Feb 2012 19:07:04 +0000 Subject: [PATCH 11/12] HDFS-776. 
Fix exception handling in Balancer. Contributed by Uma Maheswara Rao G git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1243654 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ .../server/balancer/NameNodeConnector.java | 24 ++++++++++++------- .../TestBalancerWithMultipleNameNodes.java | 2 -- 3 files changed, 19 insertions(+), 10 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 90c1490c32b..ca180549634 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -232,6 +232,9 @@ Release 0.23.2 - UNRELEASED HDFS-2869. Fix an error in the webhdfs docs for the mkdir op (harsh) + HDFS-776. Fix exception handling in Balancer. (Uma Maheswara Rao G + via szetszwo) + Release 0.23.1 - 2012-02-08 INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java index ebd9783fc14..83822e4c31e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java @@ -125,6 +125,10 @@ class NameNodeConnector { if (!isBlockTokenEnabled) { return BlockTokenSecretManager.DUMMY_TOKEN; } else { + if (!shouldRun) { + throw new IOException( + "Can not get access token. BlockKeyUpdater is not running"); + } return blockTokenSecretManager.generateToken(null, eb, EnumSet.of(BlockTokenSecretManager.AccessMode.REPLACE, BlockTokenSecretManager.AccessMode.COPY)); @@ -221,16 +225,20 @@ class NameNodeConnector { */ class BlockKeyUpdater implements Runnable { public void run() { - while (shouldRun) { - try { - blockTokenSecretManager.setKeys(namenode.getBlockKeys()); - } catch (Exception e) { - LOG.error("Failed to set keys", e); - } - try { + try { + while (shouldRun) { + try { + blockTokenSecretManager.setKeys(namenode.getBlockKeys()); + } catch (IOException e) { + LOG.error("Failed to set keys", e); + } Thread.sleep(keyUpdaterInterval); - } catch (InterruptedException ie) { } + } catch (InterruptedException e) { + LOG.info("InterruptedException in block key updater thread", e); + } catch (Throwable e) { + LOG.error("Exception in block key updater thread", e); + shouldRun = false; } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java index 6ca0ffe7b31..6d06da49683 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java @@ -57,14 +57,12 @@ public class TestBalancerWithMultipleNameNodes { ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.OFF); ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.OFF); ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.OFF); -// ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.OFF); } private static final long CAPACITY = 500L; private static final String RACK0 = "/rack0"; private static 
final String RACK1 = "/rack1"; - private static final String RACK2 = "/rack2"; private static final String FILE_NAME = "/tmp.txt"; private static final Path FILE_PATH = new Path(FILE_NAME); From a70bc6c6a268e774306acb71550e84e85a989fc6 Mon Sep 17 00:00:00 2001 From: Suresh Srinivas Date: Mon, 13 Feb 2012 20:11:43 +0000 Subject: [PATCH 12/12] HDFS-2815. Namenode sometimes does not come out of safemode during NN crash + restart. Contributed by Uma Maheswara Rao. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1243673 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ .../apache/hadoop/hdfs/server/namenode/FSNamesystem.java | 9 +-------- 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index ca180549634..0ea808e5167 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -235,6 +235,9 @@ Release 0.23.2 - UNRELEASED HDFS-776. Fix exception handling in Balancer. (Uma Maheswara Rao G via szetszwo) + HDFS-2815. Namenode sometimes does not come out of safemode during + NN crash + restart. (Uma Maheswara Rao via suresh) + Release 0.23.1 - 2012-02-08 INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 1d554bf468d..f58eff57c47 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -1960,7 +1960,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats, boolean enforcePermission) throws AccessControlException, SafeModeException, UnresolvedLinkException, IOException { - boolean deleteNow = false; ArrayList collectedBlocks = new ArrayList(); writeLock(); @@ -1978,10 +1977,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats, if (!dir.delete(src, collectedBlocks)) { return false; } - deleteNow = collectedBlocks.size() <= BLOCK_DELETION_INCREMENT; - if (deleteNow) { // Perform small deletes right away - removeBlocks(collectedBlocks); - } } finally { writeUnlock(); } @@ -1990,9 +1985,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats, writeLock(); try { - if (!deleteNow) { - removeBlocks(collectedBlocks); // Incremental deletion of blocks - } + removeBlocks(collectedBlocks); // Incremental deletion of blocks } finally { writeUnlock(); }
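
The HDFS-2878 change (PATCH 05/12) lets TestBlockRecovery run against a mocked NameNode by routing proxy creation through an overridable DataNode.connectToNN() method. The sketch below shows that test seam in isolation, assuming only that Mockito is on the classpath; the NameNodeProxy and Worker types and all method names other than Mockito's mock()/when() are invented for the example and are not the Hadoop classes.

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.io.IOException;
import java.net.InetSocketAddress;

public class ConnectSeamExample {

  /** Stand-in for the NameNode RPC proxy interface (hypothetical). */
  public interface NameNodeProxy {
    String versionRequest() throws IOException;
  }

  /** Stand-in for a DataNode-like class under test (hypothetical). */
  static class Worker {
    private final InetSocketAddress nnAddr;

    Worker(InetSocketAddress nnAddr) {
      this.nnAddr = nnAddr;
    }

    /** The seam: production code opens a real connection, tests override this. */
    NameNodeProxy connectToNN(InetSocketAddress addr) throws IOException {
      throw new IOException("no real RPC layer in this sketch: " + addr);
    }

    /** First handshake step: ask the (possibly mocked) NN for its version. */
    String handshake() throws IOException {
      return connectToNN(nnAddr).versionRequest();
    }
  }

  public static void main(String[] args) throws IOException {
    final NameNodeProxy mockNN = mock(NameNodeProxy.class);
    when(mockNN.versionRequest()).thenReturn("test-cluster-1");

    Worker worker = new Worker(new InetSocketAddress("localhost", 5020)) {
      @Override
      NameNodeProxy connectToNN(InetSocketAddress addr) {
        return mockNN; // hand back the mock instead of dialing a NameNode
      }
    };

    System.out.println(worker.handshake()); // prints "test-cluster-1"
  }
}

The anonymous subclass plays the role the patch gives to the DataNode constructed inside TestBlockRecovery.setUp(): the class under test never notices that no socket was opened.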
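
The HADOOP-8052 change (PATCH 09/12) swaps the Double.MAX_VALUE/Double.MIN_VALUE sentinels for their Float counterparts so the emitted values survive a narrowing conversion to 32-bit floats on the Ganglia gmetad side. A standalone sketch of that sentinel pattern follows; the class name is invented for the example, while the DEFAULT_* constants mirror the ones the patch adds to SampleStat.MinMax.

/**
 * Running min/max tracker whose "nothing seen yet" sentinels stay within
 * float range, so the values are safe to publish to consumers that store
 * metrics as 32-bit floats.
 */
public class FloatSafeMinMax {
  // Float.MAX_VALUE is big enough for an initial minimum and, unlike
  // Double.MAX_VALUE, does not overflow when narrowed to a float.
  // Float.MIN_VALUE (smallest positive float) plays the role the original
  // Double.MIN_VALUE sentinel played for the maximum.
  static final double DEFAULT_MIN_VALUE = Float.MAX_VALUE;
  static final double DEFAULT_MAX_VALUE = Float.MIN_VALUE;

  private double min = DEFAULT_MIN_VALUE;
  private double max = DEFAULT_MAX_VALUE;

  public void add(double value) {
    if (value > max) max = value;
    if (value < min) min = value;
  }

  public double min() { return min; }
  public double max() { return max; }

  public void reset() {
    min = DEFAULT_MIN_VALUE;
    max = DEFAULT_MAX_VALUE;
  }

  public static void main(String[] args) {
    FloatSafeMinMax mm = new FloatSafeMinMax();
    mm.add(3.0);
    mm.add(17.5);
    // Both endpoints now fit comfortably in a float-backed metrics sink.
    System.out.println(mm.min() + " .. " + mm.max()); // 3.0 .. 17.5
  }
}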
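
The HDFS-776 change (PATCH 11/12) restructures BlockKeyUpdater.run() so that a transient failure of the key refresh is logged and retried, an interrupt ends the loop cleanly, and any unexpected Throwable stops the thread instead of letting it spin. The self-contained sketch below reproduces that loop shape only; the PeriodicRefresher name and the refreshKeys() placeholder are invented for the example.

import java.io.IOException;

public class PeriodicRefresher implements Runnable {
  private volatile boolean shouldRun = true;
  private final long intervalMs;

  public PeriodicRefresher(long intervalMs) {
    this.intervalMs = intervalMs;
  }

  /** Placeholder for the remote call being refreshed (hypothetical). */
  protected void refreshKeys() throws IOException {
    // e.g. fetch fresh keys from a remote service and install them locally
  }

  public void stop() {
    shouldRun = false;
  }

  @Override
  public void run() {
    try {
      while (shouldRun) {
        try {
          refreshKeys();
        } catch (IOException e) {
          // Transient failure: log and try again on the next cycle.
          System.err.println("Failed to refresh keys: " + e);
        }
        // Sleep sits inside the outer try, outside the inner one, as in the patch.
        Thread.sleep(intervalMs);
      }
    } catch (InterruptedException e) {
      // Normal shutdown path when the owning service interrupts the thread.
      System.out.println("Refresher interrupted, exiting");
    } catch (Throwable t) {
      // Anything unexpected: stop rather than loop forever on a broken state.
      System.err.println("Unexpected error in refresher: " + t);
      shouldRun = false;
    }
  }
}

A caller would typically run it as new Thread(new PeriodicRefresher(10000)).start() and shut it down by calling stop() and interrupting the thread, which is exactly how the balancer's NameNodeConnector drives its key updater.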