diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index a888da2b0b1..8719849634c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -73,6 +73,9 @@ HDFS-6584: Archival Storage
 
     HDFS-6864. Archival Storage: add user documentation. (szetszwo via jing9)
 
+    HDFS-7088. Archival Storage: fix TestBalancer and
+    TestBalancerWithMultipleNameNodes. (szetszwo via jing9)
+
 Trunk (Unreleased)
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
index 9e08d510a60..d27f33f0e87 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
@@ -18,14 +18,16 @@
 package org.apache.hadoop.hdfs.server.balancer;
 
 import java.io.Closeable;
-import java.io.DataOutputStream;
 import java.io.IOException;
 import java.io.OutputStream;
 import java.net.InetAddress;
 import java.net.URI;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
 
-import com.google.common.annotations.VisibleForTesting;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -34,7 +36,6 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.NameNodeProxies;
 import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
@@ -48,6 +49,8 @@ import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RemoteException;
 
+import com.google.common.annotations.VisibleForTesting;
+
 /**
  * The class provides utilities for accessing a NameNode.
  */
@@ -56,7 +59,7 @@ public class NameNodeConnector implements Closeable {
   private static final Log LOG = LogFactory.getLog(NameNodeConnector.class);
 
   private static final int MAX_NOT_CHANGED_ITERATIONS = 5;
-  private static boolean createIdFile = true;
+  private static boolean write2IdFile = true;
 
   /** Create {@link NameNodeConnector} for the given namenodes. */
   public static List<NameNodeConnector> newNameNodeConnectors(
@@ -88,8 +91,8 @@ public class NameNodeConnector implements Closeable {
   }
 
   @VisibleForTesting
-  public static void setCreateIdFile(boolean create) {
-    createIdFile = create;
+  public static void setWrite2IdFile(boolean write2IdFile) {
+    NameNodeConnector.write2IdFile = write2IdFile;
   }
 
   private final URI nameNodeUri;
@@ -127,8 +130,8 @@ public class NameNodeConnector implements Closeable {
     this.keyManager = new KeyManager(blockpoolID, namenode,
         defaults.getEncryptDataTransfer(), conf);
     // if it is for test, we do not create the id file
-    out = createIdFile ? checkAndMarkRunning() : null;
-    if (createIdFile && out == null) {
+    out = checkAndMarkRunning();
+    if (out == null) {
       // Exit if there is another one running.
       throw new IOException("Another " + name + " is running.");
     }
@@ -199,8 +202,10 @@
   private OutputStream checkAndMarkRunning() throws IOException {
     try {
       final FSDataOutputStream out = fs.create(idPath);
-      out.writeBytes(InetAddress.getLocalHost().getHostName());
-      out.hflush();
+      if (write2IdFile) {
+        out.writeBytes(InetAddress.getLocalHost().getHostName());
+        out.hflush();
+      }
       return out;
     } catch(RemoteException e) {
       if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
index 72597d2b759..dbc3212a22b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
@@ -89,7 +89,14 @@ public class TestBalancer {
   private static final Random r = new Random();
 
   static {
+    initTestSetup();
+  }
+
+  public static void initTestSetup() {
     Dispatcher.setBlockMoveWaitTime(1000L) ;
+
+    // do not create id file since it occupies the disk space
+    NameNodeConnector.setWrite2IdFile(false);
   }
 
   static void initConf(Configuration conf) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java
index d9d70d1a3ce..bd9136655f6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java
@@ -44,7 +44,7 @@ public class TestBalancerWithHANameNodes {
   ClientProtocol client;
 
   static {
-    Dispatcher.setBlockMoveWaitTime(1000L);
+    TestBalancer.initTestSetup();
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java
index a16a9791009..6ee6e545416 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java
@@ -73,7 +73,7 @@ public class TestBalancerWithMultipleNameNodes {
   private static final Random RANDOM = new Random();
 
   static {
-    Dispatcher.setBlockMoveWaitTime(1000L) ;
+    TestBalancer.initTestSetup();
   }
 
   /** Common objects used in various methods. */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java
index 9961a2e2704..7af3a0e7d7d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java
@@ -75,7 +75,7 @@ public class TestBalancerWithNodeGroup {
   static final int DEFAULT_BLOCK_SIZE = 100;
 
   static {
-    Dispatcher.setBlockMoveWaitTime(1000L) ;
+    TestBalancer.initTestSetup();
   }
 
   static Configuration createConf() {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java
index eeea62b5b45..ad813cb7fe8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java
@@ -30,7 +30,6 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
@@ -52,7 +51,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
 import org.apache.hadoop.hdfs.server.balancer.Dispatcher;
 import org.apache.hadoop.hdfs.server.balancer.ExitStatus;
-import org.apache.hadoop.hdfs.server.balancer.NameNodeConnector;
+import org.apache.hadoop.hdfs.server.balancer.TestBalancer;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
@@ -99,10 +98,8 @@ public class TestStorageMover {
     HOT = DEFAULT_POLICIES.getPolicy("HOT");
     WARM = DEFAULT_POLICIES.getPolicy("WARM");
     COLD = DEFAULT_POLICIES.getPolicy("COLD");
-    Dispatcher.setBlockMoveWaitTime(1000L);
+    TestBalancer.initTestSetup();
     Dispatcher.setDelayAfterErrors(1000L);
-    // do not create id file since we will eat up all the disk space
-    NameNodeConnector.setCreateIdFile(false);
   }
 
   /**
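
The shared hook this patch introduces, TestBalancer.initTestSetup(), is meant to be called from a balancer test's static initializer. Below is a minimal sketch of how another test class could adopt it; the class and test method names are illustrative only and are not part of this patch.

```java
package org.apache.hadoop.hdfs.server.balancer;

import org.junit.Test;

// Hypothetical example class, not part of this patch: it only illustrates
// how a balancer test picks up the shared setup introduced above.
public class TestBalancerExample {
  static {
    // Runs once per test class: sets the short block-move wait time and
    // disables writing the hostname into the balancer id file, so the tests
    // do not use up the mini cluster's disk space.
    TestBalancer.initTestSetup();
  }

  @Test
  public void testWithSharedSetup() throws Exception {
    // ... start a MiniDFSCluster and run the Balancer, as the existing tests do ...
  }
}
```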