BufferedFSInputStream
* with the specified buffer size,
@@ -97,4 +99,13 @@ implements Seekable, PositionedReadable {
public void readFully(long position, byte[] buffer) throws IOException {
((FSInputStream)in).readFully(position, buffer);
}
+
+ @Override
+ public FileDescriptor getFileDescriptor() throws IOException {
+ if (in instanceof HasFileDescriptor) {
+ return ((HasFileDescriptor) in).getFileDescriptor();
+ } else {
+ return null;
+ }
+ }
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java
index 3b14cc77e1f..e47dffb082c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java
@@ -28,7 +28,7 @@ import org.apache.hadoop.classification.InterfaceStability;
@InterfaceAudience.Public
@InterfaceStability.Stable
public class FSDataInputStream extends DataInputStream
- implements Seekable, PositionedReadable, Closeable, ByteBufferReadable {
+ implements Seekable, PositionedReadable, Closeable, ByteBufferReadable, HasFileDescriptor {
public FSDataInputStream(InputStream in)
throws IOException {
@@ -125,4 +125,15 @@ public class FSDataInputStream extends DataInputStream
throw new UnsupportedOperationException("Byte-buffer read unsupported by input stream");
}
+
+ @Override
+ public FileDescriptor getFileDescriptor() throws IOException {
+ if (in instanceof HasFileDescriptor) {
+ return ((HasFileDescriptor) in).getFileDescriptor();
+ } else if (in instanceof FileInputStream) {
+ return ((FileInputStream) in).getFD();
+ } else {
+ return null;
+ }
+ }
}
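The FSDataInputStream change above is the piece most callers will see: if the wrapped stream can surface an OS-level descriptor, getFileDescriptor() returns it, otherwise null. A minimal consumer sketch, not part of the patch — the driver class name and path argument are illustrative, and null is the expected result whenever an intermediate wrapper does not forward the descriptor:

    import java.io.FileDescriptor;
    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class FdProbe {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.getLocal(conf);
        FSDataInputStream in = fs.open(new Path(args[0]));
        try {
          // Null simply means the wrapped stream exposes no OS-level descriptor.
          FileDescriptor fd = in.getFileDescriptor();
          System.out.println("descriptor available: " + (fd != null && fd.valid()));
        } finally {
          in.close();
        }
      }
    }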
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HasFileDescriptor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HasFileDescriptor.java
new file mode 100644
index 00000000000..bcf325ceca5
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HasFileDescriptor.java
@@ -0,0 +1,40 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs;
+
+import java.io.FileDescriptor;
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Interface for streams that can report an associated {@link FileDescriptor}.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public interface HasFileDescriptor {
+
+ /**
+ * @return the FileDescriptor
+ * @throws IOException
+ */
+ public FileDescriptor getFileDescriptor() throws IOException;
+
+}
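HasFileDescriptor itself is just the one-method contract above; any stream that is ultimately backed by a local java.io file can opt in. A hypothetical implementor (illustrative only, not in the patch) delegates straight to FileInputStream#getFD(), the same move RawLocalFileSystem makes below:

    import java.io.FileDescriptor;
    import java.io.FileInputStream;
    import java.io.IOException;

    import org.apache.hadoop.fs.HasFileDescriptor;

    // Hypothetical implementor: a stream backed by a local file advertises its
    // descriptor by delegating to the JDK's FileInputStream#getFD().
    class LocalFileBackedStream extends FileInputStream implements HasFileDescriptor {
      LocalFileBackedStream(String name) throws IOException {
        super(name);
      }

      @Override
      public FileDescriptor getFileDescriptor() throws IOException {
        return getFD();
      }
    }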
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
index 58492e13181..38e991480af 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
@@ -26,6 +26,7 @@ import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
+import java.io.FileDescriptor;
import java.net.URI;
import java.nio.ByteBuffer;
import java.util.Arrays;
@@ -111,7 +112,7 @@ public class RawLocalFileSystem extends FileSystem {
/*******************************************************
* For open()'s FSInputStream.
*******************************************************/
- class LocalFSFileInputStream extends FSInputStream {
+ class LocalFSFileInputStream extends FSInputStream implements HasFileDescriptor {
private FileInputStream fis;
private long position;
@@ -181,6 +182,11 @@ public class RawLocalFileSystem extends FileSystem {
}
return value;
}
+
+ @Override
+ public FileDescriptor getFileDescriptor() throws IOException {
+ return fis.getFD();
+ }
}
public FSDataInputStream open(Path f, int bufferSize) throws IOException {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
index 7d85c016deb..1ce81fd6a22 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
@@ -88,7 +88,7 @@ public abstract class HAAdmin extends Configured implements Tool {
/** Output stream for errors, for use in tests */
protected PrintStream errOut = System.err;
- PrintStream out = System.out;
+ protected PrintStream out = System.out;
private RequestSource requestSource = RequestSource.REQUEST_BY_USER;
protected abstract HAServiceTarget resolveTarget(String string);
@@ -439,7 +439,10 @@ public abstract class HAAdmin extends Configured implements Tool {
}
private int help(String[] argv) {
- if (argv.length != 2) {
+ if (argv.length == 1) { // only -help
+ printUsage(out);
+ return 0;
+ } else if (argv.length != 2) {
printUsage(errOut, "-help");
return -1;
}
@@ -454,7 +457,7 @@ public abstract class HAAdmin extends Configured implements Tool {
return -1;
}
- errOut.println(cmd + " [" + usageInfo.args + "]: " + usageInfo.help);
+ out.println(cmd + " [" + usageInfo.args + "]: " + usageInfo.help);
return 0;
}
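The help() change is behavioral: a bare -help now prints the usage summary to standard output and succeeds, and per-command help likewise goes to out rather than errOut. A hedged sketch of the new behavior from a driver — DFSHAAdmin is assumed here as the concrete HAAdmin subclass, and the wrapper class name is illustrative:

    import org.apache.hadoop.hdfs.tools.DFSHAAdmin;
    import org.apache.hadoop.util.ToolRunner;

    public class HelpBehavior {
      public static void main(String[] args) throws Exception {
        // After this patch "-help" alone returns 0 and writes the usage text to
        // stdout; previously it returned -1 and wrote it to stderr.
        int rc = ToolRunner.run(new DFSHAAdmin(), new String[] { "-help" });
        System.exit(rc);
      }
    }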
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
index c02fe0d1630..35d75b72ae5 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
@@ -80,6 +80,8 @@ public abstract class ZKFailoverController {
ZK_AUTH_KEY
};
+ protected static final String USAGE =
+ "Usage: java zkfc [ -formatZK [-force] [-nonInteractive] ]";
/** Unable to format the parent znode in ZK */
static final int ERR_CODE_FORMAT_DENIED = 2;
@@ -248,8 +250,7 @@ public abstract class ZKFailoverController {
}
private void printUsage() {
- System.err.println("Usage: " + this.getClass().getSimpleName() +
- " [-formatZK [-force | -nonInteractive]]");
+ System.err.println(USAGE + "\n");
}
private int formatZK(boolean force, boolean interactive)
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpConfig.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpConfig.java
index 4ee2f5582f8..d906d9642a7 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpConfig.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpConfig.java
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.http;
+import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@@ -37,6 +38,11 @@ public class HttpConfig {
CommonConfigurationKeysPublic.HADOOP_SSL_ENABLED_DEFAULT);
}
+ @VisibleForTesting
+ static void setSecure(boolean secure) {
+ sslEnabled = secure;
+ }
+
public static boolean isSecure() {
return sslEnabled;
}
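setSecure(boolean) is package-private and @VisibleForTesting, so only tests living in org.apache.hadoop.http can flip the static flag. A sketch of the intended pattern — the base-class name is hypothetical; TestSSLHttpServer below does the same thing inline in its setup and cleanup:

    package org.apache.hadoop.http;

    import org.junit.After;
    import org.junit.Before;

    // Hypothetical test helper: force HTTPS for the duration of a test and always
    // restore the default so later tests in the same JVM see the normal value.
    public abstract class SslEnabledTestBase {
      @Before
      public void forceSsl() {
        HttpConfig.setSecure(true);
      }

      @After
      public void restoreSsl() {
        HttpConfig.setSecure(false);
      }
    }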
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ExitUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ExitUtil.java
index a539a7a13d5..fbb58e36a9a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ExitUtil.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ExitUtil.java
@@ -73,6 +73,15 @@ public final class ExitUtil {
firstExitException = null;
}
+ /**
+ * Reset the tracking of process termination. This is for use
+ * in unit tests where one test in the suite expects an exit
+ * but others do not.
+ */
+ public static void resetFirstExitException() {
+ firstExitException = null;
+ }
+
/**
* Terminate the current process. Note that terminate is the *only* method
* that should be used to terminate the daemon processes.
@@ -112,4 +121,4 @@ public final class ExitUtil {
public static void terminate(int status) throws ExitException {
terminate(status, "ExitException");
}
-}
\ No newline at end of file
+}
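resetFirstExitException() rounds out ExitUtil's test support: disableSystemExit() turns terminate() into a throwable ExitException, and the reset clears the recorded exit so later tests in the same JVM start clean (TestBookKeeperAsHASharedDir below calls it from a @Before method). A hedged JUnit sketch of the pattern, with illustrative names:

    import static org.junit.Assert.assertTrue;
    import static org.junit.Assert.fail;

    import org.apache.hadoop.util.ExitUtil;
    import org.apache.hadoop.util.ExitUtil.ExitException;
    import org.junit.Before;
    import org.junit.Test;

    public class ExitTrackingExample {
      @Before
      public void resetExitTracking() {
        ExitUtil.disableSystemExit();        // terminate() now throws ExitException
        ExitUtil.resetFirstExitException();  // forget any exit from an earlier test
      }

      @Test
      public void testComponentExitsOnFatalError() {
        try {
          ExitUtil.terminate(1, "simulated fatal error");
          fail("expected ExitException");
        } catch (ExitException ee) {
          assertTrue(ExitUtil.terminateCalled());
        }
      }
    }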
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
index 604ea78d0fb..4d821f96f81 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
@@ -248,4 +248,14 @@ public class TestLocalFileSystem {
}
assertEquals(1, fileSchemeCount);
}
+
+ public void testHasFileDescriptor() throws IOException {
+ Configuration conf = new Configuration();
+ LocalFileSystem fs = FileSystem.getLocal(conf);
+ Path path = new Path(TEST_ROOT_DIR, "test-file");
+ writeFile(fs, path, 1);
+ BufferedFSInputStream bis = new BufferedFSInputStream(
+ new RawLocalFileSystem().new LocalFSFileInputStream(path), 1024);
+ assertNotNull(bis.getFileDescriptor());
+ }
}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestHAAdmin.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestHAAdmin.java
index a3cac1b96f0..1d8f48e2d02 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestHAAdmin.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestHAAdmin.java
@@ -40,7 +40,9 @@ public class TestHAAdmin {
private HAAdmin tool;
private ByteArrayOutputStream errOutBytes = new ByteArrayOutputStream();
+ private ByteArrayOutputStream outBytes = new ByteArrayOutputStream();
private String errOutput;
+ private String output;
@Before
public void setup() throws IOException {
@@ -53,12 +55,14 @@ public class TestHAAdmin {
};
tool.setConf(new Configuration());
tool.errOut = new PrintStream(errOutBytes);
+ tool.out = new PrintStream(outBytes);
}
private void assertOutputContains(String string) {
- if (!errOutput.contains(string)) {
- fail("Expected output to contain '" + string + "' but was:\n" +
- errOutput);
+ if (!errOutput.contains(string) && !output.contains(string)) {
+ fail("Expected output to contain '" + string +
+ "' but err_output was:\n" + errOutput +
+ "\n and output was: \n" + output);
}
}
@@ -88,17 +92,19 @@ public class TestHAAdmin {
@Test
public void testHelp() throws Exception {
- assertEquals(-1, runTool("-help"));
+ assertEquals(0, runTool("-help"));
assertEquals(0, runTool("-help", "transitionToActive"));
assertOutputContains("Transitions the service into Active");
}
private Object runTool(String ... args) throws Exception {
errOutBytes.reset();
+ outBytes.reset();
LOG.info("Running: HAAdmin " + Joiner.on(" ").join(args));
int ret = tool.run(args);
errOutput = new String(errOutBytes.toByteArray(), Charsets.UTF_8);
- LOG.info("Output:\n" + errOutput);
+ output = new String(outBytes.toByteArray(), Charsets.UTF_8);
+ LOG.info("Err_output:\n" + errOutput + "\nOutput:\n" + output);
return ret;
}
}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
index f5ab9572255..880804ec2c5 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
@@ -41,6 +41,8 @@ import java.net.URL;
* corresponding HTTPS URL.
*/
public class TestSSLHttpServer extends HttpServerFunctionalTest {
+ private static final String CONFIG_SITE_XML = "sslhttpserver-site.xml";
+
private static final String BASEDIR =
System.getProperty("test.build.dir", "target/test-dir") + "/" +
TestSSLHttpServer.class.getSimpleName();
@@ -49,8 +51,10 @@ public class TestSSLHttpServer extends HttpServerFunctionalTest {
private static HttpServer server;
private static URL baseUrl;
+
@Before
public void setup() throws Exception {
+ HttpConfig.setSecure(true);
File base = new File(BASEDIR);
FileUtil.fullyDelete(base);
base.mkdirs();
@@ -66,11 +70,12 @@ public class TestSSLHttpServer extends HttpServerFunctionalTest {
//we do this trick because the MR AppMaster is started in another VM and
//the HttpServer configuration is not loaded from the job.xml but from the
//site.xml files in the classpath
- Writer writer = new FileWriter(classpathDir + "/core-site.xml");
+ Writer writer = new FileWriter(new File(classpathDir, CONFIG_SITE_XML));
conf.writeXml(writer);
writer.close();
conf.setInt(HttpServer.HTTP_MAX_THREADS, 10);
+ conf.addResource(CONFIG_SITE_XML);
server = createServer("test", conf);
server.addServlet("echo", "/echo", TestHttpServer.EchoServlet.class);
server.start();
@@ -83,7 +88,8 @@ public class TestSSLHttpServer extends HttpServerFunctionalTest {
server.stop();
String classpathDir =
KeyStoreTestUtil.getClasspathDir(TestSSLHttpServer.class);
- new File(classpathDir + "/core-site.xml").delete();
+ new File(classpathDir, CONFIG_SITE_XML).delete();
+ HttpConfig.setSecure(false);
}
@@ -98,7 +104,9 @@ public class TestSSLHttpServer extends HttpServerFunctionalTest {
private static String readOut(URL url) throws Exception {
StringBuilder out = new StringBuilder();
HttpsURLConnection conn = (HttpsURLConnection) url.openConnection();
- SSLFactory sslf = new SSLFactory(SSLFactory.Mode.CLIENT, new Configuration());
+ Configuration conf = new Configuration();
+ conf.addResource(CONFIG_SITE_XML);
+ SSLFactory sslf = new SSLFactory(SSLFactory.Mode.CLIENT, conf);
sslf.init();
conn.setSSLSocketFactory(sslf.createSSLSocketFactory());
InputStream in = conn.getInputStream();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e1466388104..ce3643fb34e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -114,6 +114,12 @@ Trunk (unreleased changes)
HDFS-3789. JournalManager#format() should be able to throw IOException
(Ivan Kelly via todd)
+ HDFS-3723. Add support -h, -help to all the commands. (Jing Zhao via
+ suresh)
+
+ HDFS-3803. Change BlockPoolSliceScanner chatty INFO log to DEBUG.
+ (Andrew Purtell via suresh)
+
OPTIMIZATIONS
BUG FIXES
@@ -183,6 +189,8 @@ Trunk (unreleased changes)
HDFS-3625. Fix TestBackupNode by properly initializing edit log during
startup. (Junping Du via todd)
+ HDFS-3792. Fix two findbugs introduced by HDFS-3695 (todd)
+
Branch-2 ( Unreleased changes )
INCOMPATIBLE CHANGES
@@ -209,6 +217,8 @@ Branch-2 ( Unreleased changes )
HDFS-3637. Add support for encrypting the DataTransferProtocol. (atm)
+ HDFS-3150. Add option for clients to contact DNs via hostname. (eli)
+
IMPROVEMENTS
HDFS-3390. DFSAdmin should print full stack traces of errors when DEBUG
@@ -381,6 +391,14 @@ Branch-2 ( Unreleased changes )
HDFS-3276. initializeSharedEdits should have a -nonInteractive flag (todd)
+ HDFS-3765. namenode -initializeSharedEdits should be able to initialize
+ all shared storages. (Vinay and todd via todd)
+
+ HDFS-3802. StartupOption.name in HdfsServerConstants should be final.
+ (Jing Zhao via szetszwo)
+
+ HDFS-3796. Speed up edit log tests by avoiding fsync() (todd)
+
OPTIMIZATIONS
HDFS-2982. Startup performance suffers when there are many edit log
@@ -587,6 +605,15 @@ Branch-2 ( Unreleased changes )
IOExceptions of stream closures can mask root exceptions. (Uma Maheswara
Rao G via szetszwo)
+ HDFS-3790. test_fuse_dfs.c doesn't compile on centos 5. (Colin Patrick
+ McCabe via atm)
+
+ HDFS-3658. Fix bugs in TestDFSClientRetries and add more tests. (szetszwo)
+
+ HDFS-3794. WebHDFS OPEN returns the incorrect Content-Length in the HTTP
+ header when offset is specified and length is omitted.
+ (Ravi Prakash via szetszwo)
+
BREAKDOWN OF HDFS-3042 SUBTASKS
HDFS-2185. HDFS portion of ZK-based FailoverController (todd)
@@ -1462,6 +1489,9 @@ Release 0.23.3 - UNRELEASED
HDFS-3553. Hftp proxy tokens are broken (daryn)
+ HDFS-3718. Datanode won't shutdown because of runaway DataBlockScanner
+ thread (Kihwal Lee via daryn)
+
Release 0.23.2 - UNRELEASED
INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperAsHASharedDir.java b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperAsHASharedDir.java
index d1a80f637af..ebbf80aa375 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperAsHASharedDir.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperAsHASharedDir.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.contrib.bkjournal;
import static org.junit.Assert.*;
+
import org.junit.Test;
import org.junit.Before;
import org.junit.After;
@@ -25,6 +26,9 @@ import org.junit.BeforeClass;
import org.junit.AfterClass;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ha.ServiceFailedException;
+import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
+import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HAUtil;
@@ -35,12 +39,16 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogTestUtil;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.ExitUtil;
import org.apache.hadoop.util.ExitUtil.ExitException;
import org.apache.bookkeeper.proto.BookieServer;
@@ -48,7 +56,9 @@ import org.apache.bookkeeper.proto.BookieServer;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import java.io.File;
import java.io.IOException;
+import java.net.URISyntaxException;
/**
* Integration test to ensure that the BookKeeper JournalManager
@@ -67,6 +77,11 @@ public class TestBookKeeperAsHASharedDir {
bkutil = new BKJMUtil(numBookies);
bkutil.start();
}
+
+ @Before
+ public void clearExitStatus() {
+ ExitUtil.resetFirstExitException();
+ }
@AfterClass
public static void teardownBookkeeper() throws Exception {
@@ -244,4 +259,97 @@ public class TestBookKeeperAsHASharedDir {
}
}
}
+
+ /**
+ * Use NameNode INITIALIZESHAREDEDITS to initialize the shared edits, i.e. copy
+ * the edit log segments into the new BKJM shared edits directory.
+ *
+ * @throws Exception
+ */
+ @Test
+ public void testInitializeBKSharedEdits() throws Exception {
+ MiniDFSCluster cluster = null;
+ try {
+ Configuration conf = new Configuration();
+ HAUtil.setAllowStandbyReads(conf, true);
+ conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
+
+ MiniDFSNNTopology topology = MiniDFSNNTopology.simpleHATopology();
+ cluster = new MiniDFSCluster.Builder(conf).nnTopology(topology)
+ .numDataNodes(0).build();
+ cluster.waitActive();
+ // Shutdown and clear the current filebased shared dir.
+ cluster.shutdownNameNodes();
+ File shareddir = new File(cluster.getSharedEditsDir(0, 1));
+ assertTrue("Initial Shared edits dir not fully deleted",
+ FileUtil.fullyDelete(shareddir));
+
+ // Check namenodes should not start without shared dir.
+ assertCanNotStartNamenode(cluster, 0);
+ assertCanNotStartNamenode(cluster, 1);
+
+ // Configure bkjm as new shared edits dir in both namenodes
+ Configuration nn1Conf = cluster.getConfiguration(0);
+ Configuration nn2Conf = cluster.getConfiguration(1);
+ nn1Conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY, BKJMUtil
+ .createJournalURI("/initializeSharedEdits").toString());
+ nn2Conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY, BKJMUtil
+ .createJournalURI("/initializeSharedEdits").toString());
+ BKJMUtil.addJournalManagerDefinition(nn1Conf);
+ BKJMUtil.addJournalManagerDefinition(nn2Conf);
+
+ // Initialize the BKJM shared edits.
+ assertFalse(NameNode.initializeSharedEdits(nn1Conf));
+
+ // NameNode should be able to start and should be in sync with BKJM as
+ // shared dir
+ assertCanStartHANameNodes(cluster, conf, "/testBKJMInitialize");
+ } finally {
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ }
+ }
+
+ private void assertCanNotStartNamenode(MiniDFSCluster cluster, int nnIndex) {
+ try {
+ cluster.restartNameNode(nnIndex, false);
+ fail("Should not have been able to start NN" + (nnIndex)
+ + " without shared dir");
+ } catch (IOException ioe) {
+ LOG.info("Got expected exception", ioe);
+ GenericTestUtils.assertExceptionContains(
+ "Cannot start an HA namenode with name dirs that need recovery", ioe);
+ }
+ }
+
+ private void assertCanStartHANameNodes(MiniDFSCluster cluster,
+ Configuration conf, String path) throws ServiceFailedException,
+ IOException, URISyntaxException, InterruptedException {
+ // Now should be able to start both NNs. Pass "false" here so that we don't
+ // try to waitActive on all NNs, since the second NN doesn't exist yet.
+ cluster.restartNameNode(0, false);
+ cluster.restartNameNode(1, true);
+
+ // Make sure HA is working.
+ cluster
+ .getNameNode(0)
+ .getRpcServer()
+ .transitionToActive(
+ new StateChangeRequestInfo(RequestSource.REQUEST_BY_USER));
+ FileSystem fs = null;
+ try {
+ Path newPath = new Path(path);
+ fs = HATestUtil.configureFailoverFs(cluster, conf);
+ assertTrue(fs.mkdirs(newPath));
+ HATestUtil.waitForStandbyToCatchUp(cluster.getNameNode(0),
+ cluster.getNameNode(1));
+ assertTrue(NameNodeAdapter.getFileInfo(cluster.getNameNode(1),
+ newPath.toString(), false).isDir());
+ } finally {
+ if (fs != null) {
+ fs.close();
+ }
+ }
+ }
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
index 6db6e75198b..7d4fb7a0610 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
@@ -86,11 +86,11 @@ class BlockReaderLocal implements BlockReader {
}
private synchronized ClientDatanodeProtocol getDatanodeProxy(
- DatanodeInfo node, Configuration conf, int socketTimeout)
- throws IOException {
+ DatanodeInfo node, Configuration conf, int socketTimeout,
+ boolean connectToDnViaHostname) throws IOException {
if (proxy == null) {
proxy = DFSUtil.createClientDatanodeProtocolProxy(node, conf,
- socketTimeout);
+ socketTimeout, connectToDnViaHostname);
}
return proxy;
}
@@ -156,14 +156,16 @@ class BlockReaderLocal implements BlockReader {
*/
static BlockReaderLocal newBlockReader(Configuration conf, String file,
ExtendedBlock blk, Token
* The balancer is a tool that balances disk space usage on an HDFS cluster
* when some datanodes become full or when new empty nodes join the cluster.
@@ -189,6 +189,13 @@ public class Balancer {
*/
public static final int MAX_NUM_CONCURRENT_MOVES = 5;
+ private static final String USAGE = "Usage: java "
+ + Balancer.class.getSimpleName()
+ + "\n\t[-policy