merge HDFS-3275
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1332528 13f79535-47bb-0310-9956-ffa450edef68
parent 08f50ba33b
commit b2f524582c
@@ -451,6 +451,9 @@ Release 2.0.0 - UNRELEASED
     HDFS-3326. Append enabled log message uses the wrong variable.
     (Matthew Jacobs via eli)

+    HDFS-3275. Skip format for non-file based directories.
+    (Amith D K via umamahesh)
+
   BREAKDOWN OF HDFS-1623 SUBTASKS

     HDFS-2179. Add fencing framework and mechanisms for NameNode HA. (todd)
@@ -25,6 +25,7 @@ import java.io.InputStream;
 import java.io.OutputStream;
 import java.net.InetSocketAddress;
 import java.net.URI;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Iterator;
@@ -674,10 +675,14 @@ public class NameNode {
     initializeGenericKeys(conf, nsId, namenodeId);
     checkAllowFormat(conf);

-    Collection<URI> dirsToFormat = FSNamesystem.getNamespaceDirs(conf);
+    Collection<URI> nameDirsToFormat = FSNamesystem.getNamespaceDirs(conf);
+    List<URI> sharedDirs = FSNamesystem.getSharedEditsDirs(conf);
+    List<URI> dirsToPrompt = new ArrayList<URI>();
+    dirsToPrompt.addAll(nameDirsToFormat);
+    dirsToPrompt.addAll(sharedDirs);
     List<URI> editDirsToFormat =
         FSNamesystem.getNamespaceEditsDirs(conf);
-    if (!confirmFormat(dirsToFormat, force, isInteractive)) {
+    if (!confirmFormat(dirsToPrompt, force, isInteractive)) {
       return true; // aborted
     }

@@ -689,7 +694,7 @@ public class NameNode {
     }
     System.out.println("Formatting using clusterid: " + clusterId);

-    FSImage fsImage = new FSImage(conf, dirsToFormat, editDirsToFormat);
+    FSImage fsImage = new FSImage(conf, nameDirsToFormat, editDirsToFormat);
     FSNamesystem fsn = new FSNamesystem(conf, fsImage);
     fsImage.format(fsn, clusterId);
     return false;
@@ -711,7 +716,18 @@ public class NameNode {
       boolean force, boolean interactive)
       throws IOException {
     for(Iterator<URI> it = dirsToFormat.iterator(); it.hasNext();) {
-      File curDir = new File(it.next().getPath());
+      URI dirUri = it.next();
+      if (!dirUri.getScheme().equals(NNStorage.LOCAL_URI_SCHEME)) {
+        System.err.println("Skipping format for directory \"" + dirUri
+            + "\". Can only format local directories with scheme \""
+            + NNStorage.LOCAL_URI_SCHEME + "\".");
+        continue;
+      }
+      // To validate only file based schemes are formatted
+      assert dirUri.getScheme().equals(NNStorage.LOCAL_URI_SCHEME) :
+        "formatting is not supported for " + dirUri;
+
+      File curDir = new File(dirUri.getPath());
       // Its alright for a dir not to exist, or to exist (properly accessible)
       // and be completely empty.
       if (!curDir.exists() ||
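For orientation, the essence of the NameNode change above is a scheme check on each storage URI before any on-disk formatting is attempted. Below is a minimal, self-contained sketch of that idea; the class name, the hard-coded "file" constant (standing in for NNStorage.LOCAL_URI_SCHEME), and the "Would format" line are illustrative only and not part of the actual patch.

import java.net.URI;
import java.util.Arrays;
import java.util.List;

public class SchemeFilterSketch {
  // Stand-in for NNStorage.LOCAL_URI_SCHEME used by the real check.
  private static final String LOCAL_URI_SCHEME = "file";

  public static void main(String[] args) {
    List<URI> dirs = Arrays.asList(
        URI.create("file:///tmp/hadoop/dfs/name"),
        URI.create("dummy://127.0.0.1:2181/ledgers"));

    for (URI dirUri : dirs) {
      if (!LOCAL_URI_SCHEME.equals(dirUri.getScheme())) {
        // Non-file directories (e.g. a shared edits journal URI) are reported
        // and skipped instead of being treated as local paths.
        System.err.println("Skipping format for directory \"" + dirUri
            + "\". Can only format local directories with scheme \""
            + LOCAL_URI_SCHEME + "\".");
        continue;
      }
      System.out.println("Would format local directory: " + dirUri.getPath());
    }
  }
}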
@@ -27,13 +27,19 @@ import static org.junit.Assert.fail;

 import java.io.File;
 import java.io.IOException;
+import java.net.InetSocketAddress;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.namenode.TestGenericJournalConf.DummyJournalManager;
+import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.util.StringUtils;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@@ -144,4 +150,34 @@ public class TestAllowFormat {
     NameNode.format(config);
     LOG.info("Done verifying format will succeed with allowformat true");
   }
+
+  /**
+   * Test to skip format for non file scheme directory configured
+   *
+   * @throws Exception
+   */
+  @Test
+  public void testFormatShouldBeIgnoredForNonFileBasedDirs() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    String logicalName = "mycluster";
+
+    // DFS_NAMENODE_RPC_ADDRESS_KEY are required to identify the NameNode
+    // is configured in HA, then only DFS_NAMENODE_SHARED_EDITS_DIR_KEY
+    // is considered.
+    String localhost = "127.0.0.1";
+    InetSocketAddress nnAddr1 = new InetSocketAddress(localhost, 8020);
+    InetSocketAddress nnAddr2 = new InetSocketAddress(localhost, 9020);
+    HATestUtil.setFailoverConfigurations(conf, logicalName, nnAddr1, nnAddr2);
+
+    conf.setBoolean(DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY, true);
+    conf.set(DFSUtil.addKeySuffixes(
+        DFSConfigKeys.DFS_NAMENODE_EDITS_PLUGIN_PREFIX, "dummy"),
+        DummyJournalManager.class.getName());
+    conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY, "dummy://"
+        + localhost + ":2181/ledgers");
+    conf.set(DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY, "nn1");
+
+    // An internal assert is added to verify the working of test
+    NameNode.format(conf);
+  }
 }
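In short, the test above configures an HA pair whose shared edits directory uses a non-file scheme ("dummy://127.0.0.1:2181/ledgers", backed by DummyJournalManager) and then calls NameNode.format(conf). With the NameNode change in this commit, format should skip that directory instead of failing; assuming NNStorage.LOCAL_URI_SCHEME is "file", the skip message added above would render roughly as:

  Skipping format for directory "dummy://127.0.0.1:2181/ledgers". Can only format local directories with scheme "file".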
@@ -167,6 +167,15 @@ public abstract class HATestUtil {
       Configuration conf, String logicalName, int nsIndex) {
     InetSocketAddress nnAddr1 = cluster.getNameNode(2 * nsIndex).getNameNodeAddress();
     InetSocketAddress nnAddr2 = cluster.getNameNode(2 * nsIndex + 1).getNameNodeAddress();
+    setFailoverConfigurations(conf, logicalName, nnAddr1, nnAddr2);
+  }
+
+  /**
+   * Sets the required configurations for performing failover
+   */
+  public static void setFailoverConfigurations(Configuration conf,
+      String logicalName, InetSocketAddress nnAddr1,
+      InetSocketAddress nnAddr2) {
     String nameNodeId1 = "nn1";
     String nameNodeId2 = "nn2";
     String address1 = "hdfs://" + nnAddr1.getHostName() + ":" + nnAddr1.getPort();
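For reference, the overload extracted above lets test code set up failover configuration from explicit NameNode addresses, which is how the new TestAllowFormat case uses it. A minimal sketch, assuming test scope is on the classpath; the logical name and ports below are arbitrary.

import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;

public class FailoverConfSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Two arbitrary local addresses standing in for the active and standby NameNodes.
    InetSocketAddress nn1 = new InetSocketAddress("127.0.0.1", 8020);
    InetSocketAddress nn2 = new InetSocketAddress("127.0.0.1", 9020);
    // Per the extracted method, this fills in the nn1/nn2 NameNode IDs and their
    // hdfs:// RPC addresses for the given logical nameservice.
    HATestUtil.setFailoverConfigurations(conf, "mycluster", nn1, nn2);
  }
}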