diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index ef6af5147c6..635ee13b902 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -47,6 +47,9 @@ Release 2.0.1-alpha - UNRELEASED
     HDFS-3438. BootstrapStandby should not require a rollEdits on active node
     (todd)
 
+    HDFS-2885. Remove "federation" from the nameservice config options.
+    (Tsz Wo (Nicholas) via eli)
+
   OPTIMIZATIONS
 
   BUG FIXES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index f2d18884f5b..da627ed98d4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -322,8 +322,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String DFS_NAMENODE_NAME_CACHE_THRESHOLD_KEY = "dfs.namenode.name.cache.threshold";
   public static final int DFS_NAMENODE_NAME_CACHE_THRESHOLD_DEFAULT = 10;
 
-  public static final String DFS_FEDERATION_NAMESERVICES = "dfs.federation.nameservices";
-  public static final String DFS_FEDERATION_NAMESERVICE_ID = "dfs.federation.nameservice.id";
+  public static final String DFS_NAMESERVICES = "dfs.nameservices";
+  public static final String DFS_NAMESERVICE_ID = "dfs.nameservice.id";
   public static final String DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY = "dfs.namenode.resource.check.interval";
   public static final int DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_DEFAULT = 5000;
   public static final String DFS_NAMENODE_DU_RESERVED_KEY = "dfs.namenode.resource.du.reserved";
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
index 003eefc4581..b04ad041063 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
@@ -294,7 +294,7 @@ public class DFSUtil {
    * @return collection of nameservice Ids, or null if not specified
    */
   public static Collection<String> getNameServiceIds(Configuration conf) {
-    return conf.getTrimmedStringCollection(DFS_FEDERATION_NAMESERVICES);
+    return conf.getTrimmedStringCollection(DFS_NAMESERVICES);
   }
 
   /**
@@ -879,7 +879,7 @@ public class DFSUtil {
    * Get the nameservice Id by matching the {@code addressKey} with the
    * the address of the local node.
    *
-   * If {@link DFSConfigKeys#DFS_FEDERATION_NAMESERVICE_ID} is not specifically
+   * If {@link DFSConfigKeys#DFS_NAMESERVICE_ID} is not specifically
    * configured, and more than one nameservice Id is configured, this method
    * determines the nameservice Id by matching the local node's address with the
    * configured addresses. When a match is found, it returns the nameservice Id
@@ -891,7 +891,7 @@ public class DFSUtil {
    * @throws HadoopIllegalArgumentException on error
    */
   private static String getNameServiceId(Configuration conf, String addressKey) {
-    String nameserviceId = conf.get(DFS_FEDERATION_NAMESERVICE_ID);
+    String nameserviceId = conf.get(DFS_NAMESERVICE_ID);
     if (nameserviceId != null) {
       return nameserviceId;
     }
@@ -963,7 +963,7 @@ public class DFSUtil {
     if (found > 1) { // Only one address must match the local address
       String msg = "Configuration has multiple addresses that match "
           + "local node's address. Please configure the system with "
-          + DFS_FEDERATION_NAMESERVICE_ID + " and "
+          + DFS_NAMESERVICE_ID + " and "
           + DFS_HA_NAMENODE_ID_KEY;
       throw new HadoopIllegalArgumentException(msg);
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
index b56892537b0..77f0597bb27 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
@@ -142,7 +142,7 @@ public class HAUtil {
     Preconditions.checkArgument(nsId != null,
         "Could not determine namespace id. Please ensure that this " +
         "machine is one of the machines listed as a NN RPC address, " +
-        "or configure " + DFSConfigKeys.DFS_FEDERATION_NAMESERVICE_ID);
+        "or configure " + DFSConfigKeys.DFS_NAMESERVICE_ID);
 
     Collection<String> nnIds = DFSUtil.getNameNodeIds(myConf, nsId);
     String myNNId = myConf.get(DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java
index 1454fdbd6f0..022cf58aeb5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java
@@ -63,7 +63,7 @@ public class HdfsConfiguration extends Configuration {
   }
 
   private static void deprecate(String oldKey, String newKey) {
-    Configuration.addDeprecation(oldKey, new String[]{newKey});
+    Configuration.addDeprecation(oldKey, newKey);
   }
 
   private static void addDeprecatedKeys() {
@@ -102,5 +102,7 @@ public class HdfsConfiguration extends Configuration {
     deprecate("dfs.block.size", DFSConfigKeys.DFS_BLOCK_SIZE_KEY);
     deprecate("dfs.datanode.max.xcievers", DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_KEY);
     deprecate("io.bytes.per.checksum", DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY);
+    deprecate("dfs.federation.nameservices", DFSConfigKeys.DFS_NAMESERVICES);
+    deprecate("dfs.federation.nameservice.id", DFSConfigKeys.DFS_NAMESERVICE_ID);
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java
index 3355ee269a5..2438c3f713f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java
@@ -145,7 +145,7 @@ class BlockPoolManager {
   void refreshNamenodes(Configuration conf)
       throws IOException {
     LOG.info("Refresh request received for nameservices: "
-        + conf.get(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES));
+        + conf.get(DFSConfigKeys.DFS_NAMESERVICES));
 
     Map<String, Map<String, InetSocketAddress>> newAddressMap =
         DFSUtil.getNNServiceRpcAddresses(conf);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index f163c7730df..2d243045ca4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -1138,7 +1138,7 @@ public class NameNode {
     if ((nameserviceId != null && !nameserviceId.isEmpty()) ||
         (namenodeId != null && !namenodeId.isEmpty())) {
       if (nameserviceId != null) {
-        conf.set(DFS_FEDERATION_NAMESERVICE_ID, nameserviceId);
+        conf.set(DFS_NAMESERVICE_ID, nameserviceId);
       }
       if (namenodeId != null) {
         conf.set(DFS_HA_NAMENODE_ID_KEY, namenodeId);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 54ce2a26a34..539d2e62384 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -778,7 +778,7 @@
 
 <property>
-  <name>dfs.federation.nameservices</name>
+  <name>dfs.nameservices</name>
   <value></value>
   <description>
     Comma-separated list of nameservices.
   </description>
@@ -786,12 +786,12 @@
 </property>
 
 <property>
-  <name>dfs.federation.nameservice.id</name>
+  <name>dfs.nameservice.id</name>
   <value></value>
   <description>
     The ID of this nameservice. If the nameservice ID is not
     configured or more than one nameservice is configured for
-    dfs.federation.nameservices it is determined automatically by
+    dfs.nameservices it is determined automatically by
     matching the local node's address with the configured address.
   </description>
 </property>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index af8507d5ef3..7e793a59bd9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -707,7 +707,7 @@ public class DFSTestUtil {
           info.nameserviceId), DFSUtil.createUri(HdfsConstants.HDFS_URI_SCHEME,
           info.nameNode.getNameNodeAddress()).toString());
     }
-    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, Joiner.on(",")
+    conf.set(DFSConfigKeys.DFS_NAMESERVICES, Joiner.on(",")
         .join(nameservices));
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index b418fcf5072..1cecaebaf9f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -25,8 +25,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_FEDERATION_NAMESERVICES;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_FEDERATION_NAMESERVICE_ID;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICES;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICE_ID;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY;
@@ -612,7 +612,7 @@ public class MiniDFSCluster {
       }
     }
     if (!allNsIds.isEmpty()) {
-      conf.set(DFS_FEDERATION_NAMESERVICES, Joiner.on(",").join(allNsIds));
+      conf.set(DFS_NAMESERVICES, Joiner.on(",").join(allNsIds));
     }
 
     int nnCounter = 0;
@@ -710,7 +710,7 @@ public class MiniDFSCluster {
       boolean manageNameDfsDirs, int nnIndex)
       throws IOException {
     if (nameserviceId != null) {
-      conf.set(DFS_FEDERATION_NAMESERVICE_ID, nameserviceId);
+      conf.set(DFS_NAMESERVICE_ID, nameserviceId);
     }
     if (nnId != null) {
       conf.set(DFS_HA_NAMENODE_ID_KEY, nnId);
@@ -2111,9 +2111,9 @@ public class MiniDFSCluster {
     nameNodes = newlist;
     String nameserviceId = NAMESERVICE_ID_PREFIX + (nnIndex + 1);
 
-    String nameserviceIds = conf.get(DFS_FEDERATION_NAMESERVICES);
+    String nameserviceIds = conf.get(DFS_NAMESERVICES);
     nameserviceIds += "," + nameserviceId;
-    conf.set(DFS_FEDERATION_NAMESERVICES, nameserviceIds);
+    conf.set(DFS_NAMESERVICES, nameserviceIds);
 
     String nnId = null;
     initNameNodeAddress(conf, nameserviceId,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
index a0948e6cbf6..783528a3d97 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
@@ -100,7 +100,7 @@ public class TestDFSUtil {
   private Configuration setupAddress(String key) {
     HdfsConfiguration conf = new HdfsConfiguration();
-    conf.set(DFS_FEDERATION_NAMESERVICES, "nn1");
+    conf.set(DFS_NAMESERVICES, "nn1");
     conf.set(DFSUtil.addKeySuffixes(key, "nn1"), "localhost:9000");
     return conf;
   }
@@ -112,7 +112,7 @@ public class TestDFSUtil {
   @Test
   public void getNameServiceId() {
     HdfsConfiguration conf = new HdfsConfiguration();
-    conf.set(DFS_FEDERATION_NAMESERVICE_ID, "nn1");
+    conf.set(DFS_NAMESERVICE_ID, "nn1");
     assertEquals("nn1", DFSUtil.getNamenodeNameServiceId(conf));
   }
@@ -157,7 +157,7 @@ public class TestDFSUtil {
   @Test(expected = HadoopIllegalArgumentException.class)
   public void testGetNameServiceIdException() {
     HdfsConfiguration conf = new HdfsConfiguration();
-    conf.set(DFS_FEDERATION_NAMESERVICES, "nn1,nn2");
+    conf.set(DFS_NAMESERVICES, "nn1,nn2");
     conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn1"),
         "localhost:9000");
     conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn2"),
@@ -172,7 +172,7 @@ public class TestDFSUtil {
   @Test
   public void testGetNameServiceIds() {
     HdfsConfiguration conf = new HdfsConfiguration();
-    conf.set(DFS_FEDERATION_NAMESERVICES, "nn1,nn2");
+    conf.set(DFS_NAMESERVICES, "nn1,nn2");
     Collection<String> nameserviceIds = DFSUtil.getNameServiceIds(conf);
     Iterator<String> it = nameserviceIds.iterator();
     assertEquals(2, nameserviceIds.size());
@@ -183,11 +183,11 @@ public class TestDFSUtil {
   @Test
   public void testGetOnlyNameServiceIdOrNull() {
     HdfsConfiguration conf = new HdfsConfiguration();
-    conf.set(DFS_FEDERATION_NAMESERVICES, "ns1,ns2");
+    conf.set(DFS_NAMESERVICES, "ns1,ns2");
     assertNull(DFSUtil.getOnlyNameServiceIdOrNull(conf));
-    conf.set(DFS_FEDERATION_NAMESERVICES, "");
+    conf.set(DFS_NAMESERVICES, "");
     assertNull(DFSUtil.getOnlyNameServiceIdOrNull(conf));
-    conf.set(DFS_FEDERATION_NAMESERVICES, "ns1");
+    conf.set(DFS_NAMESERVICES, "ns1");
     assertEquals("ns1", DFSUtil.getOnlyNameServiceIdOrNull(conf));
   }
@@ -199,7 +199,7 @@ public class TestDFSUtil {
   @Test
   public void testMultipleNamenodes() throws IOException {
     HdfsConfiguration conf = new HdfsConfiguration();
-    conf.set(DFS_FEDERATION_NAMESERVICES, "nn1,nn2");
+    conf.set(DFS_NAMESERVICES, "nn1,nn2");
     // Test - configured list of namenodes are returned
     final String NN1_ADDRESS = "localhost:9000";
     final String NN2_ADDRESS = "localhost:9001";
@@ -270,8 +270,8 @@ public class TestDFSUtil {
     final HdfsConfiguration conf = new HdfsConfiguration();
     String nsId = "ns1";
 
-    conf.set(DFS_FEDERATION_NAMESERVICES, nsId);
-    conf.set(DFS_FEDERATION_NAMESERVICE_ID, nsId);
+    conf.set(DFS_NAMESERVICES, nsId);
+    conf.set(DFS_NAMESERVICE_ID, nsId);
 
     // Set the nameservice specific keys with nameserviceId in the config key
     for (String key : NameNode.NAMESERVICE_SPECIFIC_KEYS) {
@@ -299,8 +299,8 @@ public class TestDFSUtil {
     String nsId = "ns1";
     String nnId = "nn1";
 
-    conf.set(DFS_FEDERATION_NAMESERVICES, nsId);
-    conf.set(DFS_FEDERATION_NAMESERVICE_ID, nsId);
+    conf.set(DFS_NAMESERVICES, nsId);
+    conf.set(DFS_NAMESERVICE_ID, nsId);
     conf.set(DFS_HA_NAMENODES_KEY_PREFIX + "." + nsId, nnId);
 
     // Set the nameservice specific keys with nameserviceId in the config key
@@ -430,7 +430,7 @@ public class TestDFSUtil {
     conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://ns1");
 
     // Two nameservices, each with two NNs.
-    conf.set(DFS_FEDERATION_NAMESERVICES, "ns1,ns2");
+    conf.set(DFS_NAMESERVICES, "ns1,ns2");
     conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, "ns1"),
         "ns1-nn1,ns1-nn2");
     conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, "ns2"),
@@ -491,7 +491,7 @@ public class TestDFSUtil {
     final String NS1_NN2_HOST = "ns1-nn1.example.com:8020";
     final String NS1_NN2_HOST_SVC = "ns1-nn2.example.com:8021";
 
-    conf.set(DFS_FEDERATION_NAMESERVICES, "ns1");
+    conf.set(DFS_NAMESERVICES, "ns1");
     conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, "ns1"),"nn1,nn2");
 
     conf.set(DFSUtil.addKeySuffixes(
@@ -540,7 +540,7 @@ public class TestDFSUtil {
     final String NN1_SRVC_ADDR = "nn.example.com:8021";
     final String NN2_ADDR = "nn2.example.com:8020";
 
-    conf.set(DFS_FEDERATION_NAMESERVICES, "ns1,ns2");
+    conf.set(DFS_NAMESERVICES, "ns1,ns2");
     conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, "ns1"),"nn1,nn2");
     conf.set(DFSUtil.addKeySuffixes(
         DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1", "nn1"), NS1_NN1_ADDR);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolManager.java
index c0301ac8145..b10d27e4350 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolManager.java
@@ -101,7 +101,7 @@ public class TestBlockPoolManager {
   @Test
   public void testFederationRefresh() throws Exception {
     Configuration conf = new Configuration();
-    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES,
+    conf.set(DFSConfigKeys.DFS_NAMESERVICES,
         "ns1,ns2");
     addNN(conf, "ns1", "mock1:8020");
     addNN(conf, "ns2", "mock1:8020");
@@ -112,7 +112,7 @@ public class TestBlockPoolManager {
     log.setLength(0);
 
     // Remove the first NS
-    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES,
+    conf.set(DFSConfigKeys.DFS_NAMESERVICES,
         "ns1");
     bpm.refreshNamenodes(conf);
     assertEquals(
@@ -122,7 +122,7 @@ public class TestBlockPoolManager {
 
     // Add back an NS -- this creates a new BPOS since the old
    // one for ns2 should have been previously retired
-    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES,
+    conf.set(DFSConfigKeys.DFS_NAMESERVICES,
        "ns1,ns2");
     bpm.refreshNamenodes(conf);
     assertEquals(
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java
index 080f47ca9c8..fe6e8b7973c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java
@@ -46,7 +46,7 @@ public class TestDeleteBlockPool {
     Configuration conf = new Configuration();
     MiniDFSCluster cluster = null;
     try {
-      conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES,
+      conf.set(DFSConfigKeys.DFS_NAMESERVICES,
           "namesServerId1,namesServerId2");
       cluster = new MiniDFSCluster.Builder(conf)
         .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
@@ -79,7 +79,7 @@ public class TestDeleteBlockPool {
       }
 
       Configuration nn1Conf = cluster.getConfiguration(1);
-      nn1Conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, "namesServerId2");
+      nn1Conf.set(DFSConfigKeys.DFS_NAMESERVICES, "namesServerId2");
       dn1.refreshNamenodes(nn1Conf);
       assertEquals(1, dn1.getAllBpOs().length);
@@ -155,7 +155,7 @@ public class TestDeleteBlockPool {
     Configuration conf = new Configuration();
     MiniDFSCluster cluster = null;
     try {
-      conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES,
+      conf.set(DFSConfigKeys.DFS_NAMESERVICES,
          "namesServerId1,namesServerId2");
      cluster = new MiniDFSCluster.Builder(conf)
        .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
@@ -178,7 +178,7 @@ public class TestDeleteBlockPool {
     File dn1StorageDir2 = cluster.getInstanceStorageDir(0, 1);
 
     Configuration nn1Conf = cluster.getConfiguration(0);
-    nn1Conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, "namesServerId1");
+    nn1Conf.set(DFSConfigKeys.DFS_NAMESERVICES, "namesServerId1");
     dn1.refreshNamenodes(nn1Conf);
     Assert.assertEquals(1, dn1.getAllBpOs().length);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMulitipleNNDataBlockScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMulitipleNNDataBlockScanner.java
index 8441e184068..cff718b86fa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMulitipleNNDataBlockScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMulitipleNNDataBlockScanner.java
@@ -105,7 +105,7 @@ public class TestMulitipleNNDataBlockScanner {
         namenodesBuilder.append(",");
       }
 
-      conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, namenodesBuilder
+      conf.set(DFSConfigKeys.DFS_NAMESERVICES, namenodesBuilder
          .toString());
       DataNode dn = cluster.getDataNodes().get(0);
       dn.refreshNamenodes(conf);
@@ -122,7 +122,7 @@ public class TestMulitipleNNDataBlockScanner {
       namenodesBuilder.append(DFSUtil.getNamenodeNameServiceId(cluster
          .getConfiguration(2)));
 
-      conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, namenodesBuilder
+      conf.set(DFSConfigKeys.DFS_NAMESERVICES, namenodesBuilder
          .toString());
       dn.refreshNamenodes(conf);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
index 288ae8a30b4..49d0f5d3a25 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
@@ -1115,7 +1115,7 @@ public class TestCheckpoint extends TestCase {
     Configuration conf = new HdfsConfiguration();
     String nameserviceId1 = "ns1";
     String nameserviceId2 = "ns2";
-    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, nameserviceId1
+    conf.set(DFSConfigKeys.DFS_NAMESERVICES, nameserviceId1
        + "," + nameserviceId2);
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
         .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetImageServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetImageServlet.java
index 9ab31b3790b..62fac1998f0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetImageServlet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetImageServlet.java
@@ -43,7 +43,7 @@ public class TestGetImageServlet {
     KerberosName.setRules("RULE:[1:$1]\nRULE:[2:$1]");
 
     // Set up generic HA configs.
-    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, "ns1");
+    conf.set(DFSConfigKeys.DFS_NAMESERVICES, "ns1");
     conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX,
         "ns1"), "nn1,nn2");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestValidateConfigurationSettings.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestValidateConfigurationSettings.java
index 72c28769fb9..37e29679659 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestValidateConfigurationSettings.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestValidateConfigurationSettings.java
@@ -92,7 +92,7 @@ public class TestValidateConfigurationSettings {
     conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY,
         "127.0.0.1:0");
 
-    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, "ns1");
+    conf.set(DFSConfigKeys.DFS_NAMESERVICES, "ns1");
 
     // Set a nameservice-specific configuration for name dir
     File dir = new File(MiniDFSCluster.getBaseDirectory(),
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java
index e7101c64d86..ce93851c9cc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java
@@ -185,7 +185,7 @@ public abstract class HATestUtil {
     conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,
         logicalName, nameNodeId2), address2);
 
-    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, logicalName);
+    conf.set(DFSConfigKeys.DFS_NAMESERVICES, logicalName);
     conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, logicalName),
         nameNodeId1 + "," + nameNodeId2);
     conf.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + logicalName,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAConfiguration.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAConfiguration.java
index 9cd6ab7089f..563ddff69c1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAConfiguration.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAConfiguration.java
@@ -57,7 +57,7 @@ public class TestHAConfiguration {
 
   private Configuration getHAConf(String nsId, String host1, String host2) {
     Configuration conf = new Configuration();
-    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, nsId);
+    conf.set(DFSConfigKeys.DFS_NAMESERVICES, nsId);
     conf.set(DFSUtil.addKeySuffixes(
         DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX, nsId),
         "nn1,nn2");
@@ -75,7 +75,7 @@ public class TestHAConfiguration {
   public void testGetOtherNNHttpAddress() {
     // Use non-local addresses to avoid host address matching
     Configuration conf = getHAConf("ns1", "1.2.3.1", "1.2.3.2");
-    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICE_ID, "ns1");
+    conf.set(DFSConfigKeys.DFS_NAMESERVICE_ID, "ns1");
 
     // This is done by the NN before the StandbyCheckpointer is created
     NameNode.initializeGenericKeys(conf, "ns1", "nn1");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestInitializeSharedEdits.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestInitializeSharedEdits.java
index b976a9c395c..ef7770385a8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestInitializeSharedEdits.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestInitializeSharedEdits.java
@@ -160,7 +160,7 @@ public class TestInitializeSharedEdits {
   @Test
   public void testInitializeSharedEditsConfiguresGenericConfKeys() {
     Configuration conf = new Configuration();
-    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, "ns1");
+    conf.set(DFSConfigKeys.DFS_NAMESERVICES, "ns1");
     conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX,
         "ns1"), "nn1,nn2");
     conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
index 4c4d0f261c0..05e5b2227ed 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
@@ -64,8 +64,8 @@ public class TestDFSHAAdmin {
 
   private HdfsConfiguration getHAConf() {
     HdfsConfiguration conf = new HdfsConfiguration();
-    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, NSID);
-    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICE_ID, NSID);
+    conf.set(DFSConfigKeys.DFS_NAMESERVICES, NSID);
+    conf.set(DFSConfigKeys.DFS_NAMESERVICE_ID, NSID);
     conf.set(DFSUtil.addKeySuffixes(
         DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX, NSID), "nn1,nn2");
     conf.set(DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY, "nn1");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java
index 93de1d2e5c4..d55a2583b99 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java
@@ -61,7 +61,7 @@ public class TestGetConf {
       }
       nsList.append(getNameServiceId(i));
     }
-    conf.set(DFS_FEDERATION_NAMESERVICES, nsList.toString());
+    conf.set(DFS_NAMESERVICES, nsList.toString());
   }
 
   /** Set a given key with value as address, for all the nameServiceIds.
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/Federation.apt.vm b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/Federation.apt.vm
index d191b5e4912..c7c877053a0 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/Federation.apt.vm
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/Federation.apt.vm
@@ -131,7 +131,7 @@ HDFS Federation
 ** Configuration:
 
   <>: Add the following parameters to your configuration:
-  <<<dfs.federation.nameservices>>>: Configure with list of comma separated
+  <<<dfs.nameservices>>>: Configure with list of comma separated
   NameServiceIDs. This will be used by Datanodes to determine all the
   Namenodes in the cluster.
 
@@ -164,7 +164,7 @@ HDFS Federation
 ----
   <property>
-    <name>dfs.federation.nameservices</name>
+    <name>dfs.nameservices</name>
     <value>ns1,ns2</value>
   </property>
 
@@ -233,8 +233,7 @@ HDFS Federation
   Follow the following steps:
 
-  * Add configuration parameter <<<dfs.federation.nameservices>>> to
-    the configuration.
+  * Add configuration parameter <<<dfs.nameservices>>> to the configuration.
 
   * Update the configuration with NameServiceID suffix. Configuration
     key names have changed post release 0.20. You must use new configuration
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/HDFSHighAvailability.apt.vm b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/HDFSHighAvailability.apt.vm
index 94fb8541446..fedb24e384a 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/HDFSHighAvailability.apt.vm
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/HDFSHighAvailability.apt.vm
@@ -147,12 +147,12 @@ HDFS High Availability
   <> configuration file.
 
   The order in which you set these configurations is unimportant, but the values
-  you choose for <<dfs.federation.nameservices>> and
+  you choose for <<dfs.nameservices>> and
   <<dfs.ha.namenodes.[nameservice ID]>> will determine the keys of those that
   follow. Thus, you should decide on these values before setting the rest of the
   configuration options.
 
-  * <<dfs.federation.nameservices>> - the logical name for this new nameservice
+  * <<dfs.nameservices>> - the logical name for this new nameservice
 
     Choose a logical name for this nameservice, for example "mycluster", and use
     this logical name for the value of this config option. The name you choose is
@@ -165,7 +165,7 @@ HDFS High Availability
 
 ----
 <property>
-  <name>dfs.federation.nameservices</name>
+  <name>dfs.nameservices</name>
   <value>mycluster</value>
 </property>
 ----
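
Note on backward compatibility: the two deprecate(...) entries added to HdfsConfiguration above are what keep configs that still use the old dfs.federation.* names working after this rename. The following is a minimal, self-contained sketch of the expected behavior; it is not part of the patch, the class name and the nameservice IDs "ns1" and "ns2" are made-up example values.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.DFSUtil;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class NameserviceKeyCompatCheck {
      public static void main(String[] args) {
        // Loading the HdfsConfiguration class registers the deprecated-key
        // mappings (static addDeprecatedKeys()), which now include
        // dfs.federation.nameservices -> dfs.nameservices and
        // dfs.federation.nameservice.id -> dfs.nameservice.id.
        Configuration conf = new HdfsConfiguration();

        // An old-style hdfs-site.xml may still set the deprecated key.
        conf.set("dfs.federation.nameservices", "ns1,ns2");

        // The value should be visible under the new key as well.
        System.out.println(conf.get(DFSConfigKeys.DFS_NAMESERVICES)); // expected: ns1,ns2
        System.out.println(DFSUtil.getNameServiceIds(conf));          // expected: [ns1, ns2]
      }
    }

Reading the deprecated key back should also continue to work (with a deprecation warning logged), so existing hdfs-site.xml files can be moved to dfs.nameservices and dfs.nameservice.id gradually.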