@@ -235,7 +235,7 @@
|
{nameserviceId} |
{namenodeId} |
-{webAddress} |
+{webScheme}://{webAddress} |
{lastHeartbeat} |
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/FederationTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/FederationTestUtils.java
index fd5b23ba85d..31c1bffe1ef 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/FederationTestUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/FederationTestUtils.java
@@ -140,7 +140,8 @@ public final class FederationTestUtils {
Random rand = new Random();
NamenodeStatusReport report = new NamenodeStatusReport(ns, nn,
"localhost:" + rand.nextInt(10000), "localhost:" + rand.nextInt(10000),
- "localhost:" + rand.nextInt(10000), "testwebaddress-" + ns + nn);
+ "localhost:" + rand.nextInt(10000), "http",
+ "testwebaddress-" + ns + nn);
if (state == null) {
// Unavailable, no additional info
return report;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MiniRouterDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MiniRouterDFSCluster.java
index a770e36fc4d..f9bc6fa4e7b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MiniRouterDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MiniRouterDFSCluster.java
@@ -859,7 +859,7 @@ public class MiniRouterDFSCluster {
NamenodeStatusReport report = new NamenodeStatusReport(
nn.nameserviceId, nn.namenodeId,
nn.getRpcAddress(), nn.getServiceAddress(),
- nn.getLifelineAddress(), nn.getWebAddress());
+ nn.getLifelineAddress(), "http", nn.getWebAddress());
FSImage fsImage = nn.namenode.getNamesystem().getFSImage();
NamespaceInfo nsInfo = fsImage.getStorage().getNamespaceInfo();
report.setNamespaceInfo(nsInfo);
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockNamenode.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockNamenode.java
index bfa56a2b5d2..8b5fb5498a0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockNamenode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockNamenode.java
@@ -539,8 +539,10 @@ public class MockNamenode {
String nsId = nn.getNameserviceId();
String rpcAddress = "localhost:" + nn.getRPCPort();
String httpAddress = "localhost:" + nn.getHTTPPort();
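+ // The mock Namenodes expose a plain HTTP endpoint, so register the "http" scheme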
+ String scheme = "http";
NamenodeStatusReport report = new NamenodeStatusReport(
- nsId, null, rpcAddress, rpcAddress, rpcAddress, httpAddress);
+ nsId, null, rpcAddress, rpcAddress,
+ rpcAddress, scheme, httpAddress);
if (unavailableSubclusters.contains(nsId)) {
LOG.info("Register {} as UNAVAILABLE", nsId);
report.setRegistrationValid(false);
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
index 752e6bc50b1..131dd74b8e0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
@@ -155,12 +155,14 @@ public class MockResolver
return Collections.unmodifiableList(new ArrayList<>(namenodes));
}
+ @SuppressWarnings("checkstyle:ParameterNumber")
private static class MockNamenodeContext
implements FederationNamenodeContext {
private String namenodeId;
private String nameserviceId;
+ private String webScheme;
private String webAddress;
private String rpcAddress;
private String serviceAddress;
@@ -170,11 +172,12 @@ public class MockResolver
private long dateModified;
MockNamenodeContext(
- String rpc, String service, String lifeline, String web,
+ String rpc, String service, String lifeline, String scheme, String web,
String ns, String nn, FederationNamenodeServiceState state) {
this.rpcAddress = rpc;
this.serviceAddress = service;
this.lifelineAddress = lifeline;
+ this.webScheme = scheme;
this.webAddress = web;
this.namenodeId = nn;
this.nameserviceId = ns;
@@ -202,6 +205,11 @@ public class MockResolver
return lifelineAddress;
}
+ @Override
+ public String getWebScheme() {
+ return webScheme;
+ }
+
@Override
public String getWebAddress() {
return webAddress;
@@ -242,8 +250,9 @@ public class MockResolver
MockNamenodeContext context = new MockNamenodeContext(
report.getRpcAddress(), report.getServiceAddress(),
- report.getLifelineAddress(), report.getWebAddress(),
- report.getNameserviceId(), report.getNamenodeId(), report.getState());
+ report.getLifelineAddress(), report.getWebScheme(),
+ report.getWebAddress(), report.getNameserviceId(),
+ report.getNamenodeId(), report.getState());
String nsId = report.getNameserviceId();
String bpId = report.getBlockPoolId();
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/metrics/TestMetricsBase.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/metrics/TestMetricsBase.java
index 429695a9a0f..4759d05f820 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/metrics/TestMetricsBase.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/metrics/TestMetricsBase.java
@@ -250,7 +250,8 @@ public class TestMetricsBase {
MembershipState record =
MembershipState.newInstance(routerId, ns, nn, "testcluster",
"testblock-" + ns, "testrpc-" + ns + nn, "testservice-" + ns + nn,
- "testlifeline-" + ns + nn, "testweb-" + ns + nn, state, false);
+ "testlifeline-" + ns + nn, "http", "testweb-" + ns + nn,
+ state, false);
NamenodeHeartbeatRequest request =
NamenodeHeartbeatRequest.newInstance(record);
NamenodeHeartbeatResponse response =
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/metrics/TestRBFMetrics.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/metrics/TestRBFMetrics.java
index ad56c327414..e1d1d8ec28c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/metrics/TestRBFMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/metrics/TestRBFMetrics.java
@@ -156,6 +156,7 @@ public class TestRBFMetrics extends TestMetricsBase {
stats.getNumOfEnteringMaintenanceDataNodes());
assertEquals(json.getLong("numOfBlocks"), stats.getNumOfBlocks());
assertEquals(json.getString("rpcAddress"), mockEntry.getRpcAddress());
+ assertEquals(json.getString("webScheme"), mockEntry.getWebScheme());
assertEquals(json.getString("webAddress"), mockEntry.getWebAddress());
nnsFound++;
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/order/TestLocalResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/order/TestLocalResolver.java
index 795e3ff64b3..08e75b2d309 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/order/TestLocalResolver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/order/TestLocalResolver.java
@@ -127,7 +127,7 @@ public class TestLocalResolver {
private MembershipState newMembershipState(String addr, String nsId) {
return MembershipState.newInstance(
"routerId", nsId, "nn0", "cluster0", "blockPool0",
- addr + ":8001", addr + ":8002", addr + ":8003", addr + ":8004",
+ addr + ":8001", addr + ":8002", addr + ":8003", "http", addr + ":8004",
FederationNamenodeServiceState.ACTIVE, false);
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeWebScheme.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeWebScheme.java
new file mode 100644
index 00000000000..ab507aaf9ec
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeWebScheme.java
@@ -0,0 +1,205 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.router;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.server.federation.MockNamenode;
+import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
+import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver;
+import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeContext;
+import org.apache.hadoop.hdfs.server.federation.resolver.FileSubclusterResolver;
+import org.apache.hadoop.hdfs.server.federation.resolver.MembershipNamenodeResolver;
+import org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver;
+import org.apache.hadoop.http.HttpConfig;
+import org.apache.hadoop.util.StringUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import static java.util.Arrays.asList;
+import static org.apache.hadoop.hdfs.server.federation.store.FederationStateStoreTestUtils.getStateStoreConfiguration;
+import static org.junit.Assert.assertEquals;
+
+/**
+ * Test the scheme of the HTTP addresses of the Namenodes displayed in the
+ * Router. This behavior is controlled by {@link DFSConfigKeys#DFS_HTTP_POLICY_KEY}.
+ */
+public class TestRouterNamenodeWebScheme {
+
+ private static final Logger LOG =
+ LoggerFactory.getLogger(TestRouterNamenodeWebScheme.class);
+
+ /** Router for the test. */
+ private Router router;
+ /** Namenodes in the cluster. */
+ private Map<String, Map<String, MockNamenode>> nns = new HashMap<>();
+ /** Nameservices in the federated cluster. */
+ private List<String> nsIds = asList("ns0", "ns1");
+
+ @Before
+ public void setup() throws Exception {
+ LOG.info("Initialize the Mock Namenodes to monitor");
+ for (String nsId : nsIds) {
+ nns.put(nsId, new HashMap<>());
+ for (String nnId : asList("nn0", "nn1")) {
+ nns.get(nsId).put(nnId, new MockNamenode(nsId));
+ }
+ }
+
+ LOG.info("Set nn0 to active for all nameservices");
+ for (Map<String, MockNamenode> nnNS : nns.values()) {
+ nnNS.get("nn0").transitionToActive();
+ nnNS.get("nn1").transitionToStandby();
+ }
+ }
+
+ @After
+ public void cleanup() throws Exception {
+ for (Map<String, MockNamenode> nnNS : nns.values()) {
+ for (MockNamenode nn : nnNS.values()) {
+ nn.stop();
+ }
+ }
+ nns.clear();
+
+ if (router != null) {
+ router.stop();
+ }
+ }
+
+ /**
+ * Get the configuration of the cluster which contains all the Namenodes and
+ * their addresses.
+ * @return Configuration containing all the Namenodes.
+ */
+ private Configuration getNamenodesConfig() {
+ final Configuration conf = new HdfsConfiguration();
+ conf.set(DFSConfigKeys.DFS_NAMESERVICES,
+ StringUtils.join(",", nns.keySet()));
+ for (String nsId : nns.keySet()) {
+ Set<String> nnIds = nns.get(nsId).keySet();
+
+ StringBuilder sb = new StringBuilder();
+ sb.append(DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX);
+ sb.append(".").append(nsId);
+ conf.set(sb.toString(), StringUtils.join(",", nnIds));
+
+ for (String nnId : nnIds) {
+ final MockNamenode nn = nns.get(nsId).get(nnId);
+
+ sb = new StringBuilder();
+ sb.append(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
+ sb.append(".").append(nsId);
+ sb.append(".").append(nnId);
+ conf.set(sb.toString(), "localhost:" + nn.getRPCPort());
+
+ sb = new StringBuilder();
+ sb.append(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
+ sb.append(".").append(nsId);
+ sb.append(".").append(nnId);
+ conf.set(sb.toString(), "localhost:" + nn.getHTTPPort());
+ }
+ }
+ return conf;
+ }
+
+ @Test
+ public void testWebSchemeHttp() throws IOException {
+ testWebScheme(HttpConfig.Policy.HTTP_ONLY, "http");
+ }
+
+ @Test
+ public void testWebSchemeHttps() throws IOException {
+ testWebScheme(HttpConfig.Policy.HTTPS_ONLY, "https");
+ }
+
+ private void testWebScheme(HttpConfig.Policy httpPolicy,
+ String expectedScheme) throws IOException {
+ Configuration nsConf = getNamenodesConfig();
+
+ // Setup the State Store for the Router to use
+ Configuration stateStoreConfig = getStateStoreConfiguration();
+ stateStoreConfig.setClass(
+ RBFConfigKeys.FEDERATION_NAMENODE_RESOLVER_CLIENT_CLASS,
+ MembershipNamenodeResolver.class, ActiveNamenodeResolver.class);
+ stateStoreConfig.setClass(
+ RBFConfigKeys.FEDERATION_FILE_RESOLVER_CLIENT_CLASS,
+ MountTableResolver.class, FileSubclusterResolver.class);
+
+ Configuration routerConf = new RouterConfigBuilder(nsConf)
+ .enableLocalHeartbeat(true)
+ .heartbeat()
+ .stateStore()
+ .rpc()
+ .build();
+
+ // set "dfs.http.policy" to "HTTPS_ONLY"
+ routerConf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, httpPolicy.name());
+
+ // Specify namenodes (ns1.nn0,ns1.nn1) to monitor
+ routerConf.set(RBFConfigKeys.DFS_ROUTER_RPC_ADDRESS_KEY, "0.0.0.0:0");
+ routerConf.set(RBFConfigKeys.DFS_ROUTER_MONITOR_NAMENODE,
+ "ns1.nn0,ns1.nn1");
+ routerConf.addResource(stateStoreConfig);
+
+ // Specify local node (ns0.nn1) to monitor
+ routerConf.set(DFSConfigKeys.DFS_NAMESERVICE_ID, "ns0");
+ routerConf.set(DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY, "nn1");
+
+ // Start the Router with the namenodes to monitor
+ router = new Router();
+ router.init(routerConf);
+ router.start();
+
+ // Manually trigger the heartbeat and update the values
+ Collection<NamenodeHeartbeatService> heartbeatServices =
+ router.getNamenodeHeartbeatServices();
+ for (NamenodeHeartbeatService service : heartbeatServices) {
+ service.periodicInvoke();
+ }
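+ // Reload the resolver cache so the registrations written by the heartbeats are visible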
+ MembershipNamenodeResolver resolver =
+ (MembershipNamenodeResolver) router.getNamenodeResolver();
+ resolver.loadCache(true);
+
+ // Check that the reported web scheme matches the expected scheme
+ final List<FederationNamenodeContext> namespaceInfo = new ArrayList<>();
+ for (String nsId : nns.keySet()) {
+ List<? extends FederationNamenodeContext> nnReports =
+ resolver.getNamenodesForNameserviceId(nsId);
+ namespaceInfo.addAll(nnReports);
+ }
+ for (FederationNamenodeContext nnInfo : namespaceInfo) {
+ assertEquals("Unexpected scheme for Policy: " + httpPolicy.name(),
+ expectedScheme, nnInfo.getWebScheme());
+ }
+ }
+}
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCClientRetries.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCClientRetries.java
index 8772e2fe22e..cea5212965c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCClientRetries.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCClientRetries.java
@@ -173,7 +173,7 @@ public class TestRouterRPCClientRetries {
NamenodeStatusReport report = new NamenodeStatusReport(ns0,
nnInfo.getNamenodeId(), nnInfo.getRpcAddress(),
nnInfo.getServiceAddress(), nnInfo.getLifelineAddress(),
- nnInfo.getWebAddress());
+ nnInfo.getWebScheme(), nnInfo.getWebAddress());
report.setRegistrationValid(false);
assertTrue(resolver.registerNamenode(report));
resolver.loadCache(true);
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/FederationStateStoreTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/FederationStateStoreTestUtils.java
index 98f9ebcf71a..0fad76de050 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/FederationStateStoreTestUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/FederationStateStoreTestUtils.java
@@ -260,7 +260,8 @@ public final class FederationStateStoreTestUtils {
FederationNamenodeServiceState state) throws IOException {
MembershipState entry = MembershipState.newInstance(
"routerId", nameserviceId, namenodeId, "clusterId", "test",
- "0.0.0.0:0", "0.0.0.0:0", "0.0.0.0:0", "0.0.0.0:0", state, false);
+ "0.0.0.0:0", "0.0.0.0:0", "0.0.0.0:0", "http", "0.0.0.0:0",
+ state, false);
MembershipStats stats = MembershipStats.newInstance();
stats.setNumOfActiveDatanodes(100);
stats.setNumOfDeadDatanodes(10);
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/TestStateStoreMembershipState.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/TestStateStoreMembershipState.java
index 9ec9e038531..857cc2362d7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/TestStateStoreMembershipState.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/TestStateStoreMembershipState.java
@@ -170,7 +170,7 @@ public class TestStateStoreMembershipState extends TestStateStoreBase {
router, ns,
nn, "testcluster", "testblock-" + ns, "testrpc-"+ ns + nn,
"testservice-"+ ns + nn, "testlifeline-"+ ns + nn,
- "testweb-" + ns + nn, state, false);
+ "http", "testweb-" + ns + nn, state, false);
return record;
}
@@ -238,34 +238,35 @@ public class TestStateStoreMembershipState extends TestStateStoreBase {
String lifelineAddress = "testlifelineaddress";
String blockPoolId = "testblockpool";
String clusterId = "testcluster";
+ String webScheme = "http";
String webAddress = "testwebaddress";
boolean safemode = false;
// Active
MembershipState record = MembershipState.newInstance(
ROUTERS[0], ns, nn, clusterId, blockPoolId,
- rpcAddress, serviceAddress, lifelineAddress, webAddress,
- FederationNamenodeServiceState.ACTIVE, safemode);
+ rpcAddress, serviceAddress, lifelineAddress, webScheme,
+ webAddress, FederationNamenodeServiceState.ACTIVE, safemode);
registrationList.add(record);
// Expired
record = MembershipState.newInstance(
ROUTERS[1], ns, nn, clusterId, blockPoolId,
- rpcAddress, serviceAddress, lifelineAddress, webAddress,
- FederationNamenodeServiceState.EXPIRED, safemode);
+ rpcAddress, serviceAddress, lifelineAddress, webScheme,
+ webAddress, FederationNamenodeServiceState.EXPIRED, safemode);
registrationList.add(record);
// Expired
record = MembershipState.newInstance(
ROUTERS[2], ns, nn, clusterId, blockPoolId,
- rpcAddress, serviceAddress, lifelineAddress, webAddress,
+ rpcAddress, serviceAddress, lifelineAddress, webScheme, webAddress,
FederationNamenodeServiceState.EXPIRED, safemode);
registrationList.add(record);
// Expired
record = MembershipState.newInstance(
ROUTERS[3], ns, nn, clusterId, blockPoolId,
- rpcAddress, serviceAddress, lifelineAddress, webAddress,
+ rpcAddress, serviceAddress, lifelineAddress, webScheme, webAddress,
FederationNamenodeServiceState.EXPIRED, safemode);
registrationList.add(record);
registerAndLoadRegistrations(registrationList);
@@ -293,6 +294,7 @@ public class TestStateStoreMembershipState extends TestStateStoreBase {
String lifelineAddress = "testlifelineaddress";
String blockPoolId = "testblockpool";
String clusterId = "testcluster";
+ String webScheme = "http";
String webAddress = "testwebaddress";
boolean safemode = false;
long startingTime = Time.now();
@@ -300,7 +302,7 @@ public class TestStateStoreMembershipState extends TestStateStoreBase {
// Expired
MembershipState record = MembershipState.newInstance(
ROUTERS[0], ns, nn, clusterId, blockPoolId,
- rpcAddress, webAddress, lifelineAddress, webAddress,
+ rpcAddress, webAddress, lifelineAddress, webScheme, webAddress,
FederationNamenodeServiceState.EXPIRED, safemode);
record.setDateModified(startingTime - 10000);
registrationList.add(record);
@@ -308,7 +310,7 @@ public class TestStateStoreMembershipState extends TestStateStoreBase {
// Expired
record = MembershipState.newInstance(
ROUTERS[1], ns, nn, clusterId, blockPoolId,
- rpcAddress, serviceAddress, lifelineAddress, webAddress,
+ rpcAddress, serviceAddress, lifelineAddress, webScheme, webAddress,
FederationNamenodeServiceState.EXPIRED, safemode);
record.setDateModified(startingTime);
registrationList.add(record);
@@ -316,7 +318,7 @@ public class TestStateStoreMembershipState extends TestStateStoreBase {
// Expired
record = MembershipState.newInstance(
ROUTERS[2], ns, nn, clusterId, blockPoolId,
- rpcAddress, serviceAddress, lifelineAddress, webAddress,
+ rpcAddress, serviceAddress, lifelineAddress, webScheme, webAddress,
FederationNamenodeServiceState.EXPIRED, safemode);
record.setDateModified(startingTime);
registrationList.add(record);
@@ -324,7 +326,7 @@ public class TestStateStoreMembershipState extends TestStateStoreBase {
// Expired
record = MembershipState.newInstance(
ROUTERS[3], ns, nn, clusterId, blockPoolId,
- rpcAddress, serviceAddress, lifelineAddress, webAddress,
+ rpcAddress, serviceAddress, lifelineAddress, webScheme, webAddress,
FederationNamenodeServiceState.EXPIRED, safemode);
record.setDateModified(startingTime);
registrationList.add(record);
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreDriverBase.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreDriverBase.java
index b3a9fb5adc7..fe1b9a5bfa0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreDriverBase.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreDriverBase.java
@@ -127,8 +127,9 @@ public class TestStateStoreDriverBase {
generateRandomString(), generateRandomString(),
generateRandomString(), generateRandomString(),
generateRandomString(), generateRandomString(),
- generateRandomString(), generateRandomString(),
- generateRandomEnum(FederationNamenodeServiceState.class), false);
+ generateRandomString(), "http", generateRandomString(),
+ generateRandomEnum(FederationNamenodeServiceState.class),
+ false);
} else if (recordClass == MountTable.class) {
String src = "/" + generateRandomString();
Map<String, String> destMap = Collections.singletonMap(
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/records/TestMembershipState.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/records/TestMembershipState.java
index df41f461e6d..02a42c4703a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/records/TestMembershipState.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/records/TestMembershipState.java
@@ -40,6 +40,7 @@ public class TestMembershipState {
private static final String LIFELINE_ADDRESS = "lifelineaddress";
private static final String WEB_ADDRESS = "webaddress";
private static final boolean SAFE_MODE = false;
+ private static final String SCHEME = "http";
private static final long DATE_CREATED = 100;
private static final long DATE_MODIFIED = 200;
@@ -68,7 +69,7 @@ public class TestMembershipState {
MembershipState record = MembershipState.newInstance(
ROUTER, NAMESERVICE, NAMENODE, CLUSTER_ID,
BLOCKPOOL_ID, RPC_ADDRESS, SERVICE_ADDRESS, LIFELINE_ADDRESS,
- WEB_ADDRESS, STATE, SAFE_MODE);
+ SCHEME, WEB_ADDRESS, STATE, SAFE_MODE);
record.setDateCreated(DATE_CREATED);
record.setDateModified(DATE_MODIFIED);
@@ -98,6 +99,7 @@ public class TestMembershipState {
assertEquals(CLUSTER_ID, record.getClusterId());
assertEquals(BLOCKPOOL_ID, record.getBlockPoolId());
assertEquals(RPC_ADDRESS, record.getRpcAddress());
+ assertEquals(SCHEME, record.getWebScheme());
assertEquals(WEB_ADDRESS, record.getWebAddress());
assertEquals(STATE, record.getState());
assertEquals(SAFE_MODE, record.getIsSafeMode());