HDFS-13215. RBF: Move Router to its own module.

weiy 2018-03-19 23:23:16 -07:00
parent 30d9a5db29
commit 4aa34324b2
236 changed files with 1300 additions and 803 deletions

View File

@@ -143,6 +143,7 @@
 run cp -r $ROOT/hadoop-hdfs-project/hadoop-hdfs-nfs/target/hadoop-hdfs-nfs-${project.version}/* .
 run cp -r $ROOT/hadoop-hdfs-project/hadoop-hdfs-client/target/hadoop-hdfs-client-${project.version}/* .
 run cp -r $ROOT/hadoop-hdfs-project/hadoop-hdfs-native-client/target/hadoop-hdfs-native-client-${project.version}/* .
+run cp -r $ROOT/hadoop-hdfs-project/hadoop-hdfs-rbf/target/hadoop-hdfs-rbf-${project.version}/* .
 run cp -r $ROOT/hadoop-yarn-project/target/hadoop-yarn-project-${project.version}/* .
 run cp -r $ROOT/hadoop-mapreduce-project/target/hadoop-mapreduce-${project.version}/* .
 run cp -r $ROOT/hadoop-tools/hadoop-tools-dist/target/hadoop-tools-dist-${project.version}/* .

View File

@@ -0,0 +1,22 @@
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<FindBugsFilter>
<Match>
<Package name="org.apache.hadoop.hdfs.federation.protocol.proto" />
</Match>
</FindBugsFilter>

View File

@@ -0,0 +1,241 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-project-dist</artifactId>
<version>2.10.0-SNAPSHOT</version>
<relativePath>../../hadoop-project-dist</relativePath>
</parent>
<artifactId>hadoop-hdfs-rbf</artifactId>
<version>2.10.0-SNAPSHOT</version>
<description>Apache Hadoop HDFS-RBF</description>
<name>Apache Hadoop HDFS-RBF</name>
<packaging>jar</packaging>
<properties>
<hadoop.component>hdfs</hadoop.component>
</properties>
<dependencies>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<scope>provided</scope>
<exclusions>
<exclusion>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
</exclusion>
<exclusion>
<groupId>log4j</groupId>
<artifactId>log4j</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs-client</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.mortbay.jetty</groupId>
<artifactId>jetty</artifactId>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>org.mortbay.jetty</groupId>
<artifactId>jetty-util</artifactId>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<scope>test</scope>
<type>test-jar</type>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-annotations</artifactId>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-databind</artifactId>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs</artifactId>
<scope>test</scope>
<type>test-jar</type>
</dependency>
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-test</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.mockito</groupId>
<artifactId>mockito-all</artifactId>
<scope>test</scope>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-antrun-plugin</artifactId>
<configuration>
<skipTests>false</skipTests>
</configuration>
<executions>
<execution>
<id>create-web-xmls</id>
<phase>compile</phase>
<goals>
<goal>run</goal>
</goals>
<configuration>
<target>
<copy file="${basedir}/src/main/webapps/proto-web.xml"
tofile="${project.build.directory}/webapps/router/WEB-INF/web.xml"
filtering="true"/>
<copy toDir="${project.build.directory}/webapps">
<fileset dir="${basedir}/src/main/webapps">
<exclude name="**/proto-web.xml"/>
</fileset>
</copy>
<replace dir="${project.build.directory}/webapps" value="${release-year}">
<include name="**/*.html"/>
<replacetoken>{release-year-token}</replacetoken>
</replace>
</target>
</configuration>
</execution>
<execution>
<id>create-log-dir</id>
<phase>process-test-resources</phase>
<goals>
<goal>run</goal>
</goals>
<configuration>
<target>
<copy todir="${project.build.directory}/test-classes/webapps">
<fileset dir="${project.build.directory}/webapps">
<exclude name="proto-*-web.xml"/>
<exclude name="**/proto-web.xml"/>
</fileset>
</copy>
</target>
</configuration>
</execution>
<execution>
<phase>pre-site</phase>
<goals>
<goal>run</goal>
</goals>
<configuration>
<tasks>
<copy file="src/main/resources/hdfs-rbf-default.xml" todir="src/site/resources"/>
</tasks>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-maven-plugins</artifactId>
<executions>
<execution>
<id>compile-protoc</id>
<goals>
<goal>protoc</goal>
</goals>
<configuration>
<protocVersion>${protobuf.version}</protocVersion>
<protocCommand>${protoc.path}</protocCommand>
<imports>
<param>${basedir}/../hadoop-hdfs-client/src/main/proto</param>
<param>${basedir}/../../hadoop-common-project/hadoop-common/src/main/proto</param>
<param>${basedir}/src/main/proto</param>
</imports>
<source>
<directory>${basedir}/src/main/proto</directory>
<includes>
<include>FederationProtocol.proto</include>
<include>RouterProtocol.proto</include>
</includes>
</source>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-javadoc-plugin</artifactId>
</plugin>
<plugin>
<groupId>org.apache.rat</groupId>
<artifactId>apache-rat-plugin</artifactId>
<configuration>
<excludes>
<exclude>.gitattributes</exclude>
<exclude>.idea/**</exclude>
<exclude>dev-support/findbugsExcludeFile.xml</exclude>
<exclude>src/main/webapps/router/robots.txt</exclude>
<exclude>src/site/resources/images/*</exclude>
</excludes>
</configuration>
</plugin>
<plugin>
<artifactId>maven-clean-plugin</artifactId>
<configuration>
<filesets>
<fileset>
<directory>src/site/resources</directory>
<includes>
<include>hdfs-rbf-default.xml</include>
</includes>
<followSymlinks>false</followSymlinks>
</fileset>
</filesets>
</configuration>
</plugin>
</plugins>
</build>
</project>
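
The pre-site execution above copies hdfs-rbf-default.xml, the default configuration file that now lives in this module, into the site resources (and the clean plugin removes that copy again). As a minimal sketch of loading those module defaults into a Configuration, assuming only the standard Hadoop Configuration API and the resource name taken from the pom, and not itself part of this commit:

    import org.apache.hadoop.conf.Configuration;

    // Load the RBF defaults shipped with the hadoop-hdfs-rbf jar.
    Configuration conf = new Configuration();
    conf.addResource("hdfs-rbf-default.xml");
    // Keys under dfs.federation.router.* defined there are now visible.
    int handlers = conf.getInt("dfs.federation.router.handler.count", 10);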

View File

@@ -0,0 +1,18 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocolPB;

View File

@@ -17,9 +17,9 @@
  */
 package org.apache.hadoop.hdfs.server.federation.resolver;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_DEFAULT_NAMESERVICE;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.FEDERATION_MOUNT_TABLE_MAX_CACHE_SIZE;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.FEDERATION_MOUNT_TABLE_MAX_CACHE_SIZE_DEFAULT;
+import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_DEFAULT_NAMESERVICE;
+import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.FEDERATION_MOUNT_TABLE_MAX_CACHE_SIZE;
+import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.FEDERATION_MOUNT_TABLE_MAX_CACHE_SIZE_DEFAULT;
 import static org.apache.hadoop.hdfs.server.federation.router.FederationUtil.isParentEntry;
 import java.io.IOException;

View File

@@ -33,10 +33,10 @@ import java.util.Map.Entry;
 import java.util.concurrent.TimeUnit;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.server.federation.resolver.PathLocation;
+import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys;
 import org.apache.hadoop.hdfs.server.federation.router.Router;
 import org.apache.hadoop.hdfs.server.federation.router.RouterRpcServer;
 import org.apache.hadoop.hdfs.server.federation.store.MembershipStore;
@@ -62,7 +62,7 @@ public class LocalResolver implements OrderedResolver {
 /** Configuration key to set the minimum time to update the local cache.*/
 public static final String MIN_UPDATE_PERIOD_KEY =
-DFSConfigKeys.FEDERATION_ROUTER_PREFIX + "local-resolver.update-period";
+RBFConfigKeys.FEDERATION_ROUTER_PREFIX + "local-resolver.update-period";
 /** 10 seconds by default. */
 private static final long MIN_UPDATE_PERIOD_DEFAULT =
 TimeUnit.SECONDS.toMillis(10);

View File

@@ -34,7 +34,6 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Time;
 import org.mortbay.util.ajax.JSON;
@@ -100,8 +99,8 @@ public class ConnectionManager {
 // Configure minimum and maximum connection pools
 this.maxSize = this.conf.getInt(
-DFSConfigKeys.DFS_ROUTER_NAMENODE_CONNECTION_POOL_SIZE,
-DFSConfigKeys.DFS_ROUTER_NAMENODE_CONNECTION_POOL_SIZE_DEFAULT);
+RBFConfigKeys.DFS_ROUTER_NAMENODE_CONNECTION_POOL_SIZE,
+RBFConfigKeys.DFS_ROUTER_NAMENODE_CONNECTION_POOL_SIZE_DEFAULT);
 // Map with the connections indexed by UGI and Namenode
 this.pools = new HashMap<>();
@@ -112,13 +111,13 @@
 // Cleanup periods
 this.poolCleanupPeriodMs = this.conf.getLong(
-DFSConfigKeys.DFS_ROUTER_NAMENODE_CONNECTION_POOL_CLEAN,
-DFSConfigKeys.DFS_ROUTER_NAMENODE_CONNECTION_POOL_CLEAN_DEFAULT);
+RBFConfigKeys.DFS_ROUTER_NAMENODE_CONNECTION_POOL_CLEAN,
+RBFConfigKeys.DFS_ROUTER_NAMENODE_CONNECTION_POOL_CLEAN_DEFAULT);
 LOG.info("Cleaning connection pools every {} seconds",
 TimeUnit.MILLISECONDS.toSeconds(this.poolCleanupPeriodMs));
 this.connectionCleanupPeriodMs = this.conf.getLong(
-DFSConfigKeys.DFS_ROUTER_NAMENODE_CONNECTION_CLEAN_MS,
-DFSConfigKeys.DFS_ROUTER_NAMENODE_CONNECTION_CLEAN_MS_DEFAULT);
+RBFConfigKeys.DFS_ROUTER_NAMENODE_CONNECTION_CLEAN_MS,
+RBFConfigKeys.DFS_ROUTER_NAMENODE_CONNECTION_CLEAN_MS_DEFAULT);
 LOG.info("Cleaning connections every {} seconds",
 TimeUnit.MILLISECONDS.toSeconds(this.connectionCleanupPeriodMs));
 }
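
The pool sizing and cleanup periods above are ordinary Configuration lookups, so they can be tuned per Router. A hedged sketch, with key names from the new RBFConfigKeys and deliberately arbitrary example values:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys;

    Configuration conf = new Configuration();
    // Cap each connection pool (pools are keyed by user and Namenode above).
    conf.setInt(RBFConfigKeys.DFS_ROUTER_NAMENODE_CONNECTION_POOL_SIZE, 128);
    // Scan pools for cleanup every 30 seconds instead of the 1 minute default.
    conf.setLong(RBFConfigKeys.DFS_ROUTER_NAMENODE_CONNECTION_POOL_CLEAN, 30000L);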

View File

@@ -27,7 +27,6 @@ import java.net.URLConnection;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver;
 import org.apache.hadoop.hdfs.server.federation.resolver.FileSubclusterResolver;
 import org.apache.hadoop.hdfs.server.federation.store.StateStoreService;
@@ -166,8 +165,8 @@ public final class FederationUtil {
 public static FileSubclusterResolver newFileSubclusterResolver(
 Configuration conf, Router router) {
 Class<? extends FileSubclusterResolver> clazz = conf.getClass(
-DFSConfigKeys.FEDERATION_FILE_RESOLVER_CLIENT_CLASS,
-DFSConfigKeys.FEDERATION_FILE_RESOLVER_CLIENT_CLASS_DEFAULT,
+RBFConfigKeys.FEDERATION_FILE_RESOLVER_CLIENT_CLASS,
+RBFConfigKeys.FEDERATION_FILE_RESOLVER_CLIENT_CLASS_DEFAULT,
 FileSubclusterResolver.class);
 return newInstance(conf, router, Router.class, clazz);
 }
@@ -182,8 +181,8 @@
 public static ActiveNamenodeResolver newActiveNamenodeResolver(
 Configuration conf, StateStoreService stateStore) {
 Class<? extends ActiveNamenodeResolver> clazz = conf.getClass(
-DFSConfigKeys.FEDERATION_NAMENODE_RESOLVER_CLIENT_CLASS,
-DFSConfigKeys.FEDERATION_NAMENODE_RESOLVER_CLIENT_CLASS_DEFAULT,
+RBFConfigKeys.FEDERATION_NAMENODE_RESOLVER_CLIENT_CLASS,
+RBFConfigKeys.FEDERATION_NAMENODE_RESOLVER_CLIENT_CLASS_DEFAULT,
 ActiveNamenodeResolver.class);
 return newInstance(conf, stateStore, StateStoreService.class, clazz);
 }

View File

@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.federation.router;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_HEARTBEAT_INTERVAL_MS;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_HEARTBEAT_INTERVAL_MS_DEFAULT;
+import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_HEARTBEAT_INTERVAL_MS;
+import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_HEARTBEAT_INTERVAL_MS_DEFAULT;
 import java.io.IOException;
 import java.net.InetAddress;

View File

@@ -0,0 +1,229 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.federation.router;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.hdfs.server.federation.metrics.FederationRPCPerformanceMonitor;
import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver;
import org.apache.hadoop.hdfs.server.federation.resolver.FileSubclusterResolver;
import org.apache.hadoop.hdfs.server.federation.resolver.MembershipNamenodeResolver;
import org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver;
import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreDriver;
import org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreSerializerPBImpl;
import org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreZooKeeperImpl;
import java.util.concurrent.TimeUnit;
/**
* Config fields for router-based hdfs federation.
*/
@InterfaceAudience.Private
public class RBFConfigKeys extends CommonConfigurationKeysPublic {
// HDFS Router-based federation
public static final String FEDERATION_ROUTER_PREFIX =
"dfs.federation.router.";
public static final String DFS_ROUTER_DEFAULT_NAMESERVICE =
FEDERATION_ROUTER_PREFIX + "default.nameserviceId";
public static final String DFS_ROUTER_HANDLER_COUNT_KEY =
FEDERATION_ROUTER_PREFIX + "handler.count";
public static final int DFS_ROUTER_HANDLER_COUNT_DEFAULT = 10;
public static final String DFS_ROUTER_READER_QUEUE_SIZE_KEY =
FEDERATION_ROUTER_PREFIX + "reader.queue.size";
public static final int DFS_ROUTER_READER_QUEUE_SIZE_DEFAULT = 100;
public static final String DFS_ROUTER_READER_COUNT_KEY =
FEDERATION_ROUTER_PREFIX + "reader.count";
public static final int DFS_ROUTER_READER_COUNT_DEFAULT = 1;
public static final String DFS_ROUTER_HANDLER_QUEUE_SIZE_KEY =
FEDERATION_ROUTER_PREFIX + "handler.queue.size";
public static final int DFS_ROUTER_HANDLER_QUEUE_SIZE_DEFAULT = 100;
public static final String DFS_ROUTER_RPC_BIND_HOST_KEY =
FEDERATION_ROUTER_PREFIX + "rpc-bind-host";
public static final int DFS_ROUTER_RPC_PORT_DEFAULT = 8888;
public static final String DFS_ROUTER_RPC_ADDRESS_KEY =
FEDERATION_ROUTER_PREFIX + "rpc-address";
public static final String DFS_ROUTER_RPC_ADDRESS_DEFAULT =
"0.0.0.0:" + DFS_ROUTER_RPC_PORT_DEFAULT;
public static final String DFS_ROUTER_RPC_ENABLE =
FEDERATION_ROUTER_PREFIX + "rpc.enable";
public static final boolean DFS_ROUTER_RPC_ENABLE_DEFAULT = true;
public static final String DFS_ROUTER_METRICS_ENABLE =
FEDERATION_ROUTER_PREFIX + "metrics.enable";
public static final boolean DFS_ROUTER_METRICS_ENABLE_DEFAULT = true;
public static final String DFS_ROUTER_METRICS_CLASS =
FEDERATION_ROUTER_PREFIX + "metrics.class";
public static final Class<? extends RouterRpcMonitor>
DFS_ROUTER_METRICS_CLASS_DEFAULT =
FederationRPCPerformanceMonitor.class;
// HDFS Router heartbeat
public static final String DFS_ROUTER_HEARTBEAT_ENABLE =
FEDERATION_ROUTER_PREFIX + "heartbeat.enable";
public static final boolean DFS_ROUTER_HEARTBEAT_ENABLE_DEFAULT = true;
public static final String DFS_ROUTER_HEARTBEAT_INTERVAL_MS =
FEDERATION_ROUTER_PREFIX + "heartbeat.interval";
public static final long DFS_ROUTER_HEARTBEAT_INTERVAL_MS_DEFAULT =
TimeUnit.SECONDS.toMillis(5);
public static final String DFS_ROUTER_MONITOR_NAMENODE =
FEDERATION_ROUTER_PREFIX + "monitor.namenode";
public static final String DFS_ROUTER_MONITOR_LOCAL_NAMENODE =
FEDERATION_ROUTER_PREFIX + "monitor.localnamenode.enable";
public static final boolean DFS_ROUTER_MONITOR_LOCAL_NAMENODE_DEFAULT = true;
public static final String DFS_ROUTER_HEARTBEAT_STATE_INTERVAL_MS =
FEDERATION_ROUTER_PREFIX + "heartbeat-state.interval";
public static final long DFS_ROUTER_HEARTBEAT_STATE_INTERVAL_MS_DEFAULT =
TimeUnit.SECONDS.toMillis(5);
// HDFS Router NN client
public static final String DFS_ROUTER_NAMENODE_CONNECTION_POOL_SIZE =
FEDERATION_ROUTER_PREFIX + "connection.pool-size";
public static final int DFS_ROUTER_NAMENODE_CONNECTION_POOL_SIZE_DEFAULT =
64;
public static final String DFS_ROUTER_NAMENODE_CONNECTION_POOL_CLEAN =
FEDERATION_ROUTER_PREFIX + "connection.pool.clean.ms";
public static final long DFS_ROUTER_NAMENODE_CONNECTION_POOL_CLEAN_DEFAULT =
TimeUnit.MINUTES.toMillis(1);
public static final String DFS_ROUTER_NAMENODE_CONNECTION_CLEAN_MS =
FEDERATION_ROUTER_PREFIX + "connection.clean.ms";
public static final long DFS_ROUTER_NAMENODE_CONNECTION_CLEAN_MS_DEFAULT =
TimeUnit.SECONDS.toMillis(10);
// HDFS Router RPC client
public static final String DFS_ROUTER_CLIENT_THREADS_SIZE =
FEDERATION_ROUTER_PREFIX + "client.thread-size";
public static final int DFS_ROUTER_CLIENT_THREADS_SIZE_DEFAULT = 32;
public static final String DFS_ROUTER_CLIENT_MAX_ATTEMPTS =
FEDERATION_ROUTER_PREFIX + "client.retry.max.attempts";
public static final int DFS_ROUTER_CLIENT_MAX_ATTEMPTS_DEFAULT = 3;
// HDFS Router State Store connection
public static final String FEDERATION_FILE_RESOLVER_CLIENT_CLASS =
FEDERATION_ROUTER_PREFIX + "file.resolver.client.class";
public static final Class<? extends FileSubclusterResolver>
FEDERATION_FILE_RESOLVER_CLIENT_CLASS_DEFAULT =
MountTableResolver.class;
public static final String FEDERATION_NAMENODE_RESOLVER_CLIENT_CLASS =
FEDERATION_ROUTER_PREFIX + "namenode.resolver.client.class";
public static final Class<? extends ActiveNamenodeResolver>
FEDERATION_NAMENODE_RESOLVER_CLIENT_CLASS_DEFAULT =
MembershipNamenodeResolver.class;
// HDFS Router-based federation State Store
public static final String FEDERATION_STORE_PREFIX =
FEDERATION_ROUTER_PREFIX + "store.";
public static final String DFS_ROUTER_STORE_ENABLE =
FEDERATION_STORE_PREFIX + "enable";
public static final boolean DFS_ROUTER_STORE_ENABLE_DEFAULT = true;
public static final String FEDERATION_STORE_SERIALIZER_CLASS =
FEDERATION_STORE_PREFIX + "serializer";
public static final Class<StateStoreSerializerPBImpl>
FEDERATION_STORE_SERIALIZER_CLASS_DEFAULT =
StateStoreSerializerPBImpl.class;
public static final String FEDERATION_STORE_DRIVER_CLASS =
FEDERATION_STORE_PREFIX + "driver.class";
public static final Class<? extends StateStoreDriver>
FEDERATION_STORE_DRIVER_CLASS_DEFAULT = StateStoreZooKeeperImpl.class;
public static final String FEDERATION_STORE_CONNECTION_TEST_MS =
FEDERATION_STORE_PREFIX + "connection.test";
public static final long FEDERATION_STORE_CONNECTION_TEST_MS_DEFAULT =
TimeUnit.MINUTES.toMillis(1);
public static final String DFS_ROUTER_CACHE_TIME_TO_LIVE_MS =
FEDERATION_ROUTER_PREFIX + "cache.ttl";
public static final long DFS_ROUTER_CACHE_TIME_TO_LIVE_MS_DEFAULT =
TimeUnit.MINUTES.toMillis(1);
public static final String FEDERATION_STORE_MEMBERSHIP_EXPIRATION_MS =
FEDERATION_STORE_PREFIX + "membership.expiration";
public static final long FEDERATION_STORE_MEMBERSHIP_EXPIRATION_MS_DEFAULT =
TimeUnit.MINUTES.toMillis(5);
public static final String FEDERATION_STORE_ROUTER_EXPIRATION_MS =
FEDERATION_STORE_PREFIX + "router.expiration";
public static final long FEDERATION_STORE_ROUTER_EXPIRATION_MS_DEFAULT =
TimeUnit.MINUTES.toMillis(5);
// HDFS Router safe mode
public static final String DFS_ROUTER_SAFEMODE_ENABLE =
FEDERATION_ROUTER_PREFIX + "safemode.enable";
public static final boolean DFS_ROUTER_SAFEMODE_ENABLE_DEFAULT = true;
public static final String DFS_ROUTER_SAFEMODE_EXTENSION =
FEDERATION_ROUTER_PREFIX + "safemode.extension";
public static final long DFS_ROUTER_SAFEMODE_EXTENSION_DEFAULT =
TimeUnit.SECONDS.toMillis(30);
public static final String DFS_ROUTER_SAFEMODE_EXPIRATION =
FEDERATION_ROUTER_PREFIX + "safemode.expiration";
public static final long DFS_ROUTER_SAFEMODE_EXPIRATION_DEFAULT =
3 * DFS_ROUTER_CACHE_TIME_TO_LIVE_MS_DEFAULT;
// HDFS Router-based federation mount table entries
/** Maximum number of cache entries to have. */
public static final String FEDERATION_MOUNT_TABLE_MAX_CACHE_SIZE =
FEDERATION_ROUTER_PREFIX + "mount-table.max-cache-size";
/** Remove cache entries if we have more than 10k. */
public static final int FEDERATION_MOUNT_TABLE_MAX_CACHE_SIZE_DEFAULT = 10000;
// HDFS Router-based federation admin
public static final String DFS_ROUTER_ADMIN_HANDLER_COUNT_KEY =
FEDERATION_ROUTER_PREFIX + "admin.handler.count";
public static final int DFS_ROUTER_ADMIN_HANDLER_COUNT_DEFAULT = 1;
public static final int DFS_ROUTER_ADMIN_PORT_DEFAULT = 8111;
public static final String DFS_ROUTER_ADMIN_ADDRESS_KEY =
FEDERATION_ROUTER_PREFIX + "admin-address";
public static final String DFS_ROUTER_ADMIN_ADDRESS_DEFAULT =
"0.0.0.0:" + DFS_ROUTER_ADMIN_PORT_DEFAULT;
public static final String DFS_ROUTER_ADMIN_BIND_HOST_KEY =
FEDERATION_ROUTER_PREFIX + "admin-bind-host";
public static final String DFS_ROUTER_ADMIN_ENABLE =
FEDERATION_ROUTER_PREFIX + "admin.enable";
public static final boolean DFS_ROUTER_ADMIN_ENABLE_DEFAULT = true;
// HDFS Router-based federation web
public static final String DFS_ROUTER_HTTP_ENABLE =
FEDERATION_ROUTER_PREFIX + "http.enable";
public static final boolean DFS_ROUTER_HTTP_ENABLE_DEFAULT = true;
public static final String DFS_ROUTER_HTTP_ADDRESS_KEY =
FEDERATION_ROUTER_PREFIX + "http-address";
public static final int DFS_ROUTER_HTTP_PORT_DEFAULT = 50071;
public static final String DFS_ROUTER_HTTP_BIND_HOST_KEY =
FEDERATION_ROUTER_PREFIX + "http-bind-host";
public static final String DFS_ROUTER_HTTP_ADDRESS_DEFAULT =
"0.0.0.0:" + DFS_ROUTER_HTTP_PORT_DEFAULT;
public static final String DFS_ROUTER_HTTPS_ADDRESS_KEY =
FEDERATION_ROUTER_PREFIX + "https-address";
public static final int DFS_ROUTER_HTTPS_PORT_DEFAULT = 50072;
public static final String DFS_ROUTER_HTTPS_BIND_HOST_KEY =
FEDERATION_ROUTER_PREFIX + "https-bind-host";
public static final String DFS_ROUTER_HTTPS_ADDRESS_DEFAULT =
"0.0.0.0:" + DFS_ROUTER_HTTPS_PORT_DEFAULT;
// HDFS Router-based federation quota
public static final String DFS_ROUTER_QUOTA_ENABLE =
FEDERATION_ROUTER_PREFIX + "quota.enable";
public static final boolean DFS_ROUTER_QUOTA_ENABLED_DEFAULT = false;
public static final String DFS_ROUTER_QUOTA_CACHE_UPATE_INTERVAL =
FEDERATION_ROUTER_PREFIX + "quota-cache.update.interval";
public static final long DFS_ROUTER_QUOTA_CACHE_UPATE_INTERVAL_DEFAULT =
60000;
}
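
Every key above is a plain String constant paired with a *_DEFAULT, so call sites read them through the usual Configuration accessors; the class-valued keys go through conf.getClass with an upper-bound interface. A minimal sketch of the consumption pattern, mirroring the call sites later in this diff rather than adding anything new:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.server.federation.resolver.FileSubclusterResolver;
    import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys;

    Configuration conf = new Configuration();
    // Scalar key: dfs.federation.router.handler.count, default 10.
    int handlerCount = conf.getInt(
        RBFConfigKeys.DFS_ROUTER_HANDLER_COUNT_KEY,
        RBFConfigKeys.DFS_ROUTER_HANDLER_COUNT_DEFAULT);
    // Class-valued key: resolver implementation bounded by its interface.
    Class<? extends FileSubclusterResolver> resolverClass = conf.getClass(
        RBFConfigKeys.FEDERATION_FILE_RESOLVER_CLIENT_CLASS,
        RBFConfigKeys.FEDERATION_FILE_RESOLVER_CLIENT_CLASS_DEFAULT,
        FileSubclusterResolver.class);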

View File

@@ -31,7 +31,6 @@ import java.util.Map;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.server.federation.metrics.FederationMetrics;
@@ -146,8 +145,8 @@ public class Router extends CompositeService {
 updateRouterState(RouterServiceState.INITIALIZING);
 if (conf.getBoolean(
-DFSConfigKeys.DFS_ROUTER_STORE_ENABLE,
-DFSConfigKeys.DFS_ROUTER_STORE_ENABLE_DEFAULT)) {
+RBFConfigKeys.DFS_ROUTER_STORE_ENABLE,
+RBFConfigKeys.DFS_ROUTER_STORE_ENABLE_DEFAULT)) {
 // Service that maintains the State Store connection
 this.stateStore = new StateStoreService();
 addService(this.stateStore);
@@ -167,8 +166,8 @@ public class Router extends CompositeService {
 }
 if (conf.getBoolean(
-DFSConfigKeys.DFS_ROUTER_RPC_ENABLE,
-DFSConfigKeys.DFS_ROUTER_RPC_ENABLE_DEFAULT)) {
+RBFConfigKeys.DFS_ROUTER_RPC_ENABLE,
+RBFConfigKeys.DFS_ROUTER_RPC_ENABLE_DEFAULT)) {
 // Create RPC server
 this.rpcServer = createRpcServer();
 addService(this.rpcServer);
@@ -176,24 +175,24 @@ public class Router extends CompositeService {
 }
 if (conf.getBoolean(
-DFSConfigKeys.DFS_ROUTER_ADMIN_ENABLE,
-DFSConfigKeys.DFS_ROUTER_ADMIN_ENABLE_DEFAULT)) {
+RBFConfigKeys.DFS_ROUTER_ADMIN_ENABLE,
+RBFConfigKeys.DFS_ROUTER_ADMIN_ENABLE_DEFAULT)) {
 // Create admin server
 this.adminServer = createAdminServer();
 addService(this.adminServer);
 }
 if (conf.getBoolean(
-DFSConfigKeys.DFS_ROUTER_HTTP_ENABLE,
-DFSConfigKeys.DFS_ROUTER_HTTP_ENABLE_DEFAULT)) {
+RBFConfigKeys.DFS_ROUTER_HTTP_ENABLE,
+RBFConfigKeys.DFS_ROUTER_HTTP_ENABLE_DEFAULT)) {
 // Create HTTP server
 this.httpServer = createHttpServer();
 addService(this.httpServer);
 }
 if (conf.getBoolean(
-DFSConfigKeys.DFS_ROUTER_HEARTBEAT_ENABLE,
-DFSConfigKeys.DFS_ROUTER_HEARTBEAT_ENABLE_DEFAULT)) {
+RBFConfigKeys.DFS_ROUTER_HEARTBEAT_ENABLE,
+RBFConfigKeys.DFS_ROUTER_HEARTBEAT_ENABLE_DEFAULT)) {
 // Create status updater for each monitored Namenode
 this.namenodeHeartbeatServices = createNamenodeHeartbeatServices();
@@ -213,8 +212,8 @@ public class Router extends CompositeService {
 // Router metrics system
 if (conf.getBoolean(
-DFSConfigKeys.DFS_ROUTER_METRICS_ENABLE,
-DFSConfigKeys.DFS_ROUTER_METRICS_ENABLE_DEFAULT)) {
+RBFConfigKeys.DFS_ROUTER_METRICS_ENABLE,
+RBFConfigKeys.DFS_ROUTER_METRICS_ENABLE_DEFAULT)) {
 DefaultMetricsSystem.initialize("Router");
@@ -227,8 +226,8 @@ public class Router extends CompositeService {
 }
 // Initial quota relevant service
-if (conf.getBoolean(DFSConfigKeys.DFS_ROUTER_QUOTA_ENABLE,
-DFSConfigKeys.DFS_ROUTER_QUOTA_ENABLED_DEFAULT)) {
+if (conf.getBoolean(RBFConfigKeys.DFS_ROUTER_QUOTA_ENABLE,
+RBFConfigKeys.DFS_ROUTER_QUOTA_ENABLED_DEFAULT)) {
 this.quotaManager = new RouterQuotaManager();
 this.quotaUpdateService = new RouterQuotaUpdateService(this);
 addService(this.quotaUpdateService);
@@ -236,8 +235,8 @@ public class Router extends CompositeService {
 // Safemode service to refuse RPC calls when the router is out of sync
 if (conf.getBoolean(
-DFSConfigKeys.DFS_ROUTER_SAFEMODE_ENABLE,
-DFSConfigKeys.DFS_ROUTER_SAFEMODE_ENABLE_DEFAULT)) {
+RBFConfigKeys.DFS_ROUTER_SAFEMODE_ENABLE,
+RBFConfigKeys.DFS_ROUTER_SAFEMODE_ENABLE_DEFAULT)) {
 // Create safemode monitoring service
 this.safemodeService = new RouterSafemodeService(this);
 addService(this.safemodeService);
@@ -416,8 +415,8 @@ public class Router extends CompositeService {
 Map<String, NamenodeHeartbeatService> ret = new HashMap<>();
 if (conf.getBoolean(
-DFSConfigKeys.DFS_ROUTER_MONITOR_LOCAL_NAMENODE,
-DFSConfigKeys.DFS_ROUTER_MONITOR_LOCAL_NAMENODE_DEFAULT)) {
+RBFConfigKeys.DFS_ROUTER_MONITOR_LOCAL_NAMENODE,
+RBFConfigKeys.DFS_ROUTER_MONITOR_LOCAL_NAMENODE_DEFAULT)) {
 // Create a local heartbet service
 NamenodeHeartbeatService localHeartbeatService =
 createLocalNamenodeHearbeatService();
@@ -429,7 +428,7 @@ public class Router extends CompositeService {
 // Create heartbeat services for a list specified by the admin
 String namenodes = this.conf.get(
-DFSConfigKeys.DFS_ROUTER_MONITOR_NAMENODE);
+RBFConfigKeys.DFS_ROUTER_MONITOR_NAMENODE);
 if (namenodes != null) {
 for (String namenode : namenodes.split(",")) {
 String[] namenodeSplit = namenode.split("\\.");
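
The last hunk splits dfs.federation.router.monitor.namenode on commas and each entry on a dot, so the expected value is a comma-separated list of nameserviceId.namenodeId pairs. A hedged configuration sketch with placeholder ids:

    Configuration conf = new Configuration();
    // Monitor two namenodes of nameservice ns0 (ids are placeholders).
    conf.set(RBFConfigKeys.DFS_ROUTER_MONITOR_NAMENODE, "ns0.nn1,ns0.nn2");
    // Optionally disable the colocated-namenode heartbeat seen above.
    conf.setBoolean(RBFConfigKeys.DFS_ROUTER_MONITOR_LOCAL_NAMENODE, false);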

View File

@@ -93,8 +93,8 @@ public class RouterAdminServer extends AbstractService
 this.router = router;
 int handlerCount = this.conf.getInt(
-DFSConfigKeys.DFS_ROUTER_ADMIN_HANDLER_COUNT_KEY,
-DFSConfigKeys.DFS_ROUTER_ADMIN_HANDLER_COUNT_DEFAULT);
+RBFConfigKeys.DFS_ROUTER_ADMIN_HANDLER_COUNT_KEY,
+RBFConfigKeys.DFS_ROUTER_ADMIN_HANDLER_COUNT_DEFAULT);
 RPC.setProtocolEngine(this.conf, RouterAdminProtocolPB.class,
 ProtobufRpcEngine.class);
@@ -105,13 +105,13 @@ public class RouterAdminServer extends AbstractService
 newReflectiveBlockingService(routerAdminProtocolTranslator);
 InetSocketAddress confRpcAddress = conf.getSocketAddr(
-DFSConfigKeys.DFS_ROUTER_ADMIN_BIND_HOST_KEY,
-DFSConfigKeys.DFS_ROUTER_ADMIN_ADDRESS_KEY,
-DFSConfigKeys.DFS_ROUTER_ADMIN_ADDRESS_DEFAULT,
-DFSConfigKeys.DFS_ROUTER_ADMIN_PORT_DEFAULT);
+RBFConfigKeys.DFS_ROUTER_ADMIN_BIND_HOST_KEY,
+RBFConfigKeys.DFS_ROUTER_ADMIN_ADDRESS_KEY,
+RBFConfigKeys.DFS_ROUTER_ADMIN_ADDRESS_DEFAULT,
+RBFConfigKeys.DFS_ROUTER_ADMIN_PORT_DEFAULT);
 String bindHost = conf.get(
-DFSConfigKeys.DFS_ROUTER_ADMIN_BIND_HOST_KEY,
+RBFConfigKeys.DFS_ROUTER_ADMIN_BIND_HOST_KEY,
 confRpcAddress.getHostName());
 LOG.info("Admin server binding to {}:{}",
 bindHost, confRpcAddress.getPort());

View File

@@ -23,7 +23,6 @@ import java.util.concurrent.TimeUnit;
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.server.federation.store.CachedRecordStore;
 import org.apache.hadoop.hdfs.server.federation.store.MembershipStore;
 import org.apache.hadoop.hdfs.server.federation.store.MountTableStore;
@@ -142,8 +141,8 @@ public class RouterHeartbeatService extends PeriodicService {
 protected void serviceInit(Configuration conf) throws Exception {
 long interval = conf.getTimeDuration(
-DFSConfigKeys.DFS_ROUTER_HEARTBEAT_STATE_INTERVAL_MS,
-DFSConfigKeys.DFS_ROUTER_HEARTBEAT_STATE_INTERVAL_MS_DEFAULT,
+RBFConfigKeys.DFS_ROUTER_HEARTBEAT_STATE_INTERVAL_MS,
+RBFConfigKeys.DFS_ROUTER_HEARTBEAT_STATE_INTERVAL_MS_DEFAULT,
 TimeUnit.MILLISECONDS);
 this.setIntervalMs(interval);

View File

@@ -60,17 +60,17 @@ public class RouterHttpServer extends AbstractService {
 // Get HTTP address
 this.httpAddress = conf.getSocketAddr(
-DFSConfigKeys.DFS_ROUTER_HTTP_BIND_HOST_KEY,
-DFSConfigKeys.DFS_ROUTER_HTTP_ADDRESS_KEY,
-DFSConfigKeys.DFS_ROUTER_HTTP_ADDRESS_DEFAULT,
-DFSConfigKeys.DFS_ROUTER_HTTP_PORT_DEFAULT);
+RBFConfigKeys.DFS_ROUTER_HTTP_BIND_HOST_KEY,
+RBFConfigKeys.DFS_ROUTER_HTTP_ADDRESS_KEY,
+RBFConfigKeys.DFS_ROUTER_HTTP_ADDRESS_DEFAULT,
+RBFConfigKeys.DFS_ROUTER_HTTP_PORT_DEFAULT);
 // Get HTTPs address
 this.httpsAddress = conf.getSocketAddr(
-DFSConfigKeys.DFS_ROUTER_HTTPS_BIND_HOST_KEY,
-DFSConfigKeys.DFS_ROUTER_HTTPS_ADDRESS_KEY,
-DFSConfigKeys.DFS_ROUTER_HTTPS_ADDRESS_DEFAULT,
-DFSConfigKeys.DFS_ROUTER_HTTPS_PORT_DEFAULT);
+RBFConfigKeys.DFS_ROUTER_HTTPS_BIND_HOST_KEY,
+RBFConfigKeys.DFS_ROUTER_HTTPS_ADDRESS_KEY,
+RBFConfigKeys.DFS_ROUTER_HTTPS_ADDRESS_DEFAULT,
+RBFConfigKeys.DFS_ROUTER_HTTPS_PORT_DEFAULT);
 super.serviceInit(conf);
 }

View File

@@ -26,7 +26,6 @@ import java.util.concurrent.TimeUnit;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.QuotaUsage;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.federation.store.MountTableStore;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesRequest;
@@ -66,8 +65,8 @@ public class RouterQuotaUpdateService extends PeriodicService {
 @Override
 protected void serviceInit(Configuration conf) throws Exception {
 this.setIntervalMs(conf.getTimeDuration(
-DFSConfigKeys.DFS_ROUTER_QUOTA_CACHE_UPATE_INTERVAL,
-DFSConfigKeys.DFS_ROUTER_QUOTA_CACHE_UPATE_INTERVAL_DEFAULT,
+RBFConfigKeys.DFS_ROUTER_QUOTA_CACHE_UPATE_INTERVAL,
+RBFConfigKeys.DFS_ROUTER_QUOTA_CACHE_UPATE_INTERVAL_DEFAULT,
 TimeUnit.MILLISECONDS));
 super.serviceInit(conf);

View File

@@ -46,7 +46,6 @@ import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.NameNodeProxiesClient.ProxyAndInfo;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
@@ -126,8 +125,8 @@ public class RouterRpcClient {
 this.connectionManager.start();
 int numThreads = conf.getInt(
-DFSConfigKeys.DFS_ROUTER_CLIENT_THREADS_SIZE,
-DFSConfigKeys.DFS_ROUTER_CLIENT_THREADS_SIZE_DEFAULT);
+RBFConfigKeys.DFS_ROUTER_CLIENT_THREADS_SIZE,
+RBFConfigKeys.DFS_ROUTER_CLIENT_THREADS_SIZE_DEFAULT);
 ThreadFactory threadFactory = new ThreadFactoryBuilder()
 .setNameFormat("RPC Router Client-%d")
 .build();
@@ -140,8 +139,8 @@
 HdfsClientConfigKeys.Failover.MAX_ATTEMPTS_KEY,
 HdfsClientConfigKeys.Failover.MAX_ATTEMPTS_DEFAULT);
 int maxRetryAttempts = conf.getInt(
-DFSConfigKeys.DFS_ROUTER_CLIENT_MAX_ATTEMPTS,
-DFSConfigKeys.DFS_ROUTER_CLIENT_MAX_ATTEMPTS_DEFAULT);
+RBFConfigKeys.DFS_ROUTER_CLIENT_MAX_ATTEMPTS,
+RBFConfigKeys.DFS_ROUTER_CLIENT_MAX_ATTEMPTS_DEFAULT);
 int failoverSleepBaseMillis = conf.getInt(
 HdfsClientConfigKeys.Failover.SLEEPTIME_BASE_KEY,
 HdfsClientConfigKeys.Failover.SLEEPTIME_BASE_DEFAULT);

View File

@@ -17,14 +17,14 @@
  */
 package org.apache.hadoop.hdfs.server.federation.router;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_HANDLER_COUNT_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_HANDLER_COUNT_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_HANDLER_QUEUE_SIZE_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_HANDLER_QUEUE_SIZE_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_READER_COUNT_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_READER_COUNT_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_READER_QUEUE_SIZE_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_READER_QUEUE_SIZE_KEY;
+import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_HANDLER_COUNT_DEFAULT;
+import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_HANDLER_COUNT_KEY;
+import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_HANDLER_QUEUE_SIZE_DEFAULT;
+import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_HANDLER_QUEUE_SIZE_KEY;
+import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_READER_COUNT_DEFAULT;
+import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_READER_COUNT_KEY;
+import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_READER_QUEUE_SIZE_DEFAULT;
+import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_READER_QUEUE_SIZE_KEY;
 import java.io.FileNotFoundException;
 import java.io.IOException;
@@ -229,10 +229,10 @@ public class RouterRpcServer extends AbstractService implements ClientProtocol {
 .newReflectiveBlockingService(clientProtocolServerTranslator);
 InetSocketAddress confRpcAddress = conf.getSocketAddr(
-DFSConfigKeys.DFS_ROUTER_RPC_BIND_HOST_KEY,
-DFSConfigKeys.DFS_ROUTER_RPC_ADDRESS_KEY,
-DFSConfigKeys.DFS_ROUTER_RPC_ADDRESS_DEFAULT,
-DFSConfigKeys.DFS_ROUTER_RPC_PORT_DEFAULT);
+RBFConfigKeys.DFS_ROUTER_RPC_BIND_HOST_KEY,
+RBFConfigKeys.DFS_ROUTER_RPC_ADDRESS_KEY,
+RBFConfigKeys.DFS_ROUTER_RPC_ADDRESS_DEFAULT,
+RBFConfigKeys.DFS_ROUTER_RPC_PORT_DEFAULT);
 LOG.info("RPC server binding to {} with {} handlers for Router {}",
 confRpcAddress, handlerCount, this.router.getRouterId());
@@ -265,8 +265,8 @@ public class RouterRpcServer extends AbstractService implements ClientProtocol {
 // Create metrics monitor
 Class<? extends RouterRpcMonitor> rpcMonitorClass = this.conf.getClass(
-DFSConfigKeys.DFS_ROUTER_METRICS_CLASS,
-DFSConfigKeys.DFS_ROUTER_METRICS_CLASS_DEFAULT,
+RBFConfigKeys.DFS_ROUTER_METRICS_CLASS,
+RBFConfigKeys.DFS_ROUTER_METRICS_CLASS_DEFAULT,
 RouterRpcMonitor.class);
 this.rpcMonitor = ReflectionUtils.newInstance(rpcMonitorClass, conf);
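
Because the monitor class is resolved through conf.getClass against the RouterRpcMonitor interface, the metrics implementation stays pluggable after the key moves to RBFConfigKeys. A sketch of selecting one explicitly, using the default implementation named in RBFConfigKeys; a custom subclass would be wired in the same way:

    conf.setClass(RBFConfigKeys.DFS_ROUTER_METRICS_CLASS,
        FederationRPCPerformanceMonitor.class, RouterRpcMonitor.class);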

View File

@@ -22,7 +22,6 @@ import static org.apache.hadoop.util.Time.now;
 import java.util.concurrent.TimeUnit;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.server.federation.store.StateStoreService;
 import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;
@@ -98,19 +97,19 @@ public class RouterSafemodeService extends PeriodicService {
 // Use same interval as cache update service
 this.setIntervalMs(conf.getTimeDuration(
-DFSConfigKeys.DFS_ROUTER_CACHE_TIME_TO_LIVE_MS,
-DFSConfigKeys.DFS_ROUTER_CACHE_TIME_TO_LIVE_MS_DEFAULT,
+RBFConfigKeys.DFS_ROUTER_CACHE_TIME_TO_LIVE_MS,
+RBFConfigKeys.DFS_ROUTER_CACHE_TIME_TO_LIVE_MS_DEFAULT,
 TimeUnit.MILLISECONDS));
 this.startupInterval = conf.getTimeDuration(
-DFSConfigKeys.DFS_ROUTER_SAFEMODE_EXTENSION,
-DFSConfigKeys.DFS_ROUTER_SAFEMODE_EXTENSION_DEFAULT,
+RBFConfigKeys.DFS_ROUTER_SAFEMODE_EXTENSION,
+RBFConfigKeys.DFS_ROUTER_SAFEMODE_EXTENSION_DEFAULT,
 TimeUnit.MILLISECONDS);
 LOG.info("Leave startup safe mode after {} ms", this.startupInterval);
 this.staleInterval = conf.getTimeDuration(
-DFSConfigKeys.DFS_ROUTER_SAFEMODE_EXPIRATION,
-DFSConfigKeys.DFS_ROUTER_SAFEMODE_EXPIRATION_DEFAULT,
+RBFConfigKeys.DFS_ROUTER_SAFEMODE_EXPIRATION,
+RBFConfigKeys.DFS_ROUTER_SAFEMODE_EXPIRATION_DEFAULT,
 TimeUnit.MILLISECONDS);
 LOG.info("Enter safe mode after {} ms without reaching the State Store",
 this.staleInterval);

View File

@@ -20,8 +20,8 @@ package org.apache.hadoop.hdfs.server.federation.store;
 import java.util.concurrent.TimeUnit;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.server.federation.router.PeriodicService;
+import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -55,8 +55,8 @@ public class StateStoreCacheUpdateService extends PeriodicService {
 protected void serviceInit(Configuration conf) throws Exception {
 this.setIntervalMs(conf.getTimeDuration(
-DFSConfigKeys.DFS_ROUTER_CACHE_TIME_TO_LIVE_MS,
-DFSConfigKeys.DFS_ROUTER_CACHE_TIME_TO_LIVE_MS_DEFAULT,
+RBFConfigKeys.DFS_ROUTER_CACHE_TIME_TO_LIVE_MS,
+RBFConfigKeys.DFS_ROUTER_CACHE_TIME_TO_LIVE_MS_DEFAULT,
 TimeUnit.MILLISECONDS));
 super.serviceInit(conf);

View File

@@ -18,8 +18,8 @@
 package org.apache.hadoop.hdfs.server.federation.store;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.server.federation.router.PeriodicService;
+import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -50,8 +50,8 @@ public class StateStoreConnectionMonitorService extends PeriodicService {
 @Override
 protected void serviceInit(Configuration conf) throws Exception {
 this.setIntervalMs(conf.getLong(
-DFSConfigKeys.FEDERATION_STORE_CONNECTION_TEST_MS,
-DFSConfigKeys.FEDERATION_STORE_CONNECTION_TEST_MS_DEFAULT));
+RBFConfigKeys.FEDERATION_STORE_CONNECTION_TEST_MS,
+RBFConfigKeys.FEDERATION_STORE_CONNECTION_TEST_MS_DEFAULT));
 super.serviceInit(conf);
 }

View File

@@ -33,9 +33,9 @@ import javax.management.StandardMBean;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.server.federation.metrics.StateStoreMBean;
 import org.apache.hadoop.hdfs.server.federation.metrics.StateStoreMetrics;
+import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys;
 import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreDriver;
 import org.apache.hadoop.hdfs.server.federation.store.impl.MembershipStoreImpl;
 import org.apache.hadoop.hdfs.server.federation.store.impl.MountTableStoreImpl;
@@ -139,8 +139,8 @@ public class StateStoreService extends CompositeService {
 // Create implementation of State Store
 Class<? extends StateStoreDriver> driverClass = this.conf.getClass(
-DFSConfigKeys.FEDERATION_STORE_DRIVER_CLASS,
-DFSConfigKeys.FEDERATION_STORE_DRIVER_CLASS_DEFAULT,
+RBFConfigKeys.FEDERATION_STORE_DRIVER_CLASS,
+RBFConfigKeys.FEDERATION_STORE_DRIVER_CLASS_DEFAULT,
 StateStoreDriver.class);
 this.driver = ReflectionUtils.newInstance(driverClass, this.conf);
@@ -159,12 +159,12 @@ public class StateStoreService extends CompositeService {
 // Set expirations intervals for each record
 MembershipState.setExpirationMs(conf.getLong(
-DFSConfigKeys.FEDERATION_STORE_MEMBERSHIP_EXPIRATION_MS,
-DFSConfigKeys.FEDERATION_STORE_MEMBERSHIP_EXPIRATION_MS_DEFAULT));
+RBFConfigKeys.FEDERATION_STORE_MEMBERSHIP_EXPIRATION_MS,
+RBFConfigKeys.FEDERATION_STORE_MEMBERSHIP_EXPIRATION_MS_DEFAULT));
 RouterState.setExpirationMs(conf.getTimeDuration(
-DFSConfigKeys.FEDERATION_STORE_ROUTER_EXPIRATION_MS,
-DFSConfigKeys.FEDERATION_STORE_ROUTER_EXPIRATION_MS_DEFAULT,
+RBFConfigKeys.FEDERATION_STORE_ROUTER_EXPIRATION_MS,
+RBFConfigKeys.FEDERATION_STORE_ROUTER_EXPIRATION_MS_DEFAULT,
 TimeUnit.MILLISECONDS));
 // Cache update service

View File

@@ -20,7 +20,7 @@ package org.apache.hadoop.hdfs.server.federation.store.driver;
 import java.io.IOException;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys;
 import org.apache.hadoop.hdfs.server.federation.store.records.BaseRecord;
 import org.apache.hadoop.util.ReflectionUtils;
@@ -61,8 +61,8 @@ public abstract class StateStoreSerializer {
 private static StateStoreSerializer newSerializer(final Configuration conf) {
 Class<? extends StateStoreSerializer> serializerName = conf.getClass(
-DFSConfigKeys.FEDERATION_STORE_SERIALIZER_CLASS,
-DFSConfigKeys.FEDERATION_STORE_SERIALIZER_CLASS_DEFAULT,
+RBFConfigKeys.FEDERATION_STORE_SERIALIZER_CLASS,
+RBFConfigKeys.FEDERATION_STORE_SERIALIZER_CLASS_DEFAULT,
 StateStoreSerializer.class);
 return ReflectionUtils.newInstance(serializerName, conf);
 }

View File

@@ -29,7 +29,7 @@ import java.nio.charset.StandardCharsets;
 import java.util.LinkedList;
 import java.util.List;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys;
 import org.apache.hadoop.hdfs.server.federation.store.records.BaseRecord;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -46,7 +46,7 @@ public class StateStoreFileImpl extends StateStoreFileBaseImpl {
 /** Configuration keys. */
 public static final String FEDERATION_STORE_FILE_DIRECTORY =
-DFSConfigKeys.FEDERATION_STORE_PREFIX + "driver.file.directory";
+RBFConfigKeys.FEDERATION_STORE_PREFIX + "driver.file.directory";
 /** Root directory for the state store. */
 private String rootDirectory;

View File

@@ -33,7 +33,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.server.federation.store.records.BaseRecord;
 import org.slf4j.Logger;
@@ -51,7 +51,7 @@ public class StateStoreFileSystemImpl extends StateStoreFileBaseImpl {
 /** Configuration keys. */
 public static final String FEDERATION_STORE_FS_PATH =
-DFSConfigKeys.FEDERATION_STORE_PREFIX + "driver.fs.path";
+RBFConfigKeys.FEDERATION_STORE_PREFIX + "driver.fs.path";
 /** File system to back the State Store. */
 private FileSystem fs;

View File

@@ -29,7 +29,7 @@ import java.util.List;
 import org.apache.curator.framework.CuratorFramework;
 import org.apache.curator.framework.imps.CuratorFrameworkState;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys;
 import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreDriver;
 import org.apache.hadoop.hdfs.server.federation.store.records.BaseRecord;
 import org.apache.hadoop.hdfs.server.federation.store.records.Query;
@@ -58,7 +58,7 @@ public class StateStoreZooKeeperImpl extends StateStoreSerializableImpl {
 /** Configuration keys. */
 public static final String FEDERATION_STORE_ZK_DRIVER_PREFIX =
-DFSConfigKeys.FEDERATION_STORE_PREFIX + "driver.zk.";
+RBFConfigKeys.FEDERATION_STORE_PREFIX + "driver.zk.";
 public static final String FEDERATION_STORE_ZK_PARENT_PATH =
 FEDERATION_STORE_ZK_DRIVER_PREFIX + "parent-path";
 public static final String FEDERATION_STORE_ZK_PARENT_PATH_DEFAULT =
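
The file, filesystem, and ZooKeeper drivers above all build their keys from RBFConfigKeys.FEDERATION_STORE_PREFIX, and the active driver is chosen through FEDERATION_STORE_DRIVER_CLASS (see the StateStoreService hunk earlier). A hedged sketch that points the State Store at the local-file driver, with an arbitrary example directory:

    conf.setClass(RBFConfigKeys.FEDERATION_STORE_DRIVER_CLASS,
        StateStoreFileImpl.class, StateStoreDriver.class);
    conf.set(StateStoreFileImpl.FEDERATION_STORE_FILE_DIRECTORY,
        "/tmp/federation-state-store");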

Some files were not shown because too many files have changed in this diff.