HADOOP-15821. Move YARN Registry to Hadoop Registry.

Contributed by Íñigo Goiri
Eric Yang 2018-10-19 19:46:48 -04:00
parent 00254d7b8c
commit e2a9fa8448
116 changed files with 516 additions and 369 deletions

View File

@@ -128,6 +128,7 @@ run cp -p "${ROOT}/README.txt" .
 # Remaining projects will copy only libraries which are not present already in 'share' directory.
 run copy "${ROOT}/hadoop-common-project/hadoop-common/target/hadoop-common-${VERSION}" .
 run copy "${ROOT}/hadoop-common-project/hadoop-nfs/target/hadoop-nfs-${VERSION}" .
+run copy "${ROOT}/hadoop-common-project/hadoop-registry/target/hadoop-registry-${VERSION}" .
 run copy "${ROOT}/hadoop-hdfs-project/hadoop-hdfs/target/hadoop-hdfs-${VERSION}" .
 run copy "${ROOT}/hadoop-hdfs-project/hadoop-hdfs-nfs/target/hadoop-hdfs-nfs-${VERSION}" .
 run copy "${ROOT}/hadoop-hdfs-project/hadoop-hdfs-client/target/hadoop-hdfs-client-${VERSION}" .

View File

@@ -0,0 +1,41 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3 http://maven.apache.org/xsd/assembly-1.1.3.xsd">
<id>hadoop-registry-dist</id>
<formats>
<format>dir</format>
</formats>
<includeBaseDirectory>false</includeBaseDirectory>
<fileSets>
<fileSet>
<directory>target</directory>
<outputDirectory>/share/hadoop/common</outputDirectory>
<includes>
<include>${project.artifactId}-${project.version}.jar</include>
</includes>
</fileSet>
</fileSets>
<dependencySets>
<dependencySet>
<useProjectArtifact>false</useProjectArtifact>
<outputDirectory>/share/hadoop/common/lib</outputDirectory>
</dependencySet>
</dependencySets>
</assembly>

View File

@@ -477,7 +477,7 @@
 </exclusion>
 <exclusion>
 <groupId>org.apache.hadoop</groupId>
-<artifactId>hadoop-yarn-registry</artifactId>
+<artifactId>hadoop-registry</artifactId>
 </exclusion>
 <exclusion>
 <groupId>org.apache.hadoop</groupId>

View File

@@ -43,6 +43,7 @@ function hadoop_usage
 hadoop_add_subcommand "jnipath" client "prints the java.library.path"
 hadoop_add_subcommand "kerbname" client "show auth_to_local principal conversion"
 hadoop_add_subcommand "key" client "manage keys via the KeyProvider"
+hadoop_add_subcommand "registrydns" daemon "run the registry DNS server"
 hadoop_add_subcommand "trace" client "view and modify Hadoop tracing settings"
 hadoop_add_subcommand "version" client "print the version"
 hadoop_add_subcommand "kdiag" client "Diagnose Kerberos Problems"
@@ -155,6 +156,11 @@ function hadoopcmd_case
 key)
 HADOOP_CLASSNAME=org.apache.hadoop.crypto.key.KeyShell
 ;;
+registrydns)
+HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
+HADOOP_SECURE_CLASSNAME='org.apache.hadoop.registry.server.dns.PrivilegedRegistryDNSStarter'
+HADOOP_CLASSNAME='org.apache.hadoop.registry.server.dns.RegistryDNSServer'
+;;
 trace)
 HADOOP_CLASSNAME=org.apache.hadoop.tracing.TraceAdmin
 ;;

View File

@@ -437,3 +437,16 @@ esac
 #
 # For example, to limit who can execute the namenode command,
 # export HDFS_NAMENODE_USER=hdfs
+###
+# Registry DNS specific parameters
+###
+# For privileged registry DNS, user to run as after dropping privileges
+# This will replace the hadoop.id.str Java property in secure mode.
+# export HADOOP_REGISTRYDNS_SECURE_USER=yarn
+# Supplemental options for privileged registry DNS
+# By default, Hadoop uses jsvc which needs to know to launch a
+# server jvm.
+# export HADOOP_REGISTRYDNS_SECURE_EXTRA_OPTS="-jvm server"

View File

@@ -19,7 +19,7 @@
 # Introduction and concepts
-This document describes a YARN service registry built to address two problems:
+This document describes a Hadoop service registry built to address two problems:
 1. How can clients talk to YARN-deployed services and the components which form
 such services?

View File

@@ -15,7 +15,7 @@
 limitations under the License.
 -->
-# YARN Service Registry
+# Hadoop Service Registry
 The Service registry is a service which can be deployed in a Hadoop cluster
 to allow deployed applications to register themselves and the means of
@@ -24,7 +24,8 @@ and use the binding information to connect with the services's network-accessibl
 endpoints, be they REST, IPC, Web UI, Zookeeper quorum+path or some other protocol.
 Currently, all the registry data is stored in a zookeeper cluster.
-* [Architecture](yarn-registry.html)
+* [Architecture](hadoop-registry.html)
 * [Configuration](registry-configuration.html)
-* [Using the YARN Service registry](using-the-yarn-service-registry.html)
+* [Using the Hadoop Service registry](using-the-hadoop-service-registry.html)
 * [Security](registry-security.html)
+* [Registry DNS](registry-dns.html)

View File

@@ -15,12 +15,12 @@
 # Registry Configuration
-The YARN service registry is built on top of Apache Zookeeper.
+The Hadoop service registry is built on top of Apache Zookeeper.
 It is configured by way of a Hadoop `Configuration` class:
 the instance used to create the service controls the behavior of the client.
 This document lists the configuration parameters which control the
-registry client and its deployment in the YARN Resource Manager.
+registry client.
 The default values of all these settings are defined in `core-default.xml`.
 The values in this file may not match those listed in this document.
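A minimal sketch of the pattern this document describes — the `Configuration` handed to the factory is the instance that controls the resulting client (the quorum address below is a placeholder):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.registry.client.api.RegistryConstants;
import org.apache.hadoop.registry.client.api.RegistryOperations;
import org.apache.hadoop.registry.client.api.RegistryOperationsFactory;

public class RegistryClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // The client's behavior is driven entirely by this Configuration instance.
    conf.set(RegistryConstants.KEY_REGISTRY_ZK_QUORUM, "zk1.example.com:2181"); // placeholder
    RegistryOperations operations = RegistryOperationsFactory.createInstance(conf);
    operations.start(); // the factory inits the service; the caller starts it
    try {
      System.out.println(operations.list("/")); // children of the registry root
    } finally {
      operations.stop();
    }
  }
}
```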

View File

@@ -15,13 +15,13 @@
 # Registry DNS Server
 <!-- MACRO{toc|fromDepth=0|toDepth=3} -->
-The document describes the internals of Registry DNS server. It is based on the [YARN service registry](../registry/index.html) which is backed by a zookeeper cluster.
+The document describes the internals of Registry DNS server. It is based on the [Hadoop service registry](../registry/index.html) which is backed by a zookeeper cluster.
 ## Introduction
-The Registry DNS Server provides a standard DNS interface to the information posted into the YARN Registry by deployed applications. The DNS service serves the following functions:
+The Registry DNS Server provides a standard DNS interface to the information posted into the Hadoop Registry by deployed applications. The DNS service serves the following functions:
 1. **Exposing existing service-discovery information via DNS** - Information provided in
-the current YARN service registry's records will be converted into DNS entries, thus
+the current Hadoop service registry's records will be converted into DNS entries, thus
 allowing users to discover information about YARN applications using standard DNS
 client mechanisms (e.g. a DNS SRV Record specifying the hostname and port
 number for services).
@@ -32,7 +32,7 @@ http://solr-0.solr-service.devuser.yarncluster:8983/solr/admin/collections?actio
 ## Service Properties
-The existing YARN Service Registry is leveraged as the source of information for the DNS Service.
+The existing Hadoop Service Registry is leveraged as the source of information for the DNS Service.
 The following core functions are supported by the DNS-Server:
@@ -139,21 +139,21 @@ RegistryDNS service configured as a forwarder).
 By default, the DNS server runs on non-privileged port `5335`. Start the server
 with:
 ```
-yarn --daemon start registrydns
+hadoop --daemon start registrydns
 ```
 If the DNS server is configured to use the standard privileged port `53`, the
-environment variables YARN\_REGISTRYDNS\_SECURE\_USER and
-YARN\_REGISTRYDNS\_SECURE\_EXTRA\_OPTS must be uncommented in the yarn-env.sh
+environment variables HADOOP\_REGISTRYDNS\_SECURE\_USER and
+HADOOP\_REGISTRYDNS\_SECURE\_EXTRA\_OPTS must be uncommented in the hadoop-env.sh
 file. The DNS server should then be launched as root and jsvc will be used to
 reduce the privileges of the daemon after the port has been bound.
 ## Configuration
-The Registry DNS server reads its configuration properties from the yarn-site.xml file. The following are the DNS associated configuration properties:
+The Registry DNS server reads its configuration properties from the core-site.xml file. The following are the DNS associated configuration properties:
 | Name | Description |
 | ------------ | ------------- |
-|hadoop.registry.zk.quorum| A comma separated list of hostname:port pairs defining the zookeeper quorum for the [YARN registry](../registry/registry-configuration.html). |
+|hadoop.registry.zk.quorum| A comma separated list of hostname:port pairs defining the zookeeper quorum for the [Hadoop registry](../registry/registry-configuration.html). |
 | hadoop.registry.dns.enabled | The DNS functionality is enabled for the cluster. Default is false. |
 | hadoop.registry.dns.domain-name | The domain name for Hadoop cluster associated records. |
 | hadoop.registry.dns.bind-address | Address associated with the network interface to which the DNS listener should bind. |
@@ -193,8 +193,32 @@ The Registry DNS server reads its configuration properties from the yarn-site.xm
 </property>
 <property>
-<description>A comma separated list of hostname:port pairs defining the zookeeper quorum for the YARN registry</description>
+<description>A comma separated list of hostname:port pairs defining the zookeeper quorum for the Hadoop registry</description>
 <name>hadoop.registry.zk.quorum</name>
 <value>localhost:2181</value>
 </property>
 ```
+To configure Registry DNS to serve reverse lookup for `172.17.0.0/24`
+```
+<property>
+<description>The network mask associated with the zone IP range. If specified, it is utilized to ascertain the
+IP range possible and come up with an appropriate reverse zone name.</description>
+<name>hadoop.registry.dns.zone-mask</name>
+<value>255.255.255.0</value>
+</property>
+<property>
+<description>An indicator of the IP range associated with the cluster containers. The setting is utilized for the
+generation of the reverse zone name.</description>
+<name>hadoop.registry.dns.zone-subnet</name>
+<value>172.17.0.0</value>
+</property>
+```
+## Make your cluster use Registry DNS
+You can edit `/etc/resolv.conf` to make your system use the registry DNS, as shown below, where `192.168.154.3` is the IP address of your DNS host. It should appear before any nameservers that would return NXDOMAIN for lookups in the domain used by the cluster.
+```
+nameserver 192.168.154.3
+```
+Alternatively, if you have a corporate DNS in your organization, you can configure zone forwarding so that the Registry DNS resolves hostnames for the domain used by the cluster.
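Since this module already depends on `dnsjava`, a direct query is one way to verify the server answers; a sketch, assuming the `192.168.154.3` server above, the default port `5335`, and the example name used earlier in this document:

```java
import org.xbill.DNS.Lookup;
import org.xbill.DNS.Record;
import org.xbill.DNS.SimpleResolver;
import org.xbill.DNS.Type;

public class RegistryDnsProbe {
  public static void main(String[] args) throws Exception {
    // Point a resolver straight at the Registry DNS server.
    SimpleResolver resolver = new SimpleResolver("192.168.154.3");
    resolver.setPort(5335); // default non-privileged port; 53 in the privileged setup

    Lookup lookup = new Lookup("solr-0.solr-service.devuser.yarncluster.", Type.A);
    lookup.setResolver(resolver);

    Record[] answers = lookup.run();
    if (lookup.getResult() == Lookup.SUCCESSFUL && answers != null) {
      for (Record answer : answers) {
        System.out.println(answer); // A record(s) carrying the container IP
      }
    } else {
      System.err.println("Lookup failed: " + lookup.getErrorString());
    }
  }
}
```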

View File

@@ -15,9 +15,9 @@
 limitations under the License.
 -->
-# Using the YARN Service Registry
-The YARN service registry can be used in a numbe of ways :-
+# Using the Hadoop Service Registry
+The Hadoop service registry can be used in a number of ways :-
 1. To register dynamic YARN-deployed applications with entries that match the
 lifespan of the YARN application.
@@ -26,7 +26,7 @@ The YARN service registry can be used in a numbe of ways :-
 or an individual container.
 1. To look up static or dynamic applications and the mechanisms to communicate
 with them.
-Those mechanisms can incude: HTTP(S) URLs, Zookeeper paths,
+Those mechanisms can include: HTTP(S) URLs, Zookeeper paths,
 hostnames and ports and even paths in a Hadoop filesystem to
 configuration data.
 1. On a secure cluster, to verify that a service binding has been published
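A sketch of the look-up case from the hunk above, resolving a service record to its endpoints (the registry path is a placeholder following the `/users/<user>/<service-class>/<instance>` convention):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.registry.client.api.RegistryOperations;
import org.apache.hadoop.registry.client.api.RegistryOperationsFactory;
import org.apache.hadoop.registry.client.types.Endpoint;
import org.apache.hadoop.registry.client.types.ServiceRecord;

public class ServiceLookupSketch {
  public static void main(String[] args) throws Exception {
    RegistryOperations operations =
        RegistryOperationsFactory.createInstance(new Configuration());
    operations.start();
    try {
      ServiceRecord record =
          operations.resolve("/users/devuser/org-apache-solr/solr-service"); // placeholder
      for (Endpoint endpoint : record.external) {
        // Each endpoint names its API and the addresses to reach it (REST, IPC, ...).
        System.out.println(endpoint.api + " -> " + endpoint.addresses);
      }
    } finally {
      operations.stop();
    }
  }
}
```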

View File

@@ -0,0 +1,298 @@
<?xml version="1.0"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
http://maven.apache.org/xsd/maven-4.0.0.xsd">
<parent>
<artifactId>hadoop-project</artifactId>
<groupId>org.apache.hadoop</groupId>
<version>3.3.0-SNAPSHOT</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>hadoop-registry</artifactId>
<version>3.3.0-SNAPSHOT</version>
<name>Apache Hadoop Registry</name>
<dependencies>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-auth</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-annotations</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
</dependency>
<!-- needed for TimedOutTestsListener -->
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<type>test-jar</type>
<scope>test</scope>
</dependency>
<!-- Mini KDC is used for testing -->
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-minikdc</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.zookeeper</groupId>
<artifactId>zookeeper</artifactId>
</dependency>
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-client</artifactId>
</dependency>
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-framework</artifactId>
</dependency>
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-recipes</artifactId>
</dependency>
<dependency>
<groupId>commons-cli</groupId>
<artifactId>commons-cli</artifactId>
</dependency>
<dependency>
<groupId>commons-daemon</groupId>
<artifactId>commons-daemon</artifactId>
</dependency>
<dependency>
<groupId>commons-io</groupId>
<artifactId>commons-io</artifactId>
</dependency>
<dependency>
<groupId>commons-net</groupId>
<artifactId>commons-net</artifactId>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-annotations</artifactId>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-core</artifactId>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-databind</artifactId>
</dependency>
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
</dependency>
<dependency>
<groupId>dnsjava</groupId>
<artifactId>dnsjava</artifactId>
</dependency>
</dependencies>
<build>
<!--
Include all files in src/main/resources. By default, do not apply property
substitution (filtering=false), but do apply property substitution to
yarn-version-info.properties (filtering=true). This will substitute the
version information correctly, but prevent Maven from altering other files
like yarn-default.xml.
-->
<resources>
<resource>
<directory>${basedir}/src/main/resources</directory>
<excludes>
<exclude>yarn-version-info.properties</exclude>
</excludes>
<filtering>false</filtering>
</resource>
<resource>
<directory>${basedir}/src/main/resources</directory>
<includes>
<include>yarn-version-info.properties</include>
</includes>
<filtering>true</filtering>
</resource>
</resources>
<plugins>
<plugin>
<groupId>org.apache.rat</groupId>
<artifactId>apache-rat-plugin</artifactId>
<configuration>
<excludes>
<exclude>src/main/resources/.keep</exclude>
</excludes>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-maven-plugins</artifactId>
<executions>
<execution>
<id>version-info</id>
<phase>generate-resources</phase>
<goals>
<goal>version-info</goal>
</goals>
<configuration>
<source>
<directory>${basedir}/src/main</directory>
<includes>
<include>java/**/*.java</include>
<!--
<include>proto/**/*.proto</include>
-->
</includes>
</source>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<artifactId>maven-jar-plugin</artifactId>
<executions>
<execution>
<goals>
<goal>test-jar</goal>
</goals>
<phase>test-compile</phase>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<configuration>
<reuseForks>false</reuseForks>
<forkedProcessTimeoutInSeconds>900</forkedProcessTimeoutInSeconds>
<argLine>-Xmx1024m -XX:+HeapDumpOnOutOfMemoryError</argLine>
<environmentVariables>
<!-- HADOOP_HOME required for tests on Windows to find winutils -->
<HADOOP_HOME>${hadoop.common.build.dir}</HADOOP_HOME>
<!-- configurable option to turn JAAS debugging on during test runs -->
<HADOOP_JAAS_DEBUG>true</HADOOP_JAAS_DEBUG>
<LD_LIBRARY_PATH>${env.LD_LIBRARY_PATH}:${project.build.directory}/native/target/usr/local/lib:${hadoop.common.build.dir}/native/target/usr/local/lib</LD_LIBRARY_PATH>
<MALLOC_ARENA_MAX>4</MALLOC_ARENA_MAX>
</environmentVariables>
<systemPropertyVariables>
<hadoop.log.dir>${project.build.directory}/log</hadoop.log.dir>
<hadoop.tmp.dir>${project.build.directory}/tmp</hadoop.tmp.dir>
<!-- TODO: all references in testcases should be updated to this default -->
<test.build.dir>${test.build.dir}</test.build.dir>
<test.build.data>${test.build.data}</test.build.data>
<test.build.webapps>${test.build.webapps}</test.build.webapps>
<test.cache.data>${test.cache.data}</test.cache.data>
<test.build.classes>${test.build.classes}</test.build.classes>
<java.net.preferIPv4Stack>true</java.net.preferIPv4Stack>
<java.security.krb5.conf>${project.build.directory}/test-classes/krb5.conf</java.security.krb5.conf>
<java.security.egd>${java.security.egd}</java.security.egd>
<require.test.libhadoop>${require.test.libhadoop}</require.test.libhadoop>
</systemPropertyVariables>
<includes>
<include>**/Test*.java</include>
</includes>
<excludes>
<exclude>**/${test.exclude}.java</exclude>
<exclude>${test.exclude.pattern}</exclude>
<exclude>**/Test*$*.java</exclude>
</excludes>
</configuration>
</plugin>
</plugins>
</build>
<profiles>
<profile>
<id>dist</id>
<activation>
<activeByDefault>false</activeByDefault>
</activation>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-assembly-plugin</artifactId>
<dependencies>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-assemblies</artifactId>
<version>${project.version}</version>
</dependency>
</dependencies>
<executions>
<execution>
<id>dist</id>
<phase>package</phase>
<goals>
<goal>single</goal>
</goals>
<configuration>
<finalName>${project.artifactId}-${project.version}
</finalName>
<appendAssemblyId>false</appendAssemblyId>
<attach>false</attach>
<descriptors>
<descriptor>../../hadoop-assemblies/src/main/resources/assemblies/hadoop-registry-dist.xml</descriptor>
</descriptors>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
</profile>
</profiles>
</project>

View File

@@ -31,8 +31,6 @@ public interface RegistryConstants {
 /**
 * prefix for registry configuration options: {@value}.
-* Why <code>hadoop.</code> and not YARN? It can
-* live outside YARN
 */
 String REGISTRY_PREFIX = "hadoop.registry.";
@@ -177,7 +175,7 @@
 String KEY_REGISTRY_ZK_ROOT = ZK_PREFIX + "root";
 /**
-* Default root of the yarn registry: {@value}.
+* Default root of the Hadoop registry: {@value}.
 */
 String DEFAULT_ZK_REGISTRY_ROOT = "/registry";

View File

@@ -44,7 +44,6 @@
 import org.apache.hadoop.registry.client.types.RegistryPathStatus;
 import org.apache.hadoop.registry.client.types.ServiceRecord;
 import org.apache.hadoop.service.CompositeService;
-import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -81,7 +80,7 @@ protected void serviceInit(Configuration conf) {
 + fs.getClass().getCanonicalName());
 } catch (IOException e) {
 LOG.error("Failed to get FileSystem for registry", e);
-throw new YarnRuntimeException(e);
+throw new RuntimeException(e);
 }
 }

View File

@@ -0,0 +1,41 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.registry.conf;
import org.apache.hadoop.conf.Configuration;
/**
* Intermediate configuration class to import the keys from YarnConfiguration
* in yarn-default.xml and yarn-site.xml. Once hadoop-yarn-registry is totally
* deprecated, this should be deprecated.
*/
public class RegistryConfiguration extends Configuration {
static {
Configuration.addDefaultResource("yarn-default.xml");
Configuration.addDefaultResource("yarn-site.xml");
}
/**
* Default constructor which relies on the static method to import the YARN
* settings.
*/
public RegistryConfiguration() {
super();
}
}
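A minimal sketch of what this bridge buys a caller: registry keys that an operator still defines in yarn-site.xml become visible through the plain `Configuration` API (constants from `RegistryConstants`):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.registry.client.api.RegistryConstants;
import org.apache.hadoop.registry.conf.RegistryConfiguration;

public class RegistryConfigurationSketch {
  public static void main(String[] args) {
    // Loads core-default.xml/core-site.xml as usual, plus yarn-default.xml
    // and yarn-site.xml via the static block above.
    Configuration conf = new RegistryConfiguration();
    String quorum = conf.get(RegistryConstants.KEY_REGISTRY_ZK_QUORUM,
        RegistryConstants.DEFAULT_REGISTRY_ZK_QUORUM);
    System.out.println(RegistryConstants.KEY_REGISTRY_ZK_QUORUM + " = " + quorum);
  }
}
```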

View File

@@ -0,0 +1,22 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Configuration for the Hadoop Service Registry.
*/
package org.apache.hadoop.registry.conf;

View File

@@ -19,10 +19,11 @@
 import org.apache.commons.daemon.Daemon;
 import org.apache.commons.daemon.DaemonContext;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.registry.client.api.DNSOperationsFactory;
+import org.apache.hadoop.registry.conf.RegistryConfiguration;
 import org.apache.hadoop.util.GenericOptionsParser;
 import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -37,7 +38,7 @@ public class PrivilegedRegistryDNSStarter implements Daemon {
 private static final Logger LOG =
 LoggerFactory.getLogger(PrivilegedRegistryDNSStarter.class);
-private YarnConfiguration conf;
+private Configuration conf;
 private RegistryDNS registryDNS;
 private RegistryDNSServer registryDNSServer;
@@ -45,7 +46,7 @@ public class PrivilegedRegistryDNSStarter implements Daemon {
 public void init(DaemonContext context) throws Exception {
 String[] args = context.getArguments();
 StringUtils.startupShutdownMessage(RegistryDNSServer.class, args, LOG);
-conf = new YarnConfiguration();
+conf = new RegistryConfiguration();
 new GenericOptionsParser(conf, args);
 int port = conf.getInt(KEY_DNS_PORT, DEFAULT_DNS_PORT);
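For context, this class implements the commons-daemon `Daemon` lifecycle that jsvc drives (commons-daemon is a declared dependency of this module); a bare skeleton of that contract:

```java
import org.apache.commons.daemon.Daemon;
import org.apache.commons.daemon.DaemonContext;

/** Skeleton of the four-phase lifecycle jsvc expects. */
public class DaemonSkeleton implements Daemon {
  @Override
  public void init(DaemonContext context) throws Exception {
    // Runs as root: bind privileged resources (e.g. port 53) here.
  }

  @Override
  public void start() throws Exception {
    // Runs as the unprivileged user (HADOOP_REGISTRYDNS_SECURE_USER).
  }

  @Override
  public void stop() throws Exception {
    // Graceful shutdown on service stop.
  }

  @Override
  public void destroy() {
    // Release anything allocated in init().
  }
}
```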

View File

@@ -412,7 +412,7 @@ private void initializeReverseLookupZone(Configuration conf)
 Boolean shouldSplitReverseZone = conf.getBoolean(KEY_DNS_SPLIT_REVERSE_ZONE,
 DEFAULT_DNS_SPLIT_REVERSE_ZONE);
 if (shouldSplitReverseZone) {
-int subnetCount = ReverseZoneUtils.getSubnetCountForReverseZones(conf);
+long subnetCount = ReverseZoneUtils.getSubnetCountForReverseZones(conf);
 addSplitReverseZones(conf, subnetCount);
 // Single reverse zone
 } else {
@@ -434,7 +434,7 @@ private void initializeReverseLookupZone(Configuration conf)
 * @throws IOException if the DNSSEC key can not be read.
 */
 @VisibleForTesting
-protected void addSplitReverseZones(Configuration conf, int subnetCount)
+protected void addSplitReverseZones(Configuration conf, long subnetCount)
 throws IOException {
 String subnet = conf.get(KEY_DNS_ZONE_SUBNET);
 String range = conf.get(KEY_DNS_SPLIT_REVERSE_ZONE_RANGE);
@@ -513,7 +513,7 @@ private Name getReverseZoneName(SubnetUtils utils, String networkAddress) {
 Name reverseZoneName = null;
 boolean isLargeNetwork = false;
 if (utils != null) {
-isLargeNetwork = utils.getInfo().getAddressCount() > 256;
+isLargeNetwork = utils.getInfo().getAddressCountLong() > 256;
 }
 final String[] bytes = networkAddress.split("\\.");
 if (bytes.length == 4) {

View File

@@ -27,13 +27,13 @@
 import org.apache.hadoop.registry.client.impl.zk.RegistryOperationsService;
 import org.apache.hadoop.registry.client.types.RegistryPathStatus;
 import org.apache.hadoop.registry.client.types.ServiceRecord;
+import org.apache.hadoop.registry.conf.RegistryConfiguration;
 import org.apache.hadoop.service.CompositeService;
+import org.apache.hadoop.service.launcher.HadoopUncaughtExceptionHandler;
 import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.util.GenericOptionsParser;
 import org.apache.hadoop.util.ShutdownHookManager;
 import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.yarn.YarnUncaughtExceptionHandler;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -237,8 +237,8 @@ static RegistryDNSServer launchDNSServer(Configuration conf,
 RegistryDNS rdns) {
 RegistryDNSServer dnsServer = null;
-Thread
-.setDefaultUncaughtExceptionHandler(new YarnUncaughtExceptionHandler());
+Thread.setDefaultUncaughtExceptionHandler(
+new HadoopUncaughtExceptionHandler());
 try {
 dnsServer = new RegistryDNSServer("RegistryDNSServer", rdns);
 ShutdownHookManager.get().addShutdownHook(
@@ -260,7 +260,7 @@ static RegistryDNSServer launchDNSServer(Configuration conf,
 */
 public static void main(String[] args) throws IOException {
 StringUtils.startupShutdownMessage(RegistryDNSServer.class, args, LOG);
-YarnConfiguration conf = new YarnConfiguration();
+Configuration conf = new RegistryConfiguration();
 new GenericOptionsParser(conf, args);
 launchDNSServer(conf, null);
 }

View File

@@ -76,7 +76,7 @@ protected static String getReverseZoneNetworkAddress(String baseIp, int range,
 * @param conf the Hadoop configuration.
 * @return The number of subnets given the range and netmask.
 */
-protected static int getSubnetCountForReverseZones(Configuration conf) {
+protected static long getSubnetCountForReverseZones(Configuration conf) {
 String subnet = conf.get(KEY_DNS_ZONE_SUBNET);
 String mask = conf.get(KEY_DNS_ZONE_MASK);
 String range = conf.get(KEY_DNS_SPLIT_REVERSE_ZONE_RANGE);
@@ -96,11 +96,11 @@ protected static int getSubnetCountForReverseZones(Configuration conf) {
 throw new IllegalArgumentException(msg);
 }
-int ipCount;
+long ipCount;
 try {
 SubnetUtils subnetUtils = new SubnetUtils(subnet, mask);
 subnetUtils.setInclusiveHostCount(true);
-ipCount = subnetUtils.getInfo().getAddressCount();
+ipCount = subnetUtils.getInfo().getAddressCountLong();
 } catch (IllegalArgumentException e) {
 LOG.error("The subnet or mask is invalid: Subnet: {} Mask: {}", subnet,

View File

@@ -1,4 +1,4 @@
----------------------------- MODULE yarnregistry ----------------------------
+---------------------------- MODULE hadoopregistry ----------------------------
 EXTENDS FiniteSets, Sequences, Naturals, TLC
@@ -27,7 +27,7 @@ EXTENDS FiniteSets, Sequences, Naturals, TLC
 ============================================================================
-This defines the YARN registry in terms of operations on sets of records.
+This defines the Hadoop registry in terms of operations on sets of records.
 Every registry entry is represented as a record containing both the path and the data.

View File

@@ -21,8 +21,8 @@
 import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.service.Service;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.registry.client.api.RegistryConstants;
+import org.apache.hadoop.registry.conf.RegistryConfiguration;
 import org.apache.hadoop.registry.server.services.AddingCompositeService;
 import org.apache.hadoop.registry.server.services.MicroZookeeperService;
 import org.apache.hadoop.registry.server.services.MicroZookeeperServiceKeys;
@@ -76,7 +76,7 @@ public static void createZKServer() throws Exception {
 FileUtils.deleteDirectory(zkDir);
 assertTrue(zkDir.mkdirs());
 zookeeper = new MicroZookeeperService("InMemoryZKService");
-YarnConfiguration conf = new YarnConfiguration();
+Configuration conf = new RegistryConfiguration();
 conf.set(MicroZookeeperServiceKeys.KEY_ZKSERVICE_DIR, zkDir.getAbsolutePath());
 zookeeper.init(conf);
 zookeeper.start();
@@ -100,8 +100,8 @@ public String getConnectString() {
 return zookeeper.getConnectionString();
 }
-public YarnConfiguration createRegistryConfiguration() {
-YarnConfiguration conf = new YarnConfiguration();
+public Configuration createRegistryConfiguration() {
+Configuration conf = new RegistryConfiguration();
 conf.setInt(RegistryConstants.KEY_REGISTRY_ZK_CONNECTION_TIMEOUT, 1000);
 conf.setInt(RegistryConstants.KEY_REGISTRY_ZK_RETRY_INTERVAL, 500);
 conf.setInt(RegistryConstants.KEY_REGISTRY_ZK_RETRY_TIMES, 10);

View File

@@ -19,7 +19,8 @@
 package org.apache.hadoop.registry.client.impl;
 import org.apache.hadoop.service.ServiceOperations;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.registry.conf.RegistryConfiguration;
 import org.apache.hadoop.registry.server.services.MicroZookeeperService;
 import org.junit.After;
 import org.junit.Assert;
@@ -50,7 +51,7 @@ public void destroyZKServer() throws IOException {
 @Test
 public void testTempDirSupport() throws Throwable {
-YarnConfiguration conf = new YarnConfiguration();
+Configuration conf = new RegistryConfiguration();
 zookeeper = new MicroZookeeperService("t1");
 zookeeper.init(conf);
 zookeeper.start();

View File

@@ -49,6 +49,7 @@
 import java.io.File;
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.nio.charset.Charset;
 import java.security.Principal;
 import java.util.HashSet;
 import java.util.Properties;
@@ -219,7 +220,7 @@ public static void setupKDCAndPrincipals() throws Exception {
 BOB_LOCALHOST, keytab_bob));
 jaasFile = new File(kdcWorkDir, "jaas.txt");
-FileUtils.write(jaasFile, jaas.toString());
+FileUtils.write(jaasFile, jaas.toString(), Charset.defaultCharset());
 LOG.info("\n"+ jaas);
 RegistrySecurity.bindJVMtoJAASFile(jaasFile);
 }

View File

@@ -20,6 +20,7 @@
 import java.io.File;
 import java.lang.reflect.Constructor;
 import java.lang.reflect.Method;
+import java.nio.charset.Charset;
 import java.security.Principal;
 import java.security.PrivilegedExceptionAction;
 import java.util.HashMap;
@@ -88,7 +89,8 @@ public void testClientLogin() throws Throwable {
 logLoginDetails(ALICE_LOCALHOST, client);
 String confFilename = System.getProperty(Environment.JAAS_CONF_KEY);
 assertNotNull("Unset: "+ Environment.JAAS_CONF_KEY, confFilename);
-String config = FileUtils.readFileToString(new File(confFilename));
+String config = FileUtils.readFileToString(new File(confFilename),
+Charset.defaultCharset());
 LOG.info("{}=\n{}", confFilename, config);
 RegistrySecurity.setZKSaslClientProperties(ALICE, ALICE_CLIENT_CONTEXT);
 } finally {
@@ -127,7 +129,8 @@ public LoginContext createLoginContextZookeeperLocalhost() throws
 @Test
 public void testKerberosAuth() throws Throwable {
 File krb5conf = getKdc().getKrb5conf();
-String krbConfig = FileUtils.readFileToString(krb5conf);
+String krbConfig = FileUtils.readFileToString(krb5conf,
+Charset.defaultCharset());
 LOG.info("krb5.conf at {}:\n{}", krb5conf, krbConfig);
 Subject subject = new Subject();
 Class<?> kerb5LoginClass =
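A plausible reading of the charset changes in these tests: the commons-io overloads without an explicit `Charset` fall back to the platform default encoding (and are deprecated in recent commons-io releases), so the charset is now spelled out. A minimal sketch of the corrected pattern:

```java
import java.io.File;
import java.nio.charset.Charset;
import org.apache.commons.io.FileUtils;

public class CharsetSketch {
  public static void main(String[] args) throws Exception {
    File file = new File("jaas.txt"); // placeholder file
    // Passing the charset explicitly avoids the deprecated overloads that
    // silently depend on the platform default encoding.
    FileUtils.write(file, "content", Charset.defaultCharset());
    String text = FileUtils.readFileToString(file, Charset.defaultCharset());
    System.out.println(text);
  }
}
```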

Some files were not shown because too many files have changed in this diff.