Compare commits: trunk...branch-3.3
68 Commits
Author | SHA1 | Date |
---|---|---|
Brahma Reddy Battula | aa96f1871b | |
Brahma Reddy Battula | b064f09bd6 | |
Akira Ajisaka | a883752df1 | |
Vinayakumar B | 8382e31c0c | |
Eric Yang | 2c70c0f74c | |
Vinayakumar B | e6887bd0e2 | |
Wilfred Spiegelenburg | 542c478a8c | |
Eric Yang | 54bd2424f7 | |
Chen Liang | 940a422525 | |
Surendra Singh Lilhore | 02d04e5b0f | |
Ayush Saxena | 69eeb673d8 | |
Akira Ajisaka | 0263e61076 | |
Akira Ajisaka | 140bb86d97 | |
Wei-Chiu Chuang | 3d8e392eb4 | |
Szilard Nemeth | adfb68cb22 | |
Konstantin V Shvachko | 89f1095454 | |
Szilard Nemeth | 7e632d5470 | |
Eric Yang | 3bf7cd0030 | |
Ayush Saxena | 80fec40499 | |
Akira Ajisaka | 855e9acc8a | |
Aryan Gupta | 1272418f35 | |
Szilard Nemeth | 5f2e0d5729 | |
Akira Ajisaka | 472385e4e6 | |
Szilard Nemeth | 7a3f190d89 | |
Szilard Nemeth | 61ca459c74 | |
Akira Ajisaka | 497c7a1680 | |
Mehakmeet Singh | 2471ba8b5c | |
Sneha Vijayarajan | 32fb174da2 | |
Szilard Nemeth | 1340518cd8 | |
Mingliang Liu | 47b330dc34 | |
Mingliang Liu | 5b92d73a74 | |
bilaharith | f53ded6185 | |
Mukund Thakur | 98fdbb820e | |
Mehakmeet Singh | f74a571fdf | |
Akira Ajisaka | 54a64e542e | |
Akira Ajisaka | 5459dd64fd | |
Steve Loughran | 0982f56f3a | |
Steve Loughran | de9a6b4588 | |
Sammi Chen | 9c81b17153 | |
Surendra Singh Lilhore | a6c718fd0f | |
Masatake Iwasaki | de5d43300a | |
Jonathan Hung | 49ae9b2137 | |
Mukund Thakur | 96d7ceb39a | |
Stephen O'Donnell | aaad947c74 | |
Vinayakumar B | b4ba9bed7c | |
Mukund Thakur | 94da630cd2 | |
Szilard Nemeth | 4bd37f2283 | |
Szilard Nemeth | f473473355 | |
sunlisheng | 7aad965198 | |
bilaharith | 6bae8c46a8 | |
Steve Loughran | 68a9562848 | |
Eric E Payne | b397a3a875 | |
Jonathan Hung | e1dd78143b | |
Steve Loughran | e4331a73c9 | |
Wei-Chiu Chuang | 749a5b81da | |
Szilard Nemeth | 061a7a8a41 | |
Sunil G | 94df6b513b | |
Szilard Nemeth | ef01e9d40e | |
Akira Ajisaka | e8520fd132 | |
Wilfred Spiegelenburg | 618a9208f7 | |
Szilard Nemeth | d293e120eb | |
Szilard Nemeth | 7abc6221a3 | |
Akira Ajisaka | cb806221e9 | |
Akira Ajisaka | f537a51288 | |
Masatake Iwasaki | 0a90df76bc | |
Akira Ajisaka | d501c6a43d | |
Wanqiang Ji | 215a62e99f | |
Akira Ajisaka | a2f4dcb470 | |
@@ -35,7 +35,7 @@ pipeline {
     DOCKERFILE = "${SOURCEDIR}/dev-support/docker/Dockerfile"
     YETUS='yetus'
     // Branch or tag name.  Yetus release tags are 'rel/X.Y.Z'
-    YETUS_VERSION='rel/0.11.1'
+    YETUS_VERSION='rel/0.12.0'
   }
 
   parameters {

@@ -61,7 +61,7 @@ pipeline {
     steps {
       withCredentials(
           [usernamePassword(credentialsId: 'apache-hadoop-at-github.com',
-              passwordVariable: 'GITHUB_PASSWORD',
+              passwordVariable: 'GITHUB_TOKEN',
               usernameVariable: 'GITHUB_USER'),
           usernamePassword(credentialsId: 'hadoopqa-at-asf-jira',
               passwordVariable: 'JIRA_PASSWORD',

@@ -105,8 +105,7 @@ pipeline {
     YETUS_ARGS+=("--html-report-file=${WORKSPACE}/${PATCHDIR}/report.html")
 
     # enable writing back to Github
-    YETUS_ARGS+=(--github-password="${GITHUB_PASSWORD}")
-    YETUS_ARGS+=(--github-user=${GITHUB_USER})
+    YETUS_ARGS+=(--github-token="${GITHUB_TOKEN}")
 
     # enable writing back to ASF JIRA
     YETUS_ARGS+=(--jira-password="${JIRA_PASSWORD}")
@@ -651,10 +651,12 @@ function signartifacts
 
   big_console_header "Signing the release"
 
-  for i in ${ARTIFACTS_DIR}/*; do
+  run cd "${ARTIFACTS_DIR}"
+  for i in *; do
     ${GPG} --use-agent --armor --output "${i}.asc" --detach-sig "${i}"
     sha512sum --tag "${i}" > "${i}.sha512"
   done
+  run cd "${BASEDIR}"
 
   if [[ "${ASFRELEASE}" = true ]]; then
     echo "Fetching the Apache Hadoop KEYS file..."
@@ -166,7 +166,9 @@ RUN apt-get -q update \
 # Install pylint at fixed version (2.0.0 removed python2 support)
 # https://github.com/PyCQA/pylint/issues/2294
 ####
-RUN pip2 install pylint==1.9.2
+RUN pip2 install \
+    configparser==4.0.2 \
+    pylint==1.9.2
 
 ####
 # Install dateutil.parser
@@ -174,7 +174,9 @@ RUN apt-get -q update \
 # Install pylint at fixed version (2.0.0 removed python2 support)
 # https://github.com/PyCQA/pylint/issues/2294
 ####
-RUN pip2 install pylint==1.9.2
+RUN pip2 install \
+    configparser==4.0.2 \
+    pylint==1.9.2
 
 ####
 # Install dateutil.parser
@@ -23,11 +23,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.3.0-SNAPSHOT</version>
+    <version>3.3.0</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-assemblies</artifactId>
-  <version>3.3.0-SNAPSHOT</version>
+  <version>3.3.0</version>
   <name>Apache Hadoop Assemblies</name>
   <description>Apache Hadoop Assemblies</description>
@@ -56,6 +56,7 @@
       <exclude>**/build/**</exclude>
       <exclude>**/file:/**</exclude>
       <exclude>**/SecurityAuth.audit*</exclude>
+      <exclude>patchprocess/**</exclude>
     </excludes>
   </fileSet>
 </fileSets>
@@ -18,7 +18,7 @@
   <parent>
     <artifactId>hadoop-main</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.3.0-SNAPSHOT</version>
+    <version>3.3.0</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-build-tools</artifactId>
@@ -18,11 +18,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.3.0-SNAPSHOT</version>
+    <version>3.3.0</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-client-api</artifactId>
-  <version>3.3.0-SNAPSHOT</version>
+  <version>3.3.0</version>
   <packaging>jar</packaging>
 
   <description>Apache Hadoop Client</description>
@@ -18,11 +18,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.3.0-SNAPSHOT</version>
+    <version>3.3.0</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-client-check-invariants</artifactId>
-  <version>3.3.0-SNAPSHOT</version>
+  <version>3.3.0</version>
   <packaging>pom</packaging>
 
   <description>
@@ -18,11 +18,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.3.0-SNAPSHOT</version>
+    <version>3.3.0</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-client-check-test-invariants</artifactId>
-  <version>3.3.0-SNAPSHOT</version>
+  <version>3.3.0</version>
   <packaging>pom</packaging>
 
   <description>
@@ -18,11 +18,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.3.0-SNAPSHOT</version>
+    <version>3.3.0</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-client-integration-tests</artifactId>
-  <version>3.3.0-SNAPSHOT</version>
+  <version>3.3.0</version>
 
   <description>Checks that we can use the generated artifacts</description>
   <name>Apache Hadoop Client Packaging Integration Tests</name>
@@ -18,11 +18,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.3.0-SNAPSHOT</version>
+    <version>3.3.0</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-client-minicluster</artifactId>
-  <version>3.3.0-SNAPSHOT</version>
+  <version>3.3.0</version>
   <packaging>jar</packaging>
 
   <description>Apache Hadoop Minicluster for Clients</description>
@@ -811,15 +811,25 @@
           <exclude>*/**</exclude>
         </excludes>
       </filter>
-      <!-- Jetty 9.4.x: jetty-client and jetty-xml are depended by org.eclipse.jetty.websocket:websocket-client.
-       But we are only excluding jetty-client not jetty-xml because HttpServer2 implicitly uses the shaded package name.
-      -->
+      <!-- Jetty 9.4.x: jetty-client and jetty-xml are depended by org.eclipse.jetty.websocket:websocket-client.-->
       <filter>
         <artifact>org.eclipse.jetty:jetty-client</artifact>
         <excludes>
           <exclude>*/**</exclude>
         </excludes>
       </filter>
+      <filter>
+        <artifact>org.eclipse.jetty:jetty-xml</artifact>
+        <excludes>
+          <exclude>*/**</exclude>
+        </excludes>
+      </filter>
+      <filter>
+        <artifact>org.eclipse.jetty:jetty-http</artifact>
+        <excludes>
+          <exclude>*/**</exclude>
+        </excludes>
+      </filter>
     </filters>
 
     <!-- relocate classes from mssql-jdbc -->
@@ -18,11 +18,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.3.0-SNAPSHOT</version>
+    <version>3.3.0</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-client-runtime</artifactId>
-  <version>3.3.0-SNAPSHOT</version>
+  <version>3.3.0</version>
   <packaging>jar</packaging>
 
   <description>Apache Hadoop Client</description>
@@ -158,12 +158,8 @@
       <!-- the jdk ships part of the javax.annotation namespace, so if we want to relocate this we'll have to care it out by class :( -->
       <exclude>com.google.code.findbugs:jsr305</exclude>
       <exclude>io.dropwizard.metrics:metrics-core</exclude>
-      <exclude>org.eclipse.jetty.websocket:*</exclude>
       <exclude>org.eclipse.jetty:jetty-servlet</exclude>
       <exclude>org.eclipse.jetty:jetty-security</exclude>
-      <exclude>org.eclipse.jetty:jetty-client</exclude>
-      <exclude>org.eclipse.jetty:jetty-http</exclude>
-      <exclude>org.eclipse.jetty:jetty-xml</exclude>
       <exclude>org.ow2.asm:*</exclude>
       <!-- Leave bouncycastle unshaded because it's signed with a special Oracle certificate so it can be a custom JCE security provider -->
       <exclude>org.bouncycastle:*</exclude>
@@ -213,6 +209,13 @@
           <exclude>about.html</exclude>
         </excludes>
       </filter>
+      <filter>
+        <!-- skip jetty license info already incorporated into LICENSE/NOTICE -->
+        <artifact>org.eclipse.jetty.websocket:*</artifact>
+        <excludes>
+          <exclude>about.html</exclude>
+        </excludes>
+      </filter>
       <filter>
         <!-- skip docs on formats used in kerby -->
         <artifact>org.apache.kerby:kerb-util</artifact>
@@ -18,11 +18,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project-dist</artifactId>
-    <version>3.3.0-SNAPSHOT</version>
+    <version>3.3.0</version>
     <relativePath>../../hadoop-project-dist</relativePath>
   </parent>
   <artifactId>hadoop-client</artifactId>
-  <version>3.3.0-SNAPSHOT</version>
+  <version>3.3.0</version>
 
   <description>Apache Hadoop Client aggregation pom with dependencies exposed</description>
   <name>Apache Hadoop Client Aggregator</name>
@@ -18,7 +18,7 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.3.0-SNAPSHOT</version>
+    <version>3.3.0</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-client-modules</artifactId>
@@ -18,11 +18,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.3.0-SNAPSHOT</version>
+    <version>3.3.0</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-cloud-storage</artifactId>
-  <version>3.3.0-SNAPSHOT</version>
+  <version>3.3.0</version>
   <packaging>jar</packaging>
 
   <description>Apache Hadoop Cloud Storage</description>
@@ -128,5 +128,10 @@
       <artifactId>hadoop-openstack</artifactId>
       <scope>compile</scope>
     </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-cos</artifactId>
+      <scope>compile</scope>
+    </dependency>
   </dependencies>
 </project>
@@ -15,4 +15,9 @@
    limitations under the License.
 -->
 <FindBugsFilter>
+  <Match>
+    <Class name="org.apache.hadoop.fs.cosn.CosNInputStream.ReadBuffer"/>
+    <Method name="getBuffer"/>
+    <Bug pattern="EI_EXPOSE_REP"/>
+  </Match>
 </FindBugsFilter>
@@ -20,7 +20,7 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.3.0-SNAPSHOT</version>
+    <version>3.3.0</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-cos</artifactId>
@@ -81,6 +81,22 @@
         <forkedProcessTimeoutInSeconds>3600</forkedProcessTimeoutInSeconds>
       </configuration>
     </plugin>
+    <plugin>
+      <groupId>org.apache.maven.plugins</groupId>
+      <artifactId>maven-dependency-plugin</artifactId>
+      <executions>
+        <execution>
+          <id>deplist</id>
+          <phase>compile</phase>
+          <goals>
+            <goal>list</goal>
+          </goals>
+          <configuration>
+            <outputFile>${project.basedir}/target/hadoop-cloud-storage-deps/${project.artifactId}.cloud-storage-optional.txt</outputFile>
+          </configuration>
+        </execution>
+      </executions>
+    </plugin>
   </plugins>
 </build>
@@ -93,8 +109,8 @@
 
     <dependency>
       <groupId>com.qcloud</groupId>
-      <artifactId>cos_api</artifactId>
-      <version>5.4.9</version>
+      <artifactId>cos_api-bundle</artifactId>
+      <version>5.6.19</version>
       <scope>compile</scope>
     </dependency>
 
@@ -63,7 +63,6 @@ public final class BufferPool {
 
   private File createDir(String dirPath) throws IOException {
     File dir = new File(dirPath);
-    if (null != dir) {
       if (!dir.exists()) {
         LOG.debug("Buffer dir: [{}] does not exists. create it first.",
             dirPath);

@@ -86,10 +85,6 @@ public final class BufferPool {
       } else {
         LOG.debug("buffer dir: {} already exists.", dirPath);
       }
-    } else {
-      throw new IOException("creating buffer dir: " + dir.getAbsolutePath()
-          + "unsuccessfully.");
-    }
 
     return dir;
   }
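The two BufferPool hunks above delete a `null != dir` guard (a constructor result can never be null) together with the now-unreachable else branch. A minimal sketch of the surviving logic, with a hypothetical class name and simplified error handling:

```java
import java.io.File;
import java.io.IOException;

final class BufferDirs {
  // new File(path) never returns null, so only existence needs checking.
  static File createDir(String dirPath) throws IOException {
    File dir = new File(dirPath);
    if (!dir.exists() && !dir.mkdirs()) {
      throw new IOException(
          "creating buffer dir: " + dir.getAbsolutePath() + " failed.");
    }
    return dir;
  }
}
```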
@@ -80,7 +80,6 @@ public class CosNFileReadTask implements Runnable {
   public void run() {
     int retries = 0;
     RetryPolicy.RetryAction retryAction;
-    LOG.info(Thread.currentThread().getName() + "read ...");
     try {
       this.readBuffer.lock();
       do {
@@ -22,15 +22,16 @@ import java.lang.reflect.Constructor;
 import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
 import java.lang.reflect.Modifier;
+import java.net.URI;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+import com.qcloud.cos.auth.COSCredentialsProvider;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.cosn.auth.COSCredentialProviderList;
-import org.apache.hadoop.fs.cosn.auth.EnvironmentVariableCredentialProvider;
-import org.apache.hadoop.fs.cosn.auth.SimpleCredentialProvider;
+import org.apache.hadoop.fs.cosn.auth.COSCredentialsProviderList;
+import org.apache.hadoop.fs.cosn.auth.EnvironmentVariableCredentialsProvider;
+import org.apache.hadoop.fs.cosn.auth.SimpleCredentialsProvider;
 
 /**
  * Utility methods for CosN code.

@@ -48,21 +49,23 @@ public final class CosNUtils {
   private CosNUtils() {
   }
 
-  public static COSCredentialProviderList createCosCredentialsProviderSet(
+  public static COSCredentialsProviderList createCosCredentialsProviderSet(
+      URI uri,
       Configuration conf) throws IOException {
-    COSCredentialProviderList credentialProviderList =
-        new COSCredentialProviderList();
+    COSCredentialsProviderList credentialProviderList =
+        new COSCredentialsProviderList();
 
     Class<?>[] cosClasses = CosNUtils.loadCosProviderClasses(
         conf,
         CosNConfigKeys.COSN_CREDENTIALS_PROVIDER);
     if (0 == cosClasses.length) {
-      credentialProviderList.add(new SimpleCredentialProvider(conf));
-      credentialProviderList.add(new EnvironmentVariableCredentialProvider());
+      credentialProviderList.add(
+          new SimpleCredentialsProvider(uri, conf));
+      credentialProviderList.add(
+          new EnvironmentVariableCredentialsProvider(uri, conf));
     } else {
       for (Class<?> credClass : cosClasses) {
-        credentialProviderList.add(createCOSCredentialProvider(
-            conf,
+        credentialProviderList.add(createCOSCredentialProvider(uri, conf,
             credClass));
       }
     }

@@ -83,16 +86,17 @@ public final class CosNUtils {
   }
 
   public static COSCredentialsProvider createCOSCredentialProvider(
+      URI uri,
       Configuration conf,
       Class<?> credClass) throws IOException {
     COSCredentialsProvider credentialsProvider;
     if (!COSCredentialsProvider.class.isAssignableFrom(credClass)) {
-      throw new IllegalArgumentException(
-          "class " + credClass + " " + NOT_COS_CREDENTIAL_PROVIDER);
+      throw new IllegalArgumentException("class " + credClass + " " +
+          NOT_COS_CREDENTIAL_PROVIDER);
     }
     if (Modifier.isAbstract(credClass.getModifiers())) {
-      throw new IllegalArgumentException(
-          "class " + credClass + " " + ABSTRACT_CREDENTIAL_PROVIDER);
+      throw new IllegalArgumentException("class " + credClass + " " +
+          ABSTRACT_CREDENTIAL_PROVIDER);
     }
     LOG.debug("Credential Provider class: " + credClass.getName());
 

@@ -112,8 +116,18 @@ public final class CosNUtils {
       return credentialsProvider;
     }
 
-    Method factory = getFactoryMethod(
-        credClass, COSCredentialsProvider.class, "getInstance");
+    // new credClass(uri, conf)
+    constructor = getConstructor(credClass, URI.class,
+        Configuration.class);
+    if (null != constructor) {
+      credentialsProvider =
+          (COSCredentialsProvider) constructor.newInstance(uri,
+              conf);
+      return credentialsProvider;
+    }
+
+    Method factory = getFactoryMethod(credClass,
+        COSCredentialsProvider.class, "getInstance");
     if (null != factory) {
       credentialsProvider = (COSCredentialsProvider) factory.invoke(null);
       return credentialsProvider;
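The last CosNUtils hunk makes reflective instantiation prefer a `(URI, Configuration)` constructor before falling back to a static `getInstance` factory. A hedged, standalone sketch of that lookup order (the `instantiate` helper and its parameter types are illustrative, not the patch's exact signatures):

```java
import java.lang.reflect.Constructor;
import java.lang.reflect.Method;
import java.net.URI;

final class ProviderReflection {
  // Try new credClass(uri, conf) first, then credClass.getInstance(),
  // mirroring the lookup order introduced by the hunk above.
  static Object instantiate(Class<?> credClass, URI uri, Object conf)
      throws Exception {
    for (Constructor<?> c : credClass.getConstructors()) {
      Class<?>[] params = c.getParameterTypes();
      if (params.length == 2 && params[0] == URI.class) {
        return c.newInstance(uri, conf);
      }
    }
    Method factory = credClass.getMethod("getInstance");
    return factory.invoke(null);
  }
}
```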
@@ -34,6 +34,7 @@ import com.qcloud.cos.COSClient;
 import com.qcloud.cos.ClientConfig;
 import com.qcloud.cos.auth.BasicCOSCredentials;
 import com.qcloud.cos.auth.COSCredentials;
+import com.qcloud.cos.endpoint.SuffixEndpointBuilder;
 import com.qcloud.cos.exception.CosClientException;
 import com.qcloud.cos.exception.CosServiceException;
 import com.qcloud.cos.http.HttpProtocol;

@@ -64,7 +65,7 @@ import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.cosn.auth.COSCredentialProviderList;
+import org.apache.hadoop.fs.cosn.auth.COSCredentialsProviderList;
 import org.apache.hadoop.util.VersionInfo;
 import org.apache.http.HttpStatus;
@@ -89,9 +90,9 @@ class CosNativeFileSystemStore implements NativeFileSystemStore {
    * @throws IOException Initialize the COS client failed,
    *                     caused by incorrect options.
    */
-  private void initCOSClient(Configuration conf) throws IOException {
-    COSCredentialProviderList credentialProviderList =
-        CosNUtils.createCosCredentialsProviderSet(conf);
+  private void initCOSClient(URI uri, Configuration conf) throws IOException {
+    COSCredentialsProviderList credentialProviderList =
+        CosNUtils.createCosCredentialsProviderSet(uri, conf);
     String region = conf.get(CosNConfigKeys.COSN_REGION_KEY);
     String endpointSuffix = conf.get(
         CosNConfigKeys.COSN_ENDPOINT_SUFFIX_KEY);

@@ -113,7 +114,7 @@ class CosNativeFileSystemStore implements NativeFileSystemStore {
     ClientConfig config;
     if (null == region) {
       config = new ClientConfig(new Region(""));
-      config.setEndPointSuffix(endpointSuffix);
+      config.setEndpointBuilder(new SuffixEndpointBuilder(endpointSuffix));
     } else {
       config = new ClientConfig(new Region(region));
     }

@@ -146,7 +147,7 @@ class CosNativeFileSystemStore implements NativeFileSystemStore {
   @Override
   public void initialize(URI uri, Configuration conf) throws IOException {
     try {
-      initCOSClient(conf);
+      initCOSClient(uri, conf);
       this.bucketName = uri.getHost();
     } catch (Exception e) {
       handleException(e, "");

@@ -174,8 +175,8 @@ class CosNativeFileSystemStore implements NativeFileSystemStore {
 
       PutObjectResult putObjectResult =
           (PutObjectResult) callCOSClientWithRetry(putObjectRequest);
-      LOG.debug("Store file successfully. COS key: [{}], ETag: [{}], "
-          + "MD5: [{}].", key, putObjectResult.getETag(), new String(md5Hash));
+      LOG.debug("Store file successfully. COS key: [{}], ETag: [{}].",
+          key, putObjectResult.getETag());
     } catch (Exception e) {
       String errMsg = String.format("Store file failed. COS key: [%s], "
          + "exception: [%s]", key, e.toString());

@@ -196,8 +197,7 @@ class CosNativeFileSystemStore implements NativeFileSystemStore {
   public void storeFile(String key, File file, byte[] md5Hash)
       throws IOException {
     LOG.info("Store file from local path: [{}]. file length: [{}] COS key: " +
-        "[{}] MD5: [{}].", file.getCanonicalPath(), file.length(), key,
-        new String(md5Hash));
+        "[{}]", file.getCanonicalPath(), file.length(), key);
     storeFileWithRetry(key, new BufferedInputStream(new FileInputStream(file)),
         md5Hash, file.length());
   }

@@ -218,7 +218,7 @@ class CosNativeFileSystemStore implements NativeFileSystemStore {
       byte[] md5Hash,
       long contentLength) throws IOException {
     LOG.info("Store file from input stream. COS key: [{}], "
-        + "length: [{}], MD5: [{}].", key, contentLength, md5Hash);
+        + "length: [{}].", key, contentLength);
     storeFileWithRetry(key, inputStream, md5Hash, contentLength);
   }
 

@@ -250,7 +250,11 @@ class CosNativeFileSystemStore implements NativeFileSystemStore {
   public PartETag uploadPart(File file, String key, String uploadId,
       int partNum) throws IOException {
     InputStream inputStream = new FileInputStream(file);
+    try {
       return uploadPart(inputStream, key, uploadId, partNum, file.length());
+    } finally {
+      inputStream.close();
+    }
   }
 
   @Override
@@ -0,0 +1,48 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs.cosn.auth;

import com.qcloud.cos.auth.COSCredentialsProvider;
import org.apache.hadoop.conf.Configuration;

import javax.annotation.Nullable;
import java.net.URI;

/**
 * The base class for COS credential providers which take a URI or
 * configuration in their constructor.
 */
public abstract class AbstractCOSCredentialsProvider
    implements COSCredentialsProvider {
  private final URI uri;
  private final Configuration conf;

  public AbstractCOSCredentialsProvider(@Nullable URI uri,
                                        Configuration conf) {
    this.uri = uri;
    this.conf = conf;
  }

  public URI getUri() {
    return uri;
  }

  public Configuration getConf() {
    return conf;
  }
}
@@ -28,7 +28,6 @@ import com.google.common.base.Preconditions;
 import com.qcloud.cos.auth.AnonymousCOSCredentials;
 import com.qcloud.cos.auth.COSCredentials;
 import com.qcloud.cos.auth.COSCredentialsProvider;
-import com.qcloud.cos.exception.CosClientException;
 import com.qcloud.cos.utils.StringUtils;
 
 import org.slf4j.Logger;

@@ -37,10 +36,10 @@ import org.slf4j.LoggerFactory;
 /**
  * a list of cos credentials provider.
  */
-public class COSCredentialProviderList implements
+public class COSCredentialsProviderList implements
     COSCredentialsProvider, AutoCloseable {
   private static final Logger LOG =
-      LoggerFactory.getLogger(COSCredentialProviderList.class);
+      LoggerFactory.getLogger(COSCredentialsProviderList.class);
 
   private static final String NO_COS_CREDENTIAL_PROVIDERS =
       "No COS Credential Providers";

@@ -48,17 +47,17 @@ public class COSCredentialsProviderList implements
       "Credentials requested after provider list was closed";
 
   private final List<COSCredentialsProvider> providers =
-      new ArrayList<>(1);
+      new ArrayList<COSCredentialsProvider>(1);
   private boolean reuseLastProvider = true;
   private COSCredentialsProvider lastProvider;
 
   private final AtomicInteger refCount = new AtomicInteger(1);
   private final AtomicBoolean isClosed = new AtomicBoolean(false);
 
-  public COSCredentialProviderList() {
+  public COSCredentialsProviderList() {
   }
 
-  public COSCredentialProviderList(
+  public COSCredentialsProviderList(
       Collection<COSCredentialsProvider> providers) {
     this.providers.addAll(providers);
   }

@@ -77,7 +76,7 @@ public class COSCredentialsProviderList implements
     }
   }
 
-  public COSCredentialProviderList share() {
+  public COSCredentialsProviderList share() {
     Preconditions.checkState(!this.closed(), "Provider list is closed");
     this.refCount.incrementAndGet();
     return this;

@@ -100,23 +99,31 @@ public class COSCredentialsProviderList implements
     }
 
     for (COSCredentialsProvider provider : this.providers) {
-      try {
       COSCredentials credentials = provider.getCredentials();
-      if (!StringUtils.isNullOrEmpty(credentials.getCOSAccessKeyId())
+      if (null != credentials
+          && !StringUtils.isNullOrEmpty(credentials.getCOSAccessKeyId())
          && !StringUtils.isNullOrEmpty(credentials.getCOSSecretKey())
          || credentials instanceof AnonymousCOSCredentials) {
        this.lastProvider = provider;
        return credentials;
      }
-      } catch (CosClientException e) {
-        LOG.warn("No credentials provided by {}: {}", provider, e.toString());
-      }
    }
 
    throw new NoAuthWithCOSException(
        "No COS Credentials provided by " + this.providers.toString());
  }
 
  @Override
+  public void refresh() {
+    if (this.closed()) {
+      return;
+    }
+
+    for (COSCredentialsProvider cosCredentialsProvider : this.providers) {
+      cosCredentialsProvider.refresh();
+    }
+  }
+
+  @Override
  public void close() throws Exception {
    if (this.closed()) {

@@ -135,5 +142,4 @@ public class COSCredentialsProviderList implements
     }
   }
-  }
 
 }
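The getCredentials change moves the chain from exception-driven to null-driven fallthrough: a provider that cannot supply credentials returns null and the list simply tries the next one, throwing only when every provider comes up empty. A simplified sketch of the pattern (interfaces reduced to the essentials; not the actual Hadoop types):

```java
import java.util.List;

interface Creds { String id(); String key(); }
interface CredsProvider { Creds getCredentials(); }

final class ProviderChain {
  // First non-null, non-empty credentials win; null-returning providers
  // are skipped instead of aborting the whole chain.
  static Creds resolve(List<CredsProvider> providers) {
    for (CredsProvider p : providers) {
      Creds c = p.getCredentials();
      if (c != null && !c.id().isEmpty() && !c.key().isEmpty()) {
        return c;
      }
    }
    throw new IllegalStateException("No COS credentials provided");
  }
}
```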
@@ -20,16 +20,24 @@ package org.apache.hadoop.fs.cosn.auth;
 import com.qcloud.cos.auth.BasicCOSCredentials;
 import com.qcloud.cos.auth.COSCredentials;
 import com.qcloud.cos.auth.COSCredentialsProvider;
-import com.qcloud.cos.exception.CosClientException;
 import com.qcloud.cos.utils.StringUtils;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.cosn.Constants;
 
+import javax.annotation.Nullable;
+import java.net.URI;
+
 /**
- * the provider obtaining the cos credentials from the environment variables.
+ * The provider obtaining the cos credentials from the environment variables.
  */
-public class EnvironmentVariableCredentialProvider
-    implements COSCredentialsProvider {
+public class EnvironmentVariableCredentialsProvider
+    extends AbstractCOSCredentialsProvider implements COSCredentialsProvider {
+
+  public EnvironmentVariableCredentialsProvider(@Nullable URI uri,
+                                                Configuration conf) {
+    super(uri, conf);
+  }
 
   @Override
   public COSCredentials getCredentials() {
     String secretId = System.getenv(Constants.COSN_SECRET_ID_ENV);

@@ -41,15 +49,19 @@ public class EnvironmentVariableCredentialsProvider
     if (!StringUtils.isNullOrEmpty(secretId)
         && !StringUtils.isNullOrEmpty(secretKey)) {
       return new BasicCOSCredentials(secretId, secretKey);
-    } else {
-      throw new CosClientException(
-          "Unable to load COS credentials from environment variables" +
-              "(COS_SECRET_ID or COS_SECRET_KEY)");
     }
+
+    return null;
   }
 
   @Override
   public void refresh() {
   }
 
   @Override
   public String toString() {
-    return "EnvironmentVariableCredentialProvider{}";
+    return String.format("EnvironmentVariableCredentialsProvider{%s, %s}",
+        Constants.COSN_SECRET_ID_ENV,
+        Constants.COSN_SECRET_KEY_ENV);
   }
 }
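With this change the environment provider honors the same contract: unset variables mean "no credentials here, ask the next provider" rather than a CosClientException. A tiny sketch of that behaviour (the variable names COS_SECRET_ID and COS_SECRET_KEY come from the diff; the wrapper class is hypothetical):

```java
final class EnvCreds {
  // Absent or empty variables yield null so the provider chain
  // can fall through to the next provider.
  static String[] fromEnv() {
    String id = System.getenv("COS_SECRET_ID");
    String key = System.getenv("COS_SECRET_KEY");
    if (id == null || id.isEmpty() || key == null || key.isEmpty()) {
      return null;
    }
    return new String[] {id, key};
  }
}
```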
@@ -20,35 +20,41 @@ package org.apache.hadoop.fs.cosn.auth;
 import com.qcloud.cos.auth.BasicCOSCredentials;
 import com.qcloud.cos.auth.COSCredentials;
 import com.qcloud.cos.auth.COSCredentialsProvider;
-import com.qcloud.cos.exception.CosClientException;
-
-import org.apache.commons.lang3.StringUtils;
+import com.qcloud.cos.utils.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.cosn.CosNConfigKeys;
 
+import javax.annotation.Nullable;
+import java.net.URI;
+
 /**
  * Get the credentials from the hadoop configuration.
  */
-public class SimpleCredentialProvider implements COSCredentialsProvider {
+public class SimpleCredentialsProvider
+    extends AbstractCOSCredentialsProvider implements COSCredentialsProvider {
   private String secretId;
   private String secretKey;
 
-  public SimpleCredentialProvider(Configuration conf) {
+  public SimpleCredentialsProvider(@Nullable URI uri, Configuration conf) {
+    super(uri, conf);
+    if (null != conf) {
       this.secretId = conf.get(
-          CosNConfigKeys.COSN_SECRET_ID_KEY
-      );
+          CosNConfigKeys.COSN_SECRET_ID_KEY);
       this.secretKey = conf.get(
-          CosNConfigKeys.COSN_SECRET_KEY_KEY
-      );
+          CosNConfigKeys.COSN_SECRET_KEY_KEY);
+    }
   }
 
   @Override
   public COSCredentials getCredentials() {
-    if (!StringUtils.isEmpty(this.secretId)
-        && !StringUtils.isEmpty(this.secretKey)) {
+    if (!StringUtils.isNullOrEmpty(this.secretId)
+        && !StringUtils.isNullOrEmpty(this.secretKey)) {
       return new BasicCOSCredentials(this.secretId, this.secretKey);
     }
-    throw new CosClientException("secret id or secret key is unset");
+    return null;
   }
 
   @Override
   public void refresh() {
   }
 }
@@ -130,20 +130,19 @@ Each user needs to properly configure the credentials ( User's secreteId and sec
 ```xml
 <property>
     <name>fs.cosn.credentials.provider</name>
-    <value>org.apache.hadoop.fs.auth.SimpleCredentialProvider</value>
+    <value>org.apache.hadoop.fs.auth.SimpleCredentialsProvider</value>
     <description>
 
       This option allows the user to specify how to get the credentials.
       Comma-separated class names of credential provider classes which implement
      com.qcloud.cos.auth.COSCredentialsProvider:
 
-      1.org.apache.hadoop.fs.auth.SimpleCredentialProvider: Obtain the secret id and secret key
-        from fs.cosn.userinfo.secretId and fs.cosn.userinfo.secretKey in core-site.xml
-      2.org.apache.hadoop.fs.auth.EnvironmentVariableCredentialProvider: Obtain the secret id and secret key from system environment variables named COS_SECRET_ID and COS_SECRET_KEY
+      1.org.apache.hadoop.fs.auth.SimpleCredentialsProvider: Obtain the secret id and secret key from fs.cosn.userinfo.secretId and fs.cosn.userinfo.secretKey in core-site.xml
+      2.org.apache.hadoop.fs.auth.EnvironmentVariableCredentialsProvider: Obtain the secret id and secret key from system environment variables named COS_SECRET_ID and COS_SECRET_KEY
 
       If unspecified, the default order of credential providers is:
-      1. org.apache.hadoop.fs.auth.SimpleCredentialProvider
-      2. org.apache.hadoop.fs.auth.EnvironmentVariableCredentialProvider
+      1. org.apache.hadoop.fs.auth.SimpleCredentialsProvider
+      2. org.apache.hadoop.fs.auth.EnvironmentVariableCredentialsProvider
 
     </description>
 </property>

@@ -237,7 +236,7 @@ Hadoop-COS provides rich runtime properties to set, and most of these do not req
 | properties | description | default value | required |
 |:----------:|:-----------|:-------------:|:--------:|
 | fs.defaultFS | Configure the default file system used by Hadoop.| None | NO |
-| fs.cosn.credentials.provider | This option allows the user to specify how to get the credentials. Comma-separated class names of credential provider classes which implement com.qcloud.cos.auth.COSCredentialsProvider: <br/> 1. org.apache.hadoop.fs.cos.auth.SimpleCredentialProvider: Obtain the secret id and secret key from `fs.cosn.userinfo.secretId` and `fs.cosn.userinfo.secretKey` in core-site.xml; <br/> 2. org.apache.hadoop.fs.auth.EnvironmentVariableCredentialProvider: Obtain the secret id and secret key from system environment variables named `COSN_SECRET_ID` and `COSN_SECRET_KEY`. <br/> <br/> If unspecified, the default order of credential providers is: <br/> 1. org.apache.hadoop.fs.auth.SimpleCredentialProvider; <br/> 2. org.apache.hadoop.fs.auth.EnvironmentVariableCredentialProvider. | None | NO |
+| fs.cosn.credentials.provider | This option allows the user to specify how to get the credentials. Comma-separated class names of credential provider classes which implement com.qcloud.cos.auth.COSCredentialsProvider: <br/> 1. org.apache.hadoop.fs.cos.auth.SimpleCredentialsProvider: Obtain the secret id and secret key from `fs.cosn.userinfo.secretId` and `fs.cosn.userinfo.secretKey` in core-site.xml; <br/> 2. org.apache.hadoop.fs.auth.EnvironmentVariableCredentialsProvider: Obtain the secret id and secret key from system environment variables named `COSN_SECRET_ID` and `COSN_SECRET_KEY`. <br/> <br/> If unspecified, the default order of credential providers is: <br/> 1. org.apache.hadoop.fs.auth.SimpleCredentialsProvider; <br/> 2. org.apache.hadoop.fs.auth.EnvironmentVariableCredentialsProvider. | None | NO |
 | fs.cosn.userinfo.secretId/secretKey | The API key information of your account | None | YES |
 | fs.cosn.bucket.region | The region where the bucket is located. | None | YES |
 | fs.cosn.impl | The implementation class of the CosN filesystem. | None | YES |
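For completeness, the same keys the documentation sets in core-site.xml can be set programmatically on a Configuration object. A hedged sketch (the placeholder values and the cosn:// URI are illustrative; it assumes the hadoop-cos jar is on the classpath):

```java
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class CosNConfigExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Same keys as the core-site.xml snippet above.
    conf.set("fs.cosn.credentials.provider",
        "org.apache.hadoop.fs.auth.SimpleCredentialsProvider");
    conf.set("fs.cosn.userinfo.secretId", "<your-secret-id>");
    conf.set("fs.cosn.userinfo.secretKey", "<your-secret-key>");
    conf.set("fs.cosn.bucket.region", "<region>");
    FileSystem fs = FileSystem.get(URI.create("cosn://bucket-appid/"), conf);
    System.out.println(fs.getUri());
  }
}
```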
@@ -0,0 +1,134 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs.cosn;

import com.qcloud.cos.auth.COSCredentials;
import com.qcloud.cos.auth.COSCredentialsProvider;
import org.apache.hadoop.conf.Configuration;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;

import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.fail;

public class TestCosCredentials {
  private static final Logger LOG =
      LoggerFactory.getLogger(TestCosCredentials.class);

  private final URI fsUri;

  private final String testCosNSecretId = "secretId";
  private final String testCosNSecretKey = "secretKey";
  private final String testCosNEnvSecretId = "env_secretId";
  private final String testCosNEnvSecretKey = "env_secretKey";

  public TestCosCredentials() throws URISyntaxException {
    // A fake uri for tests.
    this.fsUri = new URI("cosn://test-bucket-1250000000");
  }

  @Test
  public void testSimpleCredentialsProvider() throws Throwable {
    Configuration configuration = new Configuration();
    configuration.set(CosNConfigKeys.COSN_SECRET_ID_KEY,
        testCosNSecretId);
    configuration.set(CosNConfigKeys.COSN_SECRET_KEY_KEY,
        testCosNSecretKey);
    validateCredentials(this.fsUri, configuration);
  }

  @Test
  public void testEnvironmentCredentialsProvider() throws Throwable {
    Configuration configuration = new Configuration();
    // Set EnvironmentVariableCredentialsProvider as the CosCredentials
    // Provider.
    configuration.set(CosNConfigKeys.COSN_CREDENTIALS_PROVIDER,
        "org.apache.hadoop.fs.cosn.EnvironmentVariableCredentialsProvider");
    // Set the environment variables storing the secret id and secret key.
    System.setProperty(Constants.COSN_SECRET_ID_ENV, testCosNEnvSecretId);
    System.setProperty(Constants.COSN_SECRET_KEY_ENV, testCosNEnvSecretKey);
    validateCredentials(this.fsUri, configuration);
  }

  private void validateCredentials(URI uri, Configuration configuration)
      throws IOException {
    if (null != configuration) {
      COSCredentialsProvider credentialsProvider =
          CosNUtils.createCosCredentialsProviderSet(uri, configuration);
      COSCredentials cosCredentials = credentialsProvider.getCredentials();
      assertNotNull("The cos credentials obtained is null.", cosCredentials);
      if (configuration.get(
          CosNConfigKeys.COSN_CREDENTIALS_PROVIDER).compareToIgnoreCase(
          "org.apache.hadoop.fs.cosn.EnvironmentVariableCredentialsProvider")
          == 0) {
        if (null == cosCredentials.getCOSAccessKeyId()
            || cosCredentials.getCOSAccessKeyId().isEmpty()
            || null == cosCredentials.getCOSSecretKey()
            || cosCredentials.getCOSSecretKey().isEmpty()) {
          String failMessage = String.format(
              "Test EnvironmentVariableCredentialsProvider failed. The " +
                  "expected is [secretId: %s, secretKey: %s], but got null or" +
                  " empty.", testCosNEnvSecretId, testCosNEnvSecretKey);
          fail(failMessage);
        }

        if (cosCredentials.getCOSAccessKeyId()
            .compareTo(testCosNEnvSecretId) != 0
            || cosCredentials.getCOSSecretKey()
            .compareTo(testCosNEnvSecretKey) != 0) {
          String failMessage = String.format("Test " +
              "EnvironmentVariableCredentialsProvider failed. " +
              "The expected is [secretId: %s, secretKey: %s], but got is " +
              "[secretId:%s, secretKey:%s].", testCosNEnvSecretId,
              testCosNEnvSecretKey, cosCredentials.getCOSAccessKeyId(),
              cosCredentials.getCOSSecretKey());
        }
        // expected
      } else {
        if (null == cosCredentials.getCOSAccessKeyId()
            || cosCredentials.getCOSAccessKeyId().isEmpty()
            || null == cosCredentials.getCOSSecretKey()
            || cosCredentials.getCOSSecretKey().isEmpty()) {
          String failMessage = String.format(
              "Test COSCredentials failed. The " +
                  "expected is [secretId: %s, secretKey: %s], but got null or" +
                  " empty.", testCosNSecretId, testCosNSecretKey);
          fail(failMessage);
        }
        if (cosCredentials.getCOSAccessKeyId()
            .compareTo(testCosNSecretId) != 0
            || cosCredentials.getCOSSecretKey()
            .compareTo(testCosNSecretKey) != 0) {
          String failMessage = String.format("Test " +
              "EnvironmentVariableCredentialsProvider failed. " +
              "The expected is [secretId: %s, secretKey: %s], but got is " +
              "[secretId:%s, secretKey:%s].", testCosNSecretId,
              testCosNSecretKey, cosCredentials.getCOSAccessKeyId(),
              cosCredentials.getCOSSecretKey());
          fail(failMessage);
        }
        // expected
      }
    }
  }
}
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.3.0-SNAPSHOT</version>
+    <version>3.3.0</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-cloud-storage-project</artifactId>
-  <version>3.3.0-SNAPSHOT</version>
+  <version>3.3.0</version>
   <description>Apache Hadoop Cloud Storage Project</description>
   <name>Apache Hadoop Cloud Storage Project</name>
   <packaging>pom</packaging>
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.3.0-SNAPSHOT</version>
+    <version>3.3.0</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-annotations</artifactId>
-  <version>3.3.0-SNAPSHOT</version>
+  <version>3.3.0</version>
   <description>Apache Hadoop Annotations</description>
   <name>Apache Hadoop Annotations</name>
   <packaging>jar</packaging>
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.3.0-SNAPSHOT</version>
+    <version>3.3.0</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-auth-examples</artifactId>
-  <version>3.3.0-SNAPSHOT</version>
+  <version>3.3.0</version>
   <packaging>war</packaging>
 
   <name>Apache Hadoop Auth Examples</name>
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.3.0-SNAPSHOT</version>
+    <version>3.3.0</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-auth</artifactId>
-  <version>3.3.0-SNAPSHOT</version>
+  <version>3.3.0</version>
   <packaging>jar</packaging>
 
   <name>Apache Hadoop Auth</name>
@@ -283,6 +283,10 @@
     <!-- protobuf generated code -->
     <Class name="~org\.apache\.hadoop\.ipc\.protobuf\.ProtobufRpcEngineProtos.*"/>
   </Match>
+  <Match>
+    <!-- protobuf generated code -->
+    <Class name="~org\.apache\.hadoop\.ipc\.protobuf\.ProtobufRpcEngine2Protos.*"/>
+  </Match>
   <Match>
     <!-- protobuf generated code -->
     <Class name="~org\.apache\.hadoop\.ipc\.protobuf\.ProtocolInfoProtos.*"/>
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project-dist</artifactId>
-    <version>3.3.0-SNAPSHOT</version>
+    <version>3.3.0</version>
     <relativePath>../../hadoop-project-dist</relativePath>
   </parent>
   <artifactId>hadoop-common</artifactId>
-  <version>3.3.0-SNAPSHOT</version>
+  <version>3.3.0</version>
   <description>Apache Hadoop Common</description>
   <name>Apache Hadoop Common</name>
   <packaging>jar</packaging>

@@ -390,7 +390,12 @@
         <executions>
           <execution>
             <id>src-compile-protoc</id>
-            <configuration><skip>false</skip></configuration>
+            <configuration>
+              <skip>false</skip>
+              <excludes>
+                <exclude>ProtobufRpcEngine.proto</exclude>
+              </excludes>
+            </configuration>
           </execution>
           <execution>
             <id>src-test-compile-protoc</id>

@@ -406,6 +411,9 @@
             <id>replace-generated-sources</id>
             <configuration>
               <skip>false</skip>
+              <excludes>
+                <exclude>**/ProtobufRpcEngineProtos.java</exclude>
+              </excludes>
             </configuration>
           </execution>
           <execution>

@@ -418,6 +426,14 @@
             <id>replace-sources</id>
             <configuration>
               <skip>false</skip>
+              <!--These classes have direct Protobuf references for backward compatibility reasons-->
+              <excludes>
+                <exclude>**/ProtobufHelper.java</exclude>
+                <exclude>**/RpcWritable.java</exclude>
+                <exclude>**/ProtobufRpcEngineCallback.java</exclude>
+                <exclude>**/ProtobufRpcEngine.java</exclude>
+                <exclude>**/ProtobufRpcEngineProtos.java</exclude>
+              </excludes>
             </configuration>
           </execution>
           <execution>

@@ -1010,7 +1026,79 @@
         </plugins>
       </build>
     </profile>
+    <!-- profile to use already generated protobuf code using 2.5.0 for aarch64-->
+    <profile>
+      <id>aarch64</id>
+      <activation>
+        <activeByDefault>false</activeByDefault>
+        <os>
+          <arch>aarch64</arch>
+        </os>
+      </activation>
+      <build>
+        <plugins>
+          <plugin>
+            <groupId>org.codehaus.mojo</groupId>
+            <artifactId>build-helper-maven-plugin</artifactId>
+            <executions>
+              <execution>
+                <id>add-source-legacy-protobuf</id>
+                <phase>generate-sources</phase>
+                <goals>
+                  <goal>add-source</goal>
+                </goals>
+                <configuration>
+                  <sources>
+                    <source>${basedir}/src/main/arm-java</source>
+                  </sources>
+                </configuration>
+              </execution>
+            </executions>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
+    <!-- profile to generate protobuf code using 2.5.0-->
+    <profile>
+      <id>x86_64</id>
+      <activation>
+        <activeByDefault>false</activeByDefault>
+        <os>
+          <arch>!aarch64</arch>
+        </os>
+      </activation>
+      <build>
+        <plugins>
+          <plugin>
+            <groupId>org.xolstice.maven.plugins</groupId>
+            <artifactId>protobuf-maven-plugin</artifactId>
+            <executions>
+              <execution>
+                <id>src-compile-protoc-legacy</id>
+                <phase>generate-sources</phase>
+                <goals>
+                  <goal>compile</goal>
+                </goals>
+                <configuration>
+                  <skip>false</skip>
+                  <!--Generating with old protobuf version for backward compatibility-->
+                  <protocArtifact>
+                    com.google.protobuf:protoc:${protobuf.version}:exe:${os.detected.classifier}
+                  </protocArtifact>
+                  <includeDependenciesInDescriptorSet>false</includeDependenciesInDescriptorSet>
+                  <protoSourceRoot>${basedir}/src/main/proto</protoSourceRoot>
+                  <outputDirectory>${project.build.directory}/generated-sources/java</outputDirectory>
+                  <clearOutputDirectory>false</clearOutputDirectory>
+                  <includes>
+                    <include>ProtobufRpcEngine.proto</include>
+                  </includes>
+                </configuration>
+              </execution>
+            </executions>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
   </profiles>
 </project>
(File diff suppressed because it is too large.)
@@ -27,7 +27,7 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.ha.proto.ZKFCProtocolProtos.ZKFCProtocolService;
 import org.apache.hadoop.ha.protocolPB.ZKFCProtocolPB;
 import org.apache.hadoop.ha.protocolPB.ZKFCProtocolServerSideTranslatorPB;
-import org.apache.hadoop.ipc.ProtobufRpcEngine;
+import org.apache.hadoop.ipc.ProtobufRpcEngine2;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RPC.Server;
 import org.apache.hadoop.security.AccessControlException;

@@ -50,7 +50,7 @@ public class ZKFCRpcServer implements ZKFCProtocol {
     this.zkfc = zkfc;
 
     RPC.setProtocolEngine(conf, ZKFCProtocolPB.class,
-        ProtobufRpcEngine.class);
+        ProtobufRpcEngine2.class);
     ZKFCProtocolServerSideTranslatorPB translator =
         new ZKFCProtocolServerSideTranslatorPB(this);
     BlockingService service = ZKFCProtocolService
@@ -38,7 +38,7 @@ import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveRequ
 import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyRequestProto;
 import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverRequestProto;
 import org.apache.hadoop.ipc.ProtobufHelper;
-import org.apache.hadoop.ipc.ProtobufRpcEngine;
+import org.apache.hadoop.ipc.ProtobufRpcEngine2;
 import org.apache.hadoop.ipc.ProtocolTranslator;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.security.UserGroupInformation;

@@ -67,7 +67,7 @@ public class HAServiceProtocolClientSideTranslatorPB implements
   public HAServiceProtocolClientSideTranslatorPB(InetSocketAddress addr,
       Configuration conf) throws IOException {
     RPC.setProtocolEngine(conf, HAServiceProtocolPB.class,
-        ProtobufRpcEngine.class);
+        ProtobufRpcEngine2.class);
     rpcProxy = RPC.getProxy(HAServiceProtocolPB.class,
         RPC.getProtocolVersion(HAServiceProtocolPB.class), addr, conf);
   }

@@ -76,7 +76,7 @@ public class HAServiceProtocolClientSideTranslatorPB implements
       InetSocketAddress addr, Configuration conf,
       SocketFactory socketFactory, int timeout) throws IOException {
     RPC.setProtocolEngine(conf, HAServiceProtocolPB.class,
-        ProtobufRpcEngine.class);
+        ProtobufRpcEngine2.class);
     rpcProxy = RPC.getProxy(HAServiceProtocolPB.class,
         RPC.getProtocolVersion(HAServiceProtocolPB.class), addr,
         UserGroupInformation.getCurrentUser(), conf, socketFactory, timeout);
@@ -28,7 +28,7 @@ import org.apache.hadoop.ha.ZKFCProtocol;
 import org.apache.hadoop.ha.proto.ZKFCProtocolProtos.CedeActiveRequestProto;
 import org.apache.hadoop.ha.proto.ZKFCProtocolProtos.GracefulFailoverRequestProto;
 import org.apache.hadoop.ipc.ProtobufHelper;
-import org.apache.hadoop.ipc.ProtobufRpcEngine;
+import org.apache.hadoop.ipc.ProtobufRpcEngine2;
 import org.apache.hadoop.ipc.ProtocolTranslator;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.security.AccessControlException;

@@ -48,7 +48,7 @@ public class ZKFCProtocolClientSideTranslatorPB implements
       InetSocketAddress addr, Configuration conf,
       SocketFactory socketFactory, int timeout) throws IOException {
     RPC.setProtocolEngine(conf, ZKFCProtocolPB.class,
-        ProtobufRpcEngine.class);
+        ProtobufRpcEngine2.class);
     rpcProxy = RPC.getProxy(ZKFCProtocolPB.class,
         RPC.getProtocolVersion(ZKFCProtocolPB.class), addr,
         UserGroupInformation.getCurrentUser(), conf, socketFactory, timeout);
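All three translator hunks make the same mechanical substitution: the RPC engine registered for a protocol's PB interface moves from ProtobufRpcEngine to ProtobufRpcEngine2. The recurring call shape, as a sketch with a placeholder protocol class standing in for ZKFCProtocolPB or HAServiceProtocolPB:

```java
import java.io.IOException;
import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.ProtobufRpcEngine2;
import org.apache.hadoop.ipc.RPC;

final class Engine2Wiring {
  // protocolPB is any protocol interface previously wired to
  // ProtobufRpcEngine; only the engine class changes.
  static <T> T bind(Class<T> protocolPB, InetSocketAddress addr,
      Configuration conf) throws IOException {
    RPC.setProtocolEngine(conf, protocolPB, ProtobufRpcEngine2.class);
    return RPC.getProxy(protocolPB, RPC.getProtocolVersion(protocolPB),
        addr, conf);
  }
}
```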
@@ -236,7 +236,7 @@ public class BZip2Codec implements Configurable, SplittableCompressionCodec {
    */
   @Override
   public String getDefaultExtension() {
-    return ".bz2";
+    return CodecConstants.BZIP2_CODEC_EXTENSION;
   }
 
   private static class BZip2CompressionOutputStream extends
@@ -0,0 +1,68 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.io.compress;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;

/**
 * Codec related constants.
 */
@InterfaceAudience.Public
@InterfaceStability.Evolving
public final class CodecConstants {

  private CodecConstants() {
  }
  /**
   * Default extension for {@link org.apache.hadoop.io.compress.DefaultCodec}.
   */
  public static final String DEFAULT_CODEC_EXTENSION = ".deflate";

  /**
   * Default extension for {@link org.apache.hadoop.io.compress.BZip2Codec}.
   */
  public static final String BZIP2_CODEC_EXTENSION = ".bz2";

  /**
   * Default extension for {@link org.apache.hadoop.io.compress.GzipCodec}.
   */
  public static final String GZIP_CODEC_EXTENSION = ".gz";

  /**
   * Default extension for {@link org.apache.hadoop.io.compress.Lz4Codec}.
   */
  public static final String LZ4_CODEC_EXTENSION = ".lz4";

  /**
   * Default extension for
   * {@link org.apache.hadoop.io.compress.PassthroughCodec}.
   */
  public static final String PASSTHROUGH_CODEC_EXTENSION = ".passthrough";

  /**
   * Default extension for {@link org.apache.hadoop.io.compress.SnappyCodec}.
   */
  public static final String SNAPPY_CODEC_EXTENSION = ".snappy";

  /**
   * Default extension for {@link org.apache.hadoop.io.compress.ZStandardCodec}.
   */
  public static final String ZSTANDARD_CODEC_EXTENSION = ".zst";
}
@@ -114,7 +114,7 @@ public class DefaultCodec implements Configurable, CompressionCodec, DirectDecom
 
   @Override
   public String getDefaultExtension() {
-    return ".deflate";
+    return CodecConstants.DEFAULT_CODEC_EXTENSION;
   }
 
 }
@@ -206,7 +206,7 @@ public class GzipCodec extends DefaultCodec {
 
   @Override
   public String getDefaultExtension() {
-    return ".gz";
+    return CodecConstants.GZIP_CODEC_EXTENSION;
   }
 
   static final class GzipZlibCompressor extends ZlibCompressor {
@@ -221,6 +221,6 @@ public class Lz4Codec implements Configurable, CompressionCodec {
    */
   @Override
   public String getDefaultExtension() {
-    return ".lz4";
+    return CodecConstants.LZ4_CODEC_EXTENSION;
   }
 }
@@ -77,7 +77,8 @@ public class PassthroughCodec
    * This default extension is here so that if no extension has been defined,
    * some value is still returned: {@value}..
    */
-  public static final String DEFAULT_EXTENSION = ".passthrough";
+  public static final String DEFAULT_EXTENSION =
+      CodecConstants.PASSTHROUGH_CODEC_EXTENSION;
 
   private Configuration conf;
 
@@ -225,6 +225,6 @@ public class SnappyCodec implements Configurable, CompressionCodec, DirectDecomp
    */
   @Override
   public String getDefaultExtension() {
-    return ".snappy";
+    return CodecConstants.SNAPPY_CODEC_EXTENSION;
   }
 }
@@ -230,7 +230,7 @@ public class ZStandardCodec implements
    */
   @Override
   public String getDefaultExtension() {
-    return ".zst";
+    return CodecConstants.ZSTANDARD_CODEC_EXTENSION;
   }
 
   @Override
@@ -53,6 +53,23 @@ public class ProtobufHelper {
     return e instanceof IOException ? (IOException) e : new IOException(se);
   }
 
+  /**
+   * Kept for backward compatible.
+   * Return the IOException thrown by the remote server wrapped in
+   * ServiceException as cause.
+   * @param se ServiceException that wraps IO exception thrown by the server
+   * @return Exception wrapped in ServiceException or
+   *         a new IOException that wraps the unexpected ServiceException.
+   */
+  @Deprecated
+  public static IOException getRemoteException(
+      com.google.protobuf.ServiceException se) {
+    Throwable e = se.getCause();
+    if (e == null) {
+      return new IOException(se);
+    }
+    return e instanceof IOException ? (IOException) e : new IOException(se);
+  }
+
   /**
    * Map used to cache fixed strings to ByteStrings. Since there is no
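A short sketch of the call pattern the deprecated overload preserves: client-side translators that still catch the unshaded com.google.protobuf.ServiceException keep getting the server's IOException back. The callRemote method below is a hypothetical stand-in for any generated blocking stub call.

import java.io.IOException;
import org.apache.hadoop.ipc.ProtobufHelper;

public class LegacyUnwrapSketch {
  // Hypothetical stand-in for a generated BlockingInterface stub call.
  static void callRemote() throws com.google.protobuf.ServiceException {
    throw new com.google.protobuf.ServiceException(
        new IOException("remote failure"));
  }

  public static void main(String[] args) {
    try {
      callRemote();
    } catch (com.google.protobuf.ServiceException se) {
      // Deprecated overload: unwraps the cause if it is an IOException,
      // otherwise wraps the ServiceException itself.
      IOException ioe = ProtobufHelper.getRemoteException(se);
      System.out.println("unwrapped: " + ioe.getMessage());
    }
  }
}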
@@ -19,8 +19,11 @@
 package org.apache.hadoop.ipc;
 
 import com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.thirdparty.protobuf.*;
-import org.apache.hadoop.thirdparty.protobuf.Descriptors.MethodDescriptor;
+import com.google.protobuf.BlockingService;
+import com.google.protobuf.Descriptors.MethodDescriptor;
+import com.google.protobuf.Message;
+import com.google.protobuf.ServiceException;
+import com.google.protobuf.TextFormat;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
@@ -29,6 +32,7 @@ import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.ipc.Client.ConnectionId;
 import org.apache.hadoop.ipc.RPC.RpcInvoker;
+import org.apache.hadoop.ipc.RPC.RpcKind;
 import org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.SecretManager;
@@ -52,7 +56,10 @@ import java.util.concurrent.atomic.AtomicBoolean;
 
 /**
  * RPC Engine for for protobuf based RPCs.
+ * This engine uses Protobuf 2.5.0. Recommended to upgrade to Protobuf 3.x
+ * from hadoop-thirdparty and use ProtobufRpcEngine2.
  */
+@Deprecated
 @InterfaceStability.Evolving
 public class ProtobufRpcEngine implements RpcEngine {
   public static final Logger LOG =
@@ -355,6 +362,7 @@ public class ProtobufRpcEngine implements RpcEngine {
       new ThreadLocal<>();
 
   static final ThreadLocal<CallInfo> currentCallInfo = new ThreadLocal<>();
+  private static final RpcInvoker RPC_INVOKER = new ProtoBufRpcInvoker();
 
   static class CallInfo {
     private final RPC.Server server;
@@ -434,6 +442,14 @@ public class ProtobufRpcEngine implements RpcEngine {
           protocolImpl);
     }
 
+    @Override
+    protected RpcInvoker getServerRpcInvoker(RpcKind rpcKind) {
+      if (rpcKind == RpcKind.RPC_PROTOCOL_BUFFER) {
+        return RPC_INVOKER;
+      }
+      return super.getServerRpcInvoker(rpcKind);
+    }
+
     /**
      * Protobuf invoker for {@link RpcInvoker}
      */
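Since the engine is registered per protocol, the deprecated engine and ProtobufRpcEngine2 can coexist in one process during migration. A hedged sketch of opting a single protocol into the new engine; MyProtocolPB is a placeholder for a generated *PB interface, RPC.setProtocolEngine is the existing API used throughout the hunks below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.ProtobufRpcEngine2;
import org.apache.hadoop.ipc.RPC;

public class EngineSelectionSketch {
  // Placeholder for a generated protocol interface; real protocols carry
  // @ProtocolInfo annotations and a blocking service binding.
  interface MyProtocolPB { }

  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Migrated protocols register ProtobufRpcEngine2; untouched protocols
    // keep the deprecated ProtobufRpcEngine.
    RPC.setProtocolEngine(conf, MyProtocolPB.class, ProtobufRpcEngine2.class);
    System.out.println("engine set for " + MyProtocolPB.class.getSimpleName());
  }
}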
@@ -0,0 +1,598 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ipc;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.thirdparty.protobuf.*;
+import org.apache.hadoop.thirdparty.protobuf.Descriptors.MethodDescriptor;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.retry.RetryPolicy;
+import org.apache.hadoop.ipc.Client.ConnectionId;
+import org.apache.hadoop.ipc.RPC.RpcInvoker;
+import org.apache.hadoop.ipc.protobuf.ProtobufRpcEngine2Protos.RequestHeaderProto;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.SecretManager;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.util.Time;
+import org.apache.hadoop.util.concurrent.AsyncGet;
+import org.apache.htrace.core.TraceScope;
+import org.apache.htrace.core.Tracer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.net.SocketFactory;
+import java.io.IOException;
+import java.lang.reflect.Method;
+import java.lang.reflect.Proxy;
+import java.net.InetSocketAddress;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+/**
+ * RPC Engine for for protobuf based RPCs.
+ */
+@InterfaceStability.Evolving
+public class ProtobufRpcEngine2 implements RpcEngine {
+  public static final Logger LOG =
+      LoggerFactory.getLogger(ProtobufRpcEngine2.class);
+  private static final ThreadLocal<AsyncGet<Message, Exception>>
+      ASYNC_RETURN_MESSAGE = new ThreadLocal<>();
+
+  static { // Register the rpcRequest deserializer for ProtobufRpcEngine
+    org.apache.hadoop.ipc.Server.registerProtocolEngine(
+        RPC.RpcKind.RPC_PROTOCOL_BUFFER, RpcProtobufRequest.class,
+        new Server.ProtoBufRpcInvoker());
+  }
+
+  private static final ClientCache CLIENTS = new ClientCache();
+
+  @Unstable
+  public static AsyncGet<Message, Exception> getAsyncReturnMessage() {
+    return ASYNC_RETURN_MESSAGE.get();
+  }
+
+  public <T> ProtocolProxy<T> getProxy(Class<T> protocol, long clientVersion,
+      InetSocketAddress addr, UserGroupInformation ticket, Configuration conf,
+      SocketFactory factory, int rpcTimeout) throws IOException {
+    return getProxy(protocol, clientVersion, addr, ticket, conf, factory,
+        rpcTimeout, null);
+  }
+
+  @Override
+  public <T> ProtocolProxy<T> getProxy(
+      Class<T> protocol, long clientVersion,
+      InetSocketAddress addr, UserGroupInformation ticket, Configuration conf,
+      SocketFactory factory, int rpcTimeout, RetryPolicy connectionRetryPolicy)
+      throws IOException {
+    return getProxy(protocol, clientVersion, addr, ticket, conf, factory,
+        rpcTimeout, connectionRetryPolicy, null, null);
+  }
+
+  @Override
+  @SuppressWarnings("unchecked")
+  public <T> ProtocolProxy<T> getProxy(Class<T> protocol, long clientVersion,
+      InetSocketAddress addr, UserGroupInformation ticket, Configuration conf,
+      SocketFactory factory, int rpcTimeout, RetryPolicy connectionRetryPolicy,
+      AtomicBoolean fallbackToSimpleAuth, AlignmentContext alignmentContext)
+      throws IOException {
+
+    final Invoker invoker = new Invoker(protocol, addr, ticket, conf, factory,
+        rpcTimeout, connectionRetryPolicy, fallbackToSimpleAuth,
+        alignmentContext);
+    return new ProtocolProxy<T>(protocol, (T) Proxy.newProxyInstance(
+        protocol.getClassLoader(), new Class[]{protocol}, invoker), false);
+  }
+
+  @Override
+  public ProtocolProxy<ProtocolMetaInfoPB> getProtocolMetaInfoProxy(
+      ConnectionId connId, Configuration conf, SocketFactory factory)
+      throws IOException {
+    Class<ProtocolMetaInfoPB> protocol = ProtocolMetaInfoPB.class;
+    return new ProtocolProxy<ProtocolMetaInfoPB>(protocol,
+        (ProtocolMetaInfoPB) Proxy.newProxyInstance(protocol.getClassLoader(),
+            new Class[]{protocol}, new Invoker(protocol, connId, conf,
+                factory)), false);
+  }
+
+  private static final class Invoker implements RpcInvocationHandler {
+    private final Map<String, Message> returnTypes =
+        new ConcurrentHashMap<String, Message>();
+    private boolean isClosed = false;
+    private final Client.ConnectionId remoteId;
+    private final Client client;
+    private final long clientProtocolVersion;
+    private final String protocolName;
+    private AtomicBoolean fallbackToSimpleAuth;
+    private AlignmentContext alignmentContext;
+
+    private Invoker(Class<?> protocol, InetSocketAddress addr,
+        UserGroupInformation ticket, Configuration conf, SocketFactory factory,
+        int rpcTimeout, RetryPolicy connectionRetryPolicy,
+        AtomicBoolean fallbackToSimpleAuth, AlignmentContext alignmentContext)
+        throws IOException {
+      this(protocol, Client.ConnectionId.getConnectionId(
+          addr, protocol, ticket, rpcTimeout, connectionRetryPolicy, conf),
+          conf, factory);
+      this.fallbackToSimpleAuth = fallbackToSimpleAuth;
+      this.alignmentContext = alignmentContext;
+    }
+
+    /**
+     * This constructor takes a connectionId, instead of creating a new one.
+     */
+    private Invoker(Class<?> protocol, Client.ConnectionId connId,
+        Configuration conf, SocketFactory factory) {
+      this.remoteId = connId;
+      this.client = CLIENTS.getClient(conf, factory, RpcWritable.Buffer.class);
+      this.protocolName = RPC.getProtocolName(protocol);
+      this.clientProtocolVersion = RPC
+          .getProtocolVersion(protocol);
+    }
+
+    private RequestHeaderProto constructRpcRequestHeader(Method method) {
+      RequestHeaderProto.Builder builder = RequestHeaderProto
+          .newBuilder();
+      builder.setMethodName(method.getName());
+
+      // For protobuf, {@code protocol} used when creating client side proxy is
+      // the interface extending BlockingInterface, which has the annotations
+      // such as ProtocolName etc.
+      //
+      // Using Method.getDeclaringClass(), as in WritableEngine to get at
+      // the protocol interface will return BlockingInterface, from where
+      // the annotation ProtocolName and Version cannot be
+      // obtained.
+      //
+      // Hence we simply use the protocol class used to create the proxy.
+      // For PB this may limit the use of mixins on client side.
+      builder.setDeclaringClassProtocolName(protocolName);
+      builder.setClientProtocolVersion(clientProtocolVersion);
+      return builder.build();
+    }
+
+    /**
+     * This is the client side invoker of RPC method. It only throws
+     * ServiceException, since the invocation proxy expects only
+     * ServiceException to be thrown by the method in case protobuf service.
+     *
+     * ServiceException has the following causes:
+     * <ol>
+     * <li>Exceptions encountered on the client side in this method are
+     * set as cause in ServiceException as is.</li>
+     * <li>Exceptions from the server are wrapped in RemoteException and are
+     * set as cause in ServiceException</li>
+     * </ol>
+     *
+     * Note that the client calling protobuf RPC methods, must handle
+     * ServiceException by getting the cause from the ServiceException. If the
+     * cause is RemoteException, then unwrap it to get the exception thrown by
+     * the server.
+     */
+    @Override
+    public Message invoke(Object proxy, final Method method, Object[] args)
+        throws ServiceException {
+      long startTime = 0;
+      if (LOG.isDebugEnabled()) {
+        startTime = Time.now();
+      }
+
+      if (args.length != 2) { // RpcController + Message
+        throw new ServiceException(
+            "Too many or few parameters for request. Method: ["
+            + method.getName() + "]" + ", Expected: 2, Actual: "
+            + args.length);
+      }
+      if (args[1] == null) {
+        throw new ServiceException("null param while calling Method: ["
+            + method.getName() + "]");
+      }
+
+      // if Tracing is on then start a new span for this rpc.
+      // guard it in the if statement to make sure there isn't
+      // any extra string manipulation.
+      Tracer tracer = Tracer.curThreadTracer();
+      TraceScope traceScope = null;
+      if (tracer != null) {
+        traceScope = tracer.newScope(RpcClientUtil.methodToTraceString(method));
+      }
+
+      RequestHeaderProto rpcRequestHeader = constructRpcRequestHeader(method);
+
+      if (LOG.isTraceEnabled()) {
+        LOG.trace(Thread.currentThread().getId() + ": Call -> " +
+            remoteId + ": " + method.getName() +
+            " {" + TextFormat.shortDebugString((Message) args[1]) + "}");
+      }
+
+      final Message theRequest = (Message) args[1];
+      final RpcWritable.Buffer val;
+      try {
+        val = (RpcWritable.Buffer) client.call(RPC.RpcKind.RPC_PROTOCOL_BUFFER,
+            new RpcProtobufRequest(rpcRequestHeader, theRequest), remoteId,
+            fallbackToSimpleAuth, alignmentContext);
+
+      } catch (Throwable e) {
+        if (LOG.isTraceEnabled()) {
+          LOG.trace(Thread.currentThread().getId() + ": Exception <- " +
+              remoteId + ": " + method.getName() +
+              " {" + e + "}");
+        }
+        if (traceScope != null) {
+          traceScope.addTimelineAnnotation("Call got exception: " +
+              e.toString());
+        }
+        throw new ServiceException(e);
+      } finally {
+        if (traceScope != null) {
+          traceScope.close();
+        }
+      }
+
+      if (LOG.isDebugEnabled()) {
+        long callTime = Time.now() - startTime;
+        LOG.debug("Call: " + method.getName() + " took " + callTime + "ms");
+      }
+
+      if (Client.isAsynchronousMode()) {
+        final AsyncGet<RpcWritable.Buffer, IOException> arr
+            = Client.getAsyncRpcResponse();
+        final AsyncGet<Message, Exception> asyncGet =
+            new AsyncGet<Message, Exception>() {
+              @Override
+              public Message get(long timeout, TimeUnit unit) throws Exception {
+                return getReturnMessage(method, arr.get(timeout, unit));
+              }
+
+              @Override
+              public boolean isDone() {
+                return arr.isDone();
+              }
+            };
+        ASYNC_RETURN_MESSAGE.set(asyncGet);
+        return null;
+      } else {
+        return getReturnMessage(method, val);
+      }
+    }
+
+    private Message getReturnMessage(final Method method,
+        final RpcWritable.Buffer buf) throws ServiceException {
+      Message prototype = null;
+      try {
+        prototype = getReturnProtoType(method);
+      } catch (Exception e) {
+        throw new ServiceException(e);
+      }
+      Message returnMessage;
+      try {
+        returnMessage = buf.getValue(prototype.getDefaultInstanceForType());
+
+        if (LOG.isTraceEnabled()) {
+          LOG.trace(Thread.currentThread().getId() + ": Response <- " +
+              remoteId + ": " + method.getName() +
+              " {" + TextFormat.shortDebugString(returnMessage) + "}");
+        }
+
+      } catch (Throwable e) {
+        throw new ServiceException(e);
+      }
+      return returnMessage;
+    }
+
+    @Override
+    public void close() throws IOException {
+      if (!isClosed) {
+        isClosed = true;
+        CLIENTS.stopClient(client);
+      }
+    }
+
+    private Message getReturnProtoType(Method method) throws Exception {
+      if (returnTypes.containsKey(method.getName())) {
+        return returnTypes.get(method.getName());
+      }
+
+      Class<?> returnType = method.getReturnType();
+      Method newInstMethod = returnType.getMethod("getDefaultInstance");
+      newInstMethod.setAccessible(true);
+      Message prototype = (Message) newInstMethod.invoke(null, (Object[]) null);
+      returnTypes.put(method.getName(), prototype);
+      return prototype;
+    }
+
+    @Override //RpcInvocationHandler
+    public ConnectionId getConnectionId() {
+      return remoteId;
+    }
+  }
+
+  @VisibleForTesting
+  @InterfaceAudience.Private
+  @InterfaceStability.Unstable
+  static Client getClient(Configuration conf) {
+    return CLIENTS.getClient(conf, SocketFactory.getDefault(),
+        RpcWritable.Buffer.class);
+  }
+
+  @Override
+  public RPC.Server getServer(Class<?> protocol, Object protocolImpl,
+      String bindAddress, int port, int numHandlers, int numReaders,
+      int queueSizePerHandler, boolean verbose, Configuration conf,
+      SecretManager<? extends TokenIdentifier> secretManager,
+      String portRangeConfig, AlignmentContext alignmentContext)
+      throws IOException {
+    return new Server(protocol, protocolImpl, conf, bindAddress, port,
+        numHandlers, numReaders, queueSizePerHandler, verbose, secretManager,
+        portRangeConfig, alignmentContext);
+  }
+
+  public static class Server extends RPC.Server {
+
+    static final ThreadLocal<ProtobufRpcEngineCallback2> CURRENT_CALLBACK =
+        new ThreadLocal<>();
+
+    static final ThreadLocal<CallInfo> CURRENT_CALL_INFO = new ThreadLocal<>();
+
+    static class CallInfo {
+      private final RPC.Server server;
+      private final String methodName;
+
+      CallInfo(RPC.Server server, String methodName) {
+        this.server = server;
+        this.methodName = methodName;
+      }
+    }
+
+    static class ProtobufRpcEngineCallbackImpl
+        implements ProtobufRpcEngineCallback2 {
+
+      private final RPC.Server server;
+      private final Call call;
+      private final String methodName;
+      private final long setupTime;
+
+      ProtobufRpcEngineCallbackImpl() {
+        this.server = CURRENT_CALL_INFO.get().server;
+        this.call = Server.getCurCall().get();
+        this.methodName = CURRENT_CALL_INFO.get().methodName;
+        this.setupTime = Time.now();
+      }
+
+      @Override
+      public void setResponse(Message message) {
+        long processingTime = Time.now() - setupTime;
+        call.setDeferredResponse(RpcWritable.wrap(message));
+        server.updateDeferredMetrics(methodName, processingTime);
+      }
+
+      @Override
+      public void error(Throwable t) {
+        long processingTime = Time.now() - setupTime;
+        String detailedMetricsName = t.getClass().getSimpleName();
+        server.updateDeferredMetrics(detailedMetricsName, processingTime);
+        call.setDeferredError(t);
+      }
+    }
+
+    @InterfaceStability.Unstable
+    public static ProtobufRpcEngineCallback2 registerForDeferredResponse() {
+      ProtobufRpcEngineCallback2 callback = new ProtobufRpcEngineCallbackImpl();
+      CURRENT_CALLBACK.set(callback);
+      return callback;
+    }
+
+    /**
+     * Construct an RPC server.
+     *
+     * @param protocolClass the class of protocol
+     * @param protocolImpl the protocolImpl whose methods will be called
+     * @param conf the configuration to use
+     * @param bindAddress the address to bind on to listen for connection
+     * @param port the port to listen for connections on
+     * @param numHandlers the number of method handler threads to run
+     * @param verbose whether each call should be logged
+     * @param portRangeConfig A config parameter that can be used to restrict
+     *     the range of ports used when port is 0 (an ephemeral port)
+     * @param alignmentContext provides server state info on client responses
+     */
+    public Server(Class<?> protocolClass, Object protocolImpl,
+        Configuration conf, String bindAddress, int port, int numHandlers,
+        int numReaders, int queueSizePerHandler, boolean verbose,
+        SecretManager<? extends TokenIdentifier> secretManager,
+        String portRangeConfig, AlignmentContext alignmentContext)
+        throws IOException {
+      super(bindAddress, port, null, numHandlers,
+          numReaders, queueSizePerHandler, conf,
+          serverNameFromClass(protocolImpl.getClass()), secretManager,
+          portRangeConfig);
+      setAlignmentContext(alignmentContext);
+      this.verbose = verbose;
+      registerProtocolAndImpl(RPC.RpcKind.RPC_PROTOCOL_BUFFER, protocolClass,
+          protocolImpl);
+    }
+
+    /**
+     * Protobuf invoker for {@link RpcInvoker}.
+     */
+    static class ProtoBufRpcInvoker implements RpcInvoker {
+      private static ProtoClassProtoImpl getProtocolImpl(RPC.Server server,
+          String protoName, long clientVersion) throws RpcServerException {
+        ProtoNameVer pv = new ProtoNameVer(protoName, clientVersion);
+        ProtoClassProtoImpl impl =
+            server.getProtocolImplMap(RPC.RpcKind.RPC_PROTOCOL_BUFFER).get(pv);
+        if (impl == null) { // no match for Protocol AND Version
+          VerProtocolImpl highest = server.getHighestSupportedProtocol(
+              RPC.RpcKind.RPC_PROTOCOL_BUFFER, protoName);
+          if (highest == null) {
+            throw new RpcNoSuchProtocolException(
+                "Unknown protocol: " + protoName);
+          }
+          // protocol supported but not the version that client wants
+          throw new RPC.VersionMismatch(protoName, clientVersion,
+              highest.version);
+        }
+        return impl;
+      }
+
+      @Override
+      /**
+       * This is a server side method, which is invoked over RPC. On success
+       * the return response has protobuf response payload. On failure, the
+       * exception name and the stack trace are returned in the response.
+       * See {@link HadoopRpcResponseProto}
+       *
+       * In this method there three types of exceptions possible and they are
+       * returned in response as follows.
+       * <ol>
+       * <li> Exceptions encountered in this method that are returned
+       * as {@link RpcServerException} </li>
+       * <li> Exceptions thrown by the service is wrapped in ServiceException.
+       * In that this method returns in response the exception thrown by the
+       * service.</li>
+       * <li> Other exceptions thrown by the service. They are returned as
+       * it is.</li>
+       * </ol>
+       */
+      public Writable call(RPC.Server server, String connectionProtocolName,
+          Writable writableRequest, long receiveTime) throws Exception {
+        RpcProtobufRequest request = (RpcProtobufRequest) writableRequest;
+        RequestHeaderProto rpcRequest = request.getRequestHeader();
+        String methodName = rpcRequest.getMethodName();
+
+        /**
+         * RPCs for a particular interface (ie protocol) are done using a
+         * IPC connection that is setup using rpcProxy.
+         * The rpcProxy's has a declared protocol name that is
+         * sent form client to server at connection time.
+         *
+         * Each Rpc call also sends a protocol name
+         * (called declaringClassprotocolName). This name is usually the same
+         * as the connection protocol name except in some cases.
+         * For example metaProtocols such ProtocolInfoProto which get info
+         * about the protocol reuse the connection but need to indicate that
+         * the actual protocol is different (i.e. the protocol is
+         * ProtocolInfoProto) since they reuse the connection; in this case
+         * the declaringClassProtocolName field is set to the ProtocolInfoProto.
+         */
+
+        String declaringClassProtoName =
+            rpcRequest.getDeclaringClassProtocolName();
+        long clientVersion = rpcRequest.getClientProtocolVersion();
+        if (server.verbose) {
+          LOG.info("Call: connectionProtocolName=" + connectionProtocolName +
+              ", method=" + methodName);
+        }
+
+        ProtoClassProtoImpl protocolImpl = getProtocolImpl(server,
+            declaringClassProtoName, clientVersion);
+        BlockingService service = (BlockingService) protocolImpl.protocolImpl;
+        MethodDescriptor methodDescriptor = service.getDescriptorForType()
+            .findMethodByName(methodName);
+        if (methodDescriptor == null) {
+          String msg = "Unknown method " + methodName + " called on "
+              + connectionProtocolName + " protocol.";
+          LOG.warn(msg);
+          throw new RpcNoSuchMethodException(msg);
+        }
+        Message prototype = service.getRequestPrototype(methodDescriptor);
+        Message param = request.getValue(prototype);
+
+        Message result;
+        Call currentCall = Server.getCurCall().get();
+        try {
+          server.rpcDetailedMetrics.init(protocolImpl.protocolClass);
+          CURRENT_CALL_INFO.set(new CallInfo(server, methodName));
+          currentCall.setDetailedMetricsName(methodName);
+          result = service.callBlockingMethod(methodDescriptor, null, param);
+          // Check if this needs to be a deferred response,
+          // by checking the ThreadLocal callback being set
+          if (CURRENT_CALLBACK.get() != null) {
+            currentCall.deferResponse();
+            CURRENT_CALLBACK.set(null);
+            return null;
+          }
+        } catch (ServiceException e) {
+          Exception exception = (Exception) e.getCause();
+          currentCall.setDetailedMetricsName(
+              exception.getClass().getSimpleName());
+          throw (Exception) e.getCause();
+        } catch (Exception e) {
+          currentCall.setDetailedMetricsName(e.getClass().getSimpleName());
+          throw e;
+        } finally {
+          CURRENT_CALL_INFO.set(null);
+        }
+        return RpcWritable.wrap(result);
+      }
+    }
+  }
+
+  // htrace in the ipc layer creates the span name based on toString()
+  // which uses the rpc header.  in the normal case we want to defer decoding
+  // the rpc header until needed by the rpc engine.
+  static class RpcProtobufRequest extends RpcWritable.Buffer {
+    private volatile RequestHeaderProto requestHeader;
+    private Message payload;
+
+    RpcProtobufRequest() {
+    }
+
+    RpcProtobufRequest(RequestHeaderProto header, Message payload) {
+      this.requestHeader = header;
+      this.payload = payload;
+    }
+
+    RequestHeaderProto getRequestHeader() throws IOException {
+      if (getByteBuffer() != null && requestHeader == null) {
+        requestHeader = getValue(RequestHeaderProto.getDefaultInstance());
+      }
+      return requestHeader;
+    }
+
+    @Override
+    public void writeTo(ResponseBuffer out) throws IOException {
+      requestHeader.writeDelimitedTo(out);
+      if (payload != null) {
+        payload.writeDelimitedTo(out);
+      }
+    }
+
+    // this is used by htrace to name the span.
+    @Override
+    public String toString() {
+      try {
+        RequestHeaderProto header = getRequestHeader();
+        return header.getDeclaringClassProtocolName() + "." +
+            header.getMethodName();
+      } catch (IOException e) {
+        throw new IllegalArgumentException(e);
+      }
+    }
+  }
+}
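The CURRENT_CALLBACK handoff above lets a service method return without a response and complete the call later from another thread. A sketch of that pattern, assuming buildResponse() stands in for assembling the real protobuf reply; only the registerForDeferredResponse/setResponse/error flow is from the engine itself.

import org.apache.hadoop.ipc.ProtobufRpcEngine2;
import org.apache.hadoop.ipc.ProtobufRpcEngineCallback2;
import org.apache.hadoop.thirdparty.protobuf.Message;

public class DeferredResponseSketch {
  // Hypothetical helper standing in for building the real reply message.
  static Message buildResponse() {
    return null;
  }

  // Invoked from inside a blocking service method on an RPC handler thread.
  static Message handle() {
    final ProtobufRpcEngineCallback2 callback =
        ProtobufRpcEngine2.Server.registerForDeferredResponse();
    new Thread(() -> {
      try {
        callback.setResponse(buildResponse());
      } catch (Throwable t) {
        callback.error(t);
      }
    }).start();
    // The engine sees the registered callback, calls deferResponse() on the
    // current Call, and ignores this return value.
    return null;
  }
}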
@@ -18,12 +18,17 @@
 
 package org.apache.hadoop.ipc;
 
-import org.apache.hadoop.thirdparty.protobuf.Message;
+import com.google.protobuf.Message;
 
+/**
+ * This engine uses Protobuf 2.5.0. Recommended to upgrade to Protobuf 3.x
+ * from hadoop-thirdparty and use ProtobufRpcEngineCallback2.
+ */
+@Deprecated
 public interface ProtobufRpcEngineCallback {
 
-  public void setResponse(Message message);
+  void setResponse(Message message);
 
-  public void error(Throwable t);
+  void error(Throwable t);
 
 }
@@ -0,0 +1,29 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ipc;
+
+import org.apache.hadoop.thirdparty.protobuf.Message;
+
+public interface ProtobufRpcEngineCallback2 {
+
+  public void setResponse(Message message);
+
+  public void error(Throwable t);
+
+}
@@ -1043,7 +1043,7 @@ public class RPC {
 
   private void initProtocolMetaInfo(Configuration conf) {
     RPC.setProtocolEngine(conf, ProtocolMetaInfoPB.class,
-        ProtobufRpcEngine.class);
+        ProtobufRpcEngine2.class);
     ProtocolMetaInfoServerSideTranslatorPB xlator =
         new ProtocolMetaInfoServerSideTranslatorPB(this);
     BlockingService protocolInfoBlockingService = ProtocolInfoService
@@ -1067,7 +1067,7 @@ public class RPC {
     @Override
     public Writable call(RPC.RpcKind rpcKind, String protocol,
         Writable rpcRequest, long receiveTime) throws Exception {
-      return getRpcInvoker(rpcKind).call(this, protocol, rpcRequest,
+      return getServerRpcInvoker(rpcKind).call(this, protocol, rpcRequest,
           receiveTime);
     }
   }
@@ -114,7 +114,7 @@ public class RpcClientUtil {
     if (versionMap == null) {
       Configuration conf = new Configuration();
       RPC.setProtocolEngine(conf, ProtocolMetaInfoPB.class,
-          ProtobufRpcEngine.class);
+          ProtobufRpcEngine2.class);
       ProtocolMetaInfoPB protocolInfoProxy = getProtocolMetaInfoProxy(rpcProxy,
           conf);
       GetProtocolSignatureRequestProto.Builder builder =
@@ -42,6 +42,8 @@ public abstract class RpcWritable implements Writable {
       return (RpcWritable)o;
     } else if (o instanceof Message) {
       return new ProtobufWrapper((Message)o);
+    } else if (o instanceof com.google.protobuf.Message) {
+      return new ProtobufWrapperLegacy((com.google.protobuf.Message) o);
     } else if (o instanceof Writable) {
       return new WritableWrapper((Writable)o);
     }
@@ -132,6 +134,49 @@ public abstract class RpcWritable implements Writable {
     }
   }
 
+  // adapter for Protobufs.
+  static class ProtobufWrapperLegacy extends RpcWritable {
+    private com.google.protobuf.Message message;
+
+    ProtobufWrapperLegacy(com.google.protobuf.Message message) {
+      this.message = message;
+    }
+
+    com.google.protobuf.Message getMessage() {
+      return message;
+    }
+
+    @Override
+    void writeTo(ResponseBuffer out) throws IOException {
+      int length = message.getSerializedSize();
+      length += com.google.protobuf.CodedOutputStream.
+          computeUInt32SizeNoTag(length);
+      out.ensureCapacity(length);
+      message.writeDelimitedTo(out);
+    }
+
+    @SuppressWarnings("unchecked")
+    @Override
+    <T> T readFrom(ByteBuffer bb) throws IOException {
+      // using the parser with a byte[]-backed coded input stream is the
+      // most efficient way to deserialize a protobuf.  it has a direct
+      // path to the PB ctor that doesn't create multi-layered streams
+      // that internally buffer.
+      com.google.protobuf.CodedInputStream cis =
+          com.google.protobuf.CodedInputStream.newInstance(
+              bb.array(), bb.position() + bb.arrayOffset(), bb.remaining());
+      try {
+        cis.pushLimit(cis.readRawVarint32());
+        message = message.getParserForType().parseFrom(cis);
+        cis.checkLastTagWas(0);
+      } finally {
+        // advance over the bytes read.
+        bb.position(bb.position() + cis.getTotalBytesRead());
+      }
+      return (T)message;
+    }
+  }
+
   /**
    * adapter to allow decoding of writables and protobufs from a byte buffer.
    */
@@ -305,6 +305,10 @@ public abstract class Server {
     return (val == null) ? null : val.rpcRequestWrapperClass;
   }
 
+  protected RpcInvoker getServerRpcInvoker(RPC.RpcKind rpcKind) {
+    return getRpcInvoker(rpcKind);
+  }
+
   public static RpcInvoker getRpcInvoker(RPC.RpcKind rpcKind) {
     RpcKindMapValue val = rpcKindMap.get(rpcKind);
     return (val == null) ? null : val.rpcInvoker;
@@ -2688,15 +2692,15 @@ public abstract class Server {
       call.setPriorityLevel(callQueue.getPriorityLevel(call));
       call.markCallCoordinated(false);
       if(alignmentContext != null && call.rpcRequest != null &&
-          (call.rpcRequest instanceof ProtobufRpcEngine.RpcProtobufRequest)) {
+          (call.rpcRequest instanceof ProtobufRpcEngine2.RpcProtobufRequest)) {
         // if call.rpcRequest is not RpcProtobufRequest, will skip the following
         // step and treat the call as uncoordinated. As currently only certain
         // ClientProtocol methods request made through RPC protobuf needs to be
         // coordinated.
         String methodName;
         String protoName;
-        ProtobufRpcEngine.RpcProtobufRequest req =
-            (ProtobufRpcEngine.RpcProtobufRequest) call.rpcRequest;
+        ProtobufRpcEngine2.RpcProtobufRequest req =
+            (ProtobufRpcEngine2.RpcProtobufRequest) call.rpcRequest;
         try {
           methodName = req.getRequestHeader().getMethodName();
           protoName = req.getRequestHeader().getDeclaringClassProtocolName();
@@ -21,7 +21,6 @@ package org.apache.hadoop.security.ssl;
 import java.io.IOException;
 import java.net.InetAddress;
 import java.net.Socket;
-import java.net.SocketException;
 import java.security.KeyManagementException;
 import java.security.NoSuchAlgorithmException;
 import java.util.ArrayList;
@@ -31,11 +30,9 @@ import javax.net.ssl.SSLContext;
 import javax.net.ssl.SSLSocket;
 import javax.net.ssl.SSLSocketFactory;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import org.wildfly.openssl.OpenSSLProvider;
-import org.wildfly.openssl.SSL;
-
 
 /**
  * A {@link SSLSocketFactory} that can delegate to various SSL implementations.
@@ -60,8 +57,8 @@ import org.wildfly.openssl.SSL;
  * </p>
  *
  * In order to load OpenSSL, applications must ensure the wildfly-openssl
- * artifact is on the classpath. Currently, only ABFS and S3A provide
- * wildfly-openssl as a runtime dependency.
+ * artifact is on the classpath. Currently, only ABFS declares
+ * wildfly-openssl as an explicit dependency.
  */
 public final class DelegatingSSLSocketFactory extends SSLSocketFactory {
 
@@ -110,7 +107,16 @@ public final class DelegatingSSLSocketFactory extends SSLSocketFactory {
   }
 
   /**
-   * Singletone instance of the SSLSocketFactory.
+   * For testing only: reset the socket factory.
+   */
+  @VisibleForTesting
+  public static synchronized void resetDefaultFactory() {
+    LOG.info("Resetting default SSL Socket Factory");
+    instance = null;
+  }
+
+  /**
+   * Singleton instance of the SSLSocketFactory.
    *
    * SSLSocketFactory must be initialized with appropriate SSLChannelMode
    * using initializeDefaultFactory method.
@@ -126,9 +132,7 @@ public final class DelegatingSSLSocketFactory extends SSLSocketFactory {
       throws IOException {
     try {
       initializeSSLContext(preferredChannelMode);
-    } catch (NoSuchAlgorithmException e) {
-      throw new IOException(e);
-    } catch (KeyManagementException e) {
+    } catch (NoSuchAlgorithmException | KeyManagementException e) {
       throw new IOException(e);
     }
 
@@ -146,42 +150,23 @@ public final class DelegatingSSLSocketFactory extends SSLSocketFactory {
   }
 
   private void initializeSSLContext(SSLChannelMode preferredChannelMode)
-      throws NoSuchAlgorithmException, KeyManagementException {
+      throws NoSuchAlgorithmException, KeyManagementException, IOException {
     LOG.debug("Initializing SSL Context to channel mode {}",
         preferredChannelMode);
     switch (preferredChannelMode) {
    case Default:
-      if (!openSSLProviderRegistered) {
-        OpenSSLProvider.register();
-        openSSLProviderRegistered = true;
-      }
      try {
-        java.util.logging.Logger logger = java.util.logging.Logger.getLogger(
-            SSL.class.getName());
-        logger.setLevel(Level.WARNING);
-        ctx = SSLContext.getInstance("openssl.TLS");
-        ctx.init(null, null, null);
-        // Strong reference needs to be kept to logger until initialization of
-        // SSLContext finished (see HADOOP-16174):
-        logger.setLevel(Level.INFO);
+        bindToOpenSSLProvider();
        channelMode = SSLChannelMode.OpenSSL;
-      } catch (NoSuchAlgorithmException e) {
-        LOG.debug("Failed to load OpenSSL. Falling back to the JSSE default.");
+      } catch (LinkageError | NoSuchAlgorithmException | RuntimeException e) {
+        LOG.debug("Failed to load OpenSSL. Falling back to the JSSE default.",
+            e);
        ctx = SSLContext.getDefault();
        channelMode = SSLChannelMode.Default_JSSE;
      }
      break;
    case OpenSSL:
-      if (!openSSLProviderRegistered) {
-        OpenSSLProvider.register();
-        openSSLProviderRegistered = true;
-      }
-      java.util.logging.Logger logger = java.util.logging.Logger.getLogger(
-          SSL.class.getName());
-      logger.setLevel(Level.WARNING);
-      ctx = SSLContext.getInstance("openssl.TLS");
-      ctx.init(null, null, null);
-      // Strong reference needs to be kept to logger until initialization of
-      // SSLContext finished (see HADOOP-16174):
-      logger.setLevel(Level.INFO);
+      bindToOpenSSLProvider();
      channelMode = SSLChannelMode.OpenSSL;
      break;
    case Default_JSSE:
@@ -193,11 +178,38 @@ public final class DelegatingSSLSocketFactory extends SSLSocketFactory {
      channelMode = SSLChannelMode.Default_JSSE_with_GCM;
      break;
    default:
-      throw new NoSuchAlgorithmException("Unknown channel mode: "
+      throw new IOException("Unknown channel mode: "
          + preferredChannelMode);
    }
  }
 
+  /**
+   * Bind to the OpenSSL provider via wildfly.
+   * This MUST be the only place where wildfly classes are referenced,
+   * so ensuring that any linkage problems only surface here where they may
+   * be caught by the initialization code.
+   */
+  private void bindToOpenSSLProvider()
+      throws NoSuchAlgorithmException, KeyManagementException {
+    if (!openSSLProviderRegistered) {
+      LOG.debug("Attempting to register OpenSSL provider");
+      org.wildfly.openssl.OpenSSLProvider.register();
+      openSSLProviderRegistered = true;
+    }
+    // Strong reference needs to be kept to logger until initialization of
+    // SSLContext finished (see HADOOP-16174):
+    java.util.logging.Logger logger = java.util.logging.Logger.getLogger(
+        "org.wildfly.openssl.SSL");
+    Level originalLevel = logger.getLevel();
+    try {
+      logger.setLevel(Level.WARNING);
+      ctx = SSLContext.getInstance("openssl.TLS");
+      ctx.init(null, null, null);
+    } finally {
+      logger.setLevel(originalLevel);
+    }
+  }
+
   public String getProviderName() {
     return providerName;
   }
@@ -212,21 +224,26 @@ public final class DelegatingSSLSocketFactory extends SSLSocketFactory {
     return ciphers.clone();
   }
 
+  /**
+   * Get the channel mode of this instance.
+   * @return a channel mode.
+   */
   public SSLChannelMode getChannelMode() {
     return channelMode;
   }
 
   public Socket createSocket() throws IOException {
     SSLSocketFactory factory = ctx.getSocketFactory();
-    SSLSocket ss = (SSLSocket) factory.createSocket();
-    configureSocket(ss);
-    return ss;
+    return configureSocket(factory.createSocket());
   }
 
   @Override
   public Socket createSocket(Socket s, String host, int port,
       boolean autoClose) throws IOException {
     SSLSocketFactory factory = ctx.getSocketFactory();
-    SSLSocket ss = (SSLSocket) factory.createSocket(s, host, port, autoClose);
-
-    configureSocket(ss);
-    return ss;
+    return configureSocket(
+        factory.createSocket(s, host, port, autoClose));
   }
 
   @Override
@@ -234,52 +251,41 @@ public final class DelegatingSSLSocketFactory extends SSLSocketFactory {
       InetAddress localAddress, int localPort)
       throws IOException {
     SSLSocketFactory factory = ctx.getSocketFactory();
-    SSLSocket ss = (SSLSocket) factory
-        .createSocket(address, port, localAddress, localPort);
-
-    configureSocket(ss);
-    return ss;
+    return configureSocket(factory
+        .createSocket(address, port, localAddress, localPort));
   }
 
   @Override
   public Socket createSocket(String host, int port, InetAddress localHost,
       int localPort) throws IOException {
     SSLSocketFactory factory = ctx.getSocketFactory();
-    SSLSocket ss = (SSLSocket) factory
-        .createSocket(host, port, localHost, localPort);
-
-    configureSocket(ss);
-
-    return ss;
+    return configureSocket(factory
+        .createSocket(host, port, localHost, localPort));
   }
 
   @Override
   public Socket createSocket(InetAddress host, int port) throws IOException {
     SSLSocketFactory factory = ctx.getSocketFactory();
-    SSLSocket ss = (SSLSocket) factory.createSocket(host, port);
-
-    configureSocket(ss);
-
-    return ss;
+    return configureSocket(factory.createSocket(host, port));
   }
 
   @Override
   public Socket createSocket(String host, int port) throws IOException {
     SSLSocketFactory factory = ctx.getSocketFactory();
-    SSLSocket ss = (SSLSocket) factory.createSocket(host, port);
-
-    configureSocket(ss);
-
-    return ss;
+    return configureSocket(factory.createSocket(host, port));
   }
 
-  private void configureSocket(SSLSocket ss) throws SocketException {
-    ss.setEnabledCipherSuites(ciphers);
+  private Socket configureSocket(Socket socket) {
+    ((SSLSocket) socket).setEnabledCipherSuites(ciphers);
+    return socket;
   }
 
   private String[] alterCipherList(String[] defaultCiphers) {
 
-    ArrayList<String> preferredSuits = new ArrayList<>();
+    ArrayList<String> preferredSuites = new ArrayList<>();
 
     // Remove GCM mode based ciphers from the supported list.
     for (int i = 0; i < defaultCiphers.length; i++) {
@@ -287,11 +293,11 @@ public final class DelegatingSSLSocketFactory extends SSLSocketFactory {
         LOG.debug("Removed Cipher - {} from list of enabled SSLSocket ciphers",
             defaultCiphers[i]);
       } else {
-        preferredSuits.add(defaultCiphers[i]);
+        preferredSuites.add(defaultCiphers[i]);
       }
     }
 
-    ciphers = preferredSuits.toArray(new String[0]);
+    ciphers = preferredSuites.toArray(new String[0]);
     return ciphers;
   }
 }
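The refactor above keeps every wildfly reference inside bindToOpenSSLProvider(), so a missing wildfly-openssl jar now degrades to JSSE instead of failing with a LinkageError. A sketch of the initialization handshake, assuming the initializeDefaultFactory and getDefaultFactory entry points referenced in the class javadoc:

import java.io.IOException;
import org.apache.hadoop.security.ssl.DelegatingSSLSocketFactory;

public class ChannelModeSketch {
  public static void main(String[] args) throws IOException {
    // Ask for Default: OpenSSL via wildfly is tried first; LinkageError or
    // NoSuchAlgorithmException falls back to Default_JSSE.
    DelegatingSSLSocketFactory.initializeDefaultFactory(
        DelegatingSSLSocketFactory.SSLChannelMode.Default);
    System.out.println("channel mode: "
        + DelegatingSSLSocketFactory.getDefaultFactory().getChannelMode());
  }
}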
@@ -28,7 +28,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.ipc.ProtobufRpcEngine;
+import org.apache.hadoop.ipc.ProtobufRpcEngine2;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -179,7 +179,7 @@ public class TraceAdmin extends Configured implements Tool {
           servicePrincipal);
     }
     RPC.setProtocolEngine(getConf(), TraceAdminProtocolPB.class,
-        ProtobufRpcEngine.class);
+        ProtobufRpcEngine2.class);
     InetSocketAddress address = NetUtils.createSocketAddr(hostPort);
     UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
     Class<?> xface = TraceAdminProtocolPB.class;
@@ -42,16 +42,18 @@
 #ifdef UNIX
 static void * (*dlsym_CRYPTO_malloc) (int, const char *, int);
 static void (*dlsym_CRYPTO_free) (void *);
+#if OPENSSL_VERSION_NUMBER < 0x10100000L
 static int (*dlsym_CRYPTO_num_locks) (void);
 static void (*dlsym_CRYPTO_set_locking_callback) (void (*)());
 static void (*dlsym_CRYPTO_set_id_callback) (unsigned long (*)());
 static void (*dlsym_ENGINE_load_rdrand) (void);
+static void (*dlsym_ENGINE_cleanup) (void);
+#endif
 static ENGINE * (*dlsym_ENGINE_by_id) (const char *);
 static int (*dlsym_ENGINE_init) (ENGINE *);
 static int (*dlsym_ENGINE_set_default) (ENGINE *, unsigned int);
 static int (*dlsym_ENGINE_finish) (ENGINE *);
 static int (*dlsym_ENGINE_free) (ENGINE *);
-static void (*dlsym_ENGINE_cleanup) (void);
 static int (*dlsym_RAND_bytes) (unsigned char *, int);
 static unsigned long (*dlsym_ERR_get_error) (void);
 #endif
@@ -113,6 +115,8 @@ JNIEXPORT void JNICALL Java_org_apache_hadoop_crypto_random_OpensslSecureRandom_
   dlerror();  // Clear any existing error
   LOAD_DYNAMIC_SYMBOL(dlsym_CRYPTO_malloc, env, openssl, "CRYPTO_malloc");
   LOAD_DYNAMIC_SYMBOL(dlsym_CRYPTO_free, env, openssl, "CRYPTO_free");
+#if OPENSSL_VERSION_NUMBER < 0x10100000L
+  // pre-1.1.0
   LOAD_DYNAMIC_SYMBOL(dlsym_CRYPTO_num_locks, env, openssl, "CRYPTO_num_locks");
   LOAD_DYNAMIC_SYMBOL(dlsym_CRYPTO_set_locking_callback, \
                       env, openssl, "CRYPTO_set_locking_callback");
@@ -120,13 +124,14 @@ JNIEXPORT void JNICALL Java_org_apache_hadoop_crypto_random_OpensslSecureRandom_
                       openssl, "CRYPTO_set_id_callback");
   LOAD_DYNAMIC_SYMBOL(dlsym_ENGINE_load_rdrand, env, \
                       openssl, "ENGINE_load_rdrand");
+  LOAD_DYNAMIC_SYMBOL(dlsym_ENGINE_cleanup, env, openssl, "ENGINE_cleanup");
+#endif
   LOAD_DYNAMIC_SYMBOL(dlsym_ENGINE_by_id, env, openssl, "ENGINE_by_id");
   LOAD_DYNAMIC_SYMBOL(dlsym_ENGINE_init, env, openssl, "ENGINE_init");
   LOAD_DYNAMIC_SYMBOL(dlsym_ENGINE_set_default, env, \
                       openssl, "ENGINE_set_default");
   LOAD_DYNAMIC_SYMBOL(dlsym_ENGINE_finish, env, openssl, "ENGINE_finish");
   LOAD_DYNAMIC_SYMBOL(dlsym_ENGINE_free, env, openssl, "ENGINE_free");
-  LOAD_DYNAMIC_SYMBOL(dlsym_ENGINE_cleanup, env, openssl, "ENGINE_cleanup");
   LOAD_DYNAMIC_SYMBOL(dlsym_RAND_bytes, env, openssl, "RAND_bytes");
   LOAD_DYNAMIC_SYMBOL(dlsym_ERR_get_error, env, openssl, "ERR_get_error");
 #endif
@@ -303,9 +308,11 @@ static unsigned long pthreads_thread_id(void)
  */
 static ENGINE * openssl_rand_init(void)
 {
+#if OPENSSL_VERSION_NUMBER < 0x10100000L
   locks_setup();
 
   dlsym_ENGINE_load_rdrand();
+#endif
   ENGINE *eng = dlsym_ENGINE_by_id("rdrand");
 
   int ret = -1;
@@ -340,11 +347,12 @@ static void openssl_rand_clean(ENGINE *eng, int clean_locks)
     dlsym_ENGINE_finish(eng);
     dlsym_ENGINE_free(eng);
   }
 
+#if OPENSSL_VERSION_NUMBER < 0x10100000L
   dlsym_ENGINE_cleanup();
   if (clean_locks) {
     locks_cleanup();
   }
+#endif
 }
 
 static int openssl_rand_bytes(unsigned char *buf, int num)
@@ -0,0 +1,67 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * These .proto interfaces are private and stable.
+ * Please see http://wiki.apache.org/hadoop/Compatibility
+ * for what changes are allowed for a *stable* .proto interface.
+ */
+syntax = "proto2";
+/**
+ * These are the messages used by Hadoop RPC for the Rpc Engine Protocol Buffer
+ * to marshal the request and response in the RPC layer.
+ * The messages are sent in addition to the normal RPC header as
+ * defined in RpcHeader.proto
+ */
+option java_package = "org.apache.hadoop.ipc.protobuf";
+option java_outer_classname = "ProtobufRpcEngine2Protos";
+option java_generate_equals_and_hash = true;
+package hadoop.common;
+
+/**
+ * This message is the header for the Protobuf Rpc Engine
+ * when sending a RPC request from RPC client to the RPC server.
+ * The actual request (serialized as protobuf) follows this request.
+ *
+ * No special header is needed for the Rpc Response for Protobuf Rpc Engine.
+ * The normal RPC response header (see RpcHeader.proto) are sufficient.
+ */
+message RequestHeaderProto {
+  /** Name of the RPC method */
+  required string methodName = 1;
+
+  /**
+   * RPCs for a particular interface (ie protocol) are done using a
+   * IPC connection that is setup using rpcProxy.
+   * The rpcProxy's has a declared protocol name that is
+   * sent form client to server at connection time.
+   *
+   * Each Rpc call also sends a protocol name
+   * (called declaringClassprotocolName). This name is usually the same
+   * as the connection protocol name except in some cases.
+   * For example metaProtocols such ProtocolInfoProto which get metainfo
+   * about the protocol reuse the connection but need to indicate that
+   * the actual protocol is different (i.e. the protocol is
+   * ProtocolInfoProto) since they reuse the connection; in this case
+   * the declaringClassProtocolName field is set to the ProtocolInfoProto
+   */
+  required string declaringClassProtocolName = 2;
+
+  /** protocol version of class declaring the called method */
+  required uint64 clientProtocolVersion = 3;
+}
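Builder use of the generated header, mirroring constructRpcRequestHeader() in ProtobufRpcEngine2: all three required fields must be set before build(). The field values below are illustrative.

import org.apache.hadoop.ipc.protobuf.ProtobufRpcEngine2Protos.RequestHeaderProto;

public class RequestHeaderSketch {
  public static void main(String[] args) {
    RequestHeaderProto header = RequestHeaderProto.newBuilder()
        .setMethodName("echo")
        .setDeclaringClassProtocolName("testProto")
        .setClientProtocolVersion(1L)
        .build();
    // Same "protocol.method" string htrace uses to name the RPC span.
    System.out.println(header.getDeclaringClassProtocolName()
        + "." + header.getMethodName());
  }
}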
@@ -28,7 +28,7 @@ import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.ha.protocolPB.HAServiceProtocolPB;
 import org.apache.hadoop.ha.protocolPB.HAServiceProtocolServerSideTranslatorPB;
 import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAServiceProtocolService;
-import org.apache.hadoop.ipc.ProtobufRpcEngine;
+import org.apache.hadoop.ipc.ProtobufRpcEngine2;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.net.NetUtils;
@@ -119,7 +119,7 @@ class DummyHAService extends HAServiceTarget {
 
     try {
       RPC.setProtocolEngine(conf,
-          HAServiceProtocolPB.class, ProtobufRpcEngine.class);
+          HAServiceProtocolPB.class, ProtobufRpcEngine2.class);
       HAServiceProtocolServerSideTranslatorPB haServiceProtocolXlator =
           new HAServiceProtocolServerSideTranslatorPB(new MockHAProtocolImpl());
       BlockingService haPbService = HAServiceProtocolService
@@ -66,7 +66,7 @@ public class RPCCallBenchmark extends TestRpcBase implements Tool {
     public int secondsToRun = 15;
     private int msgSize = 1024;
     public Class<? extends RpcEngine> rpcEngine =
-        ProtobufRpcEngine.class;
+        ProtobufRpcEngine2.class;
 
     private MyOptions(String args[]) {
       try {
@@ -181,7 +181,7 @@ public class RPCCallBenchmark extends TestRpcBase implements Tool {
       if (line.hasOption('e')) {
         String eng = line.getOptionValue('e');
         if ("protobuf".equals(eng)) {
-          rpcEngine = ProtobufRpcEngine.class;
+          rpcEngine = ProtobufRpcEngine2.class;
         } else {
           throw new ParseException("invalid engine: " + eng);
         }
@@ -224,7 +224,7 @@ public class RPCCallBenchmark extends TestRpcBase implements Tool {
 
     RPC.Server server;
     // Get RPC server for server side implementation
-    if (opts.rpcEngine == ProtobufRpcEngine.class) {
+    if (opts.rpcEngine == ProtobufRpcEngine2.class) {
       // Create server side implementation
       PBServerImpl serverImpl = new PBServerImpl();
       BlockingService service = TestProtobufRpcProto
@@ -378,7 +378,7 @@ public class RPCCallBenchmark extends TestRpcBase implements Tool {
   private RpcServiceWrapper createRpcClient(MyOptions opts) throws IOException {
     InetSocketAddress addr = NetUtils.createSocketAddr(opts.host, opts.getPort());
 
-    if (opts.rpcEngine == ProtobufRpcEngine.class) {
+    if (opts.rpcEngine == ProtobufRpcEngine2.class) {
       final TestRpcService proxy = RPC.getProxy(TestRpcService.class, 0, addr, conf);
       return new RpcServiceWrapper() {
         @Override
@@ -45,7 +45,7 @@ public class TestMultipleProtocolServer extends TestRpcBase {
     // Set RPC engine to protobuf RPC engine
     Configuration conf2 = new Configuration();
     RPC.setProtocolEngine(conf2, TestRpcService.class,
-        ProtobufRpcEngine.class);
+        ProtobufRpcEngine2.class);
     TestRpcService client = RPC.getProxy(TestRpcService.class, 0, addr, conf2);
     TestProtoBufRpc.testProtoBufRpc(client);
   }
@@ -25,8 +25,6 @@ import java.net.InetSocketAddress;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
-
 import org.apache.hadoop.ipc.protobuf.TestProtos.EchoRequestProto;
 import org.apache.hadoop.ipc.protobuf.TestProtos.EchoResponseProto;
 import org.apache.hadoop.ipc.protobuf.TestProtos.EmptyRequestProto;
 import org.apache.hadoop.ipc.protobuf.TestProtos.EmptyResponseProto;
 import org.apache.hadoop.ipc.protobuf.TestProtos.OptRequestProto;
@@ -138,7 +136,7 @@ public class TestProtoBufRPCCompatibility {
     conf = new Configuration();
     conf.setInt(CommonConfigurationKeys.IPC_MAXIMUM_DATA_LENGTH, 1024);
     // Set RPC engine to protobuf RPC engine
-    RPC.setProtocolEngine(conf, NewRpcService.class, ProtobufRpcEngine.class);
+    RPC.setProtocolEngine(conf, NewRpcService.class, ProtobufRpcEngine2.class);
 
     // Create server side implementation
     NewServerImpl serverImpl = new NewServerImpl();
@@ -151,7 +149,7 @@ public class TestProtoBufRPCCompatibility {
 
     server.start();
 
-    RPC.setProtocolEngine(conf, OldRpcService.class, ProtobufRpcEngine.class);
+    RPC.setProtocolEngine(conf, OldRpcService.class, ProtobufRpcEngine2.class);
 
     OldRpcService proxy = RPC.getProxy(OldRpcService.class, 0, addr, conf);
     // Verify that exception is thrown if protocolVersion is mismatch between
@@ -168,7 +166,8 @@ public class TestProtoBufRPCCompatibility {
     }
 
     // Verify that missing of optional field is still compatible in RPC call.
-    RPC.setProtocolEngine(conf, NewerRpcService.class, ProtobufRpcEngine.class);
+    RPC.setProtocolEngine(conf, NewerRpcService.class,
+        ProtobufRpcEngine2.class);
     NewerRpcService newProxy = RPC.getProxy(NewerRpcService.class, 0, addr,
         conf);
     newProxy.echo(null, emptyRequest);
@@ -94,8 +94,9 @@ public class TestProtoBufRpc extends TestRpcBase {
     conf.setInt(CommonConfigurationKeys.IPC_MAXIMUM_DATA_LENGTH, 1024);
     conf.setBoolean(CommonConfigurationKeys.IPC_SERVER_LOG_SLOW_RPC, true);
     // Set RPC engine to protobuf RPC engine
-    RPC.setProtocolEngine(conf, TestRpcService.class, ProtobufRpcEngine.class);
-    RPC.setProtocolEngine(conf, TestRpcService2.class, ProtobufRpcEngine.class);
+    RPC.setProtocolEngine(conf, TestRpcService.class, ProtobufRpcEngine2.class);
+    RPC.setProtocolEngine(conf, TestRpcService2.class,
+        ProtobufRpcEngine2.class);

     // Create server side implementation
     PBServerImpl serverImpl = new PBServerImpl();
@@ -52,7 +52,7 @@ public class TestProtoBufRpcServerHandoff {
         TestProtobufRpcHandoffProto.newReflectiveBlockingService(serverImpl);

     RPC.setProtocolEngine(conf, TestProtoBufRpcServerHandoffProtocol.class,
-        ProtobufRpcEngine.class);
+        ProtobufRpcEngine2.class);
     RPC.Server server = new RPC.Builder(conf)
         .setProtocol(TestProtoBufRpcServerHandoffProtocol.class)
         .setInstance(blockingService)

@@ -144,8 +144,8 @@ public class TestProtoBufRpcServerHandoff {
         TestProtos.SleepRequestProto2 request) throws
         ServiceException {
       final long startTime = System.currentTimeMillis();
-      final ProtobufRpcEngineCallback callback =
-          ProtobufRpcEngine.Server.registerForDeferredResponse();
+      final ProtobufRpcEngineCallback2 callback =
+          ProtobufRpcEngine2.Server.registerForDeferredResponse();
       final long sleepTime = request.getSleepTime();
       new Thread() {
         @Override
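The second hunk above swaps the deferred-response callback type along with the engine. A sketch of how a handler defers its reply under the new names, assuming ProtobufRpcEngineCallback2 keeps the setResponse(Message) contract of the old ProtobufRpcEngineCallback:

import org.apache.hadoop.ipc.ProtobufRpcEngine2;
import org.apache.hadoop.ipc.ProtobufRpcEngineCallback2;
import org.apache.hadoop.thirdparty.protobuf.Message;

public class DeferredResponseSketch {
  // Must run inside an RPC handler thread, as in the test above.
  public static void respondLater(final Message response, final long sleepMillis) {
    final ProtobufRpcEngineCallback2 callback =
        ProtobufRpcEngine2.Server.registerForDeferredResponse();
    new Thread(() -> {
      try {
        Thread.sleep(sleepMillis); // simulate slow work off the handler thread
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
      }
      callback.setResponse(response); // assumed to mirror the old callback API
    }).start();
  }
}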
@@ -114,19 +114,19 @@ public class TestRPCCompatibility {
     ProtocolSignature.resetCache();

     RPC.setProtocolEngine(conf,
-        TestProtocol0.class, ProtobufRpcEngine.class);
+        TestProtocol0.class, ProtobufRpcEngine2.class);

     RPC.setProtocolEngine(conf,
-        TestProtocol1.class, ProtobufRpcEngine.class);
+        TestProtocol1.class, ProtobufRpcEngine2.class);

     RPC.setProtocolEngine(conf,
-        TestProtocol2.class, ProtobufRpcEngine.class);
+        TestProtocol2.class, ProtobufRpcEngine2.class);

     RPC.setProtocolEngine(conf,
-        TestProtocol3.class, ProtobufRpcEngine.class);
+        TestProtocol3.class, ProtobufRpcEngine2.class);

     RPC.setProtocolEngine(conf,
-        TestProtocol4.class, ProtobufRpcEngine.class);
+        TestProtocol4.class, ProtobufRpcEngine2.class);
   }

   @After
@@ -44,7 +44,7 @@ public class TestRPCWaitForProxy extends TestRpcBase {
   @Before
   public void setupProtocolEngine() {
     RPC.setProtocolEngine(conf, TestRpcService.class,
-        ProtobufRpcEngine.class);
+        ProtobufRpcEngine2.class);
   }

   /**
@@ -26,7 +26,6 @@ import org.apache.hadoop.io.retry.RetryPolicies;
 import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.io.retry.TestConnectionRetryPolicy;
 import org.apache.hadoop.ipc.Client.ConnectionId;
 import org.apache.hadoop.ipc.TestRpcBase.TestRpcService;
 import org.junit.Before;
 import org.junit.Test;

@@ -129,7 +128,7 @@ public class TestReuseRpcConnections extends TestRpcBase {
     try {
       proxy1 = getClient(addr, newConf, retryPolicy1);
       proxy1.ping(null, newEmptyRequest());
-      client = ProtobufRpcEngine.getClient(newConf);
+      client = ProtobufRpcEngine2.getClient(newConf);
       final Set<ConnectionId> conns = client.getConnectionIds();
       assertEquals("number of connections in cache is wrong", 1, conns.size());
@@ -70,7 +70,7 @@ public class TestRpcBase {
   protected void setupConf() {
     conf = new Configuration();
     // Set RPC engine to protobuf RPC engine
-    RPC.setProtocolEngine(conf, TestRpcService.class, ProtobufRpcEngine.class);
+    RPC.setProtocolEngine(conf, TestRpcService.class, ProtobufRpcEngine2.class);
     UserGroupInformation.setConfiguration(conf);
   }
@@ -169,7 +169,7 @@ public class TestSaslRPC extends TestRpcBase {
     clientFallBackToSimpleAllowed = true;

     // Set RPC engine to protobuf RPC engine
-    RPC.setProtocolEngine(conf, TestRpcService.class, ProtobufRpcEngine.class);
+    RPC.setProtocolEngine(conf, TestRpcService.class, ProtobufRpcEngine2.class);
   }

   static String getQOPNames (QualityOfProtection[] qops){

@@ -356,7 +356,7 @@ public class TestSaslRPC extends TestRpcBase {
     newConf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, timeouts[0]);
     proxy1 = getClient(addr, newConf);
     proxy1.getAuthMethod(null, newEmptyRequest());
-    client = ProtobufRpcEngine.getClient(newConf);
+    client = ProtobufRpcEngine2.getClient(newConf);
     Set<ConnectionId> conns = client.getConnectionIds();
     assertEquals("number of connections in cache is wrong", 1, conns.size());
     // same conf, connection should be re-used
@@ -39,8 +39,8 @@ public class MockDomainNameResolver implements DomainNameResolver {
   public static final byte[] BYTE_ADDR_2 = new byte[]{10, 1, 1, 2};
   public static final String ADDR_1 = "10.1.1.1";
   public static final String ADDR_2 = "10.1.1.2";
-  public static final String FQDN_1 = "host01.com";
-  public static final String FQDN_2 = "host02.com";
+  public static final String FQDN_1 = "host01.test";
+  public static final String FQDN_2 = "host02.test";

   /** Internal mapping of domain names and IP addresses. */
   private Map<String, InetAddress[]> addrs = new TreeMap<>();
@@ -21,7 +21,7 @@ import org.apache.hadoop.thirdparty.protobuf.ServiceException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.ipc.ProtobufRpcEngine;
+import org.apache.hadoop.ipc.ProtobufRpcEngine2;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.ipc.TestRpcBase;

@@ -151,7 +151,7 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
     configureSuperUserIPAddresses(conf, REAL_USER_SHORT_NAME);
     // Set RPC engine to protobuf RPC engine
     RPC.setProtocolEngine(conf, TestRpcService.class,
-        ProtobufRpcEngine.class);
+        ProtobufRpcEngine2.class);
     UserGroupInformation.setConfiguration(conf);
     final Server server = setupTestServer(conf, 5);

@@ -181,7 +181,7 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
         getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME),
         "group1");
     RPC.setProtocolEngine(conf, TestRpcService.class,
-        ProtobufRpcEngine.class);
+        ProtobufRpcEngine2.class);
     UserGroupInformation.setConfiguration(conf);
     final Server server = setupTestServer(conf, 5);

@@ -215,7 +215,7 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
         getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME),
         "group1");
     RPC.setProtocolEngine(conf, TestRpcService.class,
-        ProtobufRpcEngine.class);
+        ProtobufRpcEngine2.class);
     UserGroupInformation.setConfiguration(conf);
     final Server server = setupTestServer(conf, 5);

@@ -251,7 +251,7 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
     conf.setStrings(DefaultImpersonationProvider.getTestProvider().
         getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME), "group1");
     RPC.setProtocolEngine(conf, TestRpcService.class,
-        ProtobufRpcEngine.class);
+        ProtobufRpcEngine2.class);
     UserGroupInformation.setConfiguration(conf);
     final Server server = setupTestServer(conf, 2);

@@ -286,7 +286,7 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
     final Configuration conf = new Configuration();
     configureSuperUserIPAddresses(conf, REAL_USER_SHORT_NAME);
     RPC.setProtocolEngine(conf, TestRpcService.class,
-        ProtobufRpcEngine.class);
+        ProtobufRpcEngine2.class);
     UserGroupInformation.setConfiguration(conf);
     final Server server = setupTestServer(conf, 2);

@@ -322,7 +322,7 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
         getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME),
         "group3");
     RPC.setProtocolEngine(conf, TestRpcService.class,
-        ProtobufRpcEngine.class);
+        ProtobufRpcEngine2.class);
     UserGroupInformation.setConfiguration(conf);
     final Server server = setupTestServer(conf, 2);

@@ -363,7 +363,7 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
     TestTokenSecretManager sm = new TestTokenSecretManager();
     SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, conf);
     RPC.setProtocolEngine(conf, TestRpcService.class,
-        ProtobufRpcEngine.class);
+        ProtobufRpcEngine2.class);
     UserGroupInformation.setConfiguration(conf);
     final Server server = setupTestServer(conf, 5, sm);

@@ -411,7 +411,7 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
     SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, newConf);
     // Set RPC engine to protobuf RPC engine
     RPC.setProtocolEngine(newConf, TestRpcService.class,
-        ProtobufRpcEngine.class);
+        ProtobufRpcEngine2.class);
     UserGroupInformation.setConfiguration(newConf);
     final Server server = setupTestServer(newConf, 5, sm);
@@ -22,11 +22,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.3.0-SNAPSHOT</version>
+    <version>3.3.0</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-kms</artifactId>
-  <version>3.3.0-SNAPSHOT</version>
+  <version>3.3.0</version>
   <packaging>jar</packaging>

   <name>Apache Hadoop KMS</name>
@@ -22,12 +22,16 @@ import java.net.InetSocketAddress;
 import java.net.MalformedURLException;
 import java.net.URI;
 import java.net.URL;
+import java.util.LinkedHashSet;
+import java.util.Set;

 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.source.JvmMetrics;
+import org.apache.hadoop.security.AuthenticationFilterInitializer;
+import org.apache.hadoop.security.authentication.server.ProxyUserAuthenticationFilterInitializer;
 import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.security.ssl.SSLFactory;
 import org.apache.hadoop.util.JvmPauseMonitor;

@@ -94,6 +98,22 @@ public class KMSWebServer {
         KMSConfiguration.HTTP_PORT_DEFAULT);
     URI endpoint = new URI(scheme, null, host, port, null, null, null);

+    String configuredInitializers =
+        conf.get(HttpServer2.FILTER_INITIALIZER_PROPERTY);
+    if (configuredInitializers != null) {
+      Set<String> target = new LinkedHashSet<String>();
+      String[] initializers = configuredInitializers.split(",");
+      for (String init : initializers) {
+        if (!init.equals(AuthenticationFilterInitializer.class.getName()) &&
+            !init.equals(
+                ProxyUserAuthenticationFilterInitializer.class.getName())) {
+          target.add(init);
+        }
+      }
+      String actualInitializers = StringUtils.join(",", target);
+      conf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY, actualInitializers);
+    }
+
     httpServer = new HttpServer2.Builder()
         .setName(NAME)
         .setConf(conf)
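The new block above prunes the globally configured authentication filter initializers so KMS can install its own. A self-contained restatement of just that pruning logic, with a hypothetical com.example.MyFilterInitializer as the surviving entry:

import java.util.LinkedHashSet;
import java.util.Set;

public class FilterInitializerPruneSketch {
  public static void main(String[] args) {
    String configured =
        "org.apache.hadoop.security.AuthenticationFilterInitializer,"
            + "com.example.MyFilterInitializer";
    Set<String> target = new LinkedHashSet<>();
    for (String init : configured.split(",")) {
      if (!init.equals(
          "org.apache.hadoop.security.AuthenticationFilterInitializer")
          && !init.equals("org.apache.hadoop.security.authentication.server."
              + "ProxyUserAuthenticationFilterInitializer")) {
        target.add(init);
      }
    }
    // Prints only com.example.MyFilterInitializer.
    System.out.println(String.join(",", target));
  }
}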
@@ -38,6 +38,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.MultipleIOException;
 import org.apache.hadoop.minikdc.MiniKdc;
+import org.apache.hadoop.security.AuthenticationFilterInitializer;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;

@@ -3079,4 +3080,45 @@ public class TestKMS {
       }
     });
   }
+
+  @Test
+  public void testFilterInitializer() throws Exception {
+    Configuration conf = new Configuration();
+    File testDir = getTestDir();
+    conf = createBaseKMSConf(testDir, conf);
+    conf.set("hadoop.security.authentication", "kerberos");
+    conf.set("hadoop.kms.authentication.token.validity", "1");
+    conf.set("hadoop.kms.authentication.type", "kerberos");
+    conf.set("hadoop.kms.authentication.kerberos.keytab",
+        keytab.getAbsolutePath());
+    conf.set("hadoop.kms.authentication.kerberos.principal", "HTTP/localhost");
+    conf.set("hadoop.kms.authentication.kerberos.name.rules", "DEFAULT");
+    conf.set("hadoop.http.filter.initializers",
+        AuthenticationFilterInitializer.class.getName());
+    conf.set("hadoop.http.authentication.type", "kerberos");
+    conf.set("hadoop.http.authentication.kerberos.principal", "HTTP/localhost");
+    conf.set("hadoop.http.authentication.kerberos.keytab",
+        keytab.getAbsolutePath());
+
+    writeConf(testDir, conf);
+
+    runServer(null, null, testDir, new KMSCallable<Void>() {
+      @Override
+      public Void call() throws Exception {
+        final Configuration conf = new Configuration();
+        URL url = getKMSUrl();
+        final URI uri = createKMSUri(getKMSUrl());
+
+        doAs("client", new PrivilegedExceptionAction<Void>() {
+          @Override
+          public Void run() throws Exception {
+            final KeyProvider kp = createProvider(uri, conf);
+            Assert.assertTrue(kp.getKeys().isEmpty());
+            return null;
+          }
+        });
+        return null;
+      }
+    });
+  }
 }
@@ -18,12 +18,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.3.0-SNAPSHOT</version>
+    <version>3.3.0</version>
    <relativePath>../../hadoop-project</relativePath>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-minikdc</artifactId>
-  <version>3.3.0-SNAPSHOT</version>
+  <version>3.3.0</version>
   <description>Apache Hadoop MiniKDC</description>
   <name>Apache Hadoop MiniKDC</name>
   <packaging>jar</packaging>

@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.3.0-SNAPSHOT</version>
+    <version>3.3.0</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-nfs</artifactId>
-  <version>3.3.0-SNAPSHOT</version>
+  <version>3.3.0</version>
   <packaging>jar</packaging>

   <name>Apache Hadoop NFS</name>

@@ -19,12 +19,12 @@
   <parent>
     <artifactId>hadoop-project</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.3.0-SNAPSHOT</version>
+    <version>3.3.0</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-registry</artifactId>
-  <version>3.3.0-SNAPSHOT</version>
+  <version>3.3.0</version>
   <name>Apache Hadoop Registry</name>

   <dependencies>

@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.3.0-SNAPSHOT</version>
+    <version>3.3.0</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-common-project</artifactId>
-  <version>3.3.0-SNAPSHOT</version>
+  <version>3.3.0</version>
   <description>Apache Hadoop Common Project</description>
   <name>Apache Hadoop Common Project</name>
   <packaging>pom</packaging>

@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.3.0-SNAPSHOT</version>
+    <version>3.3.0</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-dist</artifactId>
-  <version>3.3.0-SNAPSHOT</version>
+  <version>3.3.0</version>
   <description>Apache Hadoop Distribution</description>
   <name>Apache Hadoop Distribution</name>
   <packaging>jar</packaging>

@@ -20,11 +20,11 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project-dist</artifactId>
-    <version>3.3.0-SNAPSHOT</version>
+    <version>3.3.0</version>
     <relativePath>../../hadoop-project-dist</relativePath>
   </parent>
   <artifactId>hadoop-hdfs-client</artifactId>
-  <version>3.3.0-SNAPSHOT</version>
+  <version>3.3.0</version>
   <description>Apache Hadoop HDFS Client</description>
   <name>Apache Hadoop HDFS Client</name>
   <packaging>jar</packaging>
@@ -56,7 +56,7 @@ import org.apache.hadoop.io.retry.RetryPolicies;
 import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.io.retry.RetryProxy;
 import org.apache.hadoop.io.retry.RetryUtils;
-import org.apache.hadoop.ipc.ProtobufRpcEngine;
+import org.apache.hadoop.ipc.ProtobufRpcEngine2;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.SecurityUtil;

@@ -355,7 +355,7 @@ public class NameNodeProxiesClient {
       AlignmentContext alignmentContext)
       throws IOException {
     RPC.setProtocolEngine(conf, ClientNamenodeProtocolPB.class,
-        ProtobufRpcEngine.class);
+        ProtobufRpcEngine2.class);

     final RetryPolicy defaultPolicy =
         RetryUtils.getDefaultRetryPolicy(
@@ -68,7 +68,7 @@ import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus;
 import org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus.Result;
 import org.apache.hadoop.ipc.ProtobufHelper;
-import org.apache.hadoop.ipc.ProtobufRpcEngine;
+import org.apache.hadoop.ipc.ProtobufRpcEngine2;
 import org.apache.hadoop.ipc.ProtocolMetaInterface;
 import org.apache.hadoop.ipc.ProtocolTranslator;
 import org.apache.hadoop.ipc.RPC;

@@ -181,7 +181,7 @@ public class ClientDatanodeProtocolTranslatorPB implements
       InetSocketAddress addr, UserGroupInformation ticket, Configuration conf,
       SocketFactory factory, int socketTimeout) throws IOException {
     RPC.setProtocolEngine(conf, ClientDatanodeProtocolPB.class,
-        ProtobufRpcEngine.class);
+        ProtobufRpcEngine2.class);
     return RPC.getProxy(ClientDatanodeProtocolPB.class,
         RPC.getProtocolVersion(ClientDatanodeProtocolPB.class), addr, ticket,
         conf, factory, socketTimeout);
@@ -237,7 +237,7 @@ import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.retry.AsyncCallHandler;
 import org.apache.hadoop.ipc.Client;
 import org.apache.hadoop.ipc.ProtobufHelper;
-import org.apache.hadoop.ipc.ProtobufRpcEngine;
+import org.apache.hadoop.ipc.ProtobufRpcEngine2;
 import org.apache.hadoop.ipc.ProtocolMetaInterface;
 import org.apache.hadoop.ipc.ProtocolTranslator;
 import org.apache.hadoop.ipc.RPC;

@@ -456,7 +456,7 @@ public class ClientNamenodeProtocolTranslatorPB implements

   private void setAsyncReturnValue() {
     final AsyncGet<Message, Exception> asyncReturnMessage
-        = ProtobufRpcEngine.getAsyncReturnMessage();
+        = ProtobufRpcEngine2.getAsyncReturnMessage();
     final AsyncGet<Void, Exception> asyncGet
         = new AsyncGet<Void, Exception>() {
       @Override

@@ -1569,7 +1569,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
     if (Client.isAsynchronousMode()) {
       rpcProxy.getAclStatus(null, req);
       final AsyncGet<Message, Exception> asyncReturnMessage
-          = ProtobufRpcEngine.getAsyncReturnMessage();
+          = ProtobufRpcEngine2.getAsyncReturnMessage();
       final AsyncGet<AclStatus, Exception> asyncGet
           = new AsyncGet<AclStatus, Exception>() {
         @Override
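Both ClientNamenodeProtocolTranslatorPB hunks move the async-call plumbing to the new engine. A sketch of the retrieval side, under the assumption that the call semantics are otherwise unchanged: immediately after issuing an RPC in asynchronous mode, the pending response message is picked up on the same thread.

import org.apache.hadoop.ipc.ProtobufRpcEngine2;
import org.apache.hadoop.thirdparty.protobuf.Message;
import org.apache.hadoop.util.concurrent.AsyncGet;

public class AsyncReturnSketch {
  // Call on the thread that just fired an async RPC through the proxy.
  public static AsyncGet<Message, Exception> pendingResponse() {
    return ProtobufRpcEngine2.getAsyncReturnMessage();
  }
}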
@ -34,7 +34,7 @@ import org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.ListR
|
|||
import org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.ListReconfigurablePropertiesResponseProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.StartReconfigurationRequestProto;
|
||||
import org.apache.hadoop.ipc.ProtobufHelper;
|
||||
import org.apache.hadoop.ipc.ProtobufRpcEngine;
|
||||
import org.apache.hadoop.ipc.ProtobufRpcEngine2;
|
||||
import org.apache.hadoop.ipc.ProtocolMetaInterface;
|
||||
import org.apache.hadoop.ipc.ProtocolTranslator;
|
||||
import org.apache.hadoop.ipc.RPC;
|
||||
|
@ -84,7 +84,7 @@ public class ReconfigurationProtocolTranslatorPB implements
|
|||
InetSocketAddress addr, UserGroupInformation ticket, Configuration conf,
|
||||
SocketFactory factory, int socketTimeout) throws IOException {
|
||||
RPC.setProtocolEngine(conf, ReconfigurationProtocolPB.class,
|
||||
ProtobufRpcEngine.class);
|
||||
ProtobufRpcEngine2.class);
|
||||
return RPC.getProxy(ReconfigurationProtocolPB.class,
|
||||
RPC.getProtocolVersion(ReconfigurationProtocolPB.class),
|
||||
addr, ticket, conf, factory, socketTimeout);
|
||||
|
|
|
@@ -152,6 +152,7 @@ public class WebHdfsFileSystem extends FileSystem
       + "/v" + VERSION;
   public static final String EZ_HEADER = "X-Hadoop-Accept-EZ";
   public static final String FEFINFO_HEADER = "X-Hadoop-feInfo";
+  public static final String DFS_HTTP_POLICY_KEY = "dfs.http.policy";

   /**
    * Default connection factory may be overridden in tests to use smaller

@@ -181,6 +182,7 @@ public class WebHdfsFileSystem extends FileSystem

   private DFSOpsCountStatistics storageStatistics;
   private KeyProvider testProvider;
+  private boolean isTLSKrb;

   /**
    * Return the protocol scheme for the FileSystem.

@@ -242,6 +244,7 @@ public class WebHdfsFileSystem extends FileSystem
           .newDefaultURLConnectionFactory(connectTimeout, readTimeout, conf);
     }

+    this.isTLSKrb = "HTTPS_ONLY".equals(conf.get(DFS_HTTP_POLICY_KEY));

     ugi = UserGroupInformation.getCurrentUser();
     this.uri = URI.create(uri.getScheme() + "://" + uri.getAuthority());

@@ -699,6 +702,11 @@ public class WebHdfsFileSystem extends FileSystem
     //redirect hostname and port
     redirectHost = null;

+    if (url.getProtocol().equals("http") &&
+        UserGroupInformation.isSecurityEnabled() &&
+        isTLSKrb) {
+      throw new IOException("Access denied: dfs.http.policy is HTTPS_ONLY.");
+    }

     // resolve redirects for a DN operation unless already resolved
     if (op.getRedirect() && !redirected) {
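The WebHdfsFileSystem hunks add a client-side guard: with Kerberos security enabled and dfs.http.policy set to HTTPS_ONLY, a plain http:// URL is rejected before any connection is attempted. A stand-alone restatement of the guard, with isTLSKrb standing in for the new field:

import java.io.IOException;
import java.net.URL;
import org.apache.hadoop.security.UserGroupInformation;

public class TlsKrbGuardSketch {
  public static void check(URL url, boolean isTLSKrb) throws IOException {
    // isTLSKrb is true when dfs.http.policy resolves to HTTPS_ONLY.
    if (url.getProtocol().equals("http")
        && UserGroupInformation.isSecurityEnabled()
        && isTLSKrb) {
      throw new IOException("Access denied: dfs.http.policy is HTTPS_ONLY.");
    }
  }
}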
@@ -22,11 +22,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.3.0-SNAPSHOT</version>
+    <version>3.3.0</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-hdfs-httpfs</artifactId>
-  <version>3.3.0-SNAPSHOT</version>
+  <version>3.3.0</version>
   <packaging>jar</packaging>

   <name>Apache Hadoop HttpFS</name>

@@ -20,11 +20,11 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project-dist</artifactId>
-    <version>3.3.0-SNAPSHOT</version>
+    <version>3.3.0</version>
     <relativePath>../../hadoop-project-dist</relativePath>
   </parent>
   <artifactId>hadoop-hdfs-native-client</artifactId>
-  <version>3.3.0-SNAPSHOT</version>
+  <version>3.3.0</version>
   <description>Apache Hadoop HDFS Native Client</description>
   <name>Apache Hadoop HDFS Native Client</name>
   <packaging>jar</packaging>
@@ -3409,7 +3409,7 @@ tOffset hdfsGetUsed(hdfsFS fs)
     }
     fss = (jobject)jVal.l;
     jthr = invokeMethod(env, &jVal, INSTANCE, fss, JC_FS_STATUS,
-                        HADOOP_FSSTATUS,"getUsed", "()J");
+                        "getUsed", "()J");
     destroyLocalReference(env, fss);
     if (jthr) {
         errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -20,11 +20,11 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.3.0-SNAPSHOT</version>
+    <version>3.3.0</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-hdfs-nfs</artifactId>
-  <version>3.3.0-SNAPSHOT</version>
+  <version>3.3.0</version>
   <description>Apache Hadoop HDFS-NFS</description>
   <name>Apache Hadoop HDFS-NFS</name>
   <packaging>jar</packaging>

@@ -20,11 +20,11 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project-dist</artifactId>
-    <version>3.3.0-SNAPSHOT</version>
+    <version>3.3.0</version>
     <relativePath>../../hadoop-project-dist</relativePath>
   </parent>
   <artifactId>hadoop-hdfs-rbf</artifactId>
-  <version>3.3.0-SNAPSHOT</version>
+  <version>3.3.0</version>
   <description>Apache Hadoop HDFS-RBF</description>
   <name>Apache Hadoop HDFS-RBF</name>
   <packaging>jar</packaging>
@@ -47,7 +47,7 @@ import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.io.retry.RetryUtils;
-import org.apache.hadoop.ipc.ProtobufRpcEngine;
+import org.apache.hadoop.ipc.ProtobufRpcEngine2;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.RefreshUserMappingsProtocol;

@@ -379,7 +379,7 @@ public class ConnectionPool {
       throw new IllegalStateException(msg);
     }
     ProtoImpl classes = PROTO_MAP.get(proto);
-    RPC.setProtocolEngine(conf, classes.protoPb, ProtobufRpcEngine.class);
+    RPC.setProtocolEngine(conf, classes.protoPb, ProtobufRpcEngine2.class);

     final RetryPolicy defaultPolicy = RetryUtils.getDefaultRetryPolicy(conf,
         HdfsClientConfigKeys.Retry.POLICY_ENABLED_KEY,
@@ -34,6 +34,8 @@ import org.apache.hadoop.hdfs.server.federation.store.StateStoreUnavailableException;
 import org.apache.hadoop.hdfs.server.federation.store.StateStoreUtils;
 import org.apache.hadoop.hdfs.server.federation.store.records.RouterState;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.service.AbstractService;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

@@ -170,7 +172,12 @@ public class MountTableRefresherService extends AbstractService {
   @VisibleForTesting
   protected RouterClient createRouterClient(InetSocketAddress routerSocket,
       Configuration config) throws IOException {
-    return new RouterClient(routerSocket, config);
+    return SecurityUtil.doAsLoginUser(() -> {
+      if (UserGroupInformation.isSecurityEnabled()) {
+        UserGroupInformation.getLoginUser().checkTGTAndReloginFromKeytab();
+      }
+      return new RouterClient(routerSocket, config);
+    });
   }

   @Override
@@ -23,6 +23,8 @@ import java.util.concurrent.CountDownLatch;
 import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.RefreshMountTableEntriesRequest;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.RefreshMountTableEntriesResponse;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

@@ -61,10 +63,16 @@ public class MountTableRefresherThread extends Thread {
   @Override
   public void run() {
     try {
-      RefreshMountTableEntriesResponse refreshMountTableEntries =
-          manager.refreshMountTableEntries(
-              RefreshMountTableEntriesRequest.newInstance());
-      success = refreshMountTableEntries.getResult();
+      SecurityUtil.doAsLoginUser(() -> {
+        if (UserGroupInformation.isSecurityEnabled()) {
+          UserGroupInformation.getLoginUser().checkTGTAndReloginFromKeytab();
+        }
+        RefreshMountTableEntriesResponse refreshMountTableEntries = manager
+            .refreshMountTableEntries(
+                RefreshMountTableEntriesRequest.newInstance());
+        success = refreshMountTableEntries.getResult();
+        return true;
+      });
    } catch (IOException e) {
      LOG.error("Failed to refresh mount table entries cache at router {}",
          adminAddress, e);
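Both refresher hunks wrap their router RPC in the same security idiom: run the call as the login user and renew the Kerberos TGT from the keytab first. The generic form of the pattern, extracted from the two call sites above:

import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;

public class LoginUserCallSketch {
  public static <T> T callAsLoginUser(PrivilegedExceptionAction<T> action)
      throws IOException {
    return SecurityUtil.doAsLoginUser(() -> {
      if (UserGroupInformation.isSecurityEnabled()) {
        // Re-login from the keytab if the ticket needs renewal.
        UserGroupInformation.getLoginUser().checkTGTAndReloginFromKeytab();
      }
      return action.run();
    });
  }
}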
@@ -75,7 +75,7 @@ import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryRequest;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryResponse;
 import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.ipc.ProtobufRpcEngine;
+import org.apache.hadoop.ipc.ProtobufRpcEngine2;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RPC.Server;
 import org.apache.hadoop.ipc.RefreshRegistry;

@@ -136,7 +136,7 @@ public class RouterAdminServer extends AbstractService
         RBFConfigKeys.DFS_ROUTER_ADMIN_HANDLER_COUNT_DEFAULT);

     RPC.setProtocolEngine(this.conf, RouterAdminProtocolPB.class,
-        ProtobufRpcEngine.class);
+        ProtobufRpcEngine2.class);

     RouterAdminProtocolServerSideTranslatorPB routerAdminProtocolTranslator =
         new RouterAdminProtocolServerSideTranslatorPB(this);
@@ -28,7 +28,7 @@ import org.apache.hadoop.hdfs.protocolPB.RouterAdminProtocolPB;
 import org.apache.hadoop.hdfs.protocolPB.RouterAdminProtocolTranslatorPB;
 import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager;
 import org.apache.hadoop.hdfs.server.federation.resolver.RouterGenericManager;
-import org.apache.hadoop.ipc.ProtobufRpcEngine;
+import org.apache.hadoop.ipc.ProtobufRpcEngine2;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;

@@ -47,7 +47,7 @@ public class RouterClient implements Closeable {
       throws IOException {

     RPC.setProtocolEngine(
-        conf, RouterAdminProtocolPB.class, ProtobufRpcEngine.class);
+        conf, RouterAdminProtocolPB.class, ProtobufRpcEngine2.class);

     AtomicBoolean fallbackToSimpleAuth = new AtomicBoolean(false);
     final long version = RPC.getProtocolVersion(RouterAdminProtocolPB.class);
@@ -33,6 +33,7 @@ import java.util.Map;
 import java.util.Map.Entry;

 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeServiceState;
 import org.apache.hadoop.hdfs.server.federation.store.MembershipStore;
 import org.apache.hadoop.hdfs.server.federation.store.StateStoreService;

@@ -48,6 +49,7 @@ import org.slf4j.LoggerFactory;
  * Wrapper for the Router to offer the Namenode FSCK.
  */
 @InterfaceAudience.Private
+@InterfaceStability.Unstable
 public class RouterFsck {

   public static final Logger LOG =

@@ -69,6 +71,10 @@ public class RouterFsck {
   public void fsck() {
     final long startTime = Time.monotonicNow();
     try {
+      String warnMsg = "Now FSCK to DFSRouter is unstable feature. " +
+          "There may be incompatible changes between releases.";
+      LOG.warn(warnMsg);
+      out.println(warnMsg);
       String msg = "Federated FSCK started by " +
           UserGroupInformation.getCurrentUser() + " from " + remoteAddress +
           " at " + new Date();
@@ -133,7 +133,7 @@ import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.ipc.ProtobufRpcEngine;
+import org.apache.hadoop.ipc.ProtobufRpcEngine2;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RPC.Server;
 import org.apache.hadoop.ipc.RemoteException;

@@ -256,7 +256,7 @@ public class RouterRpcServer extends AbstractService implements ClientProtocol,
         readerQueueSize);

     RPC.setProtocolEngine(this.conf, ClientNamenodeProtocolPB.class,
-        ProtobufRpcEngine.class);
+        ProtobufRpcEngine2.class);

     ClientNamenodeProtocolServerSideTranslatorPB
         clientProtocolServerTranslator =
@@ -71,7 +71,7 @@ import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryResponse;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryRequest;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryResponse;
 import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
-import org.apache.hadoop.ipc.ProtobufRpcEngine;
+import org.apache.hadoop.ipc.ProtobufRpcEngine2;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RefreshResponse;
 import org.apache.hadoop.ipc.RemoteException;

@@ -1222,7 +1222,7 @@ public class RouterAdmin extends Configured implements Tool {
     InetSocketAddress address = NetUtils.createSocketAddr(hostport);
     UserGroupInformation ugi = UserGroupInformation.getCurrentUser();

-    RPC.setProtocolEngine(conf, xface, ProtobufRpcEngine.class);
+    RPC.setProtocolEngine(conf, xface, ProtobufRpcEngine2.class);
     GenericRefreshProtocolPB proxy = (GenericRefreshProtocolPB)RPC.getProxy(
         xface, RPC.getProtocolVersion(xface), address, ugi, conf,
         NetUtils.getDefaultSocketFactory(conf), 0);
@@ -474,7 +474,10 @@ public final class FederationTestUtils {

   /**
    * Add a mount table entry in some name services and wait until it is
-   * available.
+   * available. If there are multiple routers,
+   * {@link #createMountTableEntry(List, String, DestinationOrder, Collection)}
+   * should be used instead because the method does not refresh
+   * the mount tables of the other routers.
    * @param router Router to change.
    * @param mountPoint Name of the mount point.
    * @param order Order of the mount table entry.
Some files were not shown because too many files have changed in this diff.