HDDS-1299. Support TokenIssuer interface for running jobs with OzoneFileSystem. Contributed by Xiaoyu Yao.
This closes #627.
Parent: 43e421afef
Commit: 6a34c9bb29
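The practical effect of implementing the TokenIssuer/KeyProviderTokenIssuer interfaces is that a job client can collect both the Ozone delegation token and the KMS delegation token from an o3fs:// FileSystem before submitting work to YARN. Below is a minimal sketch of that flow; it is not part of this patch, relies only on the stock `FileSystem.addDelegationTokens` API, and assumes `fs.defaultFS` points at an o3fs bucket with Kerberos enabled, as in the docker-config added further down (the class name and renewer value are illustrative only).

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;

// Hypothetical driver: not part of HDDS-1299, only illustrates how the new
// token-issuer plumbing is exercised at job submission time.
public class O3fsTokenCollectionSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumes fs.defaultFS=o3fs://bucket1.vol1/ and a valid Kerberos login,
    // matching the ozonesecure-mr compose configuration in this change.
    FileSystem fs = FileSystem.get(conf);

    Credentials creds = new Credentials();
    // addDelegationTokens() treats the FileSystem as a token issuer: it asks
    // for the filesystem's own Ozone delegation token and then walks
    // getAdditionalTokenIssuers(), which after this patch returns the KMS
    // KeyProvider, so a KMS token can be fetched for encrypted buckets too.
    Token<?>[] tokens = fs.addDelegationTokens("yarn", creds);

    for (Token<?> t : tokens) {
      System.out.println("kind=" + t.getKind() + " service=" + t.getService());
    }
  }
}
```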
@@ -19,6 +19,7 @@
package org.apache.hadoop.ozone.client;

import java.io.IOException;
import java.net.URI;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
@@ -26,6 +27,7 @@ import java.util.NoSuchElementException;
import java.util.Objects;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
import org.apache.hadoop.hdds.tracing.TracingUtil;
import org.apache.hadoop.io.Text;
@@ -50,6 +52,7 @@ public class ObjectStore {
   * The proxy used for connecting to the cluster and perform
   * client operations.
   */
  // TODO: remove rest api and client
  private final ClientProtocol proxy;

  /**
@@ -259,6 +262,14 @@ public class ObjectStore {
    proxy.deleteVolume(volumeName);
  }

  public KeyProvider getKeyProvider() throws IOException {
    return proxy.getKeyProvider();
  }

  public URI getKeyProviderUri() throws IOException {
    return proxy.getKeyProviderUri();
  }

  /**
   * An Iterator to iterate over {@link OzoneVolume} list.
   */
@@ -426,5 +437,11 @@ public class ObjectStore {
    proxy.cancelDelegationToken(token);
  }

  /**
   * @return canonical service name of ozone delegation token.
   */
  public String getCanonicalServiceName() {
    return proxy.getCanonicalServiceName();
  }

}

@@ -19,6 +19,7 @@
package org.apache.hadoop.ozone.client.protocol;

import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.hdds.protocol.StorageType;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ozone.OzoneAcl;
@@ -34,6 +35,7 @@ import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo;

import java.io.IOException;
import java.net.URI;
import java.util.List;
import java.util.Map;

@@ -511,4 +513,24 @@ public interface ClientProtocol {

  @VisibleForTesting
  OMFailoverProxyProvider getOMProxyProvider();

  /**
   * Get KMS client provider.
   * @return KMS client provider.
   * @throws IOException
   */
  KeyProvider getKeyProvider() throws IOException;

  /**
   * Get KMS client provider uri.
   * @return KMS client provider uri.
   * @throws IOException
   */
  URI getKeyProviderUri() throws IOException;

  /**
   * Get CanonicalServiceName for ozone delegation token.
   * @return Canonical Service Name of ozone delegation token.
   */
  String getCanonicalServiceName();
}

@@ -23,6 +23,7 @@ import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.hdds.protocol.StorageType;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
@@ -42,6 +43,7 @@ import org.apache.hadoop.ozone.client.rest.headers.Header;
import org.apache.hadoop.ozone.client.rest.response.BucketInfo;
import org.apache.hadoop.ozone.client.rest.response.KeyInfoDetails;
import org.apache.hadoop.ozone.client.rest.response.VolumeInfo;
import org.apache.hadoop.ozone.client.rpc.OzoneKMSUtil;
import org.apache.hadoop.ozone.om.OMConfigKeys;
import org.apache.hadoop.ozone.om.ha.OMFailoverProxyProvider;
import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
@@ -729,6 +731,17 @@ public class RestClient implements ClientProtocol {
    return null;
  }

  @Override
  public KeyProvider getKeyProvider() throws IOException {
    // TODO: fix me to support kms instances for difference OMs
    return OzoneKMSUtil.getKeyProvider(conf, getKeyProviderUri());
  }

  @Override
  public URI getKeyProviderUri() throws IOException {
    return OzoneKMSUtil.getKeyProviderUri(ugi, null, null, conf);
  }

  @Override
  public OzoneInputStream getKey(
      String volumeName, String bucketName, String keyName)
@@ -1060,4 +1073,13 @@ public class RestClient implements ClientProtocol {
    throw new UnsupportedOperationException("Ozone REST protocol does not " +
        "support this operation.");
  }

  /**
   * Get CanonicalServiceName for ozone delegation token.
   * @return Canonical Service Name of ozone delegation token.
   */
  public String getCanonicalServiceName(){
    throw new UnsupportedOperationException("Ozone REST protocol does not " +
        "support this operation.");
  }
}

@@ -25,6 +25,7 @@ import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.crypto.CryptoInputStream;
import org.apache.hadoop.crypto.CryptoOutputStream;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProviderTokenIssuer;
import org.apache.hadoop.fs.FileEncryptionInfo;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.StorageType;
@@ -92,6 +93,7 @@ import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.URI;
import java.util.*;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
@@ -101,7 +103,7 @@ import java.util.stream.Collectors;
 * to execute client calls. This uses RPC protocol for communication
 * with the servers.
 */
public class RpcClient implements ClientProtocol {
public class RpcClient implements ClientProtocol, KeyProviderTokenIssuer {

  private static final Logger LOG =
      LoggerFactory.getLogger(RpcClient.class);
@@ -124,6 +126,7 @@ public class RpcClient implements ClientProtocol {
  private final long watchTimeout;
  private final ClientId clientId = ClientId.randomId();
  private final int maxRetryCount;
  private Text dtService;

  /**
   * Creates RpcClient instance with the given configuration.
@@ -208,6 +211,8 @@ public class RpcClient implements ClientProtocol {
    maxRetryCount =
        conf.getInt(OzoneConfigKeys.OZONE_CLIENT_MAX_RETRIES, OzoneConfigKeys.
            OZONE_CLIENT_MAX_RETRIES_DEFAULT);
    dtService =
        getOMProxyProvider().getProxy().getDelegationTokenService();
  }

  private InetSocketAddress getScmAddressForClient() throws IOException {
@@ -452,12 +457,11 @@ public class RpcClient implements ClientProtocol {
    Token<OzoneTokenIdentifier> token =
        ozoneManagerClient.getDelegationToken(renewer);
    if (token != null) {
      Text dtService =
          getOMProxyProvider().getProxy().getDelegationTokenService();
      token.setService(dtService);
      LOG.debug("Created token {}", token);
      LOG.debug("Created token {} for dtService {}", token, dtService);
    } else {
      LOG.debug("Cannot get ozone delegation token from {}", renewer);
      LOG.debug("Cannot get ozone delegation token for renewer {} to access " +
          "service {}", renewer, dtService);
    }
    return token;
  }
@@ -646,10 +650,8 @@ public class RpcClient implements ClientProtocol {
    // check crypto protocol version
    OzoneKMSUtil.checkCryptoProtocolVersion(feInfo);
    KeyProvider.KeyVersion decrypted;
    // TODO: support get kms uri from om rpc server.
    decrypted = OzoneKMSUtil.decryptEncryptedDataEncryptionKey(feInfo,
        OzoneKMSUtil.getKeyProvider(conf, OzoneKMSUtil.getKeyProviderUri(
        ugi, null, null, conf)));
        getKeyProvider());
    return decrypted;
  }

@@ -968,4 +970,25 @@ public class RpcClient implements ClientProtocol {

  }

  @Override
  public KeyProvider getKeyProvider() throws IOException {
    return OzoneKMSUtil.getKeyProvider(conf, getKeyProviderUri());
  }

  @Override
  public URI getKeyProviderUri() throws IOException {
    // TODO: fix me to support kms instances for difference OMs
    return OzoneKMSUtil.getKeyProviderUri(ugi,
        null, null, conf);
  }

  @Override
  public String getCanonicalServiceName() {
    return (dtService != null) ? dtService.toString() : null;
  }

  @Override
  public Token<?> getDelegationToken(String renewer) throws IOException {
    return getDelegationToken(renewer == null ? null : new Text(renewer));
  }
}

@@ -134,10 +134,8 @@ public class OzoneDelegationTokenSecretManager
    addToTokenStore(identifier, password, expiryTime);
    Token<OzoneTokenIdentifier> token = new Token<>(identifier.getBytes(),
        password, identifier.getKind(), getService());
    if (LOG.isTraceEnabled()) {
      String tokenId = identifier.toStringStable();
      LOG.trace("Issued delegation token -> expiryTime:{},tokenId:{}",
          expiryTime, tokenId);
    if (LOG.isDebugEnabled()) {
      LOG.debug("Created delegation token: {}", token);
    }
    return token;
  }

@@ -0,0 +1,18 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

HDDS_VERSION=${hdds.version}
HADOOP_VERSION=3

@@ -0,0 +1,76 @@
<!---
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->
# Secure Docker-compose with KMS, Yarn RM and NM

This docker-compose setup allows you to test sample MapReduce jobs with OzoneFileSystem.
It is a superset of the ozonesecure docker-compose, adding Yarn NM/RM in addition
to Ozone OM/SCM/NM/DN and a Kerberos KDC.

## Basic setup

```
cd hadoop-ozone/dist/target/ozone-0.5.0-SNAPSHOT/compose/ozonesecure-mr

docker-compose up -d
```
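
Not part of the original walkthrough, but a quick way to confirm that all of the services defined in the compose file actually came up (plain docker-compose, nothing Ozone-specific assumed):

```
docker-compose ps
```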

## Ozone Manager Setup

```
docker-compose exec om bash

kinit -kt /etc/security/keytabs/testuser.keytab testuser/om@EXAMPLE.COM

ozone sh volume create /vol1

ozone sh bucket create /vol1/bucket1

ozone sh key put /vol1/bucket1/key1 LICENSE.txt

ozone fs -ls o3fs://bucket1.vol1/
```

## Yarn Resource Manager Setup

```
docker-compose exec rm bash

kinit -kt /etc/security/keytabs/hadoop.keytab hadoop/rm@EXAMPLE.COM
export HADOOP_MAPRED_HOME=/opt/hadoop/share/hadoop/mapreduce

export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/opt/hadoop/share/hadoop/mapreduce/*:/opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-current-0.5.0-SNAPSHOT.jar

hadoop fs -mkdir /user
hadoop fs -mkdir /user/hadoop
```

## Run Examples

### WordCount

Status: Fully working with HDDS-1299

```
yarn jar $HADOOP_MAPRED_HOME/hadoop-mapreduce-examples-*.jar wordcount o3fs://bucket1.vol1/key1 o3fs://bucket1.vol1/key1.count

hadoop fs -cat /key1.count/part-r-00000
```
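
As an optional extra check (not in the original steps), the WordCount output can also be confirmed from the Ozone side by going back to the `om` container used earlier and listing the keys in the bucket, looking for the `key1.count` entries:

```
ozone sh key list /vol1/bucket1
```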

### Pi

Status: Not fully working yet, tracked by HDDS-1317

```
yarn jar $HADOOP_MAPRED_HOME/hadoop-mapreduce-examples-*.jar pi 10 100
```

### RandomWrite

Status: Not fully working yet, tracked by HDDS-1317

```
yarn jar $HADOOP_MAPRED_HOME/hadoop-mapreduce-examples-*.jar randomwriter -Dtest.randomwrite.total_bytes=10000000 o3fs://bucket1.vol1/randomwrite.out
```

@@ -0,0 +1,114 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

version: "3"
services:
  kdc:
    build:
      context: docker-image/docker-krb5
      dockerfile: Dockerfile-krb5
      args:
        buildno: 1
    hostname: kdc
    volumes:
      - ../..:/opt/hadoop
  kms:
    image: apache/hadoop:${HADOOP_VERSION}
    ports:
      - 9600:9600
    env_file:
      - ./docker-config
    command: ["hadoop", "kms"]
  datanode:
    image: apache/hadoop-runner
    volumes:
      - ../..:/opt/hadoop
    ports:
      - 9864
    command: ["/opt/hadoop/bin/ozone","datanode"]
    env_file:
      - docker-config
  om:
    image: apache/hadoop-runner
    hostname: om
    volumes:
      - ../..:/opt/hadoop
    ports:
      - 9874:9874
    environment:
      WAITFOR: scm:9876
      ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION
    env_file:
      - docker-config
    command: ["/opt/hadoop/bin/ozone","om"]
  s3g:
    image: apache/hadoop-runner
    hostname: s3g
    volumes:
      - ../..:/opt/hadoop
    ports:
      - 9878:9878
    env_file:
      - ./docker-config
    command: ["/opt/hadoop/bin/ozone","s3g"]
  scm:
    image: apache/hadoop-runner:latest
    hostname: scm
    volumes:
      - ../..:/opt/hadoop
    ports:
      - 9876:9876
    env_file:
      - docker-config
    environment:
      ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION
    command: ["/opt/hadoop/bin/ozone","scm"]
  rm:
    image: apache/hadoop:${HADOOP_VERSION}
    hostname: rm
    volumes:
      - ../..:/opt/ozone
    ports:
      - 8088:8088
    env_file:
      - ./docker-config
    environment:
      HADOOP_CLASSPATH: /opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-current-0.5.0-SNAPSHOT.jar
    command: ["yarn", "resourcemanager"]
  nm:
    image: apache/hadoop:${HADOOP_VERSION}
    hostname: nm
    volumes:
      - ../..:/opt/ozone
    env_file:
      - ./docker-config
    environment:
      HADOOP_CLASSPATH: /opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-current-0.5.0-SNAPSHOT.jar
      WAIT_FOR: rm:8088
    command: ["yarn","nodemanager"]
  jhs:
    image: apache/hadoop:${HADOOP_VERSION}
    hostname: jhs
    volumes:
      - ../..:/opt/ozone
    ports:
      - 8188:8188
    env_file:
      - ./docker-config
    environment:
      HADOOP_CLASSPATH: /opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-current-0.5.0-SNAPSHOT.jar
      WAIT_FOR: rm:8088
    command: ["yarn","timelineserver"]

@@ -0,0 +1,177 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

OZONE-SITE.XML_ozone.om.address=om
OZONE-SITE.XML_ozone.om.http-address=om:9874
OZONE-SITE.XML_ozone.scm.names=scm
OZONE-SITE.XML_ozone.enabled=True
OZONE-SITE.XML_ozone.scm.datanode.id=/data/datanode.id
OZONE-SITE.XML_ozone.scm.block.client.address=scm
OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
OZONE-SITE.XML_ozone.handler.type=distributed
OZONE-SITE.XML_ozone.scm.client.address=scm
OZONE-SITE.XML_hdds.block.token.enabled=true
OZONE-SITE.XML_ozone.replication=1
OZONE-SITE.XML_hdds.scm.kerberos.principal=scm/scm@EXAMPLE.COM
OZONE-SITE.XML_hdds.scm.kerberos.keytab.file=/etc/security/keytabs/scm.keytab
OZONE-SITE.XML_ozone.om.kerberos.principal=om/om@EXAMPLE.COM
OZONE-SITE.XML_ozone.om.kerberos.keytab.file=/etc/security/keytabs/om.keytab
OZONE-SITE.XML_ozone.s3g.keytab.file=/etc/security/keytabs/HTTP.keytab
OZONE-SITE.XML_ozone.s3g.authentication.kerberos.principal=HTTP/s3g@EXAMPLE.COM

OZONE-SITE.XML_ozone.security.enabled=true
OZONE-SITE.XML_hdds.scm.http.kerberos.principal=HTTP/scm@EXAMPLE.COM
OZONE-SITE.XML_hdds.scm.http.kerberos.keytab=/etc/security/keytabs/HTTP.keytab
OZONE-SITE.XML_ozone.om.http.kerberos.principal=HTTP/om@EXAMPLE.COM
OZONE-SITE.XML_ozone.om.http.kerberos.keytab=/etc/security/keytabs/HTTP.keytab
HDFS-SITE.XML_dfs.datanode.kerberos.principal=dn/_HOST@EXAMPLE.COM
HDFS-SITE.XML_dfs.datanode.keytab.file=/etc/security/keytabs/dn.keytab
HDFS-SITE.XML_dfs.web.authentication.kerberos.principal=HTTP/_HOST@EXAMPLE.COM
HDFS-SITE.XML_dfs.web.authentication.kerberos.keytab=/etc/security/keytabs/HTTP.keytab
OZONE-SITE.XML_hdds.datanode.dir=/data/hdds
HDFS-SITE.XML_dfs.datanode.address=0.0.0.0:1019
HDFS-SITE.XML_dfs.datanode.http.address=0.0.0.0:1012
CORE-SITE.XML_dfs.data.transfer.protection=authentication
CORE-SITE.XML_hadoop.security.authentication=kerberos
CORE-SITE.XML_hadoop.security.auth_to_local=RULE:[2:$1@$0](.*@EXAMPLE.COM)s/@.*///L
CORE-SITE.XML_hadoop.security.key.provider.path=kms://http@kms:9600/kms

#temporary disable authorization as org.apache.hadoop.yarn.server.api.ResourceTrackerPB is not properly annotated to support it
CORE-SITE.XML_hadoop.security.authorization=false
HADOOP-POLICY.XML_ozone.om.security.client.protocol.acl=*
HADOOP-POLICY.XML_hdds.security.client.datanode.container.protocol.acl=*
HADOOP-POLICY.XML_hdds.security.client.scm.container.protocol.acl=*
HADOOP-POLICY.XML_hdds.security.client.scm.block.protocol.acl=*
HADOOP-POLICY.XML_hdds.security.client.scm.certificate.protocol.acl=*
HADOOP-POLICY.XML_org.apache.hadoop.yarn.server.api.ResourceTracker.acl=*

HDFS-SITE.XML_rpc.metrics.quantile.enable=true
HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300

CORE-SITE.xml_fs.o3fs.impl=org.apache.hadoop.fs.ozone.OzoneFileSystem
CORE-SITE.xml_fs.AbstractFileSystem.o3fs.impl=org.apache.hadoop.fs.ozone.OzFs
CORE-SITE.xml_fs.defaultFS=o3fs://bucket1.vol1/

MAPRED-SITE.XML_mapreduce.framework.name=yarn
MAPRED-SITE.XML_yarn.app.mapreduce.am.env=HADOOP_MAPRED_HOME=$HADOOP_HOME
MAPRED-SITE.XML_mapreduce.map.env=HADOOP_MAPRED_HOME=$HADOOP_HOME
MAPRED-SITE.XML_mapreduce.reduce.env=HADOOP_MAPRED_HOME=$HADOOP_HOME
MAPRED-SITE.XML_mapreduce.map.memory.mb=2048
MAPRED-SITE.XML_mapreduce.reduce.memory.mb=2048
#MAPRED-SITE.XML_mapred.child.java.opts=-Xmx2048
MAPRED-SITE.XML_mapreduce.application.classpath=/opt/hadoop/share/hadoop/mapreduce/*:/opt/hadoop/share/hadoop/mapreduce/lib/*:/opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-current-0.5.0-SNAPSHOT.jar

YARN-SITE.XML_yarn.app.mapreduce.am.staging-dir=/user
YARN_SITE.XML_yarn.timeline-service.enabled=true
YARN_SITE.XML_yarn.timeline-service.generic.application.history.enabled=true
YARN_SITE.XML_yarn.timeline-service.hostname=jhs
YARN-SITE.XML_yarn.timeline-service.principal=jhs/jhs@EXAMPLE.COM
YARN-SITE.XML_yarn.timeline-service.keytab=/etc/security/keytabs/jhs.keytab
YARN_SITE.XML_yarn.log.server.url=http://jhs:8188/applicationhistory/logs/

YARN-SITE.XML_yarn.nodemanager.principal=nm/_HOST@EXAMPLE.COM
YARN-SITE.XML_yarn.nodemanager.keytab=/etc/security/keytabs/nm.keytab
YARN-SITE.XML_yarn.nodemanager.pmem-check-enabled=false
YARN-SITE.XML_yarn.nodemanager.delete.debug-delay-sec=600
YARN-SITE.XML_yarn.nodemanager.vmem-check-enabled=false
YARN-SITE.XML_yarn.nodemanager.aux-services=mapreduce_shuffle
YARN-SITE.XML_yarn.nodemanager.disk-health-checker.enable=false

YARN-SITE.XML_yarn.resourcemanager.hostname=rm
YARN-SITE.XML_yarn.resourcemanager.keytab=/etc/security/keytabs/rm.keytab
YARN-SITE.XML_yarn.resourcemanager.principal=rm/rm@EXAMPLE.COM
YARN_SITE_XML_yarn.resourcemanager.system.metrics.publisher.enabled=true

YARN-SITE.XML_yarn.log-aggregation-enable=true
YARN-SITE.yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds=3600
YARN-SITE.yarn.nodemanager.delete.debug-delay-sec=600

YARN-SITE.yarn.nodemanager.container-executor.class=org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor
YARN-SITE.yarn.nodemanager.linux-container-executor.path=/opt/hadoop/bin/container-executor
YARN-SITE.yarn.nodemanager.linux-container-executor.group=hadoop

CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.maximum-applications=10000
CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.maximum-am-resource-percent=0.1
CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.resource-calculator=org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator
CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.root.queues=default
CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.root.default.capacity=100
CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.root.default.user-limit-factor=1
CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.root.default.maximum-capacity=100
CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.root.default.state=RUNNING
CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.root.default.acl_submit_applications=*
CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.root.default.acl_administer_queue=*
CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.node-locality-delay=40
CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.queue-mappings=
CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.queue-mappings-override.enable=false

LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout
LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender
LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n
LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR
LOG4J.PROPERTIES_log4j.logger.org.apache.ratis.conf.ConfUtils=WARN
LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop=INFO
LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.security.ShellBasedUnixGroupsMapping=ERROR

#Enable this variable to print out all hadoop rpc traffic to the stdout. See http://byteman.jboss.org/ to define your own instrumentation.
#BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm

#LOG4J2.PROPERTIES_* are for Ozone Audit Logging
LOG4J2.PROPERTIES_monitorInterval=30
LOG4J2.PROPERTIES_filter=read,write
LOG4J2.PROPERTIES_filter.read.type=MarkerFilter
LOG4J2.PROPERTIES_filter.read.marker=READ
LOG4J2.PROPERTIES_filter.read.onMatch=DENY
LOG4J2.PROPERTIES_filter.read.onMismatch=NEUTRAL
LOG4J2.PROPERTIES_filter.write.type=MarkerFilter
LOG4J2.PROPERTIES_filter.write.marker=WRITE
LOG4J2.PROPERTIES_filter.write.onMatch=NEUTRAL
LOG4J2.PROPERTIES_filter.write.onMismatch=NEUTRAL
LOG4J2.PROPERTIES_appenders=console, rolling
LOG4J2.PROPERTIES_appender.console.type=Console
LOG4J2.PROPERTIES_appender.console.name=STDOUT
LOG4J2.PROPERTIES_appender.console.layout.type=PatternLayout
LOG4J2.PROPERTIES_appender.console.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n
LOG4J2.PROPERTIES_appender.rolling.type=RollingFile
LOG4J2.PROPERTIES_appender.rolling.name=RollingFile
LOG4J2.PROPERTIES_appender.rolling.fileName =${sys:hadoop.log.dir}/om-audit-${hostName}.log
LOG4J2.PROPERTIES_appender.rolling.filePattern=${sys:hadoop.log.dir}/om-audit-${hostName}-%d{yyyy-MM-dd-HH-mm-ss}-%i.log.gz
LOG4J2.PROPERTIES_appender.rolling.layout.type=PatternLayout
LOG4J2.PROPERTIES_appender.rolling.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n
LOG4J2.PROPERTIES_appender.rolling.policies.type=Policies
LOG4J2.PROPERTIES_appender.rolling.policies.time.type=TimeBasedTriggeringPolicy
LOG4J2.PROPERTIES_appender.rolling.policies.time.interval=86400
LOG4J2.PROPERTIES_appender.rolling.policies.size.type=SizeBasedTriggeringPolicy
LOG4J2.PROPERTIES_appender.rolling.policies.size.size=64MB
LOG4J2.PROPERTIES_loggers=audit
LOG4J2.PROPERTIES_logger.audit.type=AsyncLogger
LOG4J2.PROPERTIES_logger.audit.name=OMAudit
LOG4J2.PROPERTIES_logger.audit.level=INFO
LOG4J2.PROPERTIES_logger.audit.appenderRefs=rolling
LOG4J2.PROPERTIES_logger.audit.appenderRef.file.ref=RollingFile
LOG4J2.PROPERTIES_rootLogger.level=INFO
LOG4J2.PROPERTIES_rootLogger.appenderRefs=stdout
LOG4J2.PROPERTIES_rootLogger.appenderRef.stdout.ref=STDOUT

OZONE_DATANODE_SECURE_USER=root
KEYTAB_DIR=/etc/security/keytabs
KERBEROS_KEYTABS=dn om scm HTTP testuser s3g rm nm yarn jhs hadoop
KERBEROS_KEYSTORES=hadoop
KERBEROS_SERVER=kdc
JAVA_HOME=/usr/lib/jvm/jre
JSVC_HOME=/usr/bin
SLEEP_SECONDS=5
KERBEROS_ENABLED=true

hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-image/docker-krb5/Dockerfile-krb5
@@ -0,0 +1,35 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


FROM openjdk:8u191-jdk-alpine3.9
# hadolint ignore=DL3018
RUN apk add --no-cache bash ca-certificates openssl krb5-server krb5 && rm -rf /var/cache/apk/* && update-ca-certificates
RUN wget -O /usr/local/bin/dumb-init https://github.com/Yelp/dumb-init/releases/download/v1.2.0/dumb-init_1.2.0_amd64
RUN chmod +x /usr/local/bin/dumb-init
RUN wget -O /root/issuer https://github.com/ajayydv/docker/raw/kdc/issuer
RUN chmod +x /root/issuer
WORKDIR /opt
COPY krb5.conf /etc/
COPY kadm5.acl /var/lib/krb5kdc/kadm5.acl
RUN kdb5_util create -s -P Welcome1
RUN kadmin.local -q "addprinc -randkey admin/admin@EXAMPLE.COM"
RUN kadmin.local -q "ktadd -k /tmp/admin.keytab admin/admin@EXAMPLE.COM"
COPY launcher.sh .
RUN chmod +x /opt/launcher.sh
RUN mkdir -p /data
ENTRYPOINT ["/usr/local/bin/dumb-init", "--", "/opt/launcher.sh"]

hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-image/docker-krb5/README.md
@@ -0,0 +1,34 @@
<!---
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->

# Experimental UNSECURE krb5 Kerberos container.

Only for development. Not for production.

The docker image contains a rest service which provides keystore and keytab files without any authentication!

Master password: Welcome1

Principal: admin/admin@EXAMPLE.COM Password: Welcome1

Test:

```
docker run --net=host krb5

docker run --net=host -it --entrypoint=bash krb5
kinit admin/admin
#pwd: Welcome1
klist
```

hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-image/docker-krb5/kadm5.acl
@@ -0,0 +1,20 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

*/admin@EXAMPLE.COM x

hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-image/docker-krb5/krb5.conf
@@ -0,0 +1,41 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

[logging]
  default = FILE:/var/log/krb5libs.log
  kdc = FILE:/var/log/krb5kdc.log
  admin_server = FILE:/var/log/kadmind.log

[libdefaults]
  dns_canonicalize_hostname = false
  dns_lookup_realm = false
  ticket_lifetime = 24h
  renew_lifetime = 7d
  forwardable = true
  rdns = false
  default_realm = EXAMPLE.COM

[realms]
  EXAMPLE.COM = {
    kdc = localhost
    admin_server = localhost
    max_renewable_life = 7d
  }

[domain_realm]
  .example.com = EXAMPLE.COM
  example.com = EXAMPLE.COM

hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-image/docker-krb5/launcher.sh
@@ -0,0 +1,25 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -e
/root/issuer &
krb5kdc -n &
sleep 4
kadmind -nofork &
sleep 2
tail -f /var/log/krb5kdc.log &
tail -f /var/log/kadmind.log

@@ -1464,8 +1464,6 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
  @Override
  public Token<OzoneTokenIdentifier> getDelegationToken(Text renewer)
      throws OMException {
    final boolean success;
    final String tokenId;
    Token<OzoneTokenIdentifier> token;
    try {
      if (!isAllowedDelegationTokenOp()) {

@@ -17,11 +17,13 @@
 */
package org.apache.hadoop.fs.ozone;

import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.ozone.security.OzoneTokenIdentifier;
import org.apache.hadoop.security.token.Token;

import java.io.IOException;
import java.io.InputStream;
import java.net.URI;
import java.util.Iterator;

/**
@@ -57,4 +59,10 @@ public interface OzoneClientAdapter {

  Token<OzoneTokenIdentifier> getDelegationToken(String renewer)
      throws IOException;

  KeyProvider getKeyProvider() throws IOException;

  URI getKeyProviderUri() throws IOException;

  String getCanonicalServiceName();
}

@@ -21,12 +21,14 @@ import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;

import java.io.IOException;
import java.io.InputStream;
import java.net.URI;
import java.util.HashMap;
import java.util.Iterator;

import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -291,12 +293,27 @@ public class OzoneClientAdapterImpl implements OzoneClientAdapter {
      throws IOException {
    if (!securityEnabled) {
      return null;
    } else {
      Token<OzoneTokenIdentifier> token =
          ozoneClient.getObjectStore().getDelegationToken(new Text(renewer));
    }
    Token<OzoneTokenIdentifier> token = ozoneClient.getObjectStore()
        .getDelegationToken(renewer == null ? null : new Text(renewer));
    token.setKind(OzoneTokenIdentifier.KIND_NAME);
    return token;
  }

  @Override
  public KeyProvider getKeyProvider() throws IOException {
    return objectStore.getKeyProvider();
  }

  @Override
  public URI getKeyProviderUri() throws IOException {
    return objectStore.getKeyProviderUri();
  }

  @Override
  public String getCanonicalServiceName() {
    return objectStore.getCanonicalServiceName();
  }

  /**

@@ -38,6 +38,8 @@ import org.apache.commons.lang3.math.NumberUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProviderTokenIssuer;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
@@ -49,6 +51,7 @@ import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
import org.apache.hadoop.fs.GlobalStorageStatistics;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.DelegationTokenIssuer;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.Progressable;

@@ -73,7 +76,8 @@ import org.slf4j.LoggerFactory;
 */
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class OzoneFileSystem extends FileSystem {
public class OzoneFileSystem extends FileSystem
    implements KeyProviderTokenIssuer {
  static final Logger LOG = LoggerFactory.getLogger(OzoneFileSystem.class);

  /**
@@ -300,6 +304,26 @@ public class OzoneFileSystem extends FileSystem {
        + getClass().getSimpleName() + " FileSystem implementation");
  }

  @Override
  public KeyProvider getKeyProvider() throws IOException {
    return adapter.getKeyProvider();
  }

  @Override
  public URI getKeyProviderUri() throws IOException {
    return adapter.getKeyProviderUri();
  }

  @Override
  public DelegationTokenIssuer[] getAdditionalTokenIssuers()
      throws IOException {
    KeyProvider keyProvider = getKeyProvider();
    if (keyProvider instanceof DelegationTokenIssuer) {
      return new DelegationTokenIssuer[]{(DelegationTokenIssuer)keyProvider};
    }
    return null;
  }

  private class RenameIterator extends OzoneListingIterator {
    private final String srcKey;
    private final String dstKey;
@@ -691,6 +715,16 @@ public class OzoneFileSystem extends FileSystem {
    return adapter.getDelegationToken(renewer);
  }

  /**
   * Get a canonical service name for this file system. If the URI is logical,
   * the hostname part of the URI will be returned.
   * @return a service string that uniquely identifies this file system.
   */
  @Override
  public String getCanonicalServiceName() {
    return adapter.getCanonicalServiceName();
  }

  /**
   * Get the username of the FS.
   *