HDDS-1525. Mapreduce failure when using Hadoop 2.7.5

Closes #1065
Márton Elek 2019-07-10 14:27:55 +02:00
parent 93824886e9
commit bbf5844968
26 changed files with 675 additions and 306 deletions

View File

@@ -25,7 +25,6 @@ import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.crypto.CryptoInputStream;
 import org.apache.hadoop.crypto.CryptoOutputStream;
 import org.apache.hadoop.crypto.key.KeyProvider;
-import org.apache.hadoop.crypto.key.KeyProviderTokenIssuer;
 import org.apache.hadoop.fs.FileEncryptionInfo;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.StorageType;
@@ -109,7 +108,7 @@ import java.util.stream.Collectors;
  * to execute client calls. This uses RPC protocol for communication
  * with the servers.
  */
-public class RpcClient implements ClientProtocol, KeyProviderTokenIssuer {
+public class RpcClient implements ClientProtocol {
   private static final Logger LOG =
       LoggerFactory.getLogger(RpcClient.class);
@@ -1137,9 +1136,4 @@ public class RpcClient implements ClientProtocol, KeyProviderTokenIssuer {
   public String getCanonicalServiceName() {
     return (dtService != null) ? dtService.toString() : null;
   }
-
-  @Override
-  public Token<?> getDelegationToken(String renewer) throws IOException {
-    return getDelegationToken(renewer == null ? null : new Text(renewer));
-  }
 }

View File

@@ -28,8 +28,6 @@ OZONE-SITE.XML_hdds.datanode.dir=/data/hdds
 HDFS-SITE.XML_dfs.datanode.address=0.0.0.0:1019
 HDFS-SITE.XML_dfs.datanode.http.address=0.0.0.0:1012
-CORE-SITE.xml_fs.o3fs.impl=org.apache.hadoop.fs.ozone.OzoneFileSystem
-CORE-SITE.xml_fs.AbstractFileSystem.o3fs.impl=org.apache.hadoop.fs.ozone.OzFs
 CORE-SITE.xml_fs.defaultFS=o3fs://bucket1.vol1/
 MAPRED-SITE.XML_mapreduce.framework.name=yarn
@@ -39,7 +37,6 @@ MAPRED-SITE.XML_mapreduce.reduce.env=HADOOP_MAPRED_HOME=$HADOOP_HOME
 MAPRED-SITE.XML_mapreduce.map.memory.mb=4096
 MAPRED-SITE.XML_mapreduce.reduce.memory.mb=4096
 MAPRED-SITE.XML_mapred.child.java.opts=-Xmx2g
-MAPRED-SITE.XML_mapreduce.application.classpath=/opt/hadoop/share/hadoop/mapreduce/*:/opt/hadoop/share/hadoop/mapreduce/lib/*:/opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-current-0.5.0-SNAPSHOT.jar
 YARN-SITE.XML_yarn.app.mapreduce.am.staging-dir=/user
 YARN_SITE.XML_yarn.timeline-service.enabled=true
@@ -87,44 +84,3 @@ LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR
 LOG4J.PROPERTIES_log4j.logger.org.apache.ratis.conf.ConfUtils=WARN
 LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop=INFO
 LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.security.ShellBasedUnixGroupsMapping=ERROR
-#Enable this variable to print out all hadoop rpc traffic to the stdout. See http://byteman.jboss.org/ to define your own instrumentation.
-#BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm
-#LOG4J2.PROPERTIES_* are for Ozone Audit Logging
-LOG4J2.PROPERTIES_monitorInterval=30
-LOG4J2.PROPERTIES_filter=read,write
-LOG4J2.PROPERTIES_filter.read.type=MarkerFilter
-LOG4J2.PROPERTIES_filter.read.marker=READ
-LOG4J2.PROPERTIES_filter.read.onMatch=DENY
-LOG4J2.PROPERTIES_filter.read.onMismatch=NEUTRAL
-LOG4J2.PROPERTIES_filter.write.type=MarkerFilter
-LOG4J2.PROPERTIES_filter.write.marker=WRITE
-LOG4J2.PROPERTIES_filter.write.onMatch=NEUTRAL
-LOG4J2.PROPERTIES_filter.write.onMismatch=NEUTRAL
-LOG4J2.PROPERTIES_appenders=console, rolling
-LOG4J2.PROPERTIES_appender.console.type=Console
-LOG4J2.PROPERTIES_appender.console.name=STDOUT
-LOG4J2.PROPERTIES_appender.console.layout.type=PatternLayout
-LOG4J2.PROPERTIES_appender.console.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n
-LOG4J2.PROPERTIES_appender.rolling.type=RollingFile
-LOG4J2.PROPERTIES_appender.rolling.name=RollingFile
-LOG4J2.PROPERTIES_appender.rolling.fileName=${sys:hadoop.log.dir}/om-audit-${hostName}.log
-LOG4J2.PROPERTIES_appender.rolling.filePattern=${sys:hadoop.log.dir}/om-audit-${hostName}-%d{yyyy-MM-dd-HH-mm-ss}-%i.log.gz
-LOG4J2.PROPERTIES_appender.rolling.layout.type=PatternLayout
-LOG4J2.PROPERTIES_appender.rolling.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n
-LOG4J2.PROPERTIES_appender.rolling.policies.type=Policies
-LOG4J2.PROPERTIES_appender.rolling.policies.time.type=TimeBasedTriggeringPolicy
-LOG4J2.PROPERTIES_appender.rolling.policies.time.interval=86400
-LOG4J2.PROPERTIES_appender.rolling.policies.size.type=SizeBasedTriggeringPolicy
-LOG4J2.PROPERTIES_appender.rolling.policies.size.size=64MB
-LOG4J2.PROPERTIES_loggers=audit
-LOG4J2.PROPERTIES_logger.audit.type=AsyncLogger
-LOG4J2.PROPERTIES_logger.audit.name=OMAudit
-LOG4J2.PROPERTIES_logger.audit.level=INFO
-LOG4J2.PROPERTIES_logger.audit.appenderRefs=rolling
-LOG4J2.PROPERTIES_logger.audit.appenderRef.file.ref=RollingFile
-LOG4J2.PROPERTIES_rootLogger.level=INFO
-LOG4J2.PROPERTIES_rootLogger.appenderRefs=stdout
-LOG4J2.PROPERTIES_rootLogger.appenderRef.stdout.ref=STDOUT

View File

@@ -14,5 +14,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-HDDS_VERSION=${hdds.version}
-HADOOP_RUNNER_VERSION=${docker.ozone-runner.version}
+HDDS_VERSION=@hdds.version@
+#TODO: switch to apache/hadoop. Older versions are not yet supported by apache/hadoop.
+# See: HADOOP-16092 for more details.
+HADOOP_IMAGE=flokkr/hadoop
+HADOOP_VERSION=2.7.7
+HADOOP_RUNNER_VERSION=@docker.ozone-runner.version@

View File

@@ -0,0 +1,102 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
version: "3"
services:
datanode:
image: apache/ozone-runner:${HADOOP_RUNNER_VERSION}
volumes:
- ../../..:/opt/hadoop
ports:
- 9864
command: ["/opt/hadoop/bin/ozone","datanode"]
env_file:
- docker-config
- ../common-config
om:
image: apache/ozone-runner:${HADOOP_RUNNER_VERSION}
hostname: om
volumes:
- ../../..:/opt/hadoop
ports:
- 9874:9874
environment:
WAITFOR: scm:9876
ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION
env_file:
- docker-config
- ../common-config
command: ["/opt/hadoop/bin/ozone","om"]
s3g:
image: apache/ozone-runner:${HADOOP_RUNNER_VERSION}
hostname: s3g
volumes:
- ../../..:/opt/hadoop
ports:
- 9878:9878
env_file:
- ./docker-config
- ../common-config
command: ["/opt/hadoop/bin/ozone","s3g"]
scm:
image: apache/ozone-runner:${HADOOP_RUNNER_VERSION}
hostname: scm
volumes:
- ../../..:/opt/hadoop
ports:
- 9876:9876
env_file:
- docker-config
- ../common-config
environment:
ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION
command: ["/opt/hadoop/bin/ozone","scm"]
rm:
image: ${HADOOP_IMAGE}:${HADOOP_VERSION}
hostname: rm
volumes:
- ../../..:/opt/ozone
ports:
- 8088:8088
env_file:
- ./docker-config
- ../common-config
environment:
HADOOP_CLASSPATH: /opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-legacy-@project.version@.jar
command: ["yarn", "resourcemanager"]
nm:
image: ${HADOOP_IMAGE}:${HADOOP_VERSION}
hostname: nm
volumes:
- ../../..:/opt/ozone
env_file:
- ./docker-config
- ../common-config
environment:
HADOOP_CLASSPATH: /opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-legacy-@project.version@.jar
WAIT_FOR: rm:8088
command: ["yarn","nodemanager"]
# Optional section: uncomment this part to get DNS resolution for all the containers.
# dns:
# image: andyshinn/dnsmasq:2.76
# ports:
# - 53:53/udp
# - 53:53/tcp
# volumes:
# - "/var/run/docker.sock:/var/run/docker.sock"
# command:
# - "-k"
# - "-d"

View File

@@ -0,0 +1,19 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
CORE-SITE.xml_fs.o3fs.impl=org.apache.hadoop.fs.ozone.BasicOzoneFileSystem
CORE-SITE.xml_fs.AbstractFileSystem.o3fs.impl=org.apache.hadoop.fs.ozone.BasicOzFs
MAPRED-SITE.XML_mapreduce.application.classpath=/opt/hadoop/share/hadoop/mapreduce/*:/opt/hadoop/share/hadoop/mapreduce/lib/*:/opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-legacy-@project.version@.jar

View File

@@ -19,20 +19,25 @@ COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
 export COMPOSE_DIR
 # shellcheck source=/dev/null
-source "$COMPOSE_DIR/../testlib.sh"
+source "$COMPOSE_DIR/../../testlib.sh"
 start_docker_env
-execute_robot_test scm ozonefs/ozonefs.robot
-## TODO: As of now the o3fs tests are unstable.
-export OZONE_HOME=/opt/ozone
-#execute_robot_test hadoop32 ozonefs/hadoopo3fs.robot
-#execute_robot_test hadoop31 ozonefs/hadoopo3fs.robot
+execute_robot_test scm createmrenv.robot
+#rm is the container name (resource manager) and not the rm command
+execute_command_in_container rm sudo apk add --update py-pip
+execute_command_in_container rm sudo pip install robotframework
+# reinitialize the directories to use
+export OZONE_DIR=/opt/ozone
+# shellcheck source=/dev/null
+source "$COMPOSE_DIR/../../testlib.sh"
+execute_robot_test rm ozonefs/hadoopo3fs.robot
+execute_robot_test rm -v hadoop.version:2.7.7 mapreduce.robot
 stop_docker_env

View File

@@ -0,0 +1,22 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
HDDS_VERSION=@hdds.version@
#TODO: switch to apache/hadoop. Older versions are not yet supported by apache/hadoop.
# See: HADOOP-16092 for more details.
HADOOP_IMAGE=flokkr/hadoop
HADOOP_VERSION=3.1.2
HADOOP_RUNNER_VERSION=@docker.ozone-runner.version@

View File

@@ -19,17 +19,18 @@ services:
   datanode:
     image: apache/ozone-runner:${HADOOP_RUNNER_VERSION}
     volumes:
-      - ../..:/opt/hadoop
+      - ../../..:/opt/hadoop
     ports:
       - 9864
     command: ["/opt/hadoop/bin/ozone","datanode"]
     env_file:
       - docker-config
+      - ../common-config
   om:
     image: apache/ozone-runner:${HADOOP_RUNNER_VERSION}
     hostname: om
     volumes:
-      - ../..:/opt/hadoop
+      - ../../..:/opt/hadoop
     ports:
       - 9874:9874
     environment:
@@ -37,26 +38,29 @@ services:
       ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION
     env_file:
       - docker-config
+      - ../common-config
     command: ["/opt/hadoop/bin/ozone","om"]
   s3g:
     image: apache/ozone-runner:${HADOOP_RUNNER_VERSION}
     hostname: s3g
     volumes:
-      - ../..:/opt/hadoop
+      - ../../..:/opt/hadoop
     ports:
       - 9878:9878
     env_file:
       - ./docker-config
+      - ../common-config
     command: ["/opt/hadoop/bin/ozone","s3g"]
   scm:
     image: apache/ozone-runner:${HADOOP_RUNNER_VERSION}
     hostname: scm
     volumes:
-      - ../..:/opt/hadoop
+      - ../../..:/opt/hadoop
     ports:
       - 9876:9876
     env_file:
       - docker-config
+      - ../common-config
     environment:
       ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION
     command: ["/opt/hadoop/bin/ozone","scm"]
@@ -64,11 +68,12 @@ services:
     image: ${HADOOP_IMAGE}:${HADOOP_VERSION}
     hostname: rm
     volumes:
-      - ../..:/opt/ozone
+      - ../../..:/opt/ozone
     ports:
       - 8088:8088
     env_file:
       - ./docker-config
+      - ../common-config
     environment:
       HADOOP_CLASSPATH: /opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-current-@project.version@.jar
     command: ["yarn", "resourcemanager"]
@@ -76,20 +81,11 @@ services:
     image: ${HADOOP_IMAGE}:${HADOOP_VERSION}
     hostname: nm
     volumes:
-      - ../..:/opt/ozone
+      - ../../..:/opt/ozone
     env_file:
       - ./docker-config
+      - ../common-config
     environment:
       HADOOP_CLASSPATH: /opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-current-@project.version@.jar
       WAIT_FOR: rm:8088
     command: ["yarn","nodemanager"]
-  dns:
-    image: andyshinn/dnsmasq:2.76
-    ports:
-      - 53:53/udp
-      - 53:53/tcp
-    volumes:
-      - "/var/run/docker.sock:/var/run/docker.sock"
-    command:
-      - "-k"
-      - "-d"

View File

@@ -0,0 +1,19 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
CORE-SITE.xml_fs.o3fs.impl=org.apache.hadoop.fs.ozone.OzoneFileSystem
CORE-SITE.xml_fs.AbstractFileSystem.o3fs.impl=org.apache.hadoop.fs.ozone.OzFs
MAPRED-SITE.XML_mapreduce.application.classpath=/opt/hadoop/share/hadoop/mapreduce/*:/opt/hadoop/share/hadoop/mapreduce/lib/*:/opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-current-@project.version@.jar

View File

@@ -0,0 +1,45 @@
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
export COMPOSE_DIR
# shellcheck source=/dev/null
source "$COMPOSE_DIR/../../testlib.sh"
start_docker_env
execute_robot_test scm createmrenv.robot
#rm is the container name (resource manager) and not the rm command
execute_command_in_container rm sudo apk add --update py-pip
execute_command_in_container rm sudo pip install robotframework
# reinitialize the directories to use
export OZONE_DIR=/opt/ozone
# shellcheck source=/dev/null
source "$COMPOSE_DIR/../../testlib.sh"
execute_robot_test rm ozonefs/hadoopo3fs.robot
execute_robot_test rm -v hadoop.version:3.1.2 mapreduce.robot
stop_docker_env
generate_report

View File

@@ -14,7 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-HDDS_VERSION=${hdds.version}
+HDDS_VERSION=@hdds.version@
 HADOOP_IMAGE=apache/hadoop
 HADOOP_VERSION=3
-HADOOP_RUNNER_VERSION=${docker.ozone-runner.version}
+HADOOP_RUNNER_VERSION=@docker.ozone-runner.version@

View File

@@ -0,0 +1,104 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
version: "3"
services:
datanode:
image: apache/ozone-runner:${HADOOP_RUNNER_VERSION}
volumes:
- ../../..:/opt/hadoop
ports:
- 9864
command: ["/opt/hadoop/bin/ozone","datanode"]
env_file:
- docker-config
- ../common-config
om:
image: apache/ozone-runner:${HADOOP_RUNNER_VERSION}
hostname: om
volumes:
- ../../..:/opt/hadoop
ports:
- 9874:9874
environment:
WAITFOR: scm:9876
ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION
env_file:
- docker-config
- ../common-config
command: ["/opt/hadoop/bin/ozone","om"]
s3g:
image: apache/ozone-runner:${HADOOP_RUNNER_VERSION}
hostname: s3g
volumes:
- ../../..:/opt/hadoop
ports:
- 9878:9878
env_file:
- ./docker-config
- ../common-config
command: ["/opt/hadoop/bin/ozone","s3g"]
scm:
image: apache/ozone-runner:${HADOOP_RUNNER_VERSION}
hostname: scm
volumes:
- ../../..:/opt/hadoop
ports:
- 9876:9876
env_file:
- docker-config
- ../common-config
environment:
ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION
command: ["/opt/hadoop/bin/ozone","scm"]
rm:
image: ${HADOOP_IMAGE}:${HADOOP_VERSION}
hostname: rm
volumes:
- ../../..:/opt/ozone
ports:
- 8088:8088
env_file:
- ./docker-config
- ../common-config
environment:
HADOOP_CLASSPATH: /opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-current-@project.version@.jar
command: ["yarn", "resourcemanager"]
nm:
image: ${HADOOP_IMAGE}:${HADOOP_VERSION}
hostname: nm
volumes:
- ../../..:/opt/ozone
env_file:
- ./docker-config
- ../common-config
environment:
HADOOP_CLASSPATH: /opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-current-@project.version@.jar
WAIT_FOR: rm:8088
command: ["yarn","nodemanager"]
# Optional section: uncomment this part to get DNS resolution for all the containers.
# Add 127.0.0.1 (or the IP of your docker machine) to resolv.conf to get local DNS resolution
# for all the containers (including the Resource Manager and Node Manager UI).
# dns:
# image: andyshinn/dnsmasq:2.76
# ports:
# - 53:53/udp
# - 53:53/tcp
# volumes:
# - "/var/run/docker.sock:/var/run/docker.sock"
# command:
# - "-k"
# - "-d"

View File

@@ -0,0 +1,19 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
CORE-SITE.xml_fs.o3fs.impl=org.apache.hadoop.fs.ozone.OzoneFileSystem
CORE-SITE.xml_fs.AbstractFileSystem.o3fs.impl=org.apache.hadoop.fs.ozone.OzFs
MAPRED-SITE.XML_mapreduce.application.classpath=/opt/hadoop/share/hadoop/mapreduce/*:/opt/hadoop/share/hadoop/mapreduce/lib/*:/opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-current-@project.version@.jar

View File

@@ -19,7 +19,7 @@ COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
 export COMPOSE_DIR
 # shellcheck source=/dev/null
-source "$COMPOSE_DIR/../testlib.sh"
+source "$COMPOSE_DIR/../../testlib.sh"
 start_docker_env
@@ -27,8 +27,12 @@ execute_robot_test scm createmrenv.robot
 # reinitialize the directories to use
 export OZONE_DIR=/opt/ozone
 # shellcheck source=/dev/null
-source "$COMPOSE_DIR/../testlib.sh"
+source "$COMPOSE_DIR/../../testlib.sh"
+execute_robot_test rm ozonefs/hadoopo3fs.robot
 execute_robot_test rm mapreduce.robot
 stop_docker_env

View File

@@ -1,100 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
version: "3"
services:
datanode:
image: apache/ozone-runner:${HADOOP_RUNNER_VERSION}
volumes:
- ../..:/opt/hadoop
ports:
- 9864
command: ["/opt/hadoop/bin/ozone","datanode"]
env_file:
- ./docker-config
om:
image: apache/ozone-runner:${HADOOP_RUNNER_VERSION}
hostname: om
volumes:
- ../..:/opt/hadoop
ports:
- 9874
environment:
ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION
env_file:
- ./docker-config
command: ["/opt/hadoop/bin/ozone","om"]
scm:
image: apache/ozone-runner:${HADOOP_RUNNER_VERSION}
volumes:
- ../..:/opt/hadoop
ports:
- 9876
env_file:
- ./docker-config
environment:
ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION
command: ["/opt/hadoop/bin/ozone","scm"]
hadoop32:
image: flokkr/hadoop:3.1.0
volumes:
- ../..:/opt/ozone
env_file:
- ./docker-config
environment:
HADOOP_CLASSPATH: /opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-current-@project.version@.jar
CORE-SITE.XML_fs.o3fs.impl: org.apache.hadoop.fs.ozone.OzoneFileSystem
command: ["watch","-n","100000","ls"]
hadoop31:
image: flokkr/hadoop:3.1.0
volumes:
- ../..:/opt/ozone
env_file:
- ./docker-config
environment:
HADOOP_CLASSPATH: /opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-legacy-@project.version@.jar
CORE-SITE.XML_fs.o3fs.impl: org.apache.hadoop.fs.ozone.OzoneFileSystem
command: ["watch","-n","100000","ls"]
hadoop29:
image: flokkr/hadoop:2.9.0
volumes:
- ../..:/opt/ozone
env_file:
- ./docker-config
environment:
HADOOP_CLASSPATH: /opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-legacy-@project.version@.jar
CORE-SITE.XML_fs.o3fs.impl: org.apache.hadoop.fs.ozone.BasicOzoneFileSystem
command: ["watch","-n","100000","ls"]
hadoop27:
image: flokkr/hadoop:2.7.3
volumes:
- ../..:/opt/ozone
env_file:
- ./docker-config
environment:
HADOOP_CLASSPATH: /opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-legacy-@project.version@.jar
CORE-SITE.XML_fs.o3fs.impl: org.apache.hadoop.fs.ozone.BasicOzoneFileSystem
command: ["watch","-n","100000","ls"]
spark:
image: flokkr/spark
volumes:
- ../..:/opt/ozone
env_file:
- ./docker-config
environment:
HADOOP_CLASSPATH: /opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-legacy-@project.version@.jar
CORE-SITE.XML_fs.o3fs.impl: org.apache.hadoop.fs.ozone.BasicOzoneFileSystem
command: ["watch","-n","100000","ls"]

View File

@@ -1,38 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
CORE-SITE.XML_fs.o3fs.impl=org.apache.hadoop.fs.ozone.OzoneFileSystem
OZONE-SITE.XML_ozone.om.address=om
OZONE-SITE.XML_ozone.om.http-address=om:9874
OZONE-SITE.XML_ozone.scm.names=scm
OZONE-SITE.XML_ozone.enabled=True
OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data
OZONE-SITE.XML_ozone.scm.block.client.address=scm
OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
OZONE-SITE.XML_ozone.handler.type=distributed
OZONE-SITE.XML_ozone.scm.client.address=scm
OZONE-SITE.XML_ozone.scm.heartbeat.interval=3s
OZONE-SITE.XML_hdds.datanode.dir=/data/hdds
HDFS-SITE.XML_rpc.metrics.quantile.enable=true
OZONE-SITE.XML_ozone.replication=1
HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300
LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout
LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender
LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n
LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR
LOG4J.PROPERTIES_log4j.logger.org.apache.ratis.conf.ConfUtils=WARN
LOG4J.PROPERTIES_log4j.logger.org.apache.ratis.grpc.client.GrpcClientProtocolClient=WARN

View File

@@ -1,56 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
*** Settings ***
Documentation Test ozone fs usage from Hdfs and Spark
Library OperatingSystem
Library String
Resource ../../smoketest/env-compose.robot
Resource ../../smoketest/commonlib.robot
*** Variables ***
${DATANODE_HOST} datanode
*** Keywords ***
Test hadoop dfs
[arguments] ${prefix}
${random} = Generate Random String 5 [NUMBERS]
${result} = Execute on host ${prefix} hdfs dfs -put /opt/hadoop/NOTICE.txt o3fs://bucket1.vol1/${prefix}-${random}
${result} = Execute on host ${prefix} hdfs dfs -ls o3fs://bucket1.vol1/
Should contain ${result} ${prefix}-${random}
*** Test Cases ***
Create bucket and volume to test
${result} = Run tests on host scm createbucketenv.robot
Test hadoop 3.1
Test hadoop dfs hadoop31
Test hadoop 3.2
Test hadoop dfs hadoop31
Test hadoop 2.9
Test hadoop dfs hadoop29
Test hadoop 2.7
Test hadoop dfs hadoop27
Test spark 2.3
${legacyjar} = Execute on host spark bash -c 'find /opt/ozone/share/ozone/lib/ -name *legacy*.jar'
${postfix} = Generate Random String 5 [NUMBERS]
${result} = Execute on host spark /opt/spark/bin/spark-submit --jars ${legacyjar} --class org.apache.spark.examples.DFSReadWriteTest /opt/spark//examples/jars/spark-examples_2.11-2.3.0.jar /opt/spark/README.md o3fs://bucket1.vol1/spark-${postfix}

View File

@@ -72,14 +72,19 @@ start_docker_env(){
 ## @param robot test file or directory relative to the smoketest dir
 execute_robot_test(){
   CONTAINER="$1"
-  TEST="$2"
+  shift 1 #Remove first argument which was the container name
+  # shellcheck disable=SC2206
+  ARGUMENTS=($@)
+  TEST="${ARGUMENTS[${#ARGUMENTS[@]}-1]}" #Use last element as the test name
+  unset 'ARGUMENTS[${#ARGUMENTS[@]}-1]' #Remove the last element, remainings are the custom parameters
   TEST_NAME=$(basename "$TEST")
   TEST_NAME="$(basename "$COMPOSE_DIR")-${TEST_NAME%.*}"
   set +e
   OUTPUT_NAME="$COMPOSE_ENV_NAME-$TEST_NAME-$CONTAINER"
   OUTPUT_PATH="$RESULT_DIR_INSIDE/robot-$OUTPUT_NAME.xml"
   docker-compose -f "$COMPOSE_FILE" exec -T "$CONTAINER" mkdir -p "$RESULT_DIR_INSIDE"
-  docker-compose -f "$COMPOSE_FILE" exec -e SECURITY_ENABLED="${SECURITY_ENABLED}" -T "$CONTAINER" python -m robot --log NONE -N "$TEST_NAME" --report NONE "${OZONE_ROBOT_OPTS[@]}" --output "$OUTPUT_PATH" "$SMOKETEST_DIR_INSIDE/$TEST"
+  # shellcheck disable=SC2068
+  docker-compose -f "$COMPOSE_FILE" exec -T -e SECURITY_ENABLED="${SECURITY_ENABLED}" "$CONTAINER" python -m robot ${ARGUMENTS[@]} --log NONE -N "$TEST_NAME" --report NONE "${OZONE_ROBOT_OPTS[@]}" --output "$OUTPUT_PATH" "$SMOKETEST_DIR_INSIDE/$TEST"
   FULL_CONTAINER_NAME=$(docker-compose -f "$COMPOSE_FILE" ps | grep "_${CONTAINER}_" | head -n 1 | awk '{print $1}')
   docker cp "$FULL_CONTAINER_NAME:$OUTPUT_PATH" "$RESULT_DIR/"
@@ -87,6 +92,18 @@ execute_robot_test(){
 }

+## @description Execute specific command in docker container
+## @param container name
+## @param specific command to execute
+execute_command_in_container(){
+  set -e
+  # shellcheck disable=SC2068
+  docker-compose -f "$COMPOSE_FILE" exec -T $@
+  set +e
+}
+
 ## @description Stops a docker-compose based test environment (with saving the logs)
 stop_docker_env(){
   docker-compose -f "$COMPOSE_FILE" logs > "$RESULT_DIR/docker-$OUTPUT_NAME.log"

View File

@@ -29,4 +29,4 @@ Test hadoop dfs
    ${random} =   Generate Random String   5   [NUMBERS]
    ${result} =   Execute   hdfs dfs -put /opt/hadoop/NOTICE.txt o3fs://bucket1.vol1/${PREFIX}-${random}
    ${result} =   Execute   hdfs dfs -ls o3fs://bucket1.vol1/
-   Should contain   ${PREFIX}-${random}
+   Should contain   ${result}   ${PREFIX}-${random}

View File

@@ -64,8 +64,12 @@
             <excludeArtifactIds>
               slf4j-api,slf4j-log4j12,log4j-api,log4j-core,log4j,hadoop-ozone-filesystem
             </excludeArtifactIds>
+            <markersDirectory>
+              ${project.build.directory}/dependency-maven-plugin-markers-lib
+            </markersDirectory>
           </configuration>
         </execution>
         <execution>
           <id>include-ozonefs</id>
           <goals>
@@ -74,9 +78,33 @@
           <phase>prepare-package</phase>
           <configuration>
             <outputDirectory>target/classes</outputDirectory>
-            <includeArtifactIds>hadoop-ozone-filesystem</includeArtifactIds>
+            <includeArtifactIds>hadoop-ozone-filesystem,hadoop-ozone-common
+            </includeArtifactIds>
             <includeScope>compile</includeScope>
             <excludes>META-INF/*.SF</excludes>
+            <markersDirectory>
+              ${project.build.directory}/dependency-maven-plugin-markers-direct
+            </markersDirectory>
+          </configuration>
+        </execution>
+        <execution>
+          <id>include-token</id>
+          <goals>
+            <goal>unpack-dependencies</goal>
+          </goals>
+          <phase>prepare-package</phase>
+          <configuration>
+            <outputDirectory>target/classes</outputDirectory>
+            <includeArtifactIds>hadoop-ozone-common,hadoop-hdds-common</includeArtifactIds>
+            <includeScope>compile</includeScope>
+            <includes>
+              org/apache/hadoop/ozone/security/OzoneTokenIdentifier.class,org/apache/hadoop/hdds/security/token/OzoneBlockTokenIdentifier.class,org/apache/hadoop/ozone/protocol/proto/OzoneManagerProtocolProtos*,org/apache/hadoop/hdds/protocol/proto/HddsProtos*
+            </includes>
+            <excludes>META-INF/*.SF</excludes>
+            <markersDirectory>
+              ${project.build.directory}/dependency-maven-plugin-markers-token
+            </markersDirectory>
           </configuration>
         </execution>
       </executions>

View File

@@ -0,0 +1,45 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.ozone;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.DelegateToFileSystem;
import org.apache.hadoop.ozone.OzoneConsts;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
/**
 * Ozone implementation of AbstractFileSystem.
 * This implementation delegates to the BasicOzoneFileSystem.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class BasicOzFs extends DelegateToFileSystem {
public BasicOzFs(URI theUri, Configuration conf)
throws IOException, URISyntaxException {
super(theUri, new BasicOzoneFileSystem(), conf,
OzoneConsts.OZONE_URI_SCHEME, false);
}
}
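BasicOzFs only matters for clients that go through the FileContext API (as YARN and MapReduce do), which resolves a scheme via fs.AbstractFileSystem.<scheme>.impl rather than fs.<scheme>.impl. A minimal sketch of that resolution, assuming the bucket1.vol1 volume and bucket from the compose files exist and the cluster is reachable; the class name O3fsFileContextSketch is illustrative only:

    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileContext;
    import org.apache.hadoop.fs.Path;

    public class O3fsFileContextSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Same bindings as the hadoop27 docker-config above; the Basic*
        // variants avoid Hadoop-3-only APIs, so they load on a 2.7 classpath.
        conf.set("fs.o3fs.impl",
            "org.apache.hadoop.fs.ozone.BasicOzoneFileSystem");
        conf.set("fs.AbstractFileSystem.o3fs.impl",
            "org.apache.hadoop.fs.ozone.BasicOzFs");
        // FileContext resolves o3fs through the AbstractFileSystem binding,
        // i.e. through BasicOzFs delegating to BasicOzoneFileSystem.
        FileContext fc = FileContext.getFileContext(
            URI.create("o3fs://bucket1.vol1/"), conf);
        System.out.println(fc.util().exists(new Path("/")));
      }
    }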

View File

@@ -21,6 +21,7 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InputStream;
 import java.net.URI;
+import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
@@ -28,6 +29,8 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.crypto.key.KeyProvider;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdds.client.ReplicationFactor;
 import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -235,30 +238,55 @@ public class BasicOzoneClientAdapterImpl implements OzoneClientAdapter {
     }
   }

-  public OzoneFileStatus getFileStatus(String pathKey) throws IOException {
+  public FileStatusAdapter getFileStatus(String key, URI uri,
+      Path qualifiedPath, String userName)
+      throws IOException {
     try {
       incrementCounter(Statistic.OBJECTS_QUERY);
-      return bucket.getFileStatus(pathKey);
+      OzoneFileStatus status = bucket.getFileStatus(key);
+      makeQualified(status, uri, qualifiedPath, userName);
+      return toFileStatusAdapter(status);
     } catch (OMException e) {
       if (e.getResult() == OMException.ResultCodes.FILE_NOT_FOUND) {
         throw new
-            FileNotFoundException(pathKey + ": No such file or directory!");
+            FileNotFoundException(key + ": No such file or directory!");
       }
       throw e;
     }
   }

+  public void makeQualified(FileStatus status, URI uri, Path path,
+      String username) {
+    if (status instanceof OzoneFileStatus) {
+      ((OzoneFileStatus) status)
+          .makeQualified(uri, path,
+              username, username);
+    }
+  }
+
   @Override
   public Iterator<BasicKeyInfo> listKeys(String pathKey) {
     incrementCounter(Statistic.OBJECTS_LIST);
     return new IteratorAdapter(bucket.listKeys(pathKey));
   }

-  public List<OzoneFileStatus> listStatus(String keyName, boolean recursive,
-      String startKey, long numEntries) throws IOException {
+  public List<FileStatusAdapter> listStatus(String keyName, boolean recursive,
+      String startKey, long numEntries, URI uri,
+      Path workingDir, String username) throws IOException {
     try {
       incrementCounter(Statistic.OBJECTS_LIST);
-      return bucket.listStatus(keyName, recursive, startKey, numEntries);
+      List<OzoneFileStatus> statuses = bucket
+          .listStatus(keyName, recursive, startKey, numEntries);
+      List<FileStatusAdapter> result = new ArrayList<>();
+      for (OzoneFileStatus status : statuses) {
+        Path qualifiedPath = status.getPath().makeQualified(uri, workingDir);
+        makeQualified(status, uri, qualifiedPath, username);
+        result.add(toFileStatusAdapter(status));
+      }
+      return result;
     } catch (OMException e) {
       if (e.getResult() == OMException.ResultCodes.FILE_NOT_FOUND) {
         throw new FileNotFoundException(e.getMessage());
@@ -372,4 +400,20 @@ public class BasicOzoneClientAdapterImpl implements OzoneClientAdapter {
       }
     }
   }

+  private FileStatusAdapter toFileStatusAdapter(OzoneFileStatus status) {
+    return new FileStatusAdapter(
+        status.getLen(),
+        status.getPath(),
+        status.isDirectory(),
+        status.getReplication(),
+        status.getBlockSize(),
+        status.getModificationTime(),
+        status.getAccessTime(),
+        status.getPermission().toShort(),
+        status.getOwner(),
+        status.getGroup(),
+        status.getPath()
+    );
+  }
 }

View File

@@ -24,11 +24,12 @@ import java.net.URI;
 import java.net.URISyntaxException;
 import java.util.EnumSet;
 import java.util.Iterator;
-import java.util.List;
 import java.util.LinkedList;
+import java.util.List;
 import java.util.Objects;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
+import java.util.stream.Collectors;

 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -42,7 +43,6 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.Progressable;
@@ -497,13 +497,18 @@ public class BasicOzoneFileSystem extends FileSystem {
     statistics.incrementReadOps(1);
     LOG.trace("listStatus() path:{}", f);
     int numEntries = LISTING_PAGE_SIZE;
-    LinkedList<OzoneFileStatus> statuses = new LinkedList<>();
-    List<OzoneFileStatus> tmpStatusList;
+    LinkedList<FileStatus> statuses = new LinkedList<>();
+    List<FileStatus> tmpStatusList;
     String startKey = "";
     do {
       tmpStatusList =
-          adapter.listStatus(pathToKey(f), false, startKey, numEntries);
+          adapter.listStatus(pathToKey(f), false, startKey, numEntries, uri,
+              workingDir, getUsername())
+              .stream()
+              .map(this::convertFileStatus)
+              .collect(Collectors.toList());
       if (!tmpStatusList.isEmpty()) {
         if (startKey.isEmpty()) {
           statuses.addAll(tmpStatusList);
@@ -517,10 +522,7 @@ public class BasicOzoneFileSystem extends FileSystem {
       // exhausted.
     } while (tmpStatusList.size() == numEntries);

-    for (OzoneFileStatus status : statuses) {
-      status.makeQualified(uri, status.getPath().makeQualified(uri, workingDir),
-          getUsername(), getUsername());
-    }
     return statuses.toArray(new FileStatus[0]);
   }
@@ -624,9 +626,9 @@ public class BasicOzoneFileSystem extends FileSystem {
     LOG.trace("getFileStatus() path:{}", f);
     Path qualifiedPath = f.makeQualified(uri, workingDir);
     String key = pathToKey(qualifiedPath);
-    return adapter.getFileStatus(key)
-        .makeQualified(uri, qualifiedPath, getUsername(), getUsername());
+    FileStatus status = convertFileStatus(
+        adapter.getFileStatus(key, uri, qualifiedPath, getUsername()));
+    return status;
   }

   /**
@@ -759,4 +761,30 @@ public class BasicOzoneFileSystem extends FileSystem {
     }
     return true;
   }
+  private FileStatus convertFileStatus(
+      FileStatusAdapter fileStatusAdapter) {
+
+    Path symLink = null;
+    try {
+      symLink = fileStatusAdapter.getSymlink();
+    } catch (Exception ex) {
+      //NOOP: if there is no symlink, symLink remains null
+    }
+
+    return new FileStatus(
+        fileStatusAdapter.getLength(),
+        fileStatusAdapter.isDir(),
+        fileStatusAdapter.getBlockReplication(),
+        fileStatusAdapter.getBlocksize(),
+        fileStatusAdapter.getModificationTime(),
+        fileStatusAdapter.getAccessTime(),
+        new FsPermission(fileStatusAdapter.getPermission()),
+        fileStatusAdapter.getOwner(),
+        fileStatusAdapter.getGroup(),
+        symLink,
+        fileStatusAdapter.getPath()
+    );
+  }
 }

View File

@@ -0,0 +1,108 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.ozone;
import org.apache.hadoop.fs.Path;
/**
* Class to hold the internal information of a FileStatus.
* <p>
 * As the FileStatus class is not compatible between Hadoop 3.x and 2.x, we
 * use this adapter to hold all the required information. A Hadoop 3.x
 * FileStatus can be converted to this class, and this class can be used to
 * create a Hadoop 2.x FileStatus.
* <p>
* FileStatus (Hadoop 3.x) --> FileStatusAdapter --> FileStatus (Hadoop 2.x)
*/
public final class FileStatusAdapter {
private final long length;
private final Path path;
private final boolean isdir;
private final short blockReplication;
private final long blocksize;
private final long modificationTime;
private final long accessTime;
private final short permission;
private final String owner;
private final String group;
private final Path symlink;
@SuppressWarnings("checkstyle:ParameterNumber")
public FileStatusAdapter(long length, Path path, boolean isdir,
short blockReplication, long blocksize, long modificationTime,
long accessTime, short permission, String owner,
String group, Path symlink) {
this.length = length;
this.path = path;
this.isdir = isdir;
this.blockReplication = blockReplication;
this.blocksize = blocksize;
this.modificationTime = modificationTime;
this.accessTime = accessTime;
this.permission = permission;
this.owner = owner;
this.group = group;
this.symlink = symlink;
}
public Path getPath() {
return path;
}
public boolean isDir() {
return isdir;
}
public short getBlockReplication() {
return blockReplication;
}
public long getBlocksize() {
return blocksize;
}
public long getModificationTime() {
return modificationTime;
}
public long getAccessTime() {
return accessTime;
}
public short getPermission() {
return permission;
}
public String getOwner() {
return owner;
}
public String getGroup() {
return group;
}
public Path getSymlink() {
return symlink;
}
public long getLength() {
return length;
}
}
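A minimal sketch of the round trip the class comment describes. The FileStatusRoundTrip class and the pack/unpack names are illustrative only; the field mapping mirrors toFileStatusAdapter in BasicOzoneClientAdapterImpl and convertFileStatus in BasicOzoneFileSystem from this patch, and the eleven-argument FileStatus constructor used here exists in both Hadoop 2.x and 3.x:

    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.permission.FsPermission;

    public final class FileStatusRoundTrip {
      private FileStatusRoundTrip() {
      }

      // Hadoop 3.x side: flatten a FileStatus into the version-neutral adapter.
      public static FileStatusAdapter pack(FileStatus status) {
        return new FileStatusAdapter(
            status.getLen(), status.getPath(), status.isDirectory(),
            status.getReplication(), status.getBlockSize(),
            status.getModificationTime(), status.getAccessTime(),
            status.getPermission().toShort(), status.getOwner(),
            status.getGroup(), status.getPath());
      }

      // Either side: rebuild a FileStatus from primitives and Paths only.
      public static FileStatus unpack(FileStatusAdapter adapter) {
        return new FileStatus(adapter.getLength(), adapter.isDir(),
            adapter.getBlockReplication(), adapter.getBlocksize(),
            adapter.getModificationTime(), adapter.getAccessTime(),
            new FsPermission(adapter.getPermission()), adapter.getOwner(),
            adapter.getGroup(), adapter.getSymlink(), adapter.getPath());
      }
    }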

View File

@@ -52,17 +52,19 @@ public class FilteredClassLoader extends URLClassLoader {
   public FilteredClassLoader(URL[] urls, ClassLoader parent) {
     super(urls, null);
-    delegatedClasses.add("org.apache.hadoop.crypto.key.KeyProvider");
     delegatedClasses.add("org.apache.hadoop.fs.ozone.OzoneClientAdapter");
+    delegatedClasses.add("org.apache.hadoop.fs.ozone.FileStatusAdapter");
     delegatedClasses.add("org.apache.hadoop.security.token.Token");
     delegatedClasses.add("org.apache.hadoop.fs.ozone.BasicKeyInfo");
     delegatedClasses.add("org.apache.hadoop.fs.ozone.OzoneFSOutputStream");
     delegatedClasses.add("org.apache.hadoop.fs.ozone.OzoneFSStorageStatistics");
     delegatedClasses.add("org.apache.hadoop.fs.ozone.Statistic");
     delegatedClasses.add("org.apache.hadoop.fs.Seekable");
+    delegatedClasses.add("org.apache.hadoop.io.Text");
+    delegatedClasses.add("org.apache.hadoop.fs.Path");
     delegatedClasses.addAll(StringUtils.getTrimmedStringCollection(
         System.getenv("HADOOP_OZONE_DELEGATED_CLASSES")));
     this.delegate = parent;
     systemClassLoader = getSystemClassLoader();
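Text and Path join the delegated list because they now cross the isolation boundary: token renewers are Text instances and FileStatusAdapter carries Path fields. If the isolated loader loaded its own copies, instanceof and cast checks against the application's copies would fail. A minimal standalone sketch of that delegation rule, under illustrative names (DelegatingLoaderSketch); this is not the actual FilteredClassLoader implementation, which has additional rules:

    import java.net.URL;
    import java.net.URLClassLoader;
    import java.util.HashSet;
    import java.util.Set;

    class DelegatingLoaderSketch extends URLClassLoader {
      private final Set<String> delegatedClasses = new HashSet<>();
      private final ClassLoader delegate;

      DelegatingLoaderSketch(URL[] urls, ClassLoader parent) {
        super(urls, null); // null parent: isolate everything by default
        this.delegate = parent;
        // Types shared across the boundary must be loaded exactly once.
        delegatedClasses.add("org.apache.hadoop.io.Text");
        delegatedClasses.add("org.apache.hadoop.fs.Path");
      }

      @Override
      protected Class<?> loadClass(String name, boolean resolve)
          throws ClassNotFoundException {
        if (delegatedClasses.contains(name)) {
          return delegate.loadClass(name);
        }
        return super.loadClass(name, resolve);
      }
    }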

View File

@@ -17,18 +17,17 @@
  */
 package org.apache.hadoop.fs.ozone;

-import org.apache.hadoop.crypto.key.KeyProvider;
-import org.apache.hadoop.ozone.security.OzoneTokenIdentifier;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
-
 import java.io.IOException;
 import java.io.InputStream;
 import java.net.URI;
 import java.util.Iterator;
 import java.util.List;

+import org.apache.hadoop.crypto.key.KeyProvider;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.ozone.security.OzoneTokenIdentifier;
+import org.apache.hadoop.security.token.Token;
+
 /**
  * Lightweight adapter to separate hadoop/ozone classes.
  * <p>
@@ -53,8 +52,9 @@ public interface OzoneClientAdapter {

   Iterator<BasicKeyInfo> listKeys(String pathKey);

-  List<OzoneFileStatus> listStatus(String keyName, boolean recursive,
-      String startKey, long numEntries) throws IOException;
+  List<FileStatusAdapter> listStatus(String keyName, boolean recursive,
+      String startKey, long numEntries, URI uri,
+      Path workingDir, String username) throws IOException;

   Token<OzoneTokenIdentifier> getDelegationToken(String renewer)
       throws IOException;
@@ -65,5 +65,7 @@ public interface OzoneClientAdapter {

   String getCanonicalServiceName();

-  OzoneFileStatus getFileStatus(String pathKey) throws IOException;
+  FileStatusAdapter getFileStatus(String key, URI uri,
+      Path qualifiedPath, String userName) throws IOException;
 }