From 1fc359fc101b3ff90c95d22a3f4cfa78b65ae47d Mon Sep 17 00:00:00 2001
From: Elek, Márton
Date: Tue, 4 Jun 2019 08:18:02 +0200
Subject: [PATCH] HDDS-1607. Create smoketest for non-secure mapreduce example
 (#869)

* HDDS-1607. Create smoketest for non-secure mapreduce example.

* remove hardcoded project version
---
 .../dist/src/main/compose/ozone-mr/.env       |  19 +++
 .../main/compose/ozone-mr/docker-compose.yaml |  95 +++++++++++++
 .../src/main/compose/ozone-mr/docker-config   | 130 ++++++++++++++++++
 .../dist/src/main/compose/ozone-mr/test.sh    |  36 +++++
 .../dist/src/main/smoketest/createmrenv.robot |  48 +++++++
 .../dist/src/main/smoketest/mapreduce.robot   |  37 +++++
 6 files changed, 365 insertions(+)
 create mode 100644 hadoop-ozone/dist/src/main/compose/ozone-mr/.env
 create mode 100644 hadoop-ozone/dist/src/main/compose/ozone-mr/docker-compose.yaml
 create mode 100644 hadoop-ozone/dist/src/main/compose/ozone-mr/docker-config
 create mode 100755 hadoop-ozone/dist/src/main/compose/ozone-mr/test.sh
 create mode 100644 hadoop-ozone/dist/src/main/smoketest/createmrenv.robot
 create mode 100644 hadoop-ozone/dist/src/main/smoketest/mapreduce.robot

diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/.env b/hadoop-ozone/dist/src/main/compose/ozone-mr/.env
new file mode 100644
index 00000000000..ba24fed5f9d
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/compose/ozone-mr/.env
@@ -0,0 +1,19 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+HDDS_VERSION=${hdds.version}
+HADOOP_IMAGE=apache/hadoop
+HADOOP_VERSION=3
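The .env file supplies default values for the variables referenced from docker-compose.yaml; ${hdds.version} is a Maven property that is substituted when the distribution is assembled. A minimal way to check the resolved values is sketched below (the relative compose directory inside the built distribution is an assumption, not part of the patch):

    cd compose/ozone-mr                     # inside the generated Ozone distribution
    docker-compose config | grep 'image:'   # shows the image tags resolved from .env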
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozone-mr/docker-compose.yaml
new file mode 100644
index 00000000000..1a7f87263d7
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/compose/ozone-mr/docker-compose.yaml
@@ -0,0 +1,95 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+version: "3"
+services:
+  datanode:
+    image: apache/hadoop-runner
+    volumes:
+      - ../..:/opt/hadoop
+    ports:
+      - 9864
+    command: ["/opt/hadoop/bin/ozone","datanode"]
+    env_file:
+      - docker-config
+  om:
+    image: apache/hadoop-runner
+    hostname: om
+    volumes:
+      - ../..:/opt/hadoop
+    ports:
+      - 9874:9874
+    environment:
+      WAITFOR: scm:9876
+      ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION
+    env_file:
+      - docker-config
+    command: ["/opt/hadoop/bin/ozone","om"]
+  s3g:
+    image: apache/hadoop-runner
+    hostname: s3g
+    volumes:
+      - ../..:/opt/hadoop
+    ports:
+      - 9878:9878
+    env_file:
+      - ./docker-config
+    command: ["/opt/hadoop/bin/ozone","s3g"]
+  scm:
+    image: apache/hadoop-runner:latest
+    hostname: scm
+    volumes:
+      - ../..:/opt/hadoop
+    ports:
+      - 9876:9876
+    env_file:
+      - docker-config
+    environment:
+      ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION
+    command: ["/opt/hadoop/bin/ozone","scm"]
+  rm:
+    image: ${HADOOP_IMAGE}:${HADOOP_VERSION}
+    hostname: rm
+    volumes:
+      - ../..:/opt/ozone
+    ports:
+      - 8088:8088
+    env_file:
+      - ./docker-config
+    environment:
+      HADOOP_CLASSPATH: /opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-current-@project.version@.jar
+    command: ["yarn", "resourcemanager"]
+  nm:
+    image: ${HADOOP_IMAGE}:${HADOOP_VERSION}
+    hostname: nm
+    volumes:
+      - ../..:/opt/ozone
+    env_file:
+      - ./docker-config
+    environment:
+      HADOOP_CLASSPATH: /opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-current-@project.version@.jar
+      WAIT_FOR: rm:8088
+    command: ["yarn","nodemanager"]
+  dns:
+    image: andyshinn/dnsmasq:2.76
+    ports:
+      - 53:53/udp
+      - 53:53/tcp
+    volumes:
+      - "/var/run/docker.sock:/var/run/docker.sock"
+    command:
+      - "-k"
+      - "-d"
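The YARN services (rm, nm) run the stock Hadoop image and mount the Ozone distribution at /opt/ozone, so the Ozone filesystem connector only has to be added through HADOOP_CLASSPATH; the datanode publishes port 9864 without a fixed host mapping, which allows it to be scaled. A quick sanity check, sketched under the assumption that the cluster is already running from this directory:

    docker-compose up -d --scale datanode=3                     # add more datanodes if needed
    docker-compose exec nm bash -c 'echo "$HADOOP_CLASSPATH"'   # should print the mounted ozone filesystem jar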
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-mr/docker-config
new file mode 100644
index 00000000000..216e2314f4d
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/compose/ozone-mr/docker-config
@@ -0,0 +1,130 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+OZONE-SITE.XML_ozone.om.address=om
+OZONE-SITE.XML_ozone.om.http-address=om:9874
+OZONE-SITE.XML_ozone.scm.names=scm
+OZONE-SITE.XML_ozone.enabled=true
+OZONE-SITE.XML_ozone.scm.datanode.id=/data/datanode.id
+OZONE-SITE.XML_ozone.scm.block.client.address=scm
+OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
+OZONE-SITE.XML_ozone.scm.client.address=scm
+OZONE-SITE.XML_ozone.replication=1
+
+OZONE-SITE.XML_hdds.datanode.dir=/data/hdds
+HDFS-SITE.XML_dfs.datanode.address=0.0.0.0:1019
+HDFS-SITE.XML_dfs.datanode.http.address=0.0.0.0:1012
+
+CORE-SITE.XML_fs.o3fs.impl=org.apache.hadoop.fs.ozone.OzoneFileSystem
+CORE-SITE.XML_fs.AbstractFileSystem.o3fs.impl=org.apache.hadoop.fs.ozone.OzFs
+CORE-SITE.XML_fs.defaultFS=o3fs://bucket1.vol1/
+
+MAPRED-SITE.XML_mapreduce.framework.name=yarn
+MAPRED-SITE.XML_yarn.app.mapreduce.am.env=HADOOP_MAPRED_HOME=$HADOOP_HOME
+MAPRED-SITE.XML_mapreduce.map.env=HADOOP_MAPRED_HOME=$HADOOP_HOME
+MAPRED-SITE.XML_mapreduce.reduce.env=HADOOP_MAPRED_HOME=$HADOOP_HOME
+MAPRED-SITE.XML_mapreduce.map.memory.mb=4096
+MAPRED-SITE.XML_mapreduce.reduce.memory.mb=4096
+MAPRED-SITE.XML_mapred.child.java.opts=-Xmx2g
+MAPRED-SITE.XML_mapreduce.application.classpath=/opt/hadoop/share/hadoop/mapreduce/*:/opt/hadoop/share/hadoop/mapreduce/lib/*:/opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-current-0.5.0-SNAPSHOT.jar
+
+YARN-SITE.XML_yarn.app.mapreduce.am.staging-dir=/user
+YARN-SITE.XML_yarn.timeline-service.enabled=true
+#YARN-SITE.XML_yarn.timeline-service.generic.application.history.enabled=true
+#YARN-SITE.XML_yarn.timeline-service.hostname=jhs
+#YARN-SITE.XML_yarn.log.server.url=http://jhs:8188/applicationhistory/logs/
+
+YARN-SITE.XML_yarn.nodemanager.pmem-check-enabled=false
+YARN-SITE.XML_yarn.nodemanager.delete.debug-delay-sec=6000
+YARN-SITE.XML_yarn.nodemanager.vmem-check-enabled=false
+YARN-SITE.XML_yarn.nodemanager.aux-services=mapreduce_shuffle
+YARN-SITE.XML_yarn.nodemanager.disk-health-checker.enable=false
+
+YARN-SITE.XML_yarn.resourcemanager.hostname=rm
+YARN-SITE.XML_yarn.resourcemanager.system.metrics.publisher.enabled=true
+
+#YARN-SITE.XML_yarn.log-aggregation-enable=true
+#YARN-SITE.XML_yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds=3600
+
+#YARN-SITE.XML_yarn.nodemanager.container-executor.class=org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor
+#YARN-SITE.XML_yarn.nodemanager.linux-container-executor.path=/opt/hadoop/bin/container-executor
+#YARN-SITE.XML_yarn.nodemanager.linux-container-executor.group=hadoop
+YARN-SITE.XML_yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage=99
+YARN-SITE.XML_yarn.nodemanager.disk-health-checker.enable=false
+
+CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.maximum-applications=10000
+CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.maximum-am-resource-percent=0.1
+CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.resource-calculator=org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator
+CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.root.queues=default
+CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.root.default.capacity=100
+CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.root.default.user-limit-factor=1
+CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.root.default.maximum-capacity=100
+CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.root.default.state=RUNNING
+CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.root.default.acl_submit_applications=*
+CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.root.default.acl_administer_queue=*
+CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.node-locality-delay=40
+CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.queue-mappings=
+CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.queue-mappings-override.enable=false
+
+LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout
+LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n
+LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR
+LOG4J.PROPERTIES_log4j.logger.org.apache.ratis.conf.ConfUtils=WARN
+LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop=INFO
+LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.security.ShellBasedUnixGroupsMapping=ERROR
+
+#Enable this variable to print out all hadoop rpc traffic to the stdout. See http://byteman.jboss.org/ to define your own instrumentation.
+#BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm
+
+#LOG4J2.PROPERTIES_* are for Ozone Audit Logging
+LOG4J2.PROPERTIES_monitorInterval=30
+LOG4J2.PROPERTIES_filter=read,write
+LOG4J2.PROPERTIES_filter.read.type=MarkerFilter
+LOG4J2.PROPERTIES_filter.read.marker=READ
+LOG4J2.PROPERTIES_filter.read.onMatch=DENY
+LOG4J2.PROPERTIES_filter.read.onMismatch=NEUTRAL
+LOG4J2.PROPERTIES_filter.write.type=MarkerFilter
+LOG4J2.PROPERTIES_filter.write.marker=WRITE
+LOG4J2.PROPERTIES_filter.write.onMatch=NEUTRAL
+LOG4J2.PROPERTIES_filter.write.onMismatch=NEUTRAL
+LOG4J2.PROPERTIES_appenders=console, rolling
+LOG4J2.PROPERTIES_appender.console.type=Console
+LOG4J2.PROPERTIES_appender.console.name=STDOUT
+LOG4J2.PROPERTIES_appender.console.layout.type=PatternLayout
+LOG4J2.PROPERTIES_appender.console.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n
+LOG4J2.PROPERTIES_appender.rolling.type=RollingFile
+LOG4J2.PROPERTIES_appender.rolling.name=RollingFile
+LOG4J2.PROPERTIES_appender.rolling.fileName=${sys:hadoop.log.dir}/om-audit-${hostName}.log
+LOG4J2.PROPERTIES_appender.rolling.filePattern=${sys:hadoop.log.dir}/om-audit-${hostName}-%d{yyyy-MM-dd-HH-mm-ss}-%i.log.gz
+LOG4J2.PROPERTIES_appender.rolling.layout.type=PatternLayout
+LOG4J2.PROPERTIES_appender.rolling.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n
+LOG4J2.PROPERTIES_appender.rolling.policies.type=Policies
+LOG4J2.PROPERTIES_appender.rolling.policies.time.type=TimeBasedTriggeringPolicy
+LOG4J2.PROPERTIES_appender.rolling.policies.time.interval=86400
+LOG4J2.PROPERTIES_appender.rolling.policies.size.type=SizeBasedTriggeringPolicy
+LOG4J2.PROPERTIES_appender.rolling.policies.size.size=64MB
+LOG4J2.PROPERTIES_loggers=audit
+LOG4J2.PROPERTIES_logger.audit.type=AsyncLogger
+LOG4J2.PROPERTIES_logger.audit.name=OMAudit
+LOG4J2.PROPERTIES_logger.audit.level=INFO
+LOG4J2.PROPERTIES_logger.audit.appenderRefs=rolling
+LOG4J2.PROPERTIES_logger.audit.appenderRef.file.ref=RollingFile
+LOG4J2.PROPERTIES_rootLogger.level=INFO
+LOG4J2.PROPERTIES_rootLogger.appenderRefs=stdout
+LOG4J2.PROPERTIES_rootLogger.appenderRef.stdout.ref=STDOUT
+
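Each entry in docker-config follows the <CONFIG-FILE>_<property>=<value> convention, and the container startup scripts render these variables into the corresponding Hadoop configuration file; that is how fs.defaultFS ends up pointing at o3fs://bucket1.vol1/ inside every container. A quick way to verify a generated value (container name and configuration path are assumptions about the apache/hadoop-runner image, not part of the patch):

    docker-compose exec om grep -A1 'defaultFS' /opt/hadoop/etc/hadoop/core-site.xml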
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/test.sh b/hadoop-ozone/dist/src/main/compose/ozone-mr/test.sh
new file mode 100755
index 00000000000..892a540b467
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/compose/ozone-mr/test.sh
@@ -0,0 +1,36 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
+export COMPOSE_DIR
+
+# shellcheck source=/dev/null
+source "$COMPOSE_DIR/../testlib.sh"
+
+start_docker_env
+
+execute_robot_test scm createmrenv.robot
+
+# reinitialize the directories to use
+export OZONE_DIR=/opt/ozone
+# shellcheck source=/dev/null
+source "$COMPOSE_DIR/../testlib.sh"
+execute_robot_test rm mapreduce.robot
+
+stop_docker_env
+
+generate_report
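test.sh drives the whole scenario: it sources the shared testlib.sh, starts the compose cluster, runs createmrenv.robot inside the scm container, then re-sources testlib.sh with OZONE_DIR=/opt/ozone so that mapreduce.robot runs inside the rm container against the mounted Ozone client. A minimal sketch of running it locally (the exact path of the generated distribution is an assumption):

    cd hadoop-ozone/dist/target/ozone-*/compose/ozone-mr
    ./test.sh        # robot results are collected by generate_report from testlib.sh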
diff --git a/hadoop-ozone/dist/src/main/smoketest/createmrenv.robot b/hadoop-ozone/dist/src/main/smoketest/createmrenv.robot
new file mode 100644
index 00000000000..49d06aa3c66
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/smoketest/createmrenv.robot
@@ -0,0 +1,48 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+*** Settings ***
+Documentation       Create directories required for MR test
+Library             OperatingSystem
+Resource            commonlib.robot
+Test Timeout        2 minute
+
+
+*** Variables ***
+${volume}          vol1
+${bucket}          bucket1
+
+
+*** Keywords ***
+Create volume
+    ${result} =             Execute             ozone sh volume create /${volume} --user hadoop --quota 100TB --root
+    Should not contain      ${result}           Failed
+    Should contain          ${result}           Creating Volume: ${volume}
+Create bucket
+    Execute                 ozone sh bucket create /${volume}/${bucket}
+
+*** Test Cases ***
+Create test volume, bucket and key
+    ${result} =         Execute And Ignore Error        ozone sh bucket info /${volume}/${bucket}
+    Run Keyword if      "VOLUME_NOT_FOUND" in """${result}"""       Create volume
+    Run Keyword if      "VOLUME_NOT_FOUND" in """${result}"""       Create bucket
+    Run Keyword if      "BUCKET_NOT_FOUND" in """${result}"""       Create bucket
+    ${result} =         Execute             ozone sh bucket info /${volume}/${bucket}
+    Should not contain  ${result}           NOT_FOUND
+    Execute             ozone sh key put /${volume}/${bucket}/key1 LICENSE.txt
+
+Create user dir for hadoop
+    Execute             ozone fs -mkdir /user
+    Execute             ozone fs -mkdir /user/hadoop
diff --git a/hadoop-ozone/dist/src/main/smoketest/mapreduce.robot b/hadoop-ozone/dist/src/main/smoketest/mapreduce.robot
new file mode 100644
index 00000000000..a6086774890
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/smoketest/mapreduce.robot
@@ -0,0 +1,37 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+*** Settings ***
+Documentation       Execute MR jobs
+Library             OperatingSystem
+Resource            commonlib.robot
+Test Timeout        2 minute
+
+
+*** Variables ***
+${volume}          vol1
+${bucket}          bucket1
+${hadoop.version}  3.2.0
+
+
+*** Test Cases ***
+Execute PI calculation
+    ${output} =         Execute             yarn jar ./share/hadoop/mapreduce/hadoop-mapreduce-examples-${hadoop.version}.jar pi 3 3
+    Should Contain      ${output}           completed successfully
+
+Execute WordCount
+    ${random}           Generate Random String  2   [NUMBERS]
+    ${output} =         Execute             yarn jar ./share/hadoop/mapreduce/hadoop-mapreduce-examples-${hadoop.version}.jar wordcount o3fs://${bucket}.${volume}/key1 o3fs://${bucket}.${volume}/key1-${random}.count
+    Should Contain      ${output}           completed successfully
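The WordCount case reads the key written by createmrenv.robot through the o3fs connector and writes its output back into the same bucket. The same job can be launched by hand from inside the rm container; this is a sketch only, assuming the container working directory is the Hadoop home (as mapreduce.robot does) and that the examples jar version matches ${hadoop.version}:

    docker-compose exec rm yarn jar \
        ./share/hadoop/mapreduce/hadoop-mapreduce-examples-3.2.0.jar \
        wordcount o3fs://bucket1.vol1/key1 o3fs://bucket1.vol1/manual-wc.out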