HDFS-13343. Ozone: Provide docker based acceptance testing on pseudo cluster.

Contributed by Elek, Marton.
Anu Engineer 2018-03-23 22:39:19 -07:00 committed by Owen O'Malley
parent 4981716ab5
commit 6d9f069ede
7 changed files with 353 additions and 0 deletions

View File

@@ -0,0 +1,38 @@
<!---
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
# Acceptance test suite for Ozone/Hdsl
This project contains acceptance tests for Ozone/Hdsl using docker-compose and the [Robot Framework](http://robotframework.org/).
## Run
To run the acceptance tests, activate the `ozone-acceptance-test` profile and do a full build.
Typically you need `mvn install -Phdsl,ozone-acceptance-test,dist -DskipTests` for a build that skips the unit tests but still runs the acceptance tests.
Notes:
1. You need a Hadoop build in the hadoop-dist/target directory.
2. The `ozone-acceptance-test` profile can be activated even if the unit tests are skipped.
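For example, a typical sequence from the source tree root might look like the following sketch (the checkout directory name is only a placeholder):

```bash
# Full build without unit tests; the acceptance tests run as part of the
# ozone-acceptance-test profile and hadoop-dist/target is populated by the
# dist profile.
cd hadoop                  # root of your Hadoop source checkout (placeholder)
mvn install -Phdsl,ozone-acceptance-test,dist -DskipTests

# The acceptance tests expect the distribution to be present here (see note 1):
ls hadoop-dist/target/
```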
## Development
You can also run the robot tests manually with the `robot` CLI. (See the Robot Framework docs for how to install it.)

1. Go to the `src/test/robotframework` directory.
2. Execute `robot -v basedir:${PWD}/../../.. -v VERSION:3.2.0-SNAPSHOT .`

You can also select just one test with `-t "*testnamefragment*"`.
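A minimal manual session might therefore look like the sketch below, assuming Robot Framework is installed via pip and that the distribution plus the filtered compose files from a previous Maven build are already in place:

```bash
# Install the robot CLI (assumption: Robot Framework is installed with pip).
pip install robotframework

# Run the whole suite from the module's robotframework directory ...
cd hadoop-ozone/acceptance-test/src/test/robotframework
robot -v basedir:${PWD}/../../.. -v VERSION:3.2.0-SNAPSHOT .

# ... or only the tests whose name matches a fragment:
robot -v basedir:${PWD}/../../.. -v VERSION:3.2.0-SNAPSHOT -t "*freon*" .
```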

View File

@@ -0,0 +1,86 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
    http://maven.apache.org/xsd/maven-4.0.0.xsd">
  <modelVersion>4.0.0</modelVersion>
  <parent>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-project</artifactId>
    <version>3.2.0-SNAPSHOT</version>
    <relativePath>../../hadoop-project</relativePath>
  </parent>
  <artifactId>hadoop-ozone-acceptance-test</artifactId>
  <version>3.2.0-SNAPSHOT</version>
  <description>Apache Hadoop Ozone Acceptance test</description>
  <name>Apache Hadoop Ozone acceptance test</name>
  <packaging>pom</packaging>
  <build>
    <plugins>
      <plugin>
        <artifactId>maven-resources-plugin</artifactId>
        <executions>
          <execution>
            <id>copy-docker-compose</id>
            <goals>
              <goal>copy-resources</goal>
            </goals>
            <phase>process-test-resources</phase>
            <configuration>
              <outputDirectory>${project.build.directory}/compose</outputDirectory>
              <resources>
                <resource>
                  <directory>src/test/compose</directory>
                  <filtering>true</filtering>
                </resource>
              </resources>
            </configuration>
          </execution>
        </executions>
      </plugin>
    </plugins>
  </build>
  <profiles>
    <profile>
      <id>ozone-acceptance-test</id>
      <build>
        <plugins>
          <plugin>
            <groupId>org.robotframework</groupId>
            <artifactId>robotframework-maven-plugin</artifactId>
            <version>1.4.7</version>
            <executions>
              <execution>
                <goals>
                  <goal>run</goal>
                </goals>
                <configuration>
                  <variables>
                    <variable>version:${project.version}</variable>
                    <variable>basedir:${project.basedir}</variable>
                  </variables>
                  <skip>false</skip>
                  <skipTests>false</skipTests>
                </configuration>
              </execution>
            </executions>
          </plugin>
        </plugins>
      </build>
    </profile>
  </profiles>
</project>

View File

@@ -0,0 +1,17 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
HADOOPDIR=../../hadoop-dist/target/hadoop-${project.version}
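This `.env` file is filtered by the maven-resources-plugin (so `${project.version}` is replaced with the real version) and tells docker-compose where to find the Hadoop distribution to mount into the containers. For a manual run the same variable can also be exported by hand; a hedged sketch, assuming the layout used by the robot suite further below:

```bash
# Point docker-compose at a locally built distribution; the path mirrors the
# one the robot suite exports, and the version is an assumption about your build.
export HADOOPDIR="$(pwd)/../../hadoop-dist/target/hadoop-3.2.0-SNAPSHOT"

# Verify that the volume mounts resolve as expected before starting anything.
docker-compose -f target/compose/docker-compose.yaml config | grep /opt/hadoop
```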

View File

@@ -0,0 +1,61 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
version: "3"
services:
namenode:
image: elek/hadoop-runner:o3-refactor
hostname: namenode
volumes:
- ${HADOOPDIR}:/opt/hadoop
ports:
- 9870
environment:
ENSURE_NAMENODE_DIR: /data/namenode
env_file:
- ./docker-config
command: ["/opt/hadoop/bin/hdfs","namenode"]
datanode:
image: elek/hadoop-runner:o3-refactor
volumes:
- ${HADOOPDIR}:/opt/hadoop
ports:
- 9864
command: ["/opt/hadoop/bin/oz","datanode"]
env_file:
- ./docker-config
ksm:
image: elek/hadoop-runner:o3-refactor
volumes:
- ${HADOOPDIR}:/opt/hadoop
ports:
- 9874
environment:
ENSURE_KSM_INITIALIZED: /data/metadata/ksm/current/VERSION
env_file:
- ./docker-config
command: ["/opt/hadoop/bin/oz","ksm"]
scm:
image: elek/hadoop-runner:o3-refactor
volumes:
- ${HADOOPDIR}:/opt/hadoop
ports:
- 9876
env_file:
- ./docker-config
environment:
ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION
command: ["/opt/hadoop/bin/oz","scm"]

View File

@@ -0,0 +1,34 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
CORE-SITE.XML_fs.defaultFS=hdfs://namenode:9000
OZONE-SITE.XML_ozone.ksm.address=ksm
OZONE-SITE.XML_ozone.scm.names=scm
OZONE-SITE.XML_ozone.enabled=True
OZONE-SITE.XML_ozone.scm.datanode.id=/data/datanode.id
OZONE-SITE.XML_ozone.scm.block.client.address=scm
OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
OZONE-SITE.XML_ozone.handler.type=distributed
OZONE-SITE.XML_ozone.scm.client.address=scm
HDFS-SITE.XML_dfs.namenode.rpc-address=namenode:9000
HDFS-SITE.XML_dfs.namenode.name.dir=/data/namenode
HDFS-SITE.XML_rpc.metrics.quantile.enable=true
HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300
HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.HdslServerPlugin,org.apache.hadoop.ozone.web.ObjectStoreRestPlugin
LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout
LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender
LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n
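Each line follows a `<TARGET-FILE>_<key>=<value>` convention; the `hadoop-runner` image is expected to expand these environment variables into the corresponding `core-site.xml`, `ozone-site.xml`, `hdfs-site.xml`, and `log4j.properties` inside the containers at startup. A hedged sketch of adding one more override before recreating the cluster (the property below is only an illustration, not part of this change):

```bash
# Append a hypothetical extra ozone-site.xml setting to the filtered config,
# then recreate the containers so they pick it up.
echo "OZONE-SITE.XML_ozone.scm.stale.node.interval=1m" >> target/compose/docker-config
docker-compose -f target/compose/docker-compose.yaml up -d --force-recreate
```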

View File

@@ -0,0 +1,116 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
*** Settings ***
Documentation       Smoke test to start cluster with docker-compose environments.
Library             OperatingSystem
Suite Setup         Startup Ozone Cluster
Suite Teardown      Teardown Ozone Cluster

*** Variables ***
${COMMON_RESTHEADER}      -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE root"
${version}

*** Test Cases ***

Daemons are running without error
    Is daemon running without error    ksm
    Is daemon running without error    scm
    Is daemon running without error    namenode
    Is daemon running without error    datanode

Check if datanode is connected to the scm
    Wait Until Keyword Succeeds    2min    5sec    Have healthy datanodes    1

Scale it up to 5 datanodes
    Scale datanodes up    5
    Wait Until Keyword Succeeds    3min    5sec    Have healthy datanodes    5

Test rest interface
    ${result} =    Execute on    datanode    curl -i -X POST ${COMMON_RESTHEADER} "http://localhost:9880/volume1"
    Should contain    ${result}    201 Created
    ${result} =    Execute on    datanode    curl -i -X POST ${COMMON_RESTHEADER} "http://localhost:9880/volume1/bucket1"
    Should contain    ${result}    201 Created
    ${result} =    Execute on    datanode    curl -i -X DELETE ${COMMON_RESTHEADER} "http://localhost:9880/volume1/bucket1"
    Should contain    ${result}    200 OK
    ${result} =    Execute on    datanode    curl -i -X DELETE ${COMMON_RESTHEADER} "http://localhost:9880/volume1"
    Should contain    ${result}    200 OK

Test oz cli
    Execute on    datanode    oz oz -createVolume http://localhost:9880/hive -user bilbo -quota 100TB -root
    ${result} =    Execute on    datanode    oz oz -listVolume http://localhost:9880/ -user bilbo | grep -v Removed | jq '.[] | select(.volumeName=="hive")'
    Should contain    ${result}    createdOn
    Execute on    datanode    oz oz -createBucket http://localhost:9880/hive/bb1
    ${result}    Execute on    datanode    oz oz -listBucket http://localhost:9880/hive/ | grep -v Removed | jq -r '.[] | select(.bucketName=="bb1") | .volumeName'
    Should Be Equal    ${result}    hive
    Execute on    datanode    oz oz -deleteBucket http://localhost:9880/hive/bb1
    Execute on    datanode    oz oz -deleteVolume http://localhost:9880/hive -user bilbo

Check webui static resources
    ${result} =    Execute on    scm    curl -s -I http://localhost:9876/static/bootstrap-3.0.2/js/bootstrap.min.js
    Should contain    ${result}    200
    ${result} =    Execute on    ksm    curl -s -I http://localhost:9874/static/bootstrap-3.0.2/js/bootstrap.min.js
    Should contain    ${result}    200

Start freon testing
    ${result} =    Execute on    ksm    oz freon -numOfVolumes 5 -numOfBuckets 5 -numOfKeys 5 -numOfThreads 10
    Wait Until Keyword Succeeds    3min    10sec    Should contain    ${result}    Number of Keys added: 125
    Should Not Contain    ${result}    ERROR
*** Keywords ***

Startup Ozone Cluster
    ${rc}    ${output} =    Run docker compose    down
    ${rc}    ${output} =    Run docker compose    up -d
    Should Be Equal As Integers    ${rc}    0
    Wait Until Keyword Succeeds    1min    5sec    Is Daemon started    ksm    HTTP server of KSM is listening

Teardown Ozone Cluster
    Run docker compose    down

Is daemon running without error
    [arguments]    ${name}
    ${result} =    Run    docker ps
    Should contain    ${result}    _${name}_1
    ${rc}    ${result} =    Run docker compose    logs ${name}
    Should not contain    ${result}    ERROR

Is Daemon started
    [arguments]    ${name}    ${expression}
    ${rc}    ${result} =    Run docker compose    logs
    Should contain    ${result}    ${expression}

Have healthy datanodes
    [arguments]    ${requirednodes}
    ${result} =    Execute on    scm    curl -s 'http://localhost:9876/jmx?qry=Hadoop:service=SCMNodeManager,name=SCMNodeManagerInfo' | jq -r '.beans[0].NodeCount[] | select(.key=="HEALTHY") | .value'
    Should Be Equal    ${result}    ${requirednodes}

Scale datanodes up
    [arguments]    ${requirednodes}
    Run docker compose    scale datanode=${requirednodes}

Execute on
    [arguments]    ${componentname}    ${command}
    ${rc}    ${return} =    Run docker compose    exec ${componentname} ${command}
    [return]    ${return}

Run docker compose
    [arguments]    ${command}
    Set Environment Variable    HADOOPDIR    ${basedir}/../../hadoop-dist/target/hadoop-${version}
    ${rc}    ${output} =    Run And Return Rc And Output    docker-compose -f ${basedir}/target/compose/docker-compose.yaml ${command}
    Should Be Equal As Integers    ${rc}    0
    [return]    ${rc}    ${output}
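The REST smoke test above can also be reproduced by hand against a running cluster; a minimal sketch, with the service name, port, and headers taken from the compose file and the "Test rest interface" case:

```bash
# Create a volume through the datanode's Ozone REST endpoint; a successful
# call is expected to answer with "201 Created", as asserted in the suite.
docker-compose -f target/compose/docker-compose.yaml exec datanode \
  curl -i -X POST \
    -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" \
    -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE root" \
    "http://localhost:9880/volume1"
```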

View File

@@ -748,6 +748,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
<module>hadoop-ozone</module>
<module>hadoop-cblock</module>
<module>hadoop-hdsl</module>
<module>hadoop-ozone/acceptance-test</module>
</modules>
</profile>
</profiles>