HDFS-12469. Ozone: Create docker-compose definition to easily test real clusters. Contributed by Elek, Marton.

Anu Engineer 2017-09-27 12:04:09 -07:00 committed by Owen O'Malley
parent a44f3a777d
commit 94090acf3a
4 changed files with 103 additions and 0 deletions


@@ -0,0 +1 @@
VERSION=3.1.0-SNAPSHOT


@@ -0,0 +1,40 @@
version: "3"
services:
  namenode:
    image: flokkr/hadoop-runner:latest
    hostname: namenode
    volumes:
      - ../../../hadoop-dist/target/hadoop-${VERSION}:/opt/hadoop
    ports:
      - 50070:50070
      - 9870:9870
    environment:
      ENSURE_NAMENODE_DIR: /data/namenode
    env_file:
      - ./docker-config
    command: ["/opt/hadoop/bin/hdfs","namenode"]
  datanode:
    image: flokkr/hadoop-runner:latest
    volumes:
      - ../../../hadoop-dist/target/hadoop-${VERSION}:/opt/hadoop
    ports:
      - 9864
    command: ["/opt/hadoop/bin/hdfs","datanode"]
  ksm:
    image: flokkr/hadoop-runner:latest
    volumes:
      - ../../../hadoop-dist/target/hadoop-${VERSION}:/opt/hadoop
    ports:
      - 9874:9874
    env_file:
      - ./docker-config
    command: ["/opt/hadoop/bin/hdfs","ksm"]
  scm:
    image: flokkr/hadoop-runner:latest
    volumes:
      - ../../../hadoop-dist/target/hadoop-${VERSION}:/opt/hadoop
    ports:
      - 9876:9876
    env_file:
      - ./docker-config
    command: ["/opt/hadoop/bin/hdfs","scm"]
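
The `${VERSION}` placeholder in the volume mounts above is resolved by docker-compose from the one-line env file earlier in this change (assuming that file is the compose project's `.env` file, which docker-compose picks up from the working directory). A minimal, non-authoritative way to check the substitution before starting anything:

```bash
# Print the fully resolved compose file; the hadoop-${VERSION} volume paths
# should expand to hadoop-3.1.0-SNAPSHOT if the .env file was picked up.
cd dev-support/compose/ozone
docker-compose config
```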


@@ -0,0 +1,17 @@
CORE-SITE.XML_fs.defaultFS=hdfs://namenode:9000
OZONE-SITE.XML_ozone.ksm.address=ksm
OZONE-SITE.XML_ozone.scm.names=scm
OZONE-SITE.XML_ozone.enabled=True
OZONE-SITE.XML_ozone.scm.datanode.id=/data/datanode.id
OZONE-SITE.XML_ozone.scm.block.client.address=scm
OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
OZONE-SITE.XML_ozone.handler.type=distributed
OZONE-SITE.XML_ozone.scm.client.address=scm
HDFS-SITE.XML_dfs.namenode.rpc-address=namenode:9000
HDFS-SITE.XML_dfs.namenode.name.dir=/data/namenode
HDFS-SITE.XML_rpc.metrics.quantile.enable=true
HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300
LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout
LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender
LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n
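
These key/value pairs reach the containers as environment variables via `env_file`; the `flokkr/hadoop-runner` image is expected to turn the prefix before the first underscore into a configuration file name and the rest into a property key, so for example `OZONE-SITE.XML_ozone.enabled=True` becomes the `ozone.enabled` property in `ozone-site.xml`. A hedged way to inspect the generated file once the cluster is running, assuming the runner writes its config under the mounted `/opt/hadoop` tree:

```bash
# /opt/hadoop/etc/hadoop is an assumption based on the volume mount in the
# compose file; adjust the path if the runner image writes its config elsewhere.
docker-compose exec ksm cat /opt/hadoop/etc/hadoop/ozone-site.xml
```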


@@ -41,9 +41,54 @@ example of the tarball that will be generated.
* `~/apache/hadoop/hadoop-dist/target/${project.version}.tar.gz`
At this point we have the option to set up a physical cluster or to run Ozone
via Docker.

Running Ozone via Docker
------------------------
This assumes that you have a working Docker setup on the machine. Please run
the following commands to see Ozone in action.
Go to the directory where the docker compose files exist.
- `cd dev-support/compose/ozone`
Tell Docker to start Ozone. This will start a KSM, an SCM and a single datanode
in the background.
- `docker-compose up -d`
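
To verify that the namenode, datanode, KSM and SCM containers all came up, here is a quick sketch using standard docker-compose commands (swap in a different service name to follow another daemon's log):

```bash
docker-compose ps            # all four services should be listed as Up
docker-compose logs -f scm   # follow the SCM log; Ctrl-C stops following
```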
Now let us run some workload against Ozone; to do that, we will run corona.
The following commands log you into the datanode and start bash.
- `docker-compose exec datanode bash`
- `cd hadoop/bin`
Now you can run the oz command shell or corona, the Ozone load generator.
This is the command to run corona.
- `./hdfs corona -mode offline -validateWrites -numOfVolumes 1 -numOfBuckets 10 -numOfKeys 100`
You can check out the KSM UI to see information about the requests.
- `http://localhost:9874/`
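
If you prefer the command line, a quick reachability check against the same port; `/jmx` is the standard metrics endpoint of the Hadoop HTTP server, and the assumption here is that the KSM UI exposes it as well:

```bash
curl -s http://localhost:9874/jmx | head -n 20
```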
If you need more datanodes, you can scale up:
- `docker-compose scale datanode=3`
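
Scaling works here because the datanode service publishes port 9864 without pinning it to a fixed host port, so the extra containers do not collide. To confirm the new instances (standard docker-compose, nothing Ozone-specific):

```bash
docker-compose ps datanode   # should now list three running datanode containers
```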
Running Ozone using a real cluster
----------------------------------
Please proceed to set up a Hadoop cluster by creating the `hdfs-site.xml` and
the other configuration files that are needed for your cluster.
### Ozone Configuration
Ozone relies on its own configuration file called `ozone-site.xml`. It is