Add MiniHDFS test fixture, started before integTest and shut down after.

Currently this uses a hardcoded port (9999); we still need to apply MavenFilteringHack after it starts.
Robert Muir 2015-12-20 16:00:37 -05:00
parent ae89c6e51c
commit 12a8428dfb
8 changed files with 155 additions and 3 deletions

@@ -198,6 +198,10 @@ class BuildPlugin implements Plugin<Project> {
* to iterate the transitive dependencies and add excludes.
*/
static void configureConfigurations(Project project) {
// we are not shipping these jars, we act like dumb consumers of these things
if (project.path.startsWith(':test:fixtures')) {
return
}
// fail on any conflicting dependency versions
project.configurations.all({ Configuration configuration ->
if (configuration.name.startsWith('_transitive_')) {
@@ -205,6 +209,10 @@ class BuildPlugin implements Plugin<Project> {
// we just have them to find *what* transitive deps exist
return
}
if (configuration.name.endsWith('Fixture')) {
// just a self contained test-fixture configuration, likely transitive and hellacious
return
}
configuration.resolutionStrategy.failOnVersionConflict()
})

@@ -28,6 +28,10 @@ versions << [
'hadoop2': '2.7.1'
]
configurations {
hdfsFixture
}
dependencies {
compile "org.apache.hadoop:hadoop-client:${versions.hadoop2}"
compile "org.apache.hadoop:hadoop-common:${versions.hadoop2}"
@@ -44,12 +48,26 @@ dependencies {
compile 'commons-lang:commons-lang:2.6'
compile 'javax.servlet:servlet-api:2.5'
// we need this one, it's not really 'provided'
compile "org.slf4j:slf4j-api:${versions.slf4j}"
hdfsFixture project(':test:fixtures:hdfs-fixture')
}
dependencyLicenses {
mapping from: /hadoop-.*/, to: 'hadoop'
}
task hdfsFixture(type: org.elasticsearch.gradle.test.Fixture) {
dependsOn project.configurations.hdfsFixture
executable = new File(project.javaHome, 'bin/java')
args '-cp', "${ -> project.configurations.hdfsFixture.asPath }",
'hdfs.MiniHDFS',
baseDir
}
integTest {
dependsOn hdfsFixture
}
compileJava.options.compilerArgs << '-Xlint:-deprecation,-rawtypes'
thirdPartyAudit.missingClasses = true
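
The hdfsFixture task above launches MiniHDFS as a separate java process, using the hdfsFixture configuration as its classpath and passing baseDir as its working directory, and integTest only runs once that fixture is up. The running process advertises itself by writing pid and ports files into baseDir (see MiniHDFS further down). As a rough illustration of that contract, a consumer could poll for the ports file along the lines of this minimal sketch; the class name, timeout, and polling interval are illustrative, not part of this commit:

import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;

// Hypothetical helper (not in this commit): wait for the fixture to publish
// its namenode port by polling the "ports" file it writes on startup.
public class HdfsFixturePort {
    public static int waitForPort(Path fixtureBaseDir, long timeoutMillis) throws Exception {
        Path portsFile = fixtureBaseDir.resolve("ports");
        long deadline = System.currentTimeMillis() + timeoutMillis;
        while (System.currentTimeMillis() < deadline) {
            if (Files.exists(portsFile)) {
                // MiniHDFS writes the bound port as a single UTF-8 encoded integer
                String text = new String(Files.readAllBytes(portsFile), StandardCharsets.UTF_8).trim();
                return Integer.parseInt(text);
            }
            Thread.sleep(100);
        }
        throw new IllegalStateException("fixture never wrote " + portsFile);
    }
}

Once the real port is read back like this, it could be substituted into the REST test URIs (via MavenFilteringHack, per the commit message) instead of the hardcoded 9999.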

@@ -9,7 +9,7 @@
body:
type: hdfs
settings:
uri: "hdfs://localhost:40737"
uri: "hdfs://localhost:9999"
path: "foo/bar"
# Get repository

@@ -11,7 +11,7 @@ setup:
body:
type: hdfs
settings:
uri: "hdfs://localhost:40737"
uri: "hdfs://localhost:9999"
path: "foo/bar"
- do:

@@ -11,7 +11,7 @@ setup:
body:
type: hdfs
settings:
uri: "hdfs://localhost:40737"
uri: "hdfs://localhost:9999"
path: "foo/bar"
- do:

@@ -10,6 +10,7 @@ List projects = [
'distribution:rpm',
'test:framework',
'test:fixtures:example-fixture',
'test:fixtures:hdfs-fixture',
'modules:lang-expression',
'modules:lang-groovy',
'modules:lang-mustache',

test/fixtures/hdfs-fixture/build.gradle (new file, 39 lines)

@@ -0,0 +1,39 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
apply plugin: 'elasticsearch.build'
versions << [
'hadoop2': '2.7.1'
]
// we create a MiniDFSCluster, which needs the hadoop-hdfs and hadoop-common test jars.
// test jars don't bring in their dependencies, so we need the normal jars too.
dependencies {
compile "org.apache.hadoop:hadoop-hdfs:${versions.hadoop2}:tests"
compile "org.apache.hadoop:hadoop-hdfs:${versions.hadoop2}"
compile "org.apache.hadoop:hadoop-common:${versions.hadoop2}:tests"
compile "org.apache.hadoop:hadoop-common:${versions.hadoop2}"
}
// just a test fixture: we aren't using jars in releases
thirdPartyAudit.enabled = false
// TODO: add a simple HDFS client test for this fixture
test.enabled = false
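
The TODO above asks for a simple HDFS client test against this fixture. One possible shape for such a test, sketched under the assumption that the fixture is reachable on the hardcoded port 9999; the class name and the standalone main method are illustrative only, not part of this commit:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import java.net.URI;

// Hypothetical smoke test (not in this commit): connect to the running
// fixture and list the root directory to verify the namenode answers.
public class MiniHdfsSmokeTest {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:9999"), conf)) {
            for (FileStatus status : fs.listStatus(new Path("/"))) {
                System.out.println(status.getPath());
            }
        }
    }
}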

@@ -0,0 +1,86 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package hdfs;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.log4j.BasicConfigurator;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardCopyOption;
import java.util.Locale;
import java.lang.management.ManagementFactory;
/**
* MiniHDFS test fixture. There is a CLI tool, but here we can
* easily set up logging properly, avoid parsing JSON, etc.
*/
public class MiniHDFS {
private static final String PORT_FILE_NAME = "ports";
private static final String PID_FILE_NAME = "pid";
public static void main(String[] args) throws Exception {
if (args.length != 1) {
throw new IllegalArgumentException("MiniHDFS <baseDirectory>");
}
// configure logging, so we see all HDFS server logs if something goes wrong
BasicConfigurator.configure();
// configure Paths
Path baseDir = Paths.get(args[0]);
// hadoop-home/, so logs will not complain
Path hadoopHome = baseDir.resolve("hadoop-home");
Files.createDirectories(hadoopHome);
System.setProperty("hadoop.home.dir", hadoopHome.toAbsolutePath().toString());
// hdfs-data/, where any data is going
Path hdfsHome = baseDir.resolve("hdfs-data");
// start cluster
Configuration cfg = new Configuration();
cfg.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, hdfsHome.toAbsolutePath().toString());
// lower default permission: TODO: needed?
cfg.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY, "766");
// TODO: remove hardcoded port!
MiniDFSCluster dfs = new MiniDFSCluster.Builder(cfg).nameNodePort(9999).build();
// write our PID file
Path tmp = Files.createTempFile(baseDir, null, null);
String pid = ManagementFactory.getRuntimeMXBean().getName().split("@")[0];
Files.write(tmp, pid.getBytes(StandardCharsets.UTF_8));
Files.move(tmp, baseDir.resolve(PID_FILE_NAME), StandardCopyOption.ATOMIC_MOVE);
// write our port file
tmp = Files.createTempFile(baseDir, null, null);
Files.write(tmp, Integer.toString(dfs.getNameNodePort()).getBytes(StandardCharsets.UTF_8));
Files.move(tmp, baseDir.resolve(PORT_FILE_NAME), StandardCopyOption.ATOMIC_MOVE);
// don't rely on hadoop thread leaks, wait forever, until you kill me
Thread.sleep(Long.MAX_VALUE);
}
}
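
For the "remove hardcoded port" TODO, one plausible follow-up (an assumption about a later change, not something this commit does) is to let MiniDFSCluster bind an ephemeral port and publish whatever it actually bound through the same atomically-moved ports file, roughly along these lines:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardCopyOption;

// Sketch of a possible follow-up (not in this commit): ask MiniDFSCluster for
// an ephemeral namenode port (0 = any free port) and publish the port it
// actually bound through the same atomically-moved "ports" file.
public class MiniHDFSEphemeralPort {
    public static void main(String[] args) throws Exception {
        Path baseDir = Paths.get(args[0]);
        Configuration cfg = new Configuration();
        cfg.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.resolve("hdfs-data").toAbsolutePath().toString());
        MiniDFSCluster dfs = new MiniDFSCluster.Builder(cfg).nameNodePort(0).build();
        Path tmp = Files.createTempFile(baseDir, null, null);
        Files.write(tmp, Integer.toString(dfs.getNameNodePort()).getBytes(StandardCharsets.UTF_8));
        Files.move(tmp, baseDir.resolve("ports"), StandardCopyOption.ATOMIC_MOVE);
        // keep running until killed, as MiniHDFS above does
        Thread.sleep(Long.MAX_VALUE);
    }
}

The write-to-temp-file-then-atomic-move pattern mirrors MiniHDFS above: readers that see the ports file can assume the value in it is complete, never half-written.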