Fix unit tests (also works from IDE).

Robert Muir 2015-12-19 02:43:27 -05:00
parent 3269beeb4d
commit 9df447295c
3 changed files with 70 additions and 109 deletions


@@ -1,57 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.repositories.hdfs;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RawLocalFileSystem;
import org.elasticsearch.common.SuppressForbidden;
import java.io.File;
import java.io.IOException;
/**
 * Extends LFS to improve some operations to keep the security permissions at
 * bay. In particular mkdir is smarter and doesn't have to walk all the file
 * hierarchy but rather only limits itself to the parent/working dir and creates
 * a file only when necessary.
 */
public class TestingFs extends LocalFileSystem {

    private static class ImprovedRawLocalFileSystem extends RawLocalFileSystem {
        @Override
        @SuppressForbidden(reason = "the Hadoop API depends on java.io.File")
        public boolean mkdirs(Path f) throws IOException {
            File wd = pathToFile(getWorkingDirectory());
            File local = pathToFile(f);
            if (wd.equals(local) || local.exists()) {
                return true;
            }
            return mkdirs(f.getParent()) && local.mkdir();
        }
    }

    public TestingFs() {
        super(new ImprovedRawLocalFileSystem());
        // use the build path instead of the starting dir as that one has read permissions
        //setWorkingDirectory(new Path(getClass().getProtectionDomain().getCodeSource().getLocation().toString()));
        setWorkingDirectory(new Path(System.getProperty("java.io.tmpdir")));
    }
}
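The removed TestingFs above is a plain Hadoop FileSystem, so it was registered per scheme. A minimal sketch of that wiring (assuming the es-hdfs scheme that the old, now commented-out repository setting below points at; not code from this commit):

    Configuration conf = new Configuration();
    // legacy FileSystem registration key: fs.<scheme>.impl
    conf.set("fs.es-hdfs.impl", TestingFs.class.getName());
    // FileSystem.get() instantiates the class registered for the URI's scheme
    FileSystem fs = FileSystem.get(URI.create("es-hdfs:///"), conf);
    fs.mkdirs(new Path("repo"));   // goes through the restricted mkdirs override above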


@@ -26,30 +26,19 @@ import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.repositories.RepositoryException;
import org.elasticsearch.repositories.RepositoryMissingException;
import org.elasticsearch.repositories.hdfs.TestingFs;
import org.elasticsearch.plugin.hadoop.hdfs.HdfsPlugin;
import org.elasticsearch.snapshots.SnapshotState;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
import org.elasticsearch.test.ESIntegTestCase.Scope;
import org.elasticsearch.test.store.MockFSDirectoryService;
import org.junit.After;
import org.junit.Before;
import java.nio.file.Path;
import java.util.Collection;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
/**
* You must specify {@code -Dtests.thirdparty=true}
*/
// Make sure to start the MiniHDFS cluster beforehand;
// otherwise you will get a weird PrivateCredentialPermission exception
// caused by the HDFS fallback code (which doesn't do much anyway)
// @ThirdParty
@ClusterScope(scope = Scope.SUITE, numDataNodes = 1, transportClientRatio = 0.0)
public class HdfsTests extends ESIntegTestCase {
@@ -78,31 +67,17 @@ public class HdfsTests extends ESIntegTestCase {
return pluginList(HdfsPlugin.class);
}
private String path;
private int port;
@Before
public final void wipeBefore() throws Exception {
wipeRepositories();
//port = MiniHDFS.getPort();
//path = "build/data/repo-" + randomInt();
}
@After
public final void wipeAfter() throws Exception {
wipeRepositories();
}
public void testSimpleWorkflow() {
Client client = client();
String path = "foo";
logger.info("--> creating hdfs repository with path [{}]", path);
Path dir = createTempDir();
PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
.setType("hdfs")
.setSettings(Settings.settingsBuilder()
.put("uri", "hdfs://127.0.0.1:" + port)
.put("conf.fs.es-hdfs.impl", TestingFs.class.getName())
// .put("uri", "es-hdfs:///")
.put("uri", dir.toUri().toString())
.put("conf.fs.AbstractFileSystem.file.impl", TestingFs.class.getName())
.put("path", path)
.put("conf", "additional-cfg.xml, conf-2.xml")
.put("chunk_size", randomIntBetween(100, 1000) + "k")
@@ -177,17 +152,17 @@ public class HdfsTests extends ESIntegTestCase {
// RepositoryVerificationException.class
public void testWrongPath() {
Path dir = createTempDir();
Client client = client();
logger.info("--> creating hdfs repository with path [{}]", path);
try {
PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
.setType("hdfs")
.setSettings(Settings.settingsBuilder()
.put("uri", "hdfs://127.0.0.1:" + port)
// .put("uri", "es-hdfs:///")
.put("conf.fs.es-hdfs.impl", TestingFs.class.getName())
.put("path", path + "a@b$c#11:22")
.put("uri", dir.toUri().toString())
.put("conf.fs.AbstractFileSystem.file.impl", TestingFs.class.getName())
.put("path", "a@b$c#11:22")
.put("chunk_size", randomIntBetween(100, 1000) + "k")
.put("compress", randomBoolean()))
.get();
@@ -201,23 +176,6 @@ public class HdfsTests extends ESIntegTestCase {
}
}
/**
* Deletes repositories, supports wildcard notation.
*/
public static void wipeRepositories(String... repositories) {
// if nothing is provided, delete all
if (repositories.length == 0) {
repositories = new String[]{"*"};
}
for (String repository : repositories) {
try {
client().admin().cluster().prepareDeleteRepository(repository).execute().actionGet();
} catch (RepositoryMissingException ex) {
// ignore
}
}
}
private long count(Client client, String index) {
return client.prepareSearch(index).setSize(0).get().getHits().totalHits();
}


@@ -0,0 +1,60 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.plugin.hadoop.hdfs;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.DelegateToFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RawLocalFileSystem;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.lucene.util.LuceneTestCase;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
/**
 * Extends LFS to improve some operations to keep the security permissions at
 * bay. In particular it never tries to execute!
 */
public class TestingFs extends DelegateToFileSystem {

    private static class ImprovedRawLocalFileSystem extends RawLocalFileSystem {
        @Override
        public Path getInitialWorkingDirectory() {
            // sets working dir to a tmp dir for testing
            return new Path(LuceneTestCase.createTempDir().toString());
        }

        @Override
        public void setPermission(Path p, FsPermission permission) {
            // no execution, thank you very much!
        }
    }

    public TestingFs(URI uri, Configuration configuration) throws URISyntaxException, IOException {
        super(URI.create("file:///"), new ImprovedRawLocalFileSystem(), configuration, "file", false);
    }

    @Override
    public void checkPath(Path path) {
        // we do evil stuff, we admit it.
    }
}
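The replacement class hooks into Hadoop's AbstractFileSystem layer instead, which FileContext resolves per scheme via fs.AbstractFileSystem.<scheme>.impl, the key the updated test sets through its conf.* repository settings. A rough sketch of that resolution path (illustrative only, not code from this commit), showing why the (URI, Configuration) constructor is required and what the setPermission no-op buys:

    Configuration conf = new Configuration();
    conf.set("fs.AbstractFileSystem.file.impl", TestingFs.class.getName());
    // FileContext reflectively invokes the (URI, Configuration) constructor registered for the file scheme
    FileContext fc = FileContext.getFileContext(URI.create("file:///"), conf);
    // mkdir succeeds without ever applying an execute bit, since setPermission is a no-op
    fc.mkdir(new Path(fc.getWorkingDirectory(), "repo"), FsPermission.getDirDefault(), true);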