SOLR-14033: Fix Hadoop tests with security manager

This removes the Solr security manager hacks
for Hadoop. It does so by:
* Using a fake group mapping class instead of ShellBasedUnixGroupsMapping
* Copying a few Hadoop classes and modifying them so tests never shell out
* Nulling out some of the static variables in the tests

The Hadoop files were copied from Apache Hadoop 3.2.0
into the test package so they are only picked up
during tests. They were modified to remove the need to
shell out for access. The assumption is that these
HDFS integration tests only run on Unix-based systems,
so Windows compatibility was removed from some of the
modified classes. The long-term goal is to remove
these custom Hadoop classes. All the copied classes are
in the org.apache.hadoop package.

Signed-off-by: Kevin Risden <krisden@apache.org>
Kevin Risden 2019-12-07 20:03:25 -05:00
parent 20d3284a09
commit 48775ea18e
26 changed files with 3464 additions and 247 deletions


@@ -103,6 +103,8 @@ grant {
   permission java.lang.RuntimePermission "loadLibrary.jaas";
   permission java.lang.RuntimePermission "loadLibrary.jaas_unix";
   permission java.lang.RuntimePermission "loadLibrary.jaas_nt";
+  // needed by hadoop common RawLocalFileSystem for java nio getOwner
+  permission java.lang.RuntimePermission "accessUserInformation";
   // needed by hadoop hdfs
   permission java.lang.RuntimePermission "readFileDescriptor";
   permission java.lang.RuntimePermission "writeFileDescriptor";
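For context, the new permission is what Hadoop's RawLocalFileSystem needs when it looks up file ownership through java.nio under a security manager. A minimal sketch of the kind of call that requires it (the path and class name are illustrative, not part of the patch):

```java
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.attribute.UserPrincipal;

public class OwnerLookup {
  public static void main(String[] args) throws Exception {
    // Under a SecurityManager, Files.getOwner checks read access to the file
    // plus RuntimePermission("accessUserInformation").
    Path path = Paths.get("/tmp/example.txt"); // illustrative path
    UserPrincipal owner = Files.getOwner(path);
    System.out.println("owner = " + owner.getName());
  }
}
```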


@@ -38,6 +38,7 @@
   <dependency org="commons-codec" name="commons-codec" rev="${/commons-codec/commons-codec}" conf="compile"/>
   <dependency org="commons-io" name="commons-io" rev="${/commons-io/commons-io}" conf="compile"/>
   <dependency org="org.apache.commons" name="commons-exec" rev="${/org.apache.commons/commons-exec}" conf="compile"/>
+  <dependency org="org.apache.commons" name="commons-compress" rev="${/org.apache.commons/commons-compress}" conf="compile"/>
   <dependency org="commons-fileupload" name="commons-fileupload" rev="${/commons-fileupload/commons-fileupload}" conf="compile"/>
   <dependency org="commons-cli" name="commons-cli" rev="${/commons-cli/commons-cli}" conf="compile"/>
   <dependency org="org.apache.commons" name="commons-text" rev="${/org.apache.commons/commons-text}" conf="compile"/>


@@ -37,7 +37,7 @@ public class FSHDFSUtils {
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
   // internal, for tests
-  public static AtomicLong RECOVER_LEASE_SUCCESS_COUNT = new AtomicLong();
+  public static final AtomicLong RECOVER_LEASE_SUCCESS_COUNT = new AtomicLong();
   public interface CallerInfo {
     boolean isCallerClosed();


@@ -0,0 +1,23 @@
<?xml version="1.0"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<configuration>
  <property>
    <name>hadoop.security.group.mapping</name>
    <value>org.apache.solr.cloud.hdfs.FakeGroupMapping</value>
  </property>
</configuration>
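This test-only override routes Hadoop group lookups to the fake provider instead of the shell-based one. For reference, a sketch of the same setting applied programmatically, assuming a plain Hadoop Configuration on the test classpath:

```java
import org.apache.hadoop.conf.Configuration;

public class TestGroupMappingConfig {
  public static Configuration withFakeGroupMapping() {
    Configuration conf = new Configuration();
    // Equivalent of the XML property above: never fall back to
    // ShellBasedUnixGroupsMapping during tests.
    conf.set("hadoop.security.group.mapping",
        "org.apache.solr.cloud.hdfs.FakeGroupMapping");
    return conf;
  }
}
```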

File diff suppressed because it is too large.


@@ -0,0 +1,182 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.nio.file.Files;
import static java.nio.file.Files.createLink;
/**
* Class for creating hardlinks.
* Supports Unix/Linux, Windows via winutils , and Mac OS X.
*
* The HardLink class was formerly a static inner class of FSUtil,
* and the methods provided were blatantly non-thread-safe.
* To enable volume-parallel Update snapshots, we now provide static
* threadsafe methods that allocate new buffer string arrays
* upon each call. We also provide an API to hardlink all files in a
* directory with a single command, which is up to 128 times more
* efficient - and minimizes the impact of the extra buffer creations.
*/
public class HardLink {
public final LinkStats linkStats; //not static
public HardLink() {
linkStats = new LinkStats();
}
/**
* This abstract class bridges the OS-dependent implementations of the
* needed functionality for querying link counts.
* The particular implementation class is chosen during
* static initialization phase of the HardLink class.
* The "getter" methods construct shell command strings.
*/
private static abstract class HardLinkCommandGetter {
/**
* Get the command string to query the hardlink count of a file
*/
abstract String[] linkCount(File file) throws IOException;
}
/*
* ****************************************************
* Complexity is above. User-visible functionality is below
* ****************************************************
*/
/**
* Creates a hardlink
* @param file - existing source file
* @param linkName - desired target link file
*/
public static void createHardLink(File file, File linkName)
throws IOException {
if (file == null) {
throw new IOException(
"invalid arguments to createHardLink: source file is null");
}
if (linkName == null) {
throw new IOException(
"invalid arguments to createHardLink: link name is null");
}
createLink(linkName.toPath(), file.toPath());
}
/**
* Creates hardlinks from multiple existing files within one parent
* directory, into one target directory.
* @param parentDir - directory containing source files
* @param fileBaseNames - list of path-less file names, as returned by
* parentDir.list()
* @param linkDir - where the hardlinks should be put. It must already exist.
*/
public static void createHardLinkMult(File parentDir, String[] fileBaseNames,
File linkDir) throws IOException {
if (parentDir == null) {
throw new IOException(
"invalid arguments to createHardLinkMult: parent directory is null");
}
if (linkDir == null) {
throw new IOException(
"invalid arguments to createHardLinkMult: link directory is null");
}
if (fileBaseNames == null) {
throw new IOException(
"invalid arguments to createHardLinkMult: "
+ "filename list can be empty but not null");
}
if (!linkDir.exists()) {
throw new FileNotFoundException(linkDir + " not found.");
}
for (String name : fileBaseNames) {
createLink(linkDir.toPath().resolve(name),
parentDir.toPath().resolve(name));
}
}
/**
* Retrieves the number of links to the specified file.
*/
public static int getLinkCount(File fileName) throws IOException {
if (fileName == null) {
throw new IOException(
"invalid argument to getLinkCount: file name is null");
}
if (!fileName.exists()) {
throw new FileNotFoundException(fileName + " not found.");
}
return (Integer)Files.getAttribute(fileName.toPath(), "unix:nlink");
}
/* Create an IOException for failing to get link count. */
private static IOException createIOException(File f, String message,
String error, int exitvalue, Exception cause) {
final String s = "Failed to get link count on file " + f
+ ": message=" + message
+ "; error=" + error
+ "; exit value=" + exitvalue;
return (cause == null) ? new IOException(s) : new IOException(s, cause);
}
/**
* HardLink statistics counters and methods.
* Not multi-thread safe, obviously.
* Init is called during HardLink instantiation, above.
*
* These are intended for use by knowledgeable clients, not internally,
* because many of the internal methods are static and can't update these
* per-instance counters.
*/
public static class LinkStats {
public int countDirs = 0;
public int countSingleLinks = 0;
public int countMultLinks = 0;
public int countFilesMultLinks = 0;
public int countEmptyDirs = 0;
public int countPhysicalFileCopies = 0;
public void clear() {
countDirs = 0;
countSingleLinks = 0;
countMultLinks = 0;
countFilesMultLinks = 0;
countEmptyDirs = 0;
countPhysicalFileCopies = 0;
}
public String report() {
return "HardLinkStats: " + countDirs + " Directories, including "
+ countEmptyDirs + " Empty Directories, "
+ countSingleLinks
+ " single Link operations, " + countMultLinks
+ " multi-Link operations, linking " + countFilesMultLinks
+ " files, total " + (countSingleLinks + countFilesMultLinks)
+ " linkable files. Also physically copied "
+ countPhysicalFileCopies + " other files.";
}
}
}
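The copied HardLink replaces shell-based link handling with java.nio: createHardLink delegates to Files.createLink and getLinkCount reads the unix:nlink attribute, which is also why it is Unix-only. A small usage sketch of the class above (the paths are illustrative):

```java
import java.io.File;
import java.io.IOException;
import org.apache.hadoop.fs.HardLink;

public class HardLinkDemo {
  public static void main(String[] args) throws IOException {
    File source = new File("/tmp/source.dat");      // illustrative paths
    File link = new File("/tmp/source-link.dat");

    // Creates the link via java.nio.file.Files.createLink, no shelling out.
    HardLink.createHardLink(source, link);

    // Reads the "unix:nlink" file attribute instead of parsing `stat` output,
    // so this only works on Unix-like systems.
    int count = HardLink.getLinkCount(source);
    System.out.println("link count = " + count);
  }
}
```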

File diff suppressed because it is too large.


@@ -25,9 +25,7 @@ import java.io.FileNotFoundException;
 import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
-import java.io.OutputStreamWriter;
 import java.io.RandomAccessFile;
-import java.io.Writer;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
@@ -46,9 +44,7 @@ import java.util.concurrent.atomic.AtomicLong;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CachingGetSpaceUsed;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.fs.GetSpaceUsed;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -119,9 +115,6 @@ class BlockPoolSlice {
     }
   };
-  // TODO:FEDERATION scalability issue - a thread per DU is needed
-  private final GetSpaceUsed dfsUsage;
   /**
    * Create a blook pool slice
    * @param bpid Block pool Id
@@ -178,12 +171,6 @@ class BlockPoolSlice {
     fileIoProvider.mkdirs(volume, rbwDir);
     fileIoProvider.mkdirs(volume, tmpDir);
-    // Use cached value initially if available. Or the following call will
-    // block until the initial du command completes.
-    this.dfsUsage = new CachingGetSpaceUsed.Builder().setPath(bpDir)
-        .setConf(conf)
-        .setInitialUsed(loadDfsUsed())
-        .build();
     if (addReplicaThreadPool == null) {
       // initialize add replica fork join pool
       initializeAddReplicaPool(conf);
@@ -192,11 +179,8 @@ class BlockPoolSlice {
     shutdownHook = new Runnable() {
       @Override
       public void run() {
-        if (!dfsUsedSaved) {
-          saveDfsUsed();
         addReplicaThreadPool.shutdownNow();
       }
-      }
     };
     ShutdownHookManager.get().addShutdownHook(shutdownHook,
         SHUTDOWN_HOOK_PRIORITY);
@@ -243,92 +227,13 @@ class BlockPoolSlice {
   /** Run DU on local drives. It must be synchronized from caller. */
   void decDfsUsed(long value) {
-    if (dfsUsage instanceof CachingGetSpaceUsed) {
-      ((CachingGetSpaceUsed)dfsUsage).incDfsUsed(-value);
-    }
   }
   long getDfsUsed() throws IOException {
-    return dfsUsage.getUsed();
+    return 0L;
   }
   void incDfsUsed(long value) {
-    if (dfsUsage instanceof CachingGetSpaceUsed) {
-      ((CachingGetSpaceUsed)dfsUsage).incDfsUsed(value);
-    }
-  }
-  /**
-   * Read in the cached DU value and return it if it is less than
-   * cachedDfsUsedCheckTime which is set by
-   * dfs.datanode.cached-dfsused.check.interval.ms parameter. Slight imprecision
-   * of dfsUsed is not critical and skipping DU can significantly shorten the
-   * startup time. If the cached value is not available or too old, -1 is
-   * returned.
-   */
-  long loadDfsUsed() {
-    long cachedDfsUsed;
-    long mtime;
-    Scanner sc;
-    try {
-      sc = new Scanner(new File(currentDir, DU_CACHE_FILE), "UTF-8");
-    } catch (FileNotFoundException fnfe) {
-      return -1;
-    }
-    try {
-      // Get the recorded dfsUsed from the file.
-      if (sc.hasNextLong()) {
-        cachedDfsUsed = sc.nextLong();
-      } else {
-        return -1;
-      }
-      // Get the recorded mtime from the file.
-      if (sc.hasNextLong()) {
-        mtime = sc.nextLong();
-      } else {
-        return -1;
-      }
-      // Return the cached value if mtime is okay.
-      if (mtime > 0 && (timer.now() - mtime < cachedDfsUsedCheckTime)) {
-        FsDatasetImpl.LOG.info("Cached dfsUsed found for " + currentDir + ": " +
-            cachedDfsUsed);
-        return cachedDfsUsed;
-      }
-      return -1;
-    } finally {
-      sc.close();
-    }
-  }
-  /**
-   * Write the current dfsUsed to the cache file.
-   */
-  void saveDfsUsed() {
-    File outFile = new File(currentDir, DU_CACHE_FILE);
-    if (!fileIoProvider.deleteWithExistsCheck(volume, outFile)) {
-      FsDatasetImpl.LOG.warn("Failed to delete old dfsUsed file in " +
-          outFile.getParent());
-    }
-    try {
-      long used = getDfsUsed();
-      try (Writer out = new OutputStreamWriter(
-          new FileOutputStream(outFile), "UTF-8")) {
-        // mtime is written last, so that truncated writes won't be valid.
-        out.write(Long.toString(used) + " " + Long.toString(timer.now()));
-        // This is only called as part of the volume shutdown.
-        // We explicitly avoid calling flush with fileIoProvider which triggers
-        // volume check upon io exception to avoid cyclic volume checks.
-        out.flush();
-      }
-    } catch (IOException ioe) {
-      // If write failed, the volume might be bad. Since the cache file is
-      // not critical, log the error and continue.
-      FsDatasetImpl.LOG.warn("Failed to write dfsUsed to " + outFile, ioe);
-    }
   }
   /**
@@ -362,13 +267,7 @@ class BlockPoolSlice {
   File addFinalizedBlock(Block b, ReplicaInfo replicaInfo) throws IOException {
     File blockDir = DatanodeUtil.idToBlockDir(finalizedDir, b.getBlockId());
     fileIoProvider.mkdirsWithExistsCheck(volume, blockDir);
-    File blockFile = FsDatasetImpl.moveBlockFiles(b, replicaInfo, blockDir);
-    File metaFile = FsDatasetUtil.getMetaFile(blockFile, b.getGenerationStamp());
-    if (dfsUsage instanceof CachingGetSpaceUsed) {
-      ((CachingGetSpaceUsed) dfsUsage).incDfsUsed(
-          b.getNumBytes() + metaFile.length());
-    }
-    return blockFile;
+    return FsDatasetImpl.moveBlockFiles(b, replicaInfo, blockDir);
   }
   /**
@@ -851,17 +750,11 @@ class BlockPoolSlice {
   void shutdown(BlockListAsLongs blocksListToPersist) {
     saveReplicas(blocksListToPersist);
-    saveDfsUsed();
-    dfsUsedSaved = true;
     // Remove the shutdown hook to avoid any memory leak
     if (shutdownHook != null) {
       ShutdownHookManager.get().removeShutdownHook(shutdownHook);
     }
-    if (dfsUsage instanceof CachingGetSpaceUsed) {
-      IOUtils.cleanupWithLogger(LOG, ((CachingGetSpaceUsed) dfsUsage));
-    }
   }
   private boolean readReplicasFromCache(ReplicaMap volumeMap,
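The removed code relied on Hadoop's CachingGetSpaceUsed/DU machinery, which shells out to `du`; the test copy simply reports 0 bytes used instead. Purely for reference, a shell-free way to total a directory's file sizes with java.nio could look like the sketch below (not part of the patch; the path is illustrative):

```java
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.stream.Stream;

public class NioSpaceUsed {
  // Sum regular-file sizes under a directory without invoking `du`.
  static long usedBytes(Path root) throws IOException {
    try (Stream<Path> files = Files.walk(root)) {
      return files.filter(Files::isRegularFile)
          .mapToLong(p -> p.toFile().length())
          .sum();
    }
  }

  public static void main(String[] args) throws IOException {
    System.out.println(usedBytes(Paths.get("/tmp"))); // illustrative path
  }
}
```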


@@ -0,0 +1,54 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
/**
*
* NameNodeResourceChecker provides a method -
* <code>hasAvailableDiskSpace</code> - which will return true if and only if
* the NameNode has disk space available on all required volumes, and any volume
* which is configured to be redundant. Volumes containing file system edits dirs
* are added by default, and arbitrary extra volumes may be configured as well.
*/
@InterfaceAudience.Private
public class NameNodeResourceChecker {
/**
* Create a NameNodeResourceChecker, which will check the edits dirs and any
* additional dirs to check set in <code>conf</code>.
*/
public NameNodeResourceChecker(Configuration conf) throws IOException {
}
/**
* Return true if disk space is available on at least one of the configured
* redundant volumes, and all of the configured required volumes.
*
* @return True if the configured amount of disk space is available on at
* least one redundant volume and all of the required volumes, false
* otherwise.
*/
public boolean hasAvailableDiskSpace() {
return true;
}
}


@@ -0,0 +1,370 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.UUID;
import java.util.concurrent.atomic.AtomicReference;
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.IOUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Class that provides utility functions for checking disk problem
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class DiskChecker {
public static final Logger LOG = LoggerFactory.getLogger(DiskChecker.class);
public static class DiskErrorException extends IOException {
public DiskErrorException(String msg) {
super(msg);
}
public DiskErrorException(String msg, Throwable cause) {
super(msg, cause);
}
}
public static class DiskOutOfSpaceException extends IOException {
public DiskOutOfSpaceException(String msg) {
super(msg);
}
}
// Provider that abstracts some FileOutputStream operations for
// testability.
private static AtomicReference<FileIoProvider> fileIoProvider =
new AtomicReference<>(new DefaultFileIoProvider());
/**
* Create the directory if it doesn't exist and check that dir is readable,
* writable and executable
*
* @param dir dir to check
* @throws DiskErrorException exception checking dir
*/
public static void checkDir(File dir) throws DiskErrorException {
checkDirInternal(dir);
}
/**
* Create the directory if it doesn't exist and check that dir is
* readable, writable and executable. Perform some disk IO to
* ensure that the disk is usable for writes.
*
* @param dir dir to check
* @throws DiskErrorException exception checking dir
*/
public static void checkDirWithDiskIo(File dir)
throws DiskErrorException {
checkDirInternal(dir);
doDiskIo(dir);
}
private static void checkDirInternal(File dir)
throws DiskErrorException {
if (!mkdirsWithExistsCheck(dir)) {
throw new DiskErrorException("Cannot create directory: "
+ dir.toString());
}
checkAccessByFileMethods(dir);
}
/**
* Create the local directory if necessary, check permissions and also ensure
* it can be read from and written into.
*
* @param localFS local filesystem
* @param dir directory
* @param expected permission
* @throws DiskErrorException exception checking dir
* @throws IOException exception checking dir
*/
public static void checkDir(LocalFileSystem localFS, Path dir,
FsPermission expected)
throws DiskErrorException, IOException {
checkDirInternal(localFS, dir, expected);
}
/**
* Create the local directory if necessary, also ensure permissions
* allow it to be read from and written into. Perform some diskIO
* to ensure that the disk is usable for writes.
*
* @param localFS local filesystem
* @param dir directory
* @param expected permission
* @throws DiskErrorException exception checking dir
* @throws IOException exception checking dir
*/
public static void checkDirWithDiskIo(LocalFileSystem localFS, Path dir,
FsPermission expected)
throws DiskErrorException, IOException {
checkDirInternal(localFS, dir, expected);
doDiskIo(localFS.pathToFile(dir));
}
private static void checkDirInternal(LocalFileSystem localFS, Path dir,
FsPermission expected)
throws DiskErrorException, IOException {
mkdirsWithExistsAndPermissionCheck(localFS, dir, expected);
checkAccessByFileMethods(localFS.pathToFile(dir));
}
/**
* Checks that the current running process can read, write, and execute the
* given directory by using methods of the File object.
*
* @param dir File to check
* @throws DiskErrorException if dir is not readable, not writable, or not
* executable
*/
private static void checkAccessByFileMethods(File dir)
throws DiskErrorException {
if (!dir.isDirectory()) {
throw new DiskErrorException("Not a directory: "
+ dir.toString());
}
if (!FileUtil.canRead(dir)) {
throw new DiskErrorException("Directory is not readable: "
+ dir.toString());
}
if (!FileUtil.canWrite(dir)) {
throw new DiskErrorException("Directory is not writable: "
+ dir.toString());
}
}
/**
* The semantics of mkdirsWithExistsCheck method is different from the mkdirs
* method provided in the Sun's java.io.File class in the following way:
* While creating the non-existent parent directories, this method checks for
* the existence of those directories if the mkdir fails at any point (since
* that directory might have just been created by some other process).
* If both mkdir() and the exists() check fails for any seemingly
* non-existent directory, then we signal an error; Sun's mkdir would signal
* an error (return false) if a directory it is attempting to create already
* exists or the mkdir fails.
* @param dir dir to create and check
* @return true on success, false on failure
*/
private static boolean mkdirsWithExistsCheck(File dir) {
if (dir.mkdir() || dir.exists()) {
return true;
}
File canonDir;
try {
canonDir = dir.getCanonicalFile();
} catch (IOException e) {
return false;
}
String parent = canonDir.getParent();
return (parent != null) &&
(mkdirsWithExistsCheck(new File(parent)) &&
(canonDir.mkdir() || canonDir.exists()));
}
/**
* Create the directory or check permissions if it already exists.
*
* The semantics of mkdirsWithExistsAndPermissionCheck method is different
* from the mkdirs method provided in the Sun's java.io.File class in the
* following way:
* While creating the non-existent parent directories, this method checks for
* the existence of those directories if the mkdir fails at any point (since
* that directory might have just been created by some other process).
* If both mkdir() and the exists() check fails for any seemingly
* non-existent directory, then we signal an error; Sun's mkdir would signal
* an error (return false) if a directory it is attempting to create already
* exists or the mkdir fails.
*
* @param localFS local filesystem
* @param dir directory to be created or checked
* @param expected expected permission
* @throws IOException exception making dir and checking
*/
static void mkdirsWithExistsAndPermissionCheck(
LocalFileSystem localFS, Path dir, FsPermission expected)
throws IOException {
File directory = localFS.pathToFile(dir);
boolean created = false;
if (!directory.exists())
created = mkdirsWithExistsCheck(directory);
if (created || !localFS.getFileStatus(dir).getPermission().equals(expected))
localFS.setPermission(dir, expected);
}
// State related to running disk IO checks.
private static final String DISK_IO_FILE_PREFIX =
"DiskChecker.OK_TO_DELETE_.";
@VisibleForTesting
static final int DISK_IO_MAX_ITERATIONS = 3;
/**
* Performs some disk IO by writing to a new file in the given directory
* and sync'ing file contents to disk.
*
* This increases the likelihood of catching catastrophic disk/controller
* failures sooner.
*
* @param dir directory to be checked.
* @throws DiskErrorException if we hit an error while trying to perform
* disk IO against the file.
*/
private static void doDiskIo(File dir) throws DiskErrorException {
try {
IOException ioe = null;
for (int i = 0; i < DISK_IO_MAX_ITERATIONS; ++i) {
final File file = getFileNameForDiskIoCheck(dir, i+1);
try {
diskIoCheckWithoutNativeIo(file);
return;
} catch (IOException e) {
// Let's retry a few times before we really give up and
// declare the disk as bad.
ioe = e;
}
}
throw ioe; // Just rethrow the last exception to signal failure.
} catch(IOException e) {
throw new DiskErrorException("Error checking directory " + dir, e);
}
}
/**
* Try to perform some disk IO by writing to the given file
* without using Native IO.
*
* @param file file to check
* @throws IOException if there was a non-retriable error.
*/
private static void diskIoCheckWithoutNativeIo(File file)
throws IOException {
FileOutputStream fos = null;
try {
final FileIoProvider provider = fileIoProvider.get();
fos = provider.get(file);
provider.write(fos, new byte[1]);
fos.getFD().sync();
fos.close();
fos = null;
if (!file.delete() && file.exists()) {
throw new IOException("Failed to delete " + file);
}
file = null;
} finally {
IOUtils.cleanupWithLogger(LOG, fos);
FileUtils.deleteQuietly(file);
}
}
/**
* Generate a path name for a test file under the given directory.
*
* @return file object.
*/
@VisibleForTesting
static File getFileNameForDiskIoCheck(File dir, int iterationCount) {
if (iterationCount < DISK_IO_MAX_ITERATIONS) {
// Use file names of the format prefix.001 by default.
return new File(dir,
DISK_IO_FILE_PREFIX + String.format("%03d", iterationCount));
} else {
// If the first few checks then fail, try using a randomly generated
// file name.
return new File(dir, DISK_IO_FILE_PREFIX + UUID.randomUUID());
}
}
/**
* An interface that abstracts operations on {@link FileOutputStream}
* objects for testability.
*/
interface FileIoProvider {
FileOutputStream get(File f) throws FileNotFoundException;
void write(FileOutputStream fos, byte[] data) throws IOException;
}
/**
* The default implementation of {@link FileIoProvider}.
*/
private static class DefaultFileIoProvider implements FileIoProvider {
/**
* See {@link FileOutputStream#FileOutputStream(File)}.
*/
@Override
public FileOutputStream get(File f) throws FileNotFoundException {
return new FileOutputStream(f);
}
/**
* See {@link FileOutputStream#write(byte[])}.
*/
@Override
public void write(FileOutputStream fos, byte[] data) throws IOException {
fos.write(data);
}
}
/**
* Replace the {@link FileIoProvider} for tests.
* This method MUST NOT be used outside of unit tests.
*
* @param newFosProvider FileIoProvider
* @return the old FileIoProvider.
*/
@VisibleForTesting
static FileIoProvider replaceFileOutputStreamProvider(
FileIoProvider newFosProvider) {
return fileIoProvider.getAndSet(newFosProvider);
}
/**
* Retrieve the current {@link FileIoProvider}.
* This method MUST NOT be used outside of unit tests.
*
* @return the current FileIoProvider.
*/
@VisibleForTesting
static FileIoProvider getFileOutputStreamProvider() {
return fileIoProvider.get();
}
}
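The copied DiskChecker validates directories with plain File and java.nio operations plus a small write-and-sync probe, rather than shelling out the way the stock Hadoop class did under the old security-manager hacks. A minimal usage sketch of the class above (the directory path is illustrative):

```java
import java.io.File;
import org.apache.hadoop.util.DiskChecker;
import org.apache.hadoop.util.DiskChecker.DiskErrorException;

public class DiskCheckDemo {
  public static void main(String[] args) {
    File dir = new File("/tmp/solr-hdfs-test"); // illustrative path
    try {
      // Creates the directory if needed and verifies read/write/execute access.
      DiskChecker.checkDir(dir);
      // Additionally writes a small probe file and syncs it to disk.
      DiskChecker.checkDirWithDiskIo(dir);
    } catch (DiskErrorException e) {
      System.err.println("Disk check failed: " + e.getMessage());
    }
  }
}
```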


@@ -93,7 +93,6 @@ public class SharedFSAutoReplicaFailoverTest extends AbstractFullDistribZkTestBase {
   Set<Future<Object>> pending;
   private final Map<String, String> collectionUlogDirMap = new HashMap<>();
   @BeforeClass
   public static void hdfsFailoverBeforeClass() throws Exception {
     System.setProperty("solr.hdfs.blockcache.blocksperbank", "512");
@@ -104,10 +103,13 @@ public class SharedFSAutoReplicaFailoverTest extends AbstractFullDistribZkTestBase {
   @AfterClass
   public static void hdfsFailoverAfterClass() throws Exception {
+    try {
       HdfsTestUtil.teardownClass(dfsCluster);
+    } finally {
       System.clearProperty("solr.hdfs.blockcache.blocksperbank");
       dfsCluster = null;
     }
+  }
   @Before
   @Override
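Several test classes in this commit get the same treatment as shown above: teardown is wrapped in try/finally so static references are always nulled even if cluster shutdown throws. A generic sketch of that pattern (class and field names are illustrative, not Solr's):

```java
import org.junit.AfterClass;

public class TeardownPatternExample {
  private static Object dfsCluster; // stands in for MiniDFSCluster in the real tests

  @AfterClass
  public static void teardownClass() throws Exception {
    try {
      // shut down the cluster; this may throw
      shutdown(dfsCluster);
    } finally {
      // always drop static references so they don't leak across test classes
      dfsCluster = null;
      System.clearProperty("solr.hdfs.blockcache.blocksperbank");
    }
  }

  private static void shutdown(Object cluster) throws Exception {
    // placeholder for HdfsTestUtil.teardownClass(...)
  }
}
```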


@@ -0,0 +1,40 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.cloud.hdfs;
import java.util.Collections;
import java.util.List;
import org.apache.hadoop.security.GroupMappingServiceProvider;
/**
* Fake mapping for Hadoop to prevent falling back to Shell group provider
*/
public class FakeGroupMapping implements GroupMappingServiceProvider {

  @Override
  public List<String> getGroups(String user) {
    return Collections.singletonList("supergroup");
  }

  @Override
  public void cacheGroupsRefresh() {
  }

  @Override
  public void cacheGroupsAdd(List<String> groups) {
  }
}
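With hadoop.security.group.mapping pointing at this class, group resolution never reaches the shell-based provider. A hedged sketch of exercising it through Hadoop's Groups service (assuming Groups is constructed directly with the overridden configuration, as in the test core-site file shown earlier):

```java
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.Groups;

public class FakeGroupMappingDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Same override as the test core-site file.
    conf.set("hadoop.security.group.mapping",
        "org.apache.solr.cloud.hdfs.FakeGroupMapping");

    // Groups delegates to the configured GroupMappingServiceProvider, so this
    // returns [supergroup] without ever shelling out for supplementary groups.
    Groups groups = new Groups(conf);
    List<String> result = groups.getGroups("anyUser");
    System.out.println(result);
  }
}
```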


@@ -40,8 +40,7 @@ public class HDFSCollectionsAPITest extends SolrCloudTestCase {
   @BeforeClass
   public static void setupClass() throws Exception {
-    configureCluster(2)
-        .configure();
+    configureCluster(2).configure();
     dfsCluster = HdfsTestUtil.setupClass(createTempDir().toFile().getAbsolutePath());
@@ -54,6 +53,7 @@ public class HDFSCollectionsAPITest extends SolrCloudTestCase {
   public static void teardownClass() throws Exception {
     try {
       shutdownCluster(); // need to close before the MiniDFSCluster
+      cluster = null;
     } finally {
       try {
         HdfsTestUtil.teardownClass(dfsCluster);


@@ -33,8 +33,6 @@ import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
 @ThreadLeakFilters(defaultFilters = true, filters = {
     BadHdfsThreadsFilter.class // hdfs currently leaks thread(s)
 })
-// commented 20-July-2018 @LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 26-Mar-2018
-// commented out on: 24-Dec-2018 @LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 17-Aug-2018
 public class HdfsBasicDistributedZk2Test extends BasicDistributedZk2Test {
   private static MiniDFSCluster dfsCluster;
@@ -45,14 +43,15 @@ public class HdfsBasicDistributedZk2Test extends BasicDistributedZk2Test {
   @AfterClass
   public static void teardownClass() throws Exception {
+    try {
       HdfsTestUtil.teardownClass(dfsCluster);
+    } finally {
       dfsCluster = null;
     }
+  }
   @Override
   protected String getDataDir(String dataDir) throws IOException {
     return HdfsTestUtil.getDataDir(dfsCluster, dataDir);
   }
 }


@@ -52,9 +52,12 @@ public class HdfsRecoverLeaseTest extends SolrTestCaseJ4 {
   @AfterClass
   public static void afterClass() throws Exception {
+    try {
       HdfsTestUtil.teardownClass(dfsCluster);
+    } finally {
       dfsCluster = null;
     }
+  }
   @Before
   public void setUp() throws Exception {


@@ -44,9 +44,12 @@ public class HdfsSyncSliceTest extends SyncSliceTest {
   @AfterClass
   public static void teardownClass() throws Exception {
+    try {
       HdfsTestUtil.teardownClass(dfsCluster);
+    } finally {
       dfsCluster = null;
     }
+  }
   @Override
   protected String getDataDir(String dataDir) throws IOException {


@@ -20,11 +20,11 @@ import java.io.File;
 import java.lang.invoke.MethodHandles;
 import java.net.URI;
 import java.util.Enumeration;
+import java.util.HashMap;
 import java.util.Locale;
 import java.util.Map;
 import java.util.Timer;
 import java.util.TimerTask;
-import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ForkJoinPool;
 import java.util.concurrent.ForkJoinWorkerThread;
 import java.util.regex.Pattern;
@@ -56,7 +56,8 @@ public class HdfsTestUtil {
   private static final boolean HA_TESTING_ENABLED = false; // SOLR-XXX
-  private static Map<MiniDFSCluster,Timer> timers = new ConcurrentHashMap<>();
+  private static Map<MiniDFSCluster,Timer> timers = new HashMap<>();
+  private static final Object TIMERS_LOCK = new Object();
   private static FSDataOutputStream badTlogOutStream;
@@ -145,7 +146,12 @@ public class HdfsTestUtil {
       int rnd = random().nextInt(10000);
       Timer timer = new Timer();
+      synchronized (TIMERS_LOCK) {
+        if (timers == null) {
+          timers = new HashMap<>();
+        }
         timers.put(dfsCluster, timer);
+      }
       timer.schedule(new TimerTask() {
         @Override
@@ -156,7 +162,12 @@ public class HdfsTestUtil {
     } else if (haTesting && rndMode == 2) {
       int rnd = random().nextInt(30000);
       Timer timer = new Timer();
+      synchronized (TIMERS_LOCK) {
+        if (timers == null) {
+          timers = new HashMap<>();
+        }
         timers.put(dfsCluster, timer);
+      }
       timer.schedule(new TimerTask() {
         @Override
@@ -196,19 +207,23 @@ public class HdfsTestUtil {
   public static Configuration getClientConfiguration(MiniDFSCluster dfsCluster) {
     Configuration conf = getBasicConfiguration(dfsCluster.getConfiguration(0));
-    if (dfsCluster.getNameNodeInfos().length > 1) {
+    if (dfsCluster.getNumNameNodes() > 1) {
       HATestUtil.setFailoverConfigurations(dfsCluster, conf);
     }
     return conf;
   }
   public static void teardownClass(MiniDFSCluster dfsCluster) throws Exception {
+    HdfsUtil.TEST_CONF = null;
     if (badTlogOutStream != null) {
       IOUtils.closeQuietly(badTlogOutStream);
+      badTlogOutStream = null;
     }
     if (badTlogOutStreamFs != null) {
       IOUtils.closeQuietly(badTlogOutStreamFs);
+      badTlogOutStreamFs = null;
     }
     try {
@@ -218,10 +233,17 @@ public class HdfsTestUtil {
       log.error("Exception trying to reset solr.directoryFactory", e);
     }
     if (dfsCluster != null) {
+      synchronized (TIMERS_LOCK) {
+        if (timers != null) {
           Timer timer = timers.remove(dfsCluster);
           if (timer != null) {
             timer.cancel();
           }
+          if (timers.isEmpty()) {
+            timers = null;
+          }
+        }
+      }
       try {
         dfsCluster.shutdown(true);
       } catch (Error e) {
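The timers map is now a plain HashMap guarded by TIMERS_LOCK, lazily re-created on demand and nulled out once empty so the static reference does not outlive the test run. A stripped-down sketch of that pattern (the key type here is illustrative):

```java
import java.util.HashMap;
import java.util.Map;
import java.util.Timer;

public class GuardedTimers {
  private static Map<Object, Timer> timers = new HashMap<>();
  private static final Object TIMERS_LOCK = new Object();

  static void register(Object cluster, Timer timer) {
    synchronized (TIMERS_LOCK) {
      if (timers == null) {
        timers = new HashMap<>(); // recreate after a previous teardown nulled it
      }
      timers.put(cluster, timer);
    }
  }

  static void unregister(Object cluster) {
    synchronized (TIMERS_LOCK) {
      if (timers != null) {
        Timer timer = timers.remove(cluster);
        if (timer != null) {
          timer.cancel();
        }
        if (timers.isEmpty()) {
          timers = null; // drop the static reference between test suites
        }
      }
    }
  }
}
```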


@@ -73,6 +73,7 @@ public class HdfsWriteToMultipleCollectionsTest extends BasicDistributedZkTest {
       HdfsTestUtil.teardownClass(dfsCluster);
     } finally {
       dfsCluster = null;
+      schemaString = null;
     }
   }


@@ -71,6 +71,7 @@ public class CheckHdfsIndexTest extends AbstractFullDistribZkTestBase {
       HdfsTestUtil.teardownClass(dfsCluster);
     } finally {
       dfsCluster = null;
+      path = null;
     }
   }


@@ -65,9 +65,10 @@ import static org.apache.solr.update.processor.DistributingUpdateProcessorFactor
 // TODO: longer term this should be combined with TestRecovery somehow ??
 public class TestRecoveryHdfs extends SolrTestCaseJ4 {
   // means that we've seen the leader and have version info (i.e. we are a non-leader replica)
-  private static String FROM_LEADER = DistribPhase.FROMLEADER.toString();
+  private static final String FROM_LEADER = DistribPhase.FROMLEADER.toString();
-  private static int timeout=60; // acquire timeout in seconds. change this to a huge number when debugging to prevent threads from advancing.
+  // acquire timeout in seconds. change this to a huge number when debugging to prevent threads from advancing.
+  private static final int TIMEOUT = 60;
   private static MiniDFSCluster dfsCluster;
   private static String hdfsUri;
@@ -102,7 +103,7 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
       HdfsTestUtil.teardownClass(dfsCluster);
     } finally {
       dfsCluster = null;
-      hdfsDataDir = null;
+      hdfsUri = null;
       System.clearProperty("solr.ulog.dir");
       System.clearProperty("test.build.data");
       System.clearProperty("test.cache.data");
@@ -142,7 +143,7 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
     UpdateLog.testing_logReplayHook = () -> {
       try {
-        assertTrue(logReplay.tryAcquire(timeout, TimeUnit.SECONDS));
+        assertTrue(logReplay.tryAcquire(TIMEOUT, TimeUnit.SECONDS));
       } catch (Exception e) {
         throw new RuntimeException(e);
       }
@@ -183,7 +184,7 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
     assertJQ(req("qt","/get", "getVersions",""+versions.size()),"/versions==" + versions);
     // wait until recovery has finished
-    assertTrue(logReplayFinish.tryAcquire(timeout, TimeUnit.SECONDS));
+    assertTrue(logReplayFinish.tryAcquire(TIMEOUT, TimeUnit.SECONDS));
     assertJQ(req("q","*:*") ,"/response/numFound==3");
@@ -203,7 +204,7 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
     // h.getCore().getUpdateHandler().getUpdateLog().recoverFromLog();
     // wait until recovery has finished
-    assertTrue(logReplayFinish.tryAcquire(timeout, TimeUnit.SECONDS));
+    assertTrue(logReplayFinish.tryAcquire(TIMEOUT, TimeUnit.SECONDS));
     assertJQ(req("q","*:*") ,"/response/numFound==5");
     assertJQ(req("q","id:A2") ,"/response/numFound==0");
@@ -235,7 +236,7 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
     UpdateLog.testing_logReplayHook = () -> {
       try {
-        assertTrue(logReplay.tryAcquire(timeout, TimeUnit.SECONDS));
+        assertTrue(logReplay.tryAcquire(TIMEOUT, TimeUnit.SECONDS));
       } catch (Exception e) {
         throw new RuntimeException(e);
       }
@@ -386,7 +387,7 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
     UpdateLog.testing_logReplayHook = () -> {
       try {
-        assertTrue(logReplay.tryAcquire(timeout, TimeUnit.SECONDS));
+        assertTrue(logReplay.tryAcquire(TIMEOUT, TimeUnit.SECONDS));
       } catch (Exception e) {
         throw new RuntimeException(e);
       }
@@ -606,7 +607,7 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
     UpdateLog.testing_logReplayHook = () -> {
       try {
-        assertTrue(logReplay.tryAcquire(timeout, TimeUnit.SECONDS));
+        assertTrue(logReplay.tryAcquire(TIMEOUT, TimeUnit.SECONDS));
       } catch (Exception e) {
         throw new RuntimeException(e);
       }
@@ -661,7 +662,7 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
     UpdateLog.testing_logReplayHook = () -> {
       try {
-        assertTrue(logReplay.tryAcquire(timeout, TimeUnit.SECONDS));
+        assertTrue(logReplay.tryAcquire(TIMEOUT, TimeUnit.SECONDS));
       } catch (Exception e) {
         throw new RuntimeException(e);
       }
@@ -718,7 +719,7 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
     assertJQ(req("qt","/get", "getVersions",""+maxReq), "/versions==" + versions.subList(0,Math.min(maxReq,start)));
     logReplay.release(1000);
-    assertTrue(logReplayFinish.tryAcquire(timeout, TimeUnit.SECONDS));
+    assertTrue(logReplayFinish.tryAcquire(TIMEOUT, TimeUnit.SECONDS));
     assertJQ(req("qt","/get", "getVersions",""+maxReq), "/versions==" + versions.subList(0,Math.min(maxReq,start)));
@@ -768,7 +769,7 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
     UpdateLog.testing_logReplayHook = () -> {
       try {
-        assertTrue(logReplay.tryAcquire(timeout, TimeUnit.SECONDS));
+        assertTrue(logReplay.tryAcquire(TIMEOUT, TimeUnit.SECONDS));
       } catch (Exception e) {
         throw new RuntimeException(e);
       }
@@ -802,7 +803,7 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
     logReplayFinish.drainPermits();
     ignoreException("OutOfBoundsException"); // this is what the corrupted log currently produces... subject to change.
     createCore();
-    assertTrue(logReplayFinish.tryAcquire(timeout, TimeUnit.SECONDS));
+    assertTrue(logReplayFinish.tryAcquire(TIMEOUT, TimeUnit.SECONDS));
     resetExceptionIgnores();
     assertJQ(req("q","*:*") ,"/response/numFound==3");
@@ -896,7 +897,7 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
     UpdateLog.testing_logReplayHook = () -> {
       try {
-        assertTrue(logReplay.tryAcquire(timeout, TimeUnit.SECONDS));
+        assertTrue(logReplay.tryAcquire(TIMEOUT, TimeUnit.SECONDS));
       } catch (Exception e) {
         throw new RuntimeException(e);
       }
@@ -948,7 +949,7 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
     logReplayFinish.drainPermits();
     ignoreException("OutOfBoundsException"); // this is what the corrupted log currently produces... subject to change.
     createCore();
-    assertTrue(logReplayFinish.tryAcquire(timeout, TimeUnit.SECONDS));
+    assertTrue(logReplayFinish.tryAcquire(TIMEOUT, TimeUnit.SECONDS));
     resetExceptionIgnores();
     assertJQ(req("q","*:*") ,"/response/numFound==6");
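These hunks replace the mutable static timeout with a final TIMEOUT constant that the semaphore-gated replay hooks wait on. A generic sketch of that gating pattern, detached from Solr's UpdateLog internals (all names are illustrative):

```java
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;

public class GatedHookExample {
  private static final int TIMEOUT = 60; // seconds

  public static void main(String[] args) throws Exception {
    Semaphore gate = new Semaphore(0);

    // The hook blocks until the test releases the gate, or fails after TIMEOUT.
    Runnable hook = () -> {
      try {
        if (!gate.tryAcquire(TIMEOUT, TimeUnit.SECONDS)) {
          throw new IllegalStateException("timed out waiting for the test to release the gate");
        }
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new RuntimeException(e);
      }
    };

    Thread replay = new Thread(hook);
    replay.start();
    gate.release(); // the test decides when the hook may proceed
    replay.join();
  }
}
```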


@@ -62,9 +62,12 @@ public class HdfsDirectoryTest extends SolrTestCaseJ4 {
   @AfterClass
   public static void afterClass() throws Exception {
+    try {
       HdfsTestUtil.teardownClass(dfsCluster);
+    } finally {
       dfsCluster = null;
     }
+  }
   @Before
   public void setUp() throws Exception {


@@ -48,9 +48,12 @@ public class HdfsLockFactoryTest extends SolrTestCaseJ4 {
   @AfterClass
   public static void afterClass() throws Exception {
+    try {
       HdfsTestUtil.teardownClass(dfsCluster);
+    } finally {
       dfsCluster = null;
     }
+  }
   @Test
   public void testBasic() throws IOException {


@@ -70,8 +70,8 @@ public class TestHdfsUpdateLog extends SolrTestCaseJ4 {
     try {
       HdfsTestUtil.teardownClass(dfsCluster);
     } finally {
-      hdfsDataDir = null;
       dfsCluster = null;
+      hdfsUri = null;
       System.clearProperty("solr.ulog.dir");
       System.clearProperty("test.build.data");
       System.clearProperty("test.cache.data");


@@ -331,6 +331,7 @@ public abstract class SolrTestCaseJ4 extends SolrTestCase {
     if (null != testExecutor) {
       ExecutorUtil.shutdownAndAwaitTermination(testExecutor);
+      testExecutor = null;
     }
     resetExceptionIgnores();
@@ -489,6 +490,7 @@ public abstract class SolrTestCaseJ4 extends SolrTestCase {
     changedFactory = false;
     if (savedFactory != null) {
       System.setProperty("solr.directoryFactory", savedFactory);
+      savedFactory = null;
     } else {
       System.clearProperty("solr.directoryFactory");
     }
@@ -895,6 +897,7 @@ public abstract class SolrTestCaseJ4 extends SolrTestCase {
     lrf = null;
     configString = schemaString = null;
     initCoreDataDir = null;
+    hdfsDataDir = null;
   }
   /** Validates an update XML String is successful


@@ -275,10 +275,13 @@ public class SolrCloudTestCase extends SolrTestCaseJ4 {
   @AfterClass
   public static void shutdownCluster() throws Exception {
     if (cluster != null) {
+      try {
         cluster.shutdown();
-      }
+      } finally {
         cluster = null;
       }
+    }
+  }
   @Before
   public void checkClusterConfiguration() {


@@ -20,8 +20,7 @@ import java.security.AccessController;
 import java.security.PrivilegedAction;
 /**
- * A {@link SecurityManager} that prevents tests calling {@link System#exit(int)},
- * and implements some hacks for hadoop.
+ * A {@link SecurityManager} that prevents tests calling {@link System#exit(int)}.
  * Only the test runner itself is allowed to exit the JVM.
  * All other security checks are handled by the default security policy.
  * <p>
@@ -43,93 +42,6 @@ public final class SolrSecurityManager extends SecurityManager {
     super();
   }
-  // TODO: move this stuff into a Solr (non-test) SecurityManager!
-  /**
-   * {@inheritDoc}
-   * <p>This method implements hacks to workaround hadoop's garbage Shell and FileUtil code
-   */
-  @Override
-  public void checkExec(String cmd) {
-    // NOTE: it would be tempting to just allow anything from hadoop's Shell class, but then
-    // that would just give an easy vector for RCE (use hadoop Shell instead of e.g. ProcessBuilder)
-    // so we whitelist actual caller impl methods instead.
-    for (StackTraceElement element : Thread.currentThread().getStackTrace()) {
-      // hadoop insists on shelling out to get the user's supplementary groups?
-      if ("org.apache.hadoop.security.ShellBasedUnixGroupsMapping".equals(element.getClassName()) &&
-          "getGroups".equals(element.getMethodName())) {
-        return;
-      }
-      // hadoop insists on shelling out to parse 'df' command instead of using FileStore?
-      if ("org.apache.hadoop.fs.DF".equals(element.getClassName()) &&
-          "getFilesystem".equals(element.getMethodName())) {
-        return;
-      }
-      // hadoop insists on shelling out to parse 'du' command instead of using FileStore?
-      if ("org.apache.hadoop.fs.DU".equals(element.getClassName()) &&
-          "refresh".equals(element.getMethodName())) {
-        return;
-      }
-      // hadoop insists on shelling out to parse 'ls' command instead of java nio apis?
-      if ("org.apache.hadoop.util.DiskChecker".equals(element.getClassName()) &&
-          "checkDir".equals(element.getMethodName())) {
-        return;
-      }
-      // hadoop insists on shelling out to parse 'stat' command instead of Files.getAttributes?
-      if ("org.apache.hadoop.fs.HardLink".equals(element.getClassName()) &&
-          "getLinkCount".equals(element.getMethodName())) {
-        return;
-      }
-      // hadoop "canExecute" method doesn't handle securityexception and fails completely.
-      // so, lie to it, and tell it we will happily execute, so it does not crash.
-      if ("org.apache.hadoop.fs.FileUtil".equals(element.getClassName()) &&
-          "canExecute".equals(element.getMethodName())) {
-        return;
-      }
-    }
-    super.checkExec(cmd);
-  }
-  /**
-   * {@inheritDoc}
-   * <p>This method implements hacks to workaround hadoop's garbage FileUtil code
-   */
-  @Override
-  public void checkWrite(String file) {
-    for (StackTraceElement element : Thread.currentThread().getStackTrace()) {
-      // hadoop "canWrite" method doesn't handle securityexception and fails completely.
-      // so, lie to it, and tell it we will happily write, so it does not crash.
-      if ("org.apache.hadoop.fs.FileUtil".equals(element.getClassName()) &&
-          "canWrite".equals(element.getMethodName())) {
-        return;
-      }
-    }
-    super.checkWrite(file);
-  }
-  /**
-   * {@inheritDoc}
-   * <p>This method implements hacks to workaround hadoop's garbage FileUtil code
-   */
-  @Override
-  public void checkRead(String file) {
-    for (StackTraceElement element : Thread.currentThread().getStackTrace()) {
-      // hadoop "createPermissionsDiagnosisString" method doesn't handle securityexception and fails completely.
-      // it insists on climbing up full directory tree!
-      // so, lie to it, and tell it we will happily read, so it does not crash.
-      if ("org.apache.hadoop.hdfs.MiniDFSCluster".equals(element.getClassName()) &&
-          "createPermissionsDiagnosisString".equals(element.getMethodName())) {
-        return;
-      }
-      // hadoop "canRead" method doesn't handle securityexception and fails completely.
-      // so, lie to it, and tell it we will happily read, so it does not crash.
-      if ("org.apache.hadoop.fs.FileUtil".equals(element.getClassName()) &&
-          "canRead".equals(element.getMethodName())) {
-        return;
-      }
-    }
-    super.checkRead(file);
-  }
   /**
    * {@inheritDoc}
    * <p>This method inspects the stack trace and checks who is calling