diff --git a/lucene/tools/junit4/solr-tests.policy b/lucene/tools/junit4/solr-tests.policy index 7de5660027f..00dae246fed 100644 --- a/lucene/tools/junit4/solr-tests.policy +++ b/lucene/tools/junit4/solr-tests.policy @@ -103,6 +103,8 @@ grant { permission java.lang.RuntimePermission "loadLibrary.jaas"; permission java.lang.RuntimePermission "loadLibrary.jaas_unix"; permission java.lang.RuntimePermission "loadLibrary.jaas_nt"; + // needed by hadoop common RawLocalFileSystem for java nio getOwner + permission java.lang.RuntimePermission "accessUserInformation"; // needed by hadoop hdfs permission java.lang.RuntimePermission "readFileDescriptor"; permission java.lang.RuntimePermission "writeFileDescriptor"; diff --git a/solr/core/ivy.xml b/solr/core/ivy.xml index f7843a1eff8..5b6c53a0261 100644 --- a/solr/core/ivy.xml +++ b/solr/core/ivy.xml @@ -38,6 +38,7 @@ + diff --git a/solr/core/src/java/org/apache/solr/util/FSHDFSUtils.java b/solr/core/src/java/org/apache/solr/util/FSHDFSUtils.java index 8f784d41dfd..dc834ab4846 100644 --- a/solr/core/src/java/org/apache/solr/util/FSHDFSUtils.java +++ b/solr/core/src/java/org/apache/solr/util/FSHDFSUtils.java @@ -37,7 +37,7 @@ public class FSHDFSUtils { private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); // internal, for tests - public static AtomicLong RECOVER_LEASE_SUCCESS_COUNT = new AtomicLong(); + public static final AtomicLong RECOVER_LEASE_SUCCESS_COUNT = new AtomicLong(); public interface CallerInfo { boolean isCallerClosed(); diff --git a/solr/core/src/test-files/core-site.xml b/solr/core/src/test-files/core-site.xml new file mode 100644 index 00000000000..df15d529354 --- /dev/null +++ b/solr/core/src/test-files/core-site.xml @@ -0,0 +1,23 @@ + + + + + hadoop.security.group.mapping + org.apache.solr.cloud.hdfs.FakeGroupMapping + + diff --git a/solr/core/src/test/org/apache/hadoop/fs/FileUtil.java b/solr/core/src/test/org/apache/hadoop/fs/FileUtil.java new file mode 100644 index 00000000000..98e281dc821 --- /dev/null +++ b/solr/core/src/test/org/apache/hadoop/fs/FileUtil.java @@ -0,0 +1,1652 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.fs; + +import java.io.BufferedInputStream; +import java.io.BufferedOutputStream; +import java.io.BufferedReader; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileNotFoundException; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.io.OutputStream; +import java.net.InetAddress; +import java.net.URI; +import java.net.UnknownHostException; +import java.nio.charset.Charset; +import java.nio.file.AccessDeniedException; +import java.nio.file.FileSystems; +import java.nio.file.Files; +import java.util.ArrayList; +import java.util.Enumeration; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.jar.Attributes; +import java.util.jar.JarOutputStream; +import java.util.jar.Manifest; +import java.util.zip.GZIPInputStream; +import java.util.zip.ZipEntry; +import java.util.zip.ZipFile; +import java.util.zip.ZipInputStream; + +import org.apache.commons.collections.map.CaseInsensitiveMap; +import org.apache.commons.compress.archivers.tar.TarArchiveEntry; +import org.apache.commons.compress.archivers.tar.TarArchiveInputStream; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.permission.FsAction; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.io.nativeio.NativeIO; +import org.apache.hadoop.util.Shell; +import org.apache.hadoop.util.Shell.ShellCommandExecutor; +import org.apache.hadoop.util.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * A collection of file-processing util methods + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class FileUtil { + + private static final Logger LOG = LoggerFactory.getLogger(FileUtil.class); + + /* The error code is defined in winutils to indicate insufficient + * privilege to create symbolic links. This value need to keep in + * sync with the constant of the same name in: + * "src\winutils\common.h" + * */ + public static final int SYMLINK_NO_PRIVILEGE = 2; + + /** + * Buffer size for copy the content of compressed file to new file. + */ + private static final int BUFFER_SIZE = 8_192; + + /** + * convert an array of FileStatus to an array of Path + * + * @param stats + * an array of FileStatus objects + * @return an array of paths corresponding to the input + */ + public static Path[] stat2Paths(FileStatus[] stats) { + if (stats == null) + return null; + Path[] ret = new Path[stats.length]; + for (int i = 0; i < stats.length; ++i) { + ret[i] = stats[i].getPath(); + } + return ret; + } + + /** + * convert an array of FileStatus to an array of Path. + * If stats if null, return path + * @param stats + * an array of FileStatus objects + * @param path + * default path to return in stats is null + * @return an array of paths corresponding to the input + */ + public static Path[] stat2Paths(FileStatus[] stats, Path path) { + if (stats == null) + return new Path[]{path}; + else + return stat2Paths(stats); + } + + /** + * Register all files recursively to be deleted on exit. 
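+   * <p>A minimal illustration (the scratch path below is hypothetical): register
+   * a temporary tree so it is cleaned up when the JVM exits normally.
+   * <pre>
+   *   File scratch = new File(System.getProperty("java.io.tmpdir"), "solr-scratch");
+   *   scratch.mkdirs();
+   *   new File(scratch, "segment.dat").createNewFile();
+   *   FileUtil.fullyDeleteOnExit(scratch);   // scratch and its children are deleted at exit
+   * </pre>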
+ * @param file File/directory to be deleted + */ + public static void fullyDeleteOnExit(final File file) { + file.deleteOnExit(); + if (file.isDirectory()) { + File[] files = file.listFiles(); + if (files != null) { + for (File child : files) { + fullyDeleteOnExit(child); + } + } + } + } + + /** + * Delete a directory and all its contents. If + * we return false, the directory may be partially-deleted. + * (1) If dir is symlink to a file, the symlink is deleted. The file pointed + * to by the symlink is not deleted. + * (2) If dir is symlink to a directory, symlink is deleted. The directory + * pointed to by symlink is not deleted. + * (3) If dir is a normal file, it is deleted. + * (4) If dir is a normal directory, then dir and all its contents recursively + * are deleted. + */ + public static boolean fullyDelete(final File dir) { + return fullyDelete(dir, false); + } + + /** + * Delete a directory and all its contents. If + * we return false, the directory may be partially-deleted. + * (1) If dir is symlink to a file, the symlink is deleted. The file pointed + * to by the symlink is not deleted. + * (2) If dir is symlink to a directory, symlink is deleted. The directory + * pointed to by symlink is not deleted. + * (3) If dir is a normal file, it is deleted. + * (4) If dir is a normal directory, then dir and all its contents recursively + * are deleted. + * @param dir the file or directory to be deleted + * @param tryGrantPermissions true if permissions should be modified to delete a file. + * @return true on success false on failure. + */ + public static boolean fullyDelete(final File dir, boolean tryGrantPermissions) { + if (tryGrantPermissions) { + // try to chmod +rwx the parent folder of the 'dir': + File parent = dir.getParentFile(); + grantPermissions(parent); + } + if (deleteImpl(dir, false)) { + // dir is (a) normal file, (b) symlink to a file, (c) empty directory or + // (d) symlink to a directory + return true; + } + // handle nonempty directory deletion + if (!fullyDeleteContents(dir, tryGrantPermissions)) { + return false; + } + return deleteImpl(dir, true); + } + + /** + * Returns the target of the given symlink. Returns the empty string if + * the given path does not refer to a symlink or there is an error + * accessing the symlink. + * @param f File representing the symbolic link. + * @return The target of the symbolic link, empty string on error or if not + * a symlink. + */ + public static String readLink(File f) { + /* NB: Use readSymbolicLink in java.nio.file.Path once available. Could + * use getCanonicalPath in File to get the target of the symlink but that + * does not indicate if the given path refers to a symlink. + */ + + if (f == null) { + LOG.warn("Can not read a null symLink"); + return ""; + } + + try { + return Shell.execCommand( + Shell.getReadlinkCommand(f.toString())).trim(); + } catch (IOException x) { + return ""; + } + } + + /* + * Pure-Java implementation of "chmod +rwx f". 
+ */ + private static void grantPermissions(final File f) { + FileUtil.setExecutable(f, true); + FileUtil.setReadable(f, true); + FileUtil.setWritable(f, true); + } + + private static boolean deleteImpl(final File f, final boolean doLog) { + if (f == null) { + LOG.warn("null file argument."); + return false; + } + final boolean wasDeleted = f.delete(); + if (wasDeleted) { + return true; + } + final boolean ex = f.exists(); + if (doLog && ex) { + LOG.warn("Failed to delete file or dir [" + + f.getAbsolutePath() + "]: it still exists."); + } + return !ex; + } + + /** + * Delete the contents of a directory, not the directory itself. If + * we return false, the directory may be partially-deleted. + * If dir is a symlink to a directory, all the contents of the actual + * directory pointed to by dir will be deleted. + */ + public static boolean fullyDeleteContents(final File dir) { + return fullyDeleteContents(dir, false); + } + + /** + * Delete the contents of a directory, not the directory itself. If + * we return false, the directory may be partially-deleted. + * If dir is a symlink to a directory, all the contents of the actual + * directory pointed to by dir will be deleted. + * @param tryGrantPermissions if 'true', try grant +rwx permissions to this + * and all the underlying directories before trying to delete their contents. + */ + public static boolean fullyDeleteContents(final File dir, final boolean tryGrantPermissions) { + if (tryGrantPermissions) { + // to be able to list the dir and delete files from it + // we must grant the dir rwx permissions: + grantPermissions(dir); + } + boolean deletionSucceeded = true; + final File[] contents = dir.listFiles(); + if (contents != null) { + for (int i = 0; i < contents.length; i++) { + if (contents[i].isFile()) { + if (!deleteImpl(contents[i], true)) {// normal file or symlink to another file + deletionSucceeded = false; + continue; // continue deletion of other files/dirs under dir + } + } else { + // Either directory or symlink to another directory. + // Try deleting the directory as this might be a symlink + boolean b = false; + b = deleteImpl(contents[i], false); + if (b){ + //this was indeed a symlink or an empty directory + continue; + } + // if not an empty directory or symlink let + // fullydelete handle it. + if (!fullyDelete(contents[i], tryGrantPermissions)) { + deletionSucceeded = false; + // continue deletion of other files/dirs under dir + } + } + } + } + return deletionSucceeded; + } + + /** + * Recursively delete a directory. + * + * @param fs {@link FileSystem} on which the path is present + * @param dir directory to recursively delete + * @throws IOException Exception on delete + * @deprecated Use {@link FileSystem#delete(Path, boolean)} + */ + @Deprecated + public static void fullyDelete(FileSystem fs, Path dir) + throws IOException { + fs.delete(dir, true); + } + + // + // If the destination is a subdirectory of the source, then + // generate exception + // + private static void checkDependencies(FileSystem srcFS, + Path src, + FileSystem dstFS, + Path dst) + throws IOException { + if (srcFS == dstFS) { + String srcq = srcFS.makeQualified(src).toString() + Path.SEPARATOR; + String dstq = dstFS.makeQualified(dst).toString() + Path.SEPARATOR; + if (dstq.startsWith(srcq)) { + if (srcq.length() == dstq.length()) { + throw new IOException("Cannot copy " + src + " to itself."); + } else { + throw new IOException("Cannot copy " + src + " to its subdirectory " + + dst); + } + } + } + } + + /** Copy files between FileSystems. 
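+   * <p>Sketch of a typical call (the URI and paths are hypothetical): copy one
+   * file from the local FileSystem into an HDFS FileSystem, keeping the source.
+   * <pre>
+   *   Configuration conf = new Configuration();
+   *   FileSystem local = FileSystem.getLocal(conf);
+   *   FileSystem hdfs  = FileSystem.get(URI.create("hdfs://localhost:8020/"), conf);
+   *   FileUtil.copy(local, new Path("/tmp/in.txt"),
+   *                 hdfs, new Path("/data/in.txt"),
+   *                 false, conf);             // false: do not delete the source
+   * </pre>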
*/ + public static boolean copy(FileSystem srcFS, Path src, + FileSystem dstFS, Path dst, + boolean deleteSource, + Configuration conf) throws IOException { + return copy(srcFS, src, dstFS, dst, deleteSource, true, conf); + } + + public static boolean copy(FileSystem srcFS, Path[] srcs, + FileSystem dstFS, Path dst, + boolean deleteSource, + boolean overwrite, Configuration conf) + throws IOException { + boolean gotException = false; + boolean returnVal = true; + StringBuilder exceptions = new StringBuilder(); + + if (srcs.length == 1) + return copy(srcFS, srcs[0], dstFS, dst, deleteSource, overwrite, conf); + + // Check if dest is directory + try { + FileStatus sdst = dstFS.getFileStatus(dst); + if (!sdst.isDirectory()) + throw new IOException("copying multiple files, but last argument `" + + dst + "' is not a directory"); + } catch (FileNotFoundException e) { + throw new IOException( + "`" + dst + "': specified destination directory " + + "does not exist", e); + } + + for (Path src : srcs) { + try { + if (!copy(srcFS, src, dstFS, dst, deleteSource, overwrite, conf)) + returnVal = false; + } catch (IOException e) { + gotException = true; + exceptions.append(e.getMessage()); + exceptions.append("\n"); + } + } + if (gotException) { + throw new IOException(exceptions.toString()); + } + return returnVal; + } + + /** Copy files between FileSystems. */ + public static boolean copy(FileSystem srcFS, Path src, + FileSystem dstFS, Path dst, + boolean deleteSource, + boolean overwrite, + Configuration conf) throws IOException { + FileStatus fileStatus = srcFS.getFileStatus(src); + return copy(srcFS, fileStatus, dstFS, dst, deleteSource, overwrite, conf); + } + + /** Copy files between FileSystems. */ + public static boolean copy(FileSystem srcFS, FileStatus srcStatus, + FileSystem dstFS, Path dst, + boolean deleteSource, + boolean overwrite, + Configuration conf) throws IOException { + Path src = srcStatus.getPath(); + dst = checkDest(src.getName(), dstFS, dst, overwrite); + if (srcStatus.isDirectory()) { + checkDependencies(srcFS, src, dstFS, dst); + if (!dstFS.mkdirs(dst)) { + return false; + } + FileStatus contents[] = srcFS.listStatus(src); + for (int i = 0; i < contents.length; i++) { + copy(srcFS, contents[i], dstFS, + new Path(dst, contents[i].getPath().getName()), + deleteSource, overwrite, conf); + } + } else { + InputStream in=null; + OutputStream out = null; + try { + in = srcFS.open(src); + out = dstFS.create(dst, overwrite); + IOUtils.copyBytes(in, out, conf, true); + } catch (IOException e) { + IOUtils.closeStream(out); + IOUtils.closeStream(in); + throw e; + } + } + if (deleteSource) { + return srcFS.delete(src, true); + } else { + return true; + } + + } + + /** Copy local files to a FileSystem. 
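+   * <p>Illustrative only (hypothetical paths), given a destination FileSystem
+   * {@code dstFS} and a {@code Configuration conf}: push a local {@code File}
+   * into the destination FileSystem and keep the local copy.
+   * <pre>
+   *   FileUtil.copy(new File("/tmp/report.csv"), dstFS,
+   *                 new Path("/reports/report.csv"), false, conf);
+   * </pre>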
*/ + public static boolean copy(File src, + FileSystem dstFS, Path dst, + boolean deleteSource, + Configuration conf) throws IOException { + dst = checkDest(src.getName(), dstFS, dst, false); + + if (src.isDirectory()) { + if (!dstFS.mkdirs(dst)) { + return false; + } + File contents[] = listFiles(src); + for (int i = 0; i < contents.length; i++) { + copy(contents[i], dstFS, new Path(dst, contents[i].getName()), + deleteSource, conf); + } + } else if (src.isFile()) { + InputStream in = null; + OutputStream out =null; + try { + in = new FileInputStream(src); + out = dstFS.create(dst); + IOUtils.copyBytes(in, out, conf); + } catch (IOException e) { + IOUtils.closeStream( out ); + IOUtils.closeStream( in ); + throw e; + } + } else if (!src.canRead()) { + throw new IOException(src.toString() + + ": Permission denied"); + + } else { + throw new IOException(src.toString() + + ": No such file or directory"); + } + if (deleteSource) { + return FileUtil.fullyDelete(src); + } else { + return true; + } + } + + /** Copy FileSystem files to local files. */ + public static boolean copy(FileSystem srcFS, Path src, + File dst, boolean deleteSource, + Configuration conf) throws IOException { + FileStatus filestatus = srcFS.getFileStatus(src); + return copy(srcFS, filestatus, dst, deleteSource, conf); + } + + /** Copy FileSystem files to local files. */ + private static boolean copy(FileSystem srcFS, FileStatus srcStatus, + File dst, boolean deleteSource, + Configuration conf) throws IOException { + Path src = srcStatus.getPath(); + if (srcStatus.isDirectory()) { + if (!dst.mkdirs()) { + return false; + } + FileStatus contents[] = srcFS.listStatus(src); + for (int i = 0; i < contents.length; i++) { + copy(srcFS, contents[i], + new File(dst, contents[i].getPath().getName()), + deleteSource, conf); + } + } else { + InputStream in = srcFS.open(src); + IOUtils.copyBytes(in, new FileOutputStream(dst), conf); + } + if (deleteSource) { + return srcFS.delete(src, true); + } else { + return true; + } + } + + private static Path checkDest(String srcName, FileSystem dstFS, Path dst, + boolean overwrite) throws IOException { + FileStatus sdst; + try { + sdst = dstFS.getFileStatus(dst); + } catch (FileNotFoundException e) { + sdst = null; + } + if (null != sdst) { + if (sdst.isDirectory()) { + if (null == srcName) { + throw new PathIsDirectoryException(dst.toString()); + } + return checkDest(null, dstFS, new Path(dst, srcName), overwrite); + } else if (!overwrite) { + throw new PathExistsException(dst.toString(), + "Target " + dst + " already exists"); + } + } + return dst; + } + + /** + * Convert a os-native filename to a path that works for the shell. + * @param filename The filename to convert + * @return The unix pathname + * @throws IOException on windows, there can be problems with the subprocess + */ + public static String makeShellPath(String filename) throws IOException { + return filename; + } + + /** + * Convert a os-native filename to a path that works for the shell. + * @param file The filename to convert + * @return The unix pathname + * @throws IOException on windows, there can be problems with the subprocess + */ + public static String makeShellPath(File file) throws IOException { + return makeShellPath(file, false); + } + + /** + * Convert a os-native filename to a path that works for the shell + * and avoids script injection attacks. 
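+   * <p>For instance (hypothetical file name), a path containing a single quote is
+   * escaped so it can safely be wrapped in single quotes in a shell command:
+   * <pre>
+   *   String p = FileUtil.makeSecureShellPath(new File("/tmp/it's.txt"));
+   *   // p is /tmp/it'\''s.txt, safe inside '...' quoting
+   * </pre>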
+ * @param file The filename to convert + * @return The unix pathname + * @throws IOException on windows, there can be problems with the subprocess + */ + public static String makeSecureShellPath(File file) throws IOException { + if (Shell.WINDOWS) { + // Currently it is never called, but it might be helpful in the future. + throw new UnsupportedOperationException("Not implemented for Windows"); + } else { + return makeShellPath(file, false).replace("'", "'\\''"); + } + } + + /** + * Convert a os-native filename to a path that works for the shell. + * @param file The filename to convert + * @param makeCanonicalPath + * Whether to make canonical path for the file passed + * @return The unix pathname + * @throws IOException on windows, there can be problems with the subprocess + */ + public static String makeShellPath(File file, boolean makeCanonicalPath) + throws IOException { + if (makeCanonicalPath) { + return makeShellPath(file.getCanonicalPath()); + } else { + return makeShellPath(file.toString()); + } + } + + /** + * Takes an input dir and returns the du on that local directory. Very basic + * implementation. + * + * @param dir + * The input dir to get the disk space of this local dir + * @return The total disk space of the input local directory + */ + public static long getDU(File dir) { + long size = 0; + if (!dir.exists()) + return 0; + if (!dir.isDirectory()) { + return dir.length(); + } else { + File[] allFiles = dir.listFiles(); + if(allFiles != null) { + for (int i = 0; i < allFiles.length; i++) { + boolean isSymLink; + try { + isSymLink = org.apache.commons.io.FileUtils.isSymlink(allFiles[i]); + } catch(IOException ioe) { + isSymLink = true; + } + if(!isSymLink) { + size += getDU(allFiles[i]); + } + } + } + return size; + } + } + + /** + * Given a stream input it will unzip the it in the unzip directory. + * passed as the second parameter + * @param inputStream The zip file as input + * @param toDir The unzip directory where to unzip the zip file. + * @throws IOException an exception occurred + */ + public static void unZip(InputStream inputStream, File toDir) + throws IOException { + try (ZipInputStream zip = new ZipInputStream(inputStream)) { + int numOfFailedLastModifiedSet = 0; + String targetDirPath = toDir.getCanonicalPath() + File.separator; + for(ZipEntry entry = zip.getNextEntry(); + entry != null; + entry = zip.getNextEntry()) { + if (!entry.isDirectory()) { + File file = new File(toDir, entry.getName()); + if (!file.getCanonicalPath().startsWith(targetDirPath)) { + throw new IOException("expanding " + entry.getName() + + " would create file outside of " + toDir); + } + File parent = file.getParentFile(); + if (!parent.mkdirs() && + !parent.isDirectory()) { + throw new IOException("Mkdirs failed to create " + + parent.getAbsolutePath()); + } + try (OutputStream out = new FileOutputStream(file)) { + IOUtils.copyBytes(zip, out, BUFFER_SIZE); + } + if (!file.setLastModified(entry.getTime())) { + numOfFailedLastModifiedSet++; + } + } + } + if (numOfFailedLastModifiedSet > 0) { + LOG.warn("Could not set last modfied time for {} file(s)", + numOfFailedLastModifiedSet); + } + } + } + + /** + * Given a File input it will unzip it in the unzip directory. + * passed as the second parameter + * @param inFile The zip file as input + * @param unzipDir The unzip directory where to unzip the zip file. 
+ * @throws IOException An I/O exception has occurred + */ + public static void unZip(File inFile, File unzipDir) throws IOException { + Enumeration entries; + ZipFile zipFile = new ZipFile(inFile); + + try { + entries = zipFile.entries(); + String targetDirPath = unzipDir.getCanonicalPath() + File.separator; + while (entries.hasMoreElements()) { + ZipEntry entry = entries.nextElement(); + if (!entry.isDirectory()) { + InputStream in = zipFile.getInputStream(entry); + try { + File file = new File(unzipDir, entry.getName()); + if (!file.getCanonicalPath().startsWith(targetDirPath)) { + throw new IOException("expanding " + entry.getName() + + " would create file outside of " + unzipDir); + } + if (!file.getParentFile().mkdirs()) { + if (!file.getParentFile().isDirectory()) { + throw new IOException("Mkdirs failed to create " + + file.getParentFile().toString()); + } + } + OutputStream out = new FileOutputStream(file); + try { + byte[] buffer = new byte[8192]; + int i; + while ((i = in.read(buffer)) != -1) { + out.write(buffer, 0, i); + } + } finally { + out.close(); + } + } finally { + in.close(); + } + } + } + } finally { + zipFile.close(); + } + } + + /** + * Run a command and send the contents of an input stream to it. + * @param inputStream Input stream to forward to the shell command + * @param command shell command to run + * @throws IOException read or write failed + * @throws InterruptedException command interrupted + * @throws ExecutionException task submit failed + */ + private static void runCommandOnStream( + InputStream inputStream, String command) + throws IOException, InterruptedException, ExecutionException { + ExecutorService executor = null; + ProcessBuilder builder = new ProcessBuilder(); + builder.command( + Shell.WINDOWS ? "cmd" : "bash", + Shell.WINDOWS ? "/c" : "-c", + command); + Process process = builder.start(); + int exitCode; + try { + // Consume stdout and stderr, to avoid blocking the command + executor = Executors.newFixedThreadPool(2); + Future output = executor.submit(() -> { + try { + // Read until the output stream receives an EOF and closed. + if (LOG.isDebugEnabled()) { + // Log directly to avoid out of memory errors + try (BufferedReader reader = + new BufferedReader( + new InputStreamReader(process.getInputStream(), + Charset.forName("UTF-8")))) { + String line; + while((line = reader.readLine()) != null) { + LOG.debug(line); + } + } + } else { + org.apache.commons.io.IOUtils.copy( + process.getInputStream(), + new IOUtils.NullOutputStream()); + } + } catch (IOException e) { + LOG.debug(e.getMessage()); + } + }); + Future error = executor.submit(() -> { + try { + // Read until the error stream receives an EOF and closed. 
+ if (LOG.isDebugEnabled()) { + // Log directly to avoid out of memory errors + try (BufferedReader reader = + new BufferedReader( + new InputStreamReader(process.getErrorStream(), + Charset.forName("UTF-8")))) { + String line; + while((line = reader.readLine()) != null) { + LOG.debug(line); + } + } + } else { + org.apache.commons.io.IOUtils.copy( + process.getErrorStream(), + new IOUtils.NullOutputStream()); + } + } catch (IOException e) { + LOG.debug(e.getMessage()); + } + }); + + // Pass the input stream to the command to process + try { + org.apache.commons.io.IOUtils.copy( + inputStream, process.getOutputStream()); + } finally { + process.getOutputStream().close(); + } + + // Wait for both stdout and stderr futures to finish + error.get(); + output.get(); + } finally { + // Clean up the threads + if (executor != null) { + executor.shutdown(); + } + // Wait to avoid leaking the child process + exitCode = process.waitFor(); + } + + if (exitCode != 0) { + throw new IOException( + String.format( + "Error executing command. %s " + + "Process exited with exit code %d.", + command, exitCode)); + } + } + + /** + * Given a Tar File as input it will untar the file in a the untar directory + * passed as the second parameter + * + * This utility will untar ".tar" files and ".tar.gz","tgz" files. + * + * @param inputStream The tar file as input. + * @param untarDir The untar directory where to untar the tar file. + * @param gzipped The input stream is gzipped + * TODO Use magic number and PusbackInputStream to identify + * @throws IOException an exception occurred + * @throws InterruptedException command interrupted + * @throws ExecutionException task submit failed + */ + public static void unTar(InputStream inputStream, File untarDir, + boolean gzipped) + throws IOException, InterruptedException, ExecutionException { + if (!untarDir.mkdirs()) { + if (!untarDir.isDirectory()) { + throw new IOException("Mkdirs failed to create " + untarDir); + } + } + + if(Shell.WINDOWS) { + // Tar is not native to Windows. Use simple Java based implementation for + // tests and simple tar archives + unTarUsingJava(inputStream, untarDir, gzipped); + } else { + // spawn tar utility to untar archive for full fledged unix behavior such + // as resolving symlinks in tar archives + unTarUsingTar(inputStream, untarDir, gzipped); + } + } + + /** + * Given a Tar File as input it will untar the file in a the untar directory + * passed as the second parameter + * + * This utility will untar ".tar" files and ".tar.gz","tgz" files. + * + * @param inFile The tar file as input. + * @param untarDir The untar directory where to untar the tar file. + * @throws IOException exception untarring + */ + public static void unTar(File inFile, File untarDir) throws IOException { + if (!untarDir.mkdirs()) { + if (!untarDir.isDirectory()) { + throw new IOException("Mkdirs failed to create " + untarDir); + } + } + + boolean gzipped = inFile.toString().endsWith("gz"); + if(Shell.WINDOWS) { + // Tar is not native to Windows. 
Use simple Java based implementation for + // tests and simple tar archives + unTarUsingJava(inFile, untarDir, gzipped); + } + else { + // spawn tar utility to untar archive for full fledged unix behavior such + // as resolving symlinks in tar archives + unTarUsingTar(inFile, untarDir, gzipped); + } + } + + private static void unTarUsingTar(InputStream inputStream, File untarDir, + boolean gzipped) + throws IOException, InterruptedException, ExecutionException { + StringBuilder untarCommand = new StringBuilder(); + if (gzipped) { + untarCommand.append("gzip -dc | ("); + } + untarCommand.append("cd '"); + untarCommand.append(FileUtil.makeSecureShellPath(untarDir)); + untarCommand.append("' && "); + untarCommand.append("tar -x "); + + if (gzipped) { + untarCommand.append(")"); + } + runCommandOnStream(inputStream, untarCommand.toString()); + } + + private static void unTarUsingTar(File inFile, File untarDir, + boolean gzipped) throws IOException { + StringBuffer untarCommand = new StringBuffer(); + if (gzipped) { + untarCommand.append(" gzip -dc '"); + untarCommand.append(FileUtil.makeSecureShellPath(inFile)); + untarCommand.append("' | ("); + } + untarCommand.append("cd '"); + untarCommand.append(FileUtil.makeSecureShellPath(untarDir)); + untarCommand.append("' && "); + untarCommand.append("tar -xf "); + + if (gzipped) { + untarCommand.append(" -)"); + } else { + untarCommand.append(FileUtil.makeSecureShellPath(inFile)); + } + String[] shellCmd = { "bash", "-c", untarCommand.toString() }; + ShellCommandExecutor shexec = new ShellCommandExecutor(shellCmd); + shexec.execute(); + int exitcode = shexec.getExitCode(); + if (exitcode != 0) { + throw new IOException("Error untarring file " + inFile + + ". Tar process exited with exit code " + exitcode); + } + } + + static void unTarUsingJava(File inFile, File untarDir, + boolean gzipped) throws IOException { + InputStream inputStream = null; + TarArchiveInputStream tis = null; + try { + if (gzipped) { + inputStream = new BufferedInputStream(new GZIPInputStream( + new FileInputStream(inFile))); + } else { + inputStream = new BufferedInputStream(new FileInputStream(inFile)); + } + + tis = new TarArchiveInputStream(inputStream); + + for (TarArchiveEntry entry = tis.getNextTarEntry(); entry != null;) { + unpackEntries(tis, entry, untarDir); + entry = tis.getNextTarEntry(); + } + } finally { + IOUtils.cleanupWithLogger(LOG, tis, inputStream); + } + } + + private static void unTarUsingJava(InputStream inputStream, File untarDir, + boolean gzipped) throws IOException { + TarArchiveInputStream tis = null; + try { + if (gzipped) { + inputStream = new BufferedInputStream(new GZIPInputStream( + inputStream)); + } else { + inputStream = + new BufferedInputStream(inputStream); + } + + tis = new TarArchiveInputStream(inputStream); + + for (TarArchiveEntry entry = tis.getNextTarEntry(); entry != null;) { + unpackEntries(tis, entry, untarDir); + entry = tis.getNextTarEntry(); + } + } finally { + IOUtils.cleanupWithLogger(LOG, tis, inputStream); + } + } + + private static void unpackEntries(TarArchiveInputStream tis, + TarArchiveEntry entry, File outputDir) throws IOException { + String targetDirPath = outputDir.getCanonicalPath() + File.separator; + File outputFile = new File(outputDir, entry.getName()); + if (!outputFile.getCanonicalPath().startsWith(targetDirPath)) { + throw new IOException("expanding " + entry.getName() + + " would create entry outside of " + outputDir); + } + + if (entry.isDirectory()) { + File subDir = new File(outputDir, entry.getName()); + 
if (!subDir.mkdirs() && !subDir.isDirectory()) { + throw new IOException("Mkdirs failed to create tar internal dir " + + outputDir); + } + + for (TarArchiveEntry e : entry.getDirectoryEntries()) { + unpackEntries(tis, e, subDir); + } + + return; + } + + if (entry.isSymbolicLink()) { + // Create symbolic link relative to tar parent dir + Files.createSymbolicLink(FileSystems.getDefault() + .getPath(outputDir.getPath(), entry.getName()), + FileSystems.getDefault().getPath(entry.getLinkName())); + return; + } + + if (!outputFile.getParentFile().exists()) { + if (!outputFile.getParentFile().mkdirs()) { + throw new IOException("Mkdirs failed to create tar internal dir " + + outputDir); + } + } + + if (entry.isLink()) { + File src = new File(outputDir, entry.getLinkName()); + HardLink.createHardLink(src, outputFile); + return; + } + + int count; + byte data[] = new byte[2048]; + try (BufferedOutputStream outputStream = new BufferedOutputStream( + new FileOutputStream(outputFile));) { + + while ((count = tis.read(data)) != -1) { + outputStream.write(data, 0, count); + } + + outputStream.flush(); + } + } + + /** + * Class for creating hardlinks. + * Supports Unix, WindXP. + * @deprecated Use {@link org.apache.hadoop.fs.HardLink} + */ + @Deprecated + public static class HardLink extends org.apache.hadoop.fs.HardLink { + // This is a stub to assist with coordinated change between + // COMMON and HDFS projects. It will be removed after the + // corresponding change is committed to HDFS. + } + + /** + * Create a soft link between a src and destination + * only on a local disk. HDFS does not support this. + * On Windows, when symlink creation fails due to security + * setting, we will log a warning. The return code in this + * case is 2. + * + * @param target the target for symlink + * @param linkname the symlink + * @return 0 on success + */ + public static int symLink(String target, String linkname) throws IOException{ + + if (target == null || linkname == null) { + LOG.warn("Can not create a symLink with a target = " + target + + " and link =" + linkname); + return 1; + } + + // Run the input paths through Java's File so that they are converted to the + // native OS form + File targetFile = new File( + Path.getPathWithoutSchemeAndAuthority(new Path(target)).toString()); + File linkFile = new File( + Path.getPathWithoutSchemeAndAuthority(new Path(linkname)).toString()); + + String[] cmd = Shell.getSymlinkCommand( + targetFile.toString(), + linkFile.toString()); + + ShellCommandExecutor shExec; + try { + if (Shell.WINDOWS && + linkFile.getParentFile() != null && + !new Path(target).isAbsolute()) { + // Relative links on Windows must be resolvable at the time of + // creation. To ensure this we run the shell command in the directory + // of the link. + // + shExec = new ShellCommandExecutor(cmd, linkFile.getParentFile()); + } else { + shExec = new ShellCommandExecutor(cmd); + } + shExec.execute(); + } catch (Shell.ExitCodeException ec) { + int returnVal = ec.getExitCode(); + if (Shell.WINDOWS && returnVal == SYMLINK_NO_PRIVILEGE) { + LOG.warn("Fail to create symbolic links on Windows. " + + "The default security settings in Windows disallow non-elevated " + + "administrators and all non-administrators from creating symbolic links. 
" + + "This behavior can be changed in the Local Security Policy management console"); + } else if (returnVal != 0) { + LOG.warn("Command '" + StringUtils.join(" ", cmd) + "' failed " + + returnVal + " with: " + ec.getMessage()); + } + return returnVal; + } catch (IOException e) { + if (LOG.isDebugEnabled()) { + LOG.debug("Error while create symlink " + linkname + " to " + target + + "." + " Exception: " + StringUtils.stringifyException(e)); + } + throw e; + } + return shExec.getExitCode(); + } + + /** + * Change the permissions on a filename. + * @param filename the name of the file to change + * @param perm the permission string + * @return the exit code from the command + * @throws IOException exception on chmod + */ + public static int chmod(String filename, String perm + ) throws IOException { + return chmod(filename, perm, false); + } + + /** + * Change the permissions on a file / directory, recursively, if + * needed. + * @param filename name of the file whose permissions are to change + * @param perm permission string + * @param recursive true, if permissions should be changed recursively + * @return the exit code from the command. + * @throws IOException exception on chmod + */ + public static int chmod(String filename, String perm, boolean recursive) + throws IOException { + String [] cmd = Shell.getSetPermissionCommand(perm, recursive); + String[] args = new String[cmd.length + 1]; + System.arraycopy(cmd, 0, args, 0, cmd.length); + args[cmd.length] = new File(filename).getPath(); + ShellCommandExecutor shExec = new ShellCommandExecutor(args); + try { + shExec.execute(); + }catch(IOException e) { + if(LOG.isDebugEnabled()) { + LOG.debug("Error while changing permission : " + filename + +" Exception: " + StringUtils.stringifyException(e)); + } + } + return shExec.getExitCode(); + } + + /** + * Set the ownership on a file / directory. User name and group name + * cannot both be null. + * @param file the file to change + * @param username the new user owner name + * @param groupname the new group owner name + * @throws IOException exception on setOwner + */ + public static void setOwner(File file, String username, + String groupname) throws IOException { + if (username == null && groupname == null) { + throw new IOException("username == null && groupname == null"); + } + String arg = (username == null ? "" : username) + + (groupname == null ? "" : ":" + groupname); + String [] cmd = Shell.getSetOwnerCommand(arg); + execCommand(file, cmd); + } + + /** + * Platform independent implementation for {@link File#setReadable(boolean)} + * File#setReadable does not work as expected on Windows. + * @param f input file + * @param readable set to readable or not + * @return true on success, false otherwise + */ + public static boolean setReadable(File f, boolean readable) { + if (Shell.WINDOWS) { + try { + String permission = readable ? "u+r" : "u-r"; + FileUtil.chmod(f.getCanonicalPath(), permission, false); + return true; + } catch (IOException ex) { + return false; + } + } else { + return f.setReadable(readable); + } + } + + /** + * Platform independent implementation for {@link File#setWritable(boolean)} + * File#setWritable does not work as expected on Windows. + * @param f input file + * @param writable set to writable or not + * @return true on success, false otherwise + */ + public static boolean setWritable(File f, boolean writable) { + if (Shell.WINDOWS) { + try { + String permission = writable ? 
"u+w" : "u-w"; + FileUtil.chmod(f.getCanonicalPath(), permission, false); + return true; + } catch (IOException ex) { + return false; + } + } else { + return f.setWritable(writable); + } + } + + /** + * Platform independent implementation for {@link File#setExecutable(boolean)} + * File#setExecutable does not work as expected on Windows. + * Note: revoking execute permission on folders does not have the same + * behavior on Windows as on Unix platforms. Creating, deleting or renaming + * a file within that folder will still succeed on Windows. + * @param f input file + * @param executable set to executable or not + * @return true on success, false otherwise + */ + public static boolean setExecutable(File f, boolean executable) { + if (Shell.WINDOWS) { + try { + String permission = executable ? "u+x" : "u-x"; + FileUtil.chmod(f.getCanonicalPath(), permission, false); + return true; + } catch (IOException ex) { + return false; + } + } else { + return f.setExecutable(executable); + } + } + + /** + * Platform independent implementation for {@link File#canRead()} + * @param f input file + * @return On Unix, same as {@link File#canRead()} + * On Windows, true if process has read access on the path + */ + public static boolean canRead(File f) { + if (Shell.WINDOWS) { + try { + return NativeIO.Windows.access(f.getCanonicalPath(), + NativeIO.Windows.AccessRight.ACCESS_READ); + } catch (IOException e) { + return false; + } + } else { + return f.canRead(); + } + } + + /** + * Platform independent implementation for {@link File#canWrite()} + * @param f input file + * @return On Unix, same as {@link File#canWrite()} + * On Windows, true if process has write access on the path + */ + public static boolean canWrite(File f) { + if (Shell.WINDOWS) { + try { + return NativeIO.Windows.access(f.getCanonicalPath(), + NativeIO.Windows.AccessRight.ACCESS_WRITE); + } catch (IOException e) { + return false; + } + } else { + return f.canWrite(); + } + } + + /** + * Platform independent implementation for {@link File#canExecute()} + * @param f input file + * @return On Unix, same as {@link File#canExecute()} + * On Windows, true if process has execute access on the path + */ + public static boolean canExecute(File f) { + if (Shell.WINDOWS) { + try { + return NativeIO.Windows.access(f.getCanonicalPath(), + NativeIO.Windows.AccessRight.ACCESS_EXECUTE); + } catch (IOException e) { + return false; + } + } else { + try { + return f.canExecute(); + } catch (SecurityException e) { + return false; + } + } + } + + /** + * Set permissions to the required value. Uses the java primitives instead + * of forking if group == other. 
+ * @param f the file to change + * @param permission the new permissions + * @throws IOException exception on setPermission + */ + public static void setPermission(File f, FsPermission permission + ) throws IOException { + FsAction user = permission.getUserAction(); + FsAction group = permission.getGroupAction(); + FsAction other = permission.getOtherAction(); + + // use the native/fork if the group/other permissions are different + // or if the native is available or on Windows + if (group != other || NativeIO.isAvailable() || Shell.WINDOWS) { + execSetPermission(f, permission); + return; + } + + boolean rv = true; + + // read perms + rv = f.setReadable(group.implies(FsAction.READ), false); + checkReturnValue(rv, f, permission); + if (group.implies(FsAction.READ) != user.implies(FsAction.READ)) { + rv = f.setReadable(user.implies(FsAction.READ), true); + checkReturnValue(rv, f, permission); + } + + // write perms + rv = f.setWritable(group.implies(FsAction.WRITE), false); + checkReturnValue(rv, f, permission); + if (group.implies(FsAction.WRITE) != user.implies(FsAction.WRITE)) { + rv = f.setWritable(user.implies(FsAction.WRITE), true); + checkReturnValue(rv, f, permission); + } + + // exec perms + rv = f.setExecutable(group.implies(FsAction.EXECUTE), false); + checkReturnValue(rv, f, permission); + if (group.implies(FsAction.EXECUTE) != user.implies(FsAction.EXECUTE)) { + rv = f.setExecutable(user.implies(FsAction.EXECUTE), true); + checkReturnValue(rv, f, permission); + } + } + + private static void checkReturnValue(boolean rv, File p, + FsPermission permission + ) throws IOException { + if (!rv) { + throw new IOException("Failed to set permissions of path: " + p + + " to " + + String.format("%04o", permission.toShort())); + } + } + + private static void execSetPermission(File f, + FsPermission permission + ) throws IOException { + if (NativeIO.isAvailable()) { + NativeIO.POSIX.chmod(f.getCanonicalPath(), permission.toShort()); + } else { + execCommand(f, Shell.getSetPermissionCommand( + String.format("%04o", permission.toShort()), false)); + } + } + + static String execCommand(File f, String... cmd) throws IOException { + String[] args = new String[cmd.length + 1]; + System.arraycopy(cmd, 0, args, 0, cmd.length); + args[cmd.length] = f.getCanonicalPath(); + return Shell.execCommand(args); + } + + /** + * Create a tmp file for a base file. + * @param basefile the base file of the tmp + * @param prefix file name prefix of tmp + * @param isDeleteOnExit if true, the tmp will be deleted when the VM exits + * @return a newly created tmp file + * @exception IOException If a tmp file cannot created + * @see java.io.File#createTempFile(String, String, File) + * @see java.io.File#deleteOnExit() + */ + public static File createLocalTempFile(final File basefile, + final String prefix, + final boolean isDeleteOnExit) + throws IOException { + File tmp = File.createTempFile(prefix + basefile.getName(), + "", basefile.getParentFile()); + if (isDeleteOnExit) { + tmp.deleteOnExit(); + } + return tmp; + } + + /** + * Move the src file to the name specified by target. + * @param src the source file + * @param target the target file + * @exception IOException If this operation fails + */ + public static void replaceFile(File src, File target) throws IOException { + /* renameTo() has two limitations on Windows platform. + * src.renameTo(target) fails if + * 1) If target already exists OR + * 2) If target is already open for reading/writing. 
+ */ + if (!src.renameTo(target)) { + int retries = 5; + while (target.exists() && !target.delete() && retries-- >= 0) { + try { + Thread.sleep(1000); + } catch (InterruptedException e) { + throw new IOException("replaceFile interrupted."); + } + } + if (!src.renameTo(target)) { + throw new IOException("Unable to rename " + src + + " to " + target); + } + } + } + + /** + * A wrapper for {@link File#listFiles()}. This java.io API returns null + * when a dir is not a directory or for any I/O error. Instead of having + * null check everywhere File#listFiles() is used, we will add utility API + * to get around this problem. For the majority of cases where we prefer + * an IOException to be thrown. + * @param dir directory for which listing should be performed + * @return list of files or empty list + * @exception IOException for invalid directory or for a bad disk. + */ + public static File[] listFiles(File dir) throws IOException { + File[] files = dir.listFiles(); + if(files == null) { + throw new IOException("Invalid directory or I/O error occurred for dir: " + + dir.toString()); + } + return files; + } + + /** + * A wrapper for {@link File#list()}. This java.io API returns null + * when a dir is not a directory or for any I/O error. Instead of having + * null check everywhere File#list() is used, we will add utility API + * to get around this problem. For the majority of cases where we prefer + * an IOException to be thrown. + * @param dir directory for which listing should be performed + * @return list of file names or empty string list + * @exception AccessDeniedException for unreadable directory + * @exception IOException for invalid directory or for bad disk + */ + public static String[] list(File dir) throws IOException { + if (!canRead(dir)) { + throw new AccessDeniedException(dir.toString(), null, + FSExceptionMessages.PERMISSION_DENIED); + } + String[] fileNames = dir.list(); + if(fileNames == null) { + throw new IOException("Invalid directory or I/O error occurred for dir: " + + dir.toString()); + } + return fileNames; + } + + public static String[] createJarWithClassPath(String inputClassPath, Path pwd, + Map callerEnv) throws IOException { + return createJarWithClassPath(inputClassPath, pwd, pwd, callerEnv); + } + + /** + * Create a jar file at the given path, containing a manifest with a classpath + * that references all specified entries. + * + * Some platforms may have an upper limit on command line length. For example, + * the maximum command line length on Windows is 8191 characters, but the + * length of the classpath may exceed this. To work around this limitation, + * use this method to create a small intermediate jar with a manifest that + * contains the full classpath. It returns the absolute path to the new jar, + * which the caller may set as the classpath for a new process. + * + * Environment variable evaluation is not supported within a jar manifest, so + * this method expands environment variables before inserting classpath entries + * to the manifest. The method parses environment variables according to + * platform-specific syntax (%VAR% on Windows, or $VAR otherwise). On Windows, + * environment variables are case-insensitive. For example, %VAR% and %var% + * evaluate to the same value. + * + * Specifying the classpath in a jar manifest does not support wildcards, so + * this method expands wildcards internally. Any classpath entry that ends + * with * is translated to all files at that path with extension .jar or .JAR. 
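+   * <p>Illustrative call (hypothetical paths), using the shorter overload defined
+   * above: bundle a long wildcard classpath into a manifest-only jar and hand the
+   * jar to a child process.
+   * <pre>
+   *   String cp = "/opt/app/lib/*" + File.pathSeparator + "/opt/app/etc/";
+   *   String[] jarCp = FileUtil.createJarWithClassPath(
+   *       cp, new Path("/tmp/work"), System.getenv());
+   *   String childClassPath = jarCp[0];   // jar whose manifest carries the expanded classpath
+   * </pre>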
+ * + * @param inputClassPath String input classpath to bundle into the jar manifest + * @param pwd Path to working directory to save jar + * @param targetDir path to where the jar execution will have its working dir + * @param callerEnv Map<String, String> caller's environment variables to use + * for expansion + * @return String[] with absolute path to new jar in position 0 and + * unexpanded wild card entry path in position 1 + * @throws IOException if there is an I/O error while writing the jar file + */ + public static String[] createJarWithClassPath(String inputClassPath, Path pwd, + Path targetDir, + Map callerEnv) throws IOException { + // Replace environment variables, case-insensitive on Windows + @SuppressWarnings("unchecked") + Map env = Shell.WINDOWS ? new CaseInsensitiveMap(callerEnv) : + callerEnv; + String[] classPathEntries = inputClassPath.split(File.pathSeparator); + for (int i = 0; i < classPathEntries.length; ++i) { + classPathEntries[i] = StringUtils.replaceTokens(classPathEntries[i], + StringUtils.ENV_VAR_PATTERN, env); + } + File workingDir = new File(pwd.toString()); + if (!workingDir.mkdirs()) { + // If mkdirs returns false because the working directory already exists, + // then this is acceptable. If it returns false due to some other I/O + // error, then this method will fail later with an IOException while saving + // the jar. + LOG.debug("mkdirs false for " + workingDir + ", execution will continue"); + } + + StringBuilder unexpandedWildcardClasspath = new StringBuilder(); + // Append all entries + List classPathEntryList = new ArrayList( + classPathEntries.length); + for (String classPathEntry: classPathEntries) { + if (classPathEntry.length() == 0) { + continue; + } + if (classPathEntry.endsWith("*")) { + // Append all jars that match the wildcard + List jars = getJarsInDirectory(classPathEntry); + if (!jars.isEmpty()) { + for (Path jar: jars) { + classPathEntryList.add(jar.toUri().toURL().toExternalForm()); + } + } else { + unexpandedWildcardClasspath.append(File.pathSeparator); + unexpandedWildcardClasspath.append(classPathEntry); + } + } else { + // Append just this entry + File fileCpEntry = null; + if(!new Path(classPathEntry).isAbsolute()) { + fileCpEntry = new File(targetDir.toString(), classPathEntry); + } + else { + fileCpEntry = new File(classPathEntry); + } + String classPathEntryUrl = fileCpEntry.toURI().toURL() + .toExternalForm(); + + // File.toURI only appends trailing '/' if it can determine that it is a + // directory that already exists. (See JavaDocs.) If this entry had a + // trailing '/' specified by the caller, then guarantee that the + // classpath entry in the manifest has a trailing '/', and thus refers to + // a directory instead of a file. This can happen if the caller is + // creating a classpath jar referencing a directory that hasn't been + // created yet, but will definitely be created before running. 
+ if (classPathEntry.endsWith(Path.SEPARATOR) && + !classPathEntryUrl.endsWith(Path.SEPARATOR)) { + classPathEntryUrl = classPathEntryUrl + Path.SEPARATOR; + } + classPathEntryList.add(classPathEntryUrl); + } + } + String jarClassPath = StringUtils.join(" ", classPathEntryList); + + // Create the manifest + Manifest jarManifest = new Manifest(); + jarManifest.getMainAttributes().putValue( + Attributes.Name.MANIFEST_VERSION.toString(), "1.0"); + jarManifest.getMainAttributes().putValue( + Attributes.Name.CLASS_PATH.toString(), jarClassPath); + + // Write the manifest to output JAR file + File classPathJar = File.createTempFile("classpath-", ".jar", workingDir); + try (FileOutputStream fos = new FileOutputStream(classPathJar); + BufferedOutputStream bos = new BufferedOutputStream(fos)) { + JarOutputStream jos = new JarOutputStream(bos, jarManifest); + jos.close(); + } + String[] jarCp = {classPathJar.getCanonicalPath(), + unexpandedWildcardClasspath.toString()}; + return jarCp; + } + + /** + * Returns all jars that are in the directory. It is useful in expanding a + * wildcard path to return all jars from the directory to use in a classpath. + * It operates only on local paths. + * + * @param path the path to the directory. The path may include the wildcard. + * @return the list of jars as URLs, or an empty list if there are no jars, or + * the directory does not exist locally + */ + public static List getJarsInDirectory(String path) { + return getJarsInDirectory(path, true); + } + + /** + * Returns all jars that are in the directory. It is useful in expanding a + * wildcard path to return all jars from the directory to use in a classpath. + * + * @param path the path to the directory. The path may include the wildcard. + * @return the list of jars as URLs, or an empty list if there are no jars, or + * the directory does not exist + */ + public static List getJarsInDirectory(String path, boolean useLocal) { + List paths = new ArrayList<>(); + try { + // add the wildcard if it is not provided + if (!path.endsWith("*")) { + path += File.separator + "*"; + } + Path globPath = new Path(path).suffix("{.jar,.JAR}"); + FileContext context = useLocal ? + FileContext.getLocalFSFileContext() : + FileContext.getFileContext(globPath.toUri()); + FileStatus[] files = context.util().globStatus(globPath); + if (files != null) { + for (FileStatus file: files) { + paths.add(file.getPath()); + } + } + } catch (IOException ignore) {} // return the empty list + return paths; + } + + public static boolean compareFs(FileSystem srcFs, FileSystem destFs) { + if (srcFs==null || destFs==null) { + return false; + } + URI srcUri = srcFs.getUri(); + URI dstUri = destFs.getUri(); + if (srcUri.getScheme()==null) { + return false; + } + if (!srcUri.getScheme().equals(dstUri.getScheme())) { + return false; + } + String srcHost = srcUri.getHost(); + String dstHost = dstUri.getHost(); + if ((srcHost!=null) && (dstHost!=null)) { + if (srcHost.equals(dstHost)) { + return srcUri.getPort()==dstUri.getPort(); + } + try { + srcHost = InetAddress.getByName(srcHost).getCanonicalHostName(); + dstHost = InetAddress.getByName(dstHost).getCanonicalHostName(); + } catch (UnknownHostException ue) { + if (LOG.isDebugEnabled()) { + LOG.debug("Could not compare file-systems. 
Unknown host: ", ue); + } + return false; + } + if (!srcHost.equals(dstHost)) { + return false; + } + } else if (srcHost==null && dstHost!=null) { + return false; + } else if (srcHost!=null) { + return false; + } + // check for ports + return srcUri.getPort()==dstUri.getPort(); + } +} diff --git a/solr/core/src/test/org/apache/hadoop/fs/HardLink.java b/solr/core/src/test/org/apache/hadoop/fs/HardLink.java new file mode 100644 index 00000000000..f3a173e9336 --- /dev/null +++ b/solr/core/src/test/org/apache/hadoop/fs/HardLink.java @@ -0,0 +1,182 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs; + +import java.io.File; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.nio.file.Files; + +import static java.nio.file.Files.createLink; + +/** + * Class for creating hardlinks. + * Supports Unix/Linux, Windows via winutils , and Mac OS X. + * + * The HardLink class was formerly a static inner class of FSUtil, + * and the methods provided were blatantly non-thread-safe. + * To enable volume-parallel Update snapshots, we now provide static + * threadsafe methods that allocate new buffer string arrays + * upon each call. We also provide an API to hardlink all files in a + * directory with a single command, which is up to 128 times more + * efficient - and minimizes the impact of the extra buffer creations. + */ +public class HardLink { + + public final LinkStats linkStats; //not static + + public HardLink() { + linkStats = new LinkStats(); + } + + /** + * This abstract class bridges the OS-dependent implementations of the + * needed functionality for querying link counts. + * The particular implementation class is chosen during + * static initialization phase of the HardLink class. + * The "getter" methods construct shell command strings. + */ + private static abstract class HardLinkCommandGetter { + /** + * Get the command string to query the hardlink count of a file + */ + abstract String[] linkCount(File file) throws IOException; + } + + /* + * **************************************************** + * Complexity is above. 
User-visible functionality is below + * **************************************************** + */ + + /** + * Creates a hardlink + * @param file - existing source file + * @param linkName - desired target link file + */ + public static void createHardLink(File file, File linkName) + throws IOException { + if (file == null) { + throw new IOException( + "invalid arguments to createHardLink: source file is null"); + } + if (linkName == null) { + throw new IOException( + "invalid arguments to createHardLink: link name is null"); + } + createLink(linkName.toPath(), file.toPath()); + } + + /** + * Creates hardlinks from multiple existing files within one parent + * directory, into one target directory. + * @param parentDir - directory containing source files + * @param fileBaseNames - list of path-less file names, as returned by + * parentDir.list() + * @param linkDir - where the hardlinks should be put. It must already exist. + */ + public static void createHardLinkMult(File parentDir, String[] fileBaseNames, + File linkDir) throws IOException { + if (parentDir == null) { + throw new IOException( + "invalid arguments to createHardLinkMult: parent directory is null"); + } + if (linkDir == null) { + throw new IOException( + "invalid arguments to createHardLinkMult: link directory is null"); + } + if (fileBaseNames == null) { + throw new IOException( + "invalid arguments to createHardLinkMult: " + + "filename list can be empty but not null"); + } + if (!linkDir.exists()) { + throw new FileNotFoundException(linkDir + " not found."); + } + for (String name : fileBaseNames) { + createLink(linkDir.toPath().resolve(name), + parentDir.toPath().resolve(name)); + } + } + + /** + * Retrieves the number of links to the specified file. + */ + public static int getLinkCount(File fileName) throws IOException { + if (fileName == null) { + throw new IOException( + "invalid argument to getLinkCount: file name is null"); + } + if (!fileName.exists()) { + throw new FileNotFoundException(fileName + " not found."); + } + + return (Integer)Files.getAttribute(fileName.toPath(), "unix:nlink"); + } + + /* Create an IOException for failing to get link count. */ + private static IOException createIOException(File f, String message, + String error, int exitvalue, Exception cause) { + + final String s = "Failed to get link count on file " + f + + ": message=" + message + + "; error=" + error + + "; exit value=" + exitvalue; + return (cause == null) ? new IOException(s) : new IOException(s, cause); + } + + + /** + * HardLink statistics counters and methods. + * Not multi-thread safe, obviously. + * Init is called during HardLink instantiation, above. + * + * These are intended for use by knowledgeable clients, not internally, + * because many of the internal methods are static and can't update these + * per-instance counters. 
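+   * (Illustrative sample of {@link #report()} output, added for clarity and not
+   * part of the upstream javadoc: "HardLinkStats: 2 Directories, including 1
+   * Empty Directories, 3 single Link operations, 1 multi-Link operations,
+   * linking 4 files, total 7 linkable files. Also physically copied 0 other
+   * files.")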
+ */ + public static class LinkStats { + public int countDirs = 0; + public int countSingleLinks = 0; + public int countMultLinks = 0; + public int countFilesMultLinks = 0; + public int countEmptyDirs = 0; + public int countPhysicalFileCopies = 0; + + public void clear() { + countDirs = 0; + countSingleLinks = 0; + countMultLinks = 0; + countFilesMultLinks = 0; + countEmptyDirs = 0; + countPhysicalFileCopies = 0; + } + + public String report() { + return "HardLinkStats: " + countDirs + " Directories, including " + + countEmptyDirs + " Empty Directories, " + + countSingleLinks + + " single Link operations, " + countMultLinks + + " multi-Link operations, linking " + countFilesMultLinks + + " files, total " + (countSingleLinks + countFilesMultLinks) + + " linkable files. Also physically copied " + + countPhysicalFileCopies + " other files."; + } + } +} diff --git a/solr/core/src/test/org/apache/hadoop/fs/RawLocalFileSystem.java b/solr/core/src/test/org/apache/hadoop/fs/RawLocalFileSystem.java new file mode 100644 index 00000000000..4ee69b9cdbc --- /dev/null +++ b/solr/core/src/test/org/apache/hadoop/fs/RawLocalFileSystem.java @@ -0,0 +1,1044 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs; + +import com.google.common.annotations.VisibleForTesting; + +import java.io.BufferedOutputStream; +import java.io.DataOutput; +import java.io.EOFException; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileNotFoundException; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.OutputStream; +import java.io.FileDescriptor; +import java.net.URI; +import java.nio.ByteBuffer; +import java.nio.file.Files; +import java.nio.file.LinkOption; +import java.nio.file.NoSuchFileException; +import java.nio.file.Paths; +import java.nio.file.attribute.BasicFileAttributes; +import java.nio.file.attribute.BasicFileAttributeView; +import java.nio.file.attribute.FileTime; +import java.nio.file.attribute.PosixFileAttributes; +import java.nio.file.attribute.PosixFilePermissions; +import java.util.Arrays; +import java.util.EnumSet; +import java.util.Optional; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.io.nativeio.NativeIO; +import org.apache.hadoop.util.Progressable; +import org.apache.hadoop.util.Shell; +import org.apache.hadoop.util.StringUtils; + +/**************************************************************** + * Implement the FileSystem API for the raw local filesystem. 
+ * + *****************************************************************/ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class RawLocalFileSystem extends FileSystem { + static final URI NAME = URI.create("file:///"); + private Path workingDir; + // Temporary workaround for HADOOP-9652. + private static boolean useDeprecatedFileStatus = true; + + @VisibleForTesting + public static void useStatIfAvailable() { + useDeprecatedFileStatus = !Stat.isAvailable(); + } + + public RawLocalFileSystem() { + workingDir = getInitialWorkingDirectory(); + } + + private Path makeAbsolute(Path f) { + if (f.isAbsolute()) { + return f; + } else { + return new Path(workingDir, f); + } + } + + /** Convert a path to a File. */ + public File pathToFile(Path path) { + checkPath(path); + if (!path.isAbsolute()) { + path = new Path(getWorkingDirectory(), path); + } + return new File(path.toUri().getPath()); + } + + @Override + public URI getUri() { return NAME; } + + @Override + public void initialize(URI uri, Configuration conf) throws IOException { + super.initialize(uri, conf); + setConf(conf); + } + + /******************************************************* + * For open()'s FSInputStream. + *******************************************************/ + class LocalFSFileInputStream extends FSInputStream implements HasFileDescriptor { + private FileInputStream fis; + private long position; + + public LocalFSFileInputStream(Path f) throws IOException { + fis = new FileInputStream(pathToFile(f)); + } + + @Override + public void seek(long pos) throws IOException { + if (pos < 0) { + throw new EOFException( + FSExceptionMessages.NEGATIVE_SEEK); + } + fis.getChannel().position(pos); + this.position = pos; + } + + @Override + public long getPos() throws IOException { + return this.position; + } + + @Override + public boolean seekToNewSource(long targetPos) throws IOException { + return false; + } + + /* + * Just forward to the fis + */ + @Override + public int available() throws IOException { return fis.available(); } + @Override + public void close() throws IOException { fis.close(); } + @Override + public boolean markSupported() { return false; } + + @Override + public int read() throws IOException { + try { + int value = fis.read(); + if (value >= 0) { + this.position++; + statistics.incrementBytesRead(1); + } + return value; + } catch (IOException e) { // unexpected exception + throw new FSError(e); // assume native fs error + } + } + + @Override + public int read(byte[] b, int off, int len) throws IOException { + // parameter check + validatePositionedReadArgs(position, b, off, len); + try { + int value = fis.read(b, off, len); + if (value > 0) { + this.position += value; + statistics.incrementBytesRead(value); + } + return value; + } catch (IOException e) { // unexpected exception + throw new FSError(e); // assume native fs error + } + } + + @Override + public int read(long position, byte[] b, int off, int len) + throws IOException { + // parameter check + validatePositionedReadArgs(position, b, off, len); + if (len == 0) { + return 0; + } + + ByteBuffer bb = ByteBuffer.wrap(b, off, len); + try { + int value = fis.getChannel().read(bb, position); + if (value > 0) { + statistics.incrementBytesRead(value); + } + return value; + } catch (IOException e) { + throw new FSError(e); + } + } + + @Override + public long skip(long n) throws IOException { + long value = fis.skip(n); + if (value > 0) { + this.position += value; + } + return value; + } + + @Override + public FileDescriptor getFileDescriptor() throws 
IOException { + return fis.getFD(); + } + } + + @Override + public FSDataInputStream open(Path f, int bufferSize) throws IOException { + getFileStatus(f); + return new FSDataInputStream(new BufferedFSInputStream( + new LocalFSFileInputStream(f), bufferSize)); + } + + @Override + public FSDataInputStream open(PathHandle fd, int bufferSize) + throws IOException { + if (!(fd instanceof LocalFileSystemPathHandle)) { + fd = new LocalFileSystemPathHandle(fd.bytes()); + } + LocalFileSystemPathHandle id = (LocalFileSystemPathHandle) fd; + id.verify(getFileStatus(new Path(id.getPath()))); + return new FSDataInputStream(new BufferedFSInputStream( + new LocalFSFileInputStream(new Path(id.getPath())), bufferSize)); + } + + /********************************************************* + * For create()'s FSOutputStream. + *********************************************************/ + class LocalFSFileOutputStream extends OutputStream { + private FileOutputStream fos; + + private LocalFSFileOutputStream(Path f, boolean append, + FsPermission permission) throws IOException { + File file = pathToFile(f); + if (!append && permission == null) { + permission = FsPermission.getFileDefault(); + } + if (permission == null) { + this.fos = new FileOutputStream(file, append); + } else { + permission = permission.applyUMask(FsPermission.getUMask(getConf())); + if (Shell.WINDOWS && NativeIO.isAvailable()) { + this.fos = NativeIO.Windows.createFileOutputStreamWithMode(file, + append, permission.toShort()); + } else { + this.fos = new FileOutputStream(file, append); + boolean success = false; + try { + setPermission(f, permission); + success = true; + } finally { + if (!success) { + IOUtils.cleanup(LOG, this.fos); + } + } + } + } + } + + /* + * Just forward to the fos + */ + @Override + public void close() throws IOException { fos.close(); } + @Override + public void flush() throws IOException { fos.flush(); } + @Override + public void write(byte[] b, int off, int len) throws IOException { + try { + fos.write(b, off, len); + } catch (IOException e) { // unexpected exception + throw new FSError(e); // assume native fs error + } + } + + @Override + public void write(int b) throws IOException { + try { + fos.write(b); + } catch (IOException e) { // unexpected exception + throw new FSError(e); // assume native fs error + } + } + } + + @Override + public FSDataOutputStream append(Path f, int bufferSize, + Progressable progress) throws IOException { + FileStatus status = getFileStatus(f); + if (status.isDirectory()) { + throw new IOException("Cannot append to a diretory (=" + f + " )"); + } + return new FSDataOutputStream(new BufferedOutputStream( + createOutputStreamWithMode(f, true, null), bufferSize), statistics, + status.getLen()); + } + + @Override + public FSDataOutputStream create(Path f, boolean overwrite, int bufferSize, + short replication, long blockSize, Progressable progress) + throws IOException { + return create(f, overwrite, true, bufferSize, replication, blockSize, + progress, null); + } + + private FSDataOutputStream create(Path f, boolean overwrite, + boolean createParent, int bufferSize, short replication, long blockSize, + Progressable progress, FsPermission permission) throws IOException { + if (exists(f) && !overwrite) { + throw new FileAlreadyExistsException("File already exists: " + f); + } + Path parent = f.getParent(); + if (parent != null && !mkdirs(parent)) { + throw new IOException("Mkdirs failed to create " + parent.toString()); + } + return new FSDataOutputStream(new BufferedOutputStream( + 
createOutputStreamWithMode(f, false, permission), bufferSize), + statistics); + } + + protected OutputStream createOutputStream(Path f, boolean append) + throws IOException { + return createOutputStreamWithMode(f, append, null); + } + + protected OutputStream createOutputStreamWithMode(Path f, boolean append, + FsPermission permission) throws IOException { + return new LocalFSFileOutputStream(f, append, permission); + } + + @Override + public FSDataOutputStream createNonRecursive(Path f, FsPermission permission, + EnumSet flags, int bufferSize, short replication, long blockSize, + Progressable progress) throws IOException { + if (exists(f) && !flags.contains(CreateFlag.OVERWRITE)) { + throw new FileAlreadyExistsException("File already exists: " + f); + } + return new FSDataOutputStream(new BufferedOutputStream( + createOutputStreamWithMode(f, false, permission), bufferSize), + statistics); + } + + @Override + public FSDataOutputStream create(Path f, FsPermission permission, + boolean overwrite, int bufferSize, short replication, long blockSize, + Progressable progress) throws IOException { + + FSDataOutputStream out = create(f, overwrite, true, bufferSize, replication, + blockSize, progress, permission); + return out; + } + + @Override + public FSDataOutputStream createNonRecursive(Path f, FsPermission permission, + boolean overwrite, + int bufferSize, short replication, long blockSize, + Progressable progress) throws IOException { + FSDataOutputStream out = create(f, overwrite, false, bufferSize, replication, + blockSize, progress, permission); + return out; + } + + @Override + public void concat(final Path trg, final Path [] psrcs) throws IOException { + final int bufferSize = 4096; + try(FSDataOutputStream out = create(trg)) { + for (Path src : psrcs) { + try(FSDataInputStream in = open(src)) { + IOUtils.copyBytes(in, out, bufferSize, false); + } + } + } + } + + @Override + public boolean rename(Path src, Path dst) throws IOException { + // Attempt rename using Java API. + File srcFile = pathToFile(src); + File dstFile = pathToFile(dst); + if (srcFile.renameTo(dstFile)) { + return true; + } + + // Else try POSIX style rename on Windows only + if (Shell.WINDOWS && + handleEmptyDstDirectoryOnWindows(src, srcFile, dst, dstFile)) { + return true; + } + + // The fallback behavior accomplishes the rename by a full copy. + if (LOG.isDebugEnabled()) { + LOG.debug("Falling through to a copy of " + src + " to " + dst); + } + return FileUtil.copy(this, src, this, dst, true, getConf()); + } + + @VisibleForTesting + public final boolean handleEmptyDstDirectoryOnWindows(Path src, File srcFile, + Path dst, File dstFile) throws IOException { + + // Enforce POSIX rename behavior that a source directory replaces an + // existing destination if the destination is an empty directory. On most + // platforms, this is already handled by the Java API call above. Some + // platforms (notably Windows) do not provide this behavior, so the Java API + // call renameTo(dstFile) fails. Delete destination and attempt rename + // again. 
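+    // Illustrative walk-through (comment added for clarity, not upstream code):
+    // with, say, src = /tmp/staging and an existing *empty* directory dst = /tmp/out,
+    // srcFile.renameTo(dstFile) can return false on Windows even though POSIX
+    // rename(2) would replace dst, so the block below deletes the empty dst and
+    // retries the rename; if that still fails, rename() falls back to FileUtil.copy.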
+ try { + FileStatus sdst = this.getFileStatus(dst); + String[] dstFileList = dstFile.list(); + if (dstFileList != null) { + if (sdst.isDirectory() && dstFileList.length == 0) { + if (LOG.isDebugEnabled()) { + LOG.debug("Deleting empty destination and renaming " + src + + " to " + dst); + } + if (this.delete(dst, false) && srcFile.renameTo(dstFile)) { + return true; + } + } + } + } catch (FileNotFoundException ignored) { + } + return false; + } + + @Override + public boolean truncate(Path f, final long newLength) throws IOException { + FileStatus status = getFileStatus(f); + if(status == null) { + throw new FileNotFoundException("File " + f + " not found"); + } + if(status.isDirectory()) { + throw new IOException("Cannot truncate a directory (=" + f + ")"); + } + long oldLength = status.getLen(); + if(newLength > oldLength) { + throw new IllegalArgumentException( + "Cannot truncate to a larger file size. Current size: " + oldLength + + ", truncate size: " + newLength + "."); + } + try (FileOutputStream out = new FileOutputStream(pathToFile(f), true)) { + try { + out.getChannel().truncate(newLength); + } catch(IOException e) { + throw new FSError(e); + } + } + return true; + } + + /** + * Delete the given path to a file or directory. + * @param p the path to delete + * @param recursive to delete sub-directories + * @return true if the file or directory and all its contents were deleted + * @throws IOException if p is non-empty and recursive is false + */ + @Override + public boolean delete(Path p, boolean recursive) throws IOException { + File f = pathToFile(p); + if (!f.exists()) { + //no path, return false "nothing to delete" + return false; + } + if (f.isFile()) { + return f.delete(); + } else if (!recursive && f.isDirectory() && + (FileUtil.listFiles(f).length != 0)) { + throw new IOException("Directory " + f.toString() + " is not empty"); + } + return FileUtil.fullyDelete(f); + } + + /** + * {@inheritDoc} + * + * (Note: Returned list is not sorted in any given order, + * due to reliance on Java's {@link File#list()} API.) + */ + @Override + public FileStatus[] listStatus(Path f) throws IOException { + File localf = pathToFile(f); + FileStatus[] results; + + if (!localf.exists()) { + throw new FileNotFoundException("File " + f + " does not exist"); + } + + if (localf.isDirectory()) { + String[] names = FileUtil.list(localf); + results = new FileStatus[names.length]; + int j = 0; + for (int i = 0; i < names.length; i++) { + try { + // Assemble the path using the Path 3 arg constructor to make sure + // paths with colon are properly resolved on Linux + results[j] = getFileStatus(new Path(f, new Path(null, null, + names[i]))); + j++; + } catch (FileNotFoundException e) { + // ignore the files not found since the dir list may have have + // changed since the names[] list was generated. 
+ } + } + if (j == names.length) { + return results; + } + return Arrays.copyOf(results, j); + } + + if (!useDeprecatedFileStatus) { + return new FileStatus[] { getFileStatus(f) }; + } + return new FileStatus[] { + new DeprecatedRawLocalFileStatus(localf, + getDefaultBlockSize(f), this) }; + } + + protected boolean mkOneDir(File p2f) throws IOException { + return mkOneDirWithMode(new Path(p2f.getAbsolutePath()), p2f, null); + } + + protected boolean mkOneDirWithMode(Path p, File p2f, FsPermission permission) + throws IOException { + if (permission == null) { + permission = FsPermission.getDirDefault(); + } + permission = permission.applyUMask(FsPermission.getUMask(getConf())); + if (Shell.WINDOWS && NativeIO.isAvailable()) { + try { + NativeIO.Windows.createDirectoryWithMode(p2f, permission.toShort()); + return true; + } catch (IOException e) { + if (LOG.isDebugEnabled()) { + LOG.debug(String.format( + "NativeIO.createDirectoryWithMode error, path = %s, mode = %o", + p2f, permission.toShort()), e); + } + return false; + } + } else { + boolean b = p2f.mkdir(); + if (b) { + setPermission(p, permission); + } + return b; + } + } + + /** + * Creates the specified directory hierarchy. Does not + * treat existence as an error. + */ + @Override + public boolean mkdirs(Path f) throws IOException { + return mkdirsWithOptionalPermission(f, null); + } + + @Override + public boolean mkdirs(Path f, FsPermission permission) throws IOException { + return mkdirsWithOptionalPermission(f, permission); + } + + private boolean mkdirsWithOptionalPermission(Path f, FsPermission permission) + throws IOException { + if(f == null) { + throw new IllegalArgumentException("mkdirs path arg is null"); + } + Path parent = f.getParent(); + File p2f = pathToFile(f); + File parent2f = null; + if(parent != null) { + parent2f = pathToFile(parent); + if(parent2f != null && parent2f.exists() && !parent2f.isDirectory()) { + throw new ParentNotDirectoryException("Parent path is not a directory: " + + parent); + } + } + if (p2f.exists() && !p2f.isDirectory()) { + throw new FileAlreadyExistsException("Destination exists" + + " and is not a directory: " + p2f.getCanonicalPath()); + } + return (parent == null || parent2f.exists() || mkdirs(parent)) && + (mkOneDirWithMode(f, p2f, permission) || p2f.isDirectory()); + } + + + @Override + public Path getHomeDirectory() { + return this.makeQualified(new Path(System.getProperty("user.home"))); + } + + /** + * Set the working directory to the given directory. + */ + @Override + public void setWorkingDirectory(Path newDir) { + workingDir = makeAbsolute(newDir); + checkPath(workingDir); + } + + @Override + public Path getWorkingDirectory() { + return workingDir; + } + + @Override + protected Path getInitialWorkingDirectory() { + return this.makeQualified(new Path(System.getProperty("user.dir"))); + } + + @Override + public FsStatus getStatus(Path p) throws IOException { + File partition = pathToFile(p == null ? new Path("/") : p); + //File provides getUsableSpace() and getFreeSpace() + //File provides no API to obtain used space, assume used = total - free + return new FsStatus(partition.getTotalSpace(), + partition.getTotalSpace() - partition.getFreeSpace(), + partition.getFreeSpace()); + } + + // In the case of the local filesystem, we can just rename the file. 
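+  // Usage sketch (added for clarity; the paths are hypothetical, not from the patch):
+  //   fs.moveFromLocalFile(new Path("/tmp/upload/file.bin"), new Path("/data/file.bin"));
+  // simply delegates to rename(src, dst), so no bytes are copied unless rename()
+  // itself has to fall back to FileUtil.copy.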
+ @Override + public void moveFromLocalFile(Path src, Path dst) throws IOException { + rename(src, dst); + } + + // We can write output directly to the final location + @Override + public Path startLocalOutput(Path fsOutputFile, Path tmpLocalFile) + throws IOException { + return fsOutputFile; + } + + // It's in the right place - nothing to do. + @Override + public void completeLocalOutput(Path fsWorkingFile, Path tmpLocalFile) + throws IOException { + } + + @Override + public void close() throws IOException { + super.close(); + } + + @Override + public String toString() { + return "LocalFS"; + } + + @Override + public FileStatus getFileStatus(Path f) throws IOException { + return getFileLinkStatusInternal(f, true); + } + + @Deprecated + private FileStatus deprecatedGetFileStatus(Path f) throws IOException { + File path = pathToFile(f); + if (path.exists()) { + return new DeprecatedRawLocalFileStatus(pathToFile(f), + getDefaultBlockSize(f), this); + } else { + throw new FileNotFoundException("File " + f + " does not exist"); + } + } + + @Deprecated + static class DeprecatedRawLocalFileStatus extends FileStatus { + /* We can add extra fields here. It breaks at least CopyFiles.FilePair(). + * We recognize if the information is already loaded by check if + * onwer.equals(""). + */ + private boolean isPermissionLoaded() { + return !super.getOwner().isEmpty(); + } + + private static long getLastAccessTime(File f) throws IOException { + long accessTime; + try { + accessTime = Files.readAttributes(f.toPath(), + BasicFileAttributes.class).lastAccessTime().toMillis(); + } catch (NoSuchFileException e) { + throw new FileNotFoundException("File " + f + " does not exist"); + } + return accessTime; + } + + DeprecatedRawLocalFileStatus(File f, long defaultBlockSize, FileSystem fs) + throws IOException { + super(f.length(), f.isDirectory(), 1, defaultBlockSize, + f.lastModified(), getLastAccessTime(f), + null, null, null, + new Path(f.getPath()).makeQualified(fs.getUri(), + fs.getWorkingDirectory())); + } + + @Override + public FsPermission getPermission() { + if (!isPermissionLoaded()) { + loadPermissionInfo(); + } + return super.getPermission(); + } + + @Override + public String getOwner() { + if (!isPermissionLoaded()) { + loadPermissionInfo(); + } + return super.getOwner(); + } + + @Override + public String getGroup() { + if (!isPermissionLoaded()) { + loadPermissionInfo(); + } + return super.getGroup(); + } + + /** + * Load file permission information (UNIX symbol rwxrwxrwx, sticky bit info). + * + * To improve peformance, give priority to native stat() call. 
First try get + * permission information by using native JNI call then fall back to use non + * native (ProcessBuilder) call in case native lib is not loaded or native + * call is not successful + */ + private synchronized void loadPermissionInfo() { + if (!isPermissionLoaded() && NativeIO.isAvailable()) { + try { + loadPermissionInfoByNativeIO(); + } catch (IOException ex) { + LOG.debug("Native call failed", ex); + } + } + + if (!isPermissionLoaded()) { + loadPermissionInfoByNonNativeIO(); + } + } + + /// loads permissions, owner, and group from `ls -ld` + @VisibleForTesting + void loadPermissionInfoByNonNativeIO() { + IOException e = null; + try { + java.nio.file.Path path = Paths.get(getPath().toUri()); + String permission = '-' + PosixFilePermissions.toString(Files.getPosixFilePermissions(path)); + setPermission(FsPermission.valueOf(permission)); + + String owner = Files.getOwner(path).getName(); + String group = Files.readAttributes(path, PosixFileAttributes.class, LinkOption.NOFOLLOW_LINKS).group().getName(); + // If on windows domain, token format is DOMAIN\\user and we want to + // extract only the user name + // same as to the group name + if (Shell.WINDOWS) { + owner = removeDomain(owner); + group = removeDomain(group); + } + setOwner(owner); + setGroup(group); + } catch (IOException ioe) { + e = ioe; + } finally { + if (e != null) { + throw new RuntimeException("Error while running command to get " + + "file permissions : " + + StringUtils.stringifyException(e)); + } + } + } + + // In Windows, domain name is added. + // For example, given machine name (domain name) dname, user name i, then + // the result for user is dname\\i and for group is dname\\None. So we need + // remove domain name as follows: + // DOMAIN\\user => user, DOMAIN\\group => group + private String removeDomain(String str) { + int index = str.indexOf("\\"); + if (index != -1) { + str = str.substring(index + 1); + } + return str; + } + + // loads permissions, owner, and group from `ls -ld` + // but use JNI to more efficiently get file mode (permission, owner, group) + // by calling file stat() in *nix or some similar calls in Windows + @VisibleForTesting + void loadPermissionInfoByNativeIO() throws IOException { + Path path = getPath(); + String pathName = path.toUri().getPath(); + // remove leading slash for Windows path + if (Shell.WINDOWS && pathName.startsWith("/")) { + pathName = pathName.substring(1); + } + try { + NativeIO.POSIX.Stat stat = NativeIO.POSIX.getStat(pathName); + String owner = stat.getOwner(); + String group = stat.getGroup(); + int mode = stat.getMode(); + setOwner(owner); + setGroup(group); + setPermission(new FsPermission(mode)); + } catch (IOException e) { + setOwner(null); + setGroup(null); + setPermission(null); + throw e; + } + } + + @Override + public void write(DataOutput out) throws IOException { + if (!isPermissionLoaded()) { + loadPermissionInfo(); + } + super.write(out); + } + } + + /** + * Use the command chown to set owner. + */ + @Override + public void setOwner(Path p, String username, String groupname) + throws IOException { + FileUtil.setOwner(pathToFile(p), username, groupname); + } + + /** + * Use the command chmod to set permission. 
+ */ + @Override + public void setPermission(Path p, FsPermission permission) + throws IOException { + if (NativeIO.isAvailable()) { + NativeIO.POSIX.chmod(pathToFile(p).getCanonicalPath(), + permission.toShort()); + } else { + Files.setPosixFilePermissions(Paths.get(p.toUri()), + PosixFilePermissions.fromString(permission.toString())); + } + } + + /** + * Sets the {@link Path}'s last modified time and last access time to + * the given valid times. + * + * @param mtime the modification time to set (only if no less than zero). + * @param atime the access time to set (only if no less than zero). + * @throws IOException if setting the times fails. + */ + @Override + public void setTimes(Path p, long mtime, long atime) throws IOException { + try { + BasicFileAttributeView view = Files.getFileAttributeView( + pathToFile(p).toPath(), BasicFileAttributeView.class); + FileTime fmtime = (mtime >= 0) ? FileTime.fromMillis(mtime) : null; + FileTime fatime = (atime >= 0) ? FileTime.fromMillis(atime) : null; + view.setTimes(fmtime, fatime, null); + } catch (NoSuchFileException e) { + throw new FileNotFoundException("File " + p + " does not exist"); + } + } + + /** + * Hook to implement support for {@link PathHandle} operations. + * @param stat Referent in the target FileSystem + * @param opts Constraints that determine the validity of the + * {@link PathHandle} reference. + */ + protected PathHandle createPathHandle(FileStatus stat, + Options.HandleOpt... opts) { + if (stat.isDirectory() || stat.isSymlink()) { + throw new IllegalArgumentException("PathHandle only available for files"); + } + String authority = stat.getPath().toUri().getAuthority(); + if (authority != null && !authority.equals("file://")) { + throw new IllegalArgumentException("Wrong FileSystem: " + stat.getPath()); + } + Options.HandleOpt.Data data = + Options.HandleOpt.getOpt(Options.HandleOpt.Data.class, opts) + .orElse(Options.HandleOpt.changed(false)); + Options.HandleOpt.Location loc = + Options.HandleOpt.getOpt(Options.HandleOpt.Location.class, opts) + .orElse(Options.HandleOpt.moved(false)); + if (loc.allowChange()) { + throw new UnsupportedOperationException("Tracking file movement in " + + "basic FileSystem is not supported"); + } + final Path p = stat.getPath(); + final Optional mtime = !data.allowChange() + ? Optional.of(stat.getModificationTime()) + : Optional.empty(); + return new LocalFileSystemPathHandle(p.toString(), mtime); + } + + @Override + public boolean supportsSymlinks() { + return true; + } + + @SuppressWarnings("deprecation") + @Override + public void createSymlink(Path target, Path link, boolean createParent) + throws IOException { + if (!FileSystem.areSymlinksEnabled()) { + throw new UnsupportedOperationException("Symlinks not supported"); + } + final String targetScheme = target.toUri().getScheme(); + if (targetScheme != null && !"file".equals(targetScheme)) { + throw new IOException("Unable to create symlink to non-local file "+ + "system: "+target.toString()); + } + if (createParent) { + mkdirs(link.getParent()); + } + + // NB: Use createSymbolicLink in java.nio.file.Path once available + int result = FileUtil.symLink(target.toString(), + makeAbsolute(link).toString()); + if (result != 0) { + throw new IOException("Error " + result + " creating symlink " + + link + " to " + target); + } + } + + /** + * Return a FileStatus representing the given path. If the path refers + * to a symlink return a FileStatus representing the link rather than + * the object the link refers to. 
+ */ + @Override + public FileStatus getFileLinkStatus(final Path f) throws IOException { + FileStatus fi = getFileLinkStatusInternal(f, false); + // getFileLinkStatus is supposed to return a symlink with a + // qualified path + if (fi.isSymlink()) { + Path targetQual = FSLinkResolver.qualifySymlinkTarget(this.getUri(), + fi.getPath(), fi.getSymlink()); + fi.setSymlink(targetQual); + } + return fi; + } + + /** + * Public {@link FileStatus} methods delegate to this function, which in turn + * either call the new {@link Stat} based implementation or the deprecated + * methods based on platform support. + * + * @param f Path to stat + * @param dereference whether to dereference the final path component if a + * symlink + * @return FileStatus of f + * @throws IOException Exception on getFileLinkStatusInternal + */ + private FileStatus getFileLinkStatusInternal(final Path f, + boolean dereference) throws IOException { + if (!useDeprecatedFileStatus) { + return getNativeFileLinkStatus(f, dereference); + } else if (dereference) { + return deprecatedGetFileStatus(f); + } else { + return deprecatedGetFileLinkStatusInternal(f); + } + } + + /** + * Deprecated. Remains for legacy support. Should be removed when {@link Stat} + * gains support for Windows and other operating systems. + */ + @Deprecated + private FileStatus deprecatedGetFileLinkStatusInternal(final Path f) + throws IOException { + String target = FileUtil.readLink(new File(f.toString())); + + try { + FileStatus fs = getFileStatus(f); + // If f refers to a regular file or directory + if (target.isEmpty()) { + return fs; + } + // Otherwise f refers to a symlink + return new FileStatus(fs.getLen(), + false, + fs.getReplication(), + fs.getBlockSize(), + fs.getModificationTime(), + fs.getAccessTime(), + fs.getPermission(), + fs.getOwner(), + fs.getGroup(), + new Path(target), + f); + } catch (FileNotFoundException e) { + /* The exists method in the File class returns false for dangling + * links so we can get a FileNotFoundException for links that exist. + * It's also possible that we raced with a delete of the link. Use + * the readBasicFileAttributes method in java.nio.file.attributes + * when available. + */ + if (!target.isEmpty()) { + return new FileStatus(0, false, 0, 0, 0, 0, FsPermission.getDefault(), + "", "", new Path(target), f); + } + // f refers to a file or directory that does not exist + throw e; + } + } + /** + * Calls out to platform's native stat(1) implementation to get file metadata + * (permissions, user, group, atime, mtime, etc). This works around the lack + * of lstat(2) in Java 6. + * + * Currently, the {@link Stat} class used to do this only supports Linux + * and FreeBSD, so the old {@link #deprecatedGetFileLinkStatusInternal(Path)} + * implementation (deprecated) remains further OS support is added. 
+ * + * @param f File to stat + * @param dereference whether to dereference symlinks + * @return FileStatus of f + * @throws IOException Exception on getNativeFileLinkStatus + */ + private FileStatus getNativeFileLinkStatus(final Path f, + boolean dereference) throws IOException { + checkPath(f); + Stat stat = new Stat(f, getDefaultBlockSize(f), dereference, this); + return stat.getFileStatus(); + } + + @Override + public Path getLinkTarget(Path f) throws IOException { + FileStatus fi = getFileLinkStatusInternal(f, false); + // return an unqualified symlink target + return fi.getSymlink(); + } +} diff --git a/solr/core/src/test/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java b/solr/core/src/test/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java index 4564696f4ef..ff19c72a45a 100644 --- a/solr/core/src/test/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java +++ b/solr/core/src/test/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java @@ -25,9 +25,7 @@ import java.io.FileNotFoundException; import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStream; -import java.io.OutputStreamWriter; import java.io.RandomAccessFile; -import java.io.Writer; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -46,9 +44,7 @@ import java.util.concurrent.atomic.AtomicLong; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.CachingGetSpaceUsed; import org.apache.hadoop.fs.CommonConfigurationKeys; -import org.apache.hadoop.fs.GetSpaceUsed; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSUtilClient; import org.apache.hadoop.hdfs.protocol.Block; @@ -119,9 +115,6 @@ class BlockPoolSlice { } }; - // TODO:FEDERATION scalability issue - a thread per DU is needed - private final GetSpaceUsed dfsUsage; - /** * Create a blook pool slice * @param bpid Block pool Id @@ -178,12 +171,6 @@ class BlockPoolSlice { fileIoProvider.mkdirs(volume, rbwDir); fileIoProvider.mkdirs(volume, tmpDir); - // Use cached value initially if available. Or the following call will - // block until the initial du command completes. - this.dfsUsage = new CachingGetSpaceUsed.Builder().setPath(bpDir) - .setConf(conf) - .setInitialUsed(loadDfsUsed()) - .build(); if (addReplicaThreadPool == null) { // initialize add replica fork join pool initializeAddReplicaPool(conf); @@ -192,10 +179,7 @@ class BlockPoolSlice { shutdownHook = new Runnable() { @Override public void run() { - if (!dfsUsedSaved) { - saveDfsUsed(); - addReplicaThreadPool.shutdownNow(); - } + addReplicaThreadPool.shutdownNow(); } }; ShutdownHookManager.get().addShutdownHook(shutdownHook, @@ -243,92 +227,13 @@ class BlockPoolSlice { /** Run DU on local drives. It must be synchronized from caller. */ void decDfsUsed(long value) { - if (dfsUsage instanceof CachingGetSpaceUsed) { - ((CachingGetSpaceUsed)dfsUsage).incDfsUsed(-value); - } } long getDfsUsed() throws IOException { - return dfsUsage.getUsed(); + return 0L; } void incDfsUsed(long value) { - if (dfsUsage instanceof CachingGetSpaceUsed) { - ((CachingGetSpaceUsed)dfsUsage).incDfsUsed(value); - } - } - - /** - * Read in the cached DU value and return it if it is less than - * cachedDfsUsedCheckTime which is set by - * dfs.datanode.cached-dfsused.check.interval.ms parameter. 
Slight imprecision - * of dfsUsed is not critical and skipping DU can significantly shorten the - * startup time. If the cached value is not available or too old, -1 is - * returned. - */ - long loadDfsUsed() { - long cachedDfsUsed; - long mtime; - Scanner sc; - - try { - sc = new Scanner(new File(currentDir, DU_CACHE_FILE), "UTF-8"); - } catch (FileNotFoundException fnfe) { - return -1; - } - - try { - // Get the recorded dfsUsed from the file. - if (sc.hasNextLong()) { - cachedDfsUsed = sc.nextLong(); - } else { - return -1; - } - // Get the recorded mtime from the file. - if (sc.hasNextLong()) { - mtime = sc.nextLong(); - } else { - return -1; - } - - // Return the cached value if mtime is okay. - if (mtime > 0 && (timer.now() - mtime < cachedDfsUsedCheckTime)) { - FsDatasetImpl.LOG.info("Cached dfsUsed found for " + currentDir + ": " + - cachedDfsUsed); - return cachedDfsUsed; - } - return -1; - } finally { - sc.close(); - } - } - - /** - * Write the current dfsUsed to the cache file. - */ - void saveDfsUsed() { - File outFile = new File(currentDir, DU_CACHE_FILE); - if (!fileIoProvider.deleteWithExistsCheck(volume, outFile)) { - FsDatasetImpl.LOG.warn("Failed to delete old dfsUsed file in " + - outFile.getParent()); - } - - try { - long used = getDfsUsed(); - try (Writer out = new OutputStreamWriter( - new FileOutputStream(outFile), "UTF-8")) { - // mtime is written last, so that truncated writes won't be valid. - out.write(Long.toString(used) + " " + Long.toString(timer.now())); - // This is only called as part of the volume shutdown. - // We explicitly avoid calling flush with fileIoProvider which triggers - // volume check upon io exception to avoid cyclic volume checks. - out.flush(); - } - } catch (IOException ioe) { - // If write failed, the volume might be bad. Since the cache file is - // not critical, log the error and continue. 
- FsDatasetImpl.LOG.warn("Failed to write dfsUsed to " + outFile, ioe); - } } /** @@ -362,13 +267,7 @@ class BlockPoolSlice { File addFinalizedBlock(Block b, ReplicaInfo replicaInfo) throws IOException { File blockDir = DatanodeUtil.idToBlockDir(finalizedDir, b.getBlockId()); fileIoProvider.mkdirsWithExistsCheck(volume, blockDir); - File blockFile = FsDatasetImpl.moveBlockFiles(b, replicaInfo, blockDir); - File metaFile = FsDatasetUtil.getMetaFile(blockFile, b.getGenerationStamp()); - if (dfsUsage instanceof CachingGetSpaceUsed) { - ((CachingGetSpaceUsed) dfsUsage).incDfsUsed( - b.getNumBytes() + metaFile.length()); - } - return blockFile; + return FsDatasetImpl.moveBlockFiles(b, replicaInfo, blockDir); } /** @@ -851,17 +750,11 @@ class BlockPoolSlice { void shutdown(BlockListAsLongs blocksListToPersist) { saveReplicas(blocksListToPersist); - saveDfsUsed(); - dfsUsedSaved = true; // Remove the shutdown hook to avoid any memory leak if (shutdownHook != null) { ShutdownHookManager.get().removeShutdownHook(shutdownHook); } - - if (dfsUsage instanceof CachingGetSpaceUsed) { - IOUtils.cleanupWithLogger(LOG, ((CachingGetSpaceUsed) dfsUsage)); - } } private boolean readReplicasFromCache(ReplicaMap volumeMap, diff --git a/solr/core/src/test/org/apache/hadoop/hdfs/server/namenode/NameNodeResourceChecker.java b/solr/core/src/test/org/apache/hadoop/hdfs/server/namenode/NameNodeResourceChecker.java new file mode 100644 index 00000000000..bf18facd2fb --- /dev/null +++ b/solr/core/src/test/org/apache/hadoop/hdfs/server/namenode/NameNodeResourceChecker.java @@ -0,0 +1,54 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode; + +import java.io.IOException; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.conf.Configuration; + +/** + * + * NameNodeResourceChecker provides a method - + * hasAvailableDiskSpace - which will return true if and only if + * the NameNode has disk space available on all required volumes, and any volume + * which is configured to be redundant. Volumes containing file system edits dirs + * are added by default, and arbitrary extra volumes may be configured as well. + */ +@InterfaceAudience.Private +public class NameNodeResourceChecker { + /** + * Create a NameNodeResourceChecker, which will check the edits dirs and any + * additional dirs to check set in conf. + */ + public NameNodeResourceChecker(Configuration conf) throws IOException { + } + + + /** + * Return true if disk space is available on at least one of the configured + * redundant volumes, and all of the configured required volumes. 
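+ * (Note on this test shadow of the Hadoop class, added for clarity: the
+ * constructor and hasAvailableDiskSpace() below are deliberate no-ops that
+ * always report space as available, so the resource check can never fail
+ * during Solr's HDFS tests.)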
+ * + * @return True if the configured amount of disk space is available on at + * least one redundant volume and all of the required volumes, false + * otherwise. + */ + public boolean hasAvailableDiskSpace() { + return true; + } +} diff --git a/solr/core/src/test/org/apache/hadoop/util/DiskChecker.java b/solr/core/src/test/org/apache/hadoop/util/DiskChecker.java new file mode 100644 index 00000000000..3043da9c107 --- /dev/null +++ b/solr/core/src/test/org/apache/hadoop/util/DiskChecker.java @@ -0,0 +1,370 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.util; + +import java.io.File; +import java.io.FileNotFoundException; +import java.io.FileOutputStream; +import java.io.IOException; +import java.util.UUID; +import java.util.concurrent.atomic.AtomicReference; + +import com.google.common.annotations.VisibleForTesting; +import org.apache.commons.io.FileUtils; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.fs.LocalFileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.io.IOUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Class that provides utility functions for checking disk problem + */ +@InterfaceAudience.Private +@InterfaceStability.Unstable +public class DiskChecker { + public static final Logger LOG = LoggerFactory.getLogger(DiskChecker.class); + + public static class DiskErrorException extends IOException { + public DiskErrorException(String msg) { + super(msg); + } + + public DiskErrorException(String msg, Throwable cause) { + super(msg, cause); + } + } + + public static class DiskOutOfSpaceException extends IOException { + public DiskOutOfSpaceException(String msg) { + super(msg); + } + } + + // Provider that abstracts some FileOutputStream operations for + // testability. + private static AtomicReference fileIoProvider = + new AtomicReference<>(new DefaultFileIoProvider()); + + /** + * Create the directory if it doesn't exist and check that dir is readable, + * writable and executable + * + * @param dir dir to check + * @throws DiskErrorException exception checking dir + */ + public static void checkDir(File dir) throws DiskErrorException { + checkDirInternal(dir); + } + + /** + * Create the directory if it doesn't exist and check that dir is + * readable, writable and executable. Perform some disk IO to + * ensure that the disk is usable for writes. 
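+ * (Descriptive note, not upstream javadoc: the IO probe below writes a
+ * one-byte file named DiskChecker.OK_TO_DELETE_.001 (or .002, then a
+ * randomly named file) into the directory, syncs it to disk and deletes it,
+ * retrying up to three times before the directory is declared bad.)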
+ * + * @param dir dir to check + * @throws DiskErrorException exception checking dir + */ + public static void checkDirWithDiskIo(File dir) + throws DiskErrorException { + checkDirInternal(dir); + doDiskIo(dir); + } + + private static void checkDirInternal(File dir) + throws DiskErrorException { + if (!mkdirsWithExistsCheck(dir)) { + throw new DiskErrorException("Cannot create directory: " + + dir.toString()); + } + checkAccessByFileMethods(dir); + } + + /** + * Create the local directory if necessary, check permissions and also ensure + * it can be read from and written into. + * + * @param localFS local filesystem + * @param dir directory + * @param expected permission + * @throws DiskErrorException exception checking dir + * @throws IOException exception checking dir + */ + public static void checkDir(LocalFileSystem localFS, Path dir, + FsPermission expected) + throws DiskErrorException, IOException { + checkDirInternal(localFS, dir, expected); + } + + + /** + * Create the local directory if necessary, also ensure permissions + * allow it to be read from and written into. Perform some diskIO + * to ensure that the disk is usable for writes. + * + * @param localFS local filesystem + * @param dir directory + * @param expected permission + * @throws DiskErrorException exception checking dir + * @throws IOException exception checking dir + */ + public static void checkDirWithDiskIo(LocalFileSystem localFS, Path dir, + FsPermission expected) + throws DiskErrorException, IOException { + checkDirInternal(localFS, dir, expected); + doDiskIo(localFS.pathToFile(dir)); + } + + private static void checkDirInternal(LocalFileSystem localFS, Path dir, + FsPermission expected) + throws DiskErrorException, IOException { + mkdirsWithExistsAndPermissionCheck(localFS, dir, expected); + checkAccessByFileMethods(localFS.pathToFile(dir)); + } + + /** + * Checks that the current running process can read, write, and execute the + * given directory by using methods of the File object. + * + * @param dir File to check + * @throws DiskErrorException if dir is not readable, not writable, or not + * executable + */ + private static void checkAccessByFileMethods(File dir) + throws DiskErrorException { + if (!dir.isDirectory()) { + throw new DiskErrorException("Not a directory: " + + dir.toString()); + } + + if (!FileUtil.canRead(dir)) { + throw new DiskErrorException("Directory is not readable: " + + dir.toString()); + } + + if (!FileUtil.canWrite(dir)) { + throw new DiskErrorException("Directory is not writable: " + + dir.toString()); + } + } + + /** + * The semantics of mkdirsWithExistsCheck method is different from the mkdirs + * method provided in the Sun's java.io.File class in the following way: + * While creating the non-existent parent directories, this method checks for + * the existence of those directories if the mkdir fails at any point (since + * that directory might have just been created by some other process). + * If both mkdir() and the exists() check fails for any seemingly + * non-existent directory, then we signal an error; Sun's mkdir would signal + * an error (return false) if a directory it is attempting to create already + * exists or the mkdir fails. 
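+ * (Illustrative example, not part of the upstream javadoc: if two threads race
+ * to create /data/a/b, the loser's mkdir() returns false, but the subsequent
+ * exists() check still succeeds, so the call is treated as success rather than
+ * as an error.)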
+ * @param dir dir to create and check + * @return true on success, false on failure + */ + private static boolean mkdirsWithExistsCheck(File dir) { + if (dir.mkdir() || dir.exists()) { + return true; + } + File canonDir; + try { + canonDir = dir.getCanonicalFile(); + } catch (IOException e) { + return false; + } + String parent = canonDir.getParent(); + return (parent != null) && + (mkdirsWithExistsCheck(new File(parent)) && + (canonDir.mkdir() || canonDir.exists())); + } + + /** + * Create the directory or check permissions if it already exists. + * + * The semantics of mkdirsWithExistsAndPermissionCheck method is different + * from the mkdirs method provided in the Sun's java.io.File class in the + * following way: + * While creating the non-existent parent directories, this method checks for + * the existence of those directories if the mkdir fails at any point (since + * that directory might have just been created by some other process). + * If both mkdir() and the exists() check fails for any seemingly + * non-existent directory, then we signal an error; Sun's mkdir would signal + * an error (return false) if a directory it is attempting to create already + * exists or the mkdir fails. + * + * @param localFS local filesystem + * @param dir directory to be created or checked + * @param expected expected permission + * @throws IOException exception making dir and checking + */ + static void mkdirsWithExistsAndPermissionCheck( + LocalFileSystem localFS, Path dir, FsPermission expected) + throws IOException { + File directory = localFS.pathToFile(dir); + boolean created = false; + + if (!directory.exists()) + created = mkdirsWithExistsCheck(directory); + + if (created || !localFS.getFileStatus(dir).getPermission().equals(expected)) + localFS.setPermission(dir, expected); + } + + // State related to running disk IO checks. + private static final String DISK_IO_FILE_PREFIX = + "DiskChecker.OK_TO_DELETE_."; + + @VisibleForTesting + static final int DISK_IO_MAX_ITERATIONS = 3; + + /** + * Performs some disk IO by writing to a new file in the given directory + * and sync'ing file contents to disk. + * + * This increases the likelihood of catching catastrophic disk/controller + * failures sooner. + * + * @param dir directory to be checked. + * @throws DiskErrorException if we hit an error while trying to perform + * disk IO against the file. + */ + private static void doDiskIo(File dir) throws DiskErrorException { + try { + IOException ioe = null; + + for (int i = 0; i < DISK_IO_MAX_ITERATIONS; ++i) { + final File file = getFileNameForDiskIoCheck(dir, i+1); + try { + diskIoCheckWithoutNativeIo(file); + return; + } catch (IOException e) { + // Let's retry a few times before we really give up and + // declare the disk as bad. + ioe = e; + } + } + throw ioe; // Just rethrow the last exception to signal failure. + } catch(IOException e) { + throw new DiskErrorException("Error checking directory " + dir, e); + } + } + + /** + * Try to perform some disk IO by writing to the given file + * without using Native IO. + * + * @param file file to check + * @throws IOException if there was a non-retriable error. 
+ */ + private static void diskIoCheckWithoutNativeIo(File file) + throws IOException { + FileOutputStream fos = null; + + try { + final FileIoProvider provider = fileIoProvider.get(); + fos = provider.get(file); + provider.write(fos, new byte[1]); + fos.getFD().sync(); + fos.close(); + fos = null; + if (!file.delete() && file.exists()) { + throw new IOException("Failed to delete " + file); + } + file = null; + } finally { + IOUtils.cleanupWithLogger(LOG, fos); + FileUtils.deleteQuietly(file); + } + } + + /** + * Generate a path name for a test file under the given directory. + * + * @return file object. + */ + @VisibleForTesting + static File getFileNameForDiskIoCheck(File dir, int iterationCount) { + if (iterationCount < DISK_IO_MAX_ITERATIONS) { + // Use file names of the format prefix.001 by default. + return new File(dir, + DISK_IO_FILE_PREFIX + String.format("%03d", iterationCount)); + } else { + // If the first few checks then fail, try using a randomly generated + // file name. + return new File(dir, DISK_IO_FILE_PREFIX + UUID.randomUUID()); + } + } + + /** + * An interface that abstracts operations on {@link FileOutputStream} + * objects for testability. + */ + interface FileIoProvider { + FileOutputStream get(File f) throws FileNotFoundException; + void write(FileOutputStream fos, byte[] data) throws IOException; + } + + /** + * The default implementation of {@link FileIoProvider}. + */ + private static class DefaultFileIoProvider implements FileIoProvider { + /** + * See {@link FileOutputStream#FileOutputStream(File)}. + */ + @Override + public FileOutputStream get(File f) throws FileNotFoundException { + return new FileOutputStream(f); + } + + /** + * See {@link FileOutputStream#write(byte[])}. + */ + @Override + public void write(FileOutputStream fos, byte[] data) throws IOException { + fos.write(data); + } + } + + /** + * Replace the {@link FileIoProvider} for tests. + * This method MUST NOT be used outside of unit tests. + * + * @param newFosProvider FileIoProvider + * @return the old FileIoProvider. + */ + @VisibleForTesting + static FileIoProvider replaceFileOutputStreamProvider( + FileIoProvider newFosProvider) { + return fileIoProvider.getAndSet(newFosProvider); + } + + /** + * Retrieve the current {@link FileIoProvider}. + * This method MUST NOT be used outside of unit tests. + * + * @return the current FileIoProvider. 
+ */ + @VisibleForTesting + static FileIoProvider getFileOutputStreamProvider() { + return fileIoProvider.get(); + } +} diff --git a/solr/core/src/test/org/apache/solr/cloud/SharedFSAutoReplicaFailoverTest.java b/solr/core/src/test/org/apache/solr/cloud/SharedFSAutoReplicaFailoverTest.java index 735cc2080d7..e371e340361 100644 --- a/solr/core/src/test/org/apache/solr/cloud/SharedFSAutoReplicaFailoverTest.java +++ b/solr/core/src/test/org/apache/solr/cloud/SharedFSAutoReplicaFailoverTest.java @@ -92,7 +92,6 @@ public class SharedFSAutoReplicaFailoverTest extends AbstractFullDistribZkTestBa CompletionService completionService; Set> pending; private final Map collectionUlogDirMap = new HashMap<>(); - @BeforeClass public static void hdfsFailoverBeforeClass() throws Exception { @@ -104,9 +103,12 @@ public class SharedFSAutoReplicaFailoverTest extends AbstractFullDistribZkTestBa @AfterClass public static void hdfsFailoverAfterClass() throws Exception { - HdfsTestUtil.teardownClass(dfsCluster); - System.clearProperty("solr.hdfs.blockcache.blocksperbank"); - dfsCluster = null; + try { + HdfsTestUtil.teardownClass(dfsCluster); + } finally { + System.clearProperty("solr.hdfs.blockcache.blocksperbank"); + dfsCluster = null; + } } @Before diff --git a/solr/core/src/test/org/apache/solr/cloud/hdfs/FakeGroupMapping.java b/solr/core/src/test/org/apache/solr/cloud/hdfs/FakeGroupMapping.java new file mode 100644 index 00000000000..fae1b00f7d0 --- /dev/null +++ b/solr/core/src/test/org/apache/solr/cloud/hdfs/FakeGroupMapping.java @@ -0,0 +1,40 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.solr.cloud.hdfs; + +import java.util.Collections; +import java.util.List; + +import org.apache.hadoop.security.GroupMappingServiceProvider; + +/** + * Fake mapping for Hadoop to prevent falling back to Shell group provider + */ +public class FakeGroupMapping implements GroupMappingServiceProvider { + @Override + public List getGroups(String user) { + return Collections.singletonList("supergroup"); + } + + @Override + public void cacheGroupsRefresh() { + } + + @Override + public void cacheGroupsAdd(List groups) { + } +} diff --git a/solr/core/src/test/org/apache/solr/cloud/hdfs/HDFSCollectionsAPITest.java b/solr/core/src/test/org/apache/solr/cloud/hdfs/HDFSCollectionsAPITest.java index 6a93042e960..550d1235e4b 100644 --- a/solr/core/src/test/org/apache/solr/cloud/hdfs/HDFSCollectionsAPITest.java +++ b/solr/core/src/test/org/apache/solr/cloud/hdfs/HDFSCollectionsAPITest.java @@ -40,8 +40,7 @@ public class HDFSCollectionsAPITest extends SolrCloudTestCase { @BeforeClass public static void setupClass() throws Exception { - configureCluster(2) - .configure(); + configureCluster(2).configure(); dfsCluster = HdfsTestUtil.setupClass(createTempDir().toFile().getAbsolutePath()); @@ -54,6 +53,7 @@ public class HDFSCollectionsAPITest extends SolrCloudTestCase { public static void teardownClass() throws Exception { try { shutdownCluster(); // need to close before the MiniDFSCluster + cluster = null; } finally { try { HdfsTestUtil.teardownClass(dfsCluster); diff --git a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsBasicDistributedZk2Test.java b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsBasicDistributedZk2Test.java index 0c7081b3f39..5f274a552df 100644 --- a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsBasicDistributedZk2Test.java +++ b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsBasicDistributedZk2Test.java @@ -33,8 +33,6 @@ import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; @ThreadLeakFilters(defaultFilters = true, filters = { BadHdfsThreadsFilter.class // hdfs currently leaks thread(s) }) -// commented 20-July-2018 @LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 26-Mar-2018 -// commented out on: 24-Dec-2018 @LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 17-Aug-2018 public class HdfsBasicDistributedZk2Test extends BasicDistributedZk2Test { private static MiniDFSCluster dfsCluster; @@ -45,14 +43,15 @@ public class HdfsBasicDistributedZk2Test extends BasicDistributedZk2Test { @AfterClass public static void teardownClass() throws Exception { - HdfsTestUtil.teardownClass(dfsCluster); - dfsCluster = null; + try { + HdfsTestUtil.teardownClass(dfsCluster); + } finally { + dfsCluster = null; + } } - @Override protected String getDataDir(String dataDir) throws IOException { return HdfsTestUtil.getDataDir(dfsCluster, dataDir); } - } diff --git a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsRecoverLeaseTest.java b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsRecoverLeaseTest.java index ad8556cf0ca..16d0f7ab961 100644 --- a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsRecoverLeaseTest.java +++ b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsRecoverLeaseTest.java @@ -52,8 +52,11 @@ public class HdfsRecoverLeaseTest extends SolrTestCaseJ4 { @AfterClass public static void afterClass() throws Exception { - HdfsTestUtil.teardownClass(dfsCluster); - dfsCluster = null; + try { + HdfsTestUtil.teardownClass(dfsCluster); + } finally { + dfsCluster = 
null; + } } @Before diff --git a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsSyncSliceTest.java b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsSyncSliceTest.java index 49998472f46..48e6d249b22 100644 --- a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsSyncSliceTest.java +++ b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsSyncSliceTest.java @@ -44,8 +44,11 @@ public class HdfsSyncSliceTest extends SyncSliceTest { @AfterClass public static void teardownClass() throws Exception { - HdfsTestUtil.teardownClass(dfsCluster); - dfsCluster = null; + try { + HdfsTestUtil.teardownClass(dfsCluster); + } finally { + dfsCluster = null; + } } @Override diff --git a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsTestUtil.java b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsTestUtil.java index 6987a6a49f1..149d108b200 100644 --- a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsTestUtil.java +++ b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsTestUtil.java @@ -20,11 +20,11 @@ import java.io.File; import java.lang.invoke.MethodHandles; import java.net.URI; import java.util.Enumeration; +import java.util.HashMap; import java.util.Locale; import java.util.Map; import java.util.Timer; import java.util.TimerTask; -import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ForkJoinPool; import java.util.concurrent.ForkJoinWorkerThread; import java.util.regex.Pattern; @@ -56,7 +56,8 @@ public class HdfsTestUtil { private static final boolean HA_TESTING_ENABLED = false; // SOLR-XXX - private static Map timers = new ConcurrentHashMap<>(); + private static Map timers = new HashMap<>(); + private static final Object TIMERS_LOCK = new Object(); private static FSDataOutputStream badTlogOutStream; @@ -145,7 +146,12 @@ public class HdfsTestUtil { int rnd = random().nextInt(10000); Timer timer = new Timer(); - timers.put(dfsCluster, timer); + synchronized (TIMERS_LOCK) { + if (timers == null) { + timers = new HashMap<>(); + } + timers.put(dfsCluster, timer); + } timer.schedule(new TimerTask() { @Override @@ -156,7 +162,12 @@ public class HdfsTestUtil { } else if (haTesting && rndMode == 2) { int rnd = random().nextInt(30000); Timer timer = new Timer(); - timers.put(dfsCluster, timer); + synchronized (TIMERS_LOCK) { + if (timers == null) { + timers = new HashMap<>(); + } + timers.put(dfsCluster, timer); + } timer.schedule(new TimerTask() { @Override @@ -196,19 +207,23 @@ public class HdfsTestUtil { public static Configuration getClientConfiguration(MiniDFSCluster dfsCluster) { Configuration conf = getBasicConfiguration(dfsCluster.getConfiguration(0)); - if (dfsCluster.getNameNodeInfos().length > 1) { + if (dfsCluster.getNumNameNodes() > 1) { HATestUtil.setFailoverConfigurations(dfsCluster, conf); } return conf; } public static void teardownClass(MiniDFSCluster dfsCluster) throws Exception { + HdfsUtil.TEST_CONF = null; + if (badTlogOutStream != null) { IOUtils.closeQuietly(badTlogOutStream); + badTlogOutStream = null; } if (badTlogOutStreamFs != null) { IOUtils.closeQuietly(badTlogOutStreamFs); + badTlogOutStreamFs = null; } try { @@ -218,9 +233,16 @@ public class HdfsTestUtil { log.error("Exception trying to reset solr.directoryFactory", e); } if (dfsCluster != null) { - Timer timer = timers.remove(dfsCluster); - if (timer != null) { - timer.cancel(); + synchronized (TIMERS_LOCK) { + if (timers != null) { + Timer timer = timers.remove(dfsCluster); + if (timer != null) { + timer.cancel(); + } + if (timers.isEmpty()) { + timers = null; + } + } } try { 
dfsCluster.shutdown(true); diff --git a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsWriteToMultipleCollectionsTest.java b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsWriteToMultipleCollectionsTest.java index 1f316033785..13531a4c364 100644 --- a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsWriteToMultipleCollectionsTest.java +++ b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsWriteToMultipleCollectionsTest.java @@ -73,6 +73,7 @@ public class HdfsWriteToMultipleCollectionsTest extends BasicDistributedZkTest { HdfsTestUtil.teardownClass(dfsCluster); } finally { dfsCluster = null; + schemaString = null; } } diff --git a/solr/core/src/test/org/apache/solr/index/hdfs/CheckHdfsIndexTest.java b/solr/core/src/test/org/apache/solr/index/hdfs/CheckHdfsIndexTest.java index 8eefd9a3662..d31bd820be8 100644 --- a/solr/core/src/test/org/apache/solr/index/hdfs/CheckHdfsIndexTest.java +++ b/solr/core/src/test/org/apache/solr/index/hdfs/CheckHdfsIndexTest.java @@ -71,6 +71,7 @@ public class CheckHdfsIndexTest extends AbstractFullDistribZkTestBase { HdfsTestUtil.teardownClass(dfsCluster); } finally { dfsCluster = null; + path = null; } } diff --git a/solr/core/src/test/org/apache/solr/search/TestRecoveryHdfs.java b/solr/core/src/test/org/apache/solr/search/TestRecoveryHdfs.java index e19895e9e53..a148d59599f 100644 --- a/solr/core/src/test/org/apache/solr/search/TestRecoveryHdfs.java +++ b/solr/core/src/test/org/apache/solr/search/TestRecoveryHdfs.java @@ -65,10 +65,11 @@ import static org.apache.solr.update.processor.DistributingUpdateProcessorFactor // TODO: longer term this should be combined with TestRecovery somehow ?? public class TestRecoveryHdfs extends SolrTestCaseJ4 { // means that we've seen the leader and have version info (i.e. we are a non-leader replica) - private static String FROM_LEADER = DistribPhase.FROMLEADER.toString(); + private static final String FROM_LEADER = DistribPhase.FROMLEADER.toString(); + + // acquire timeout in seconds. change this to a huge number when debugging to prevent threads from advancing. + private static final int TIMEOUT = 60; - private static int timeout=60; // acquire timeout in seconds. change this to a huge number when debugging to prevent threads from advancing. 
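The @AfterClass hunks above and below all follow one pattern: HdfsTestUtil.teardownClass(dfsCluster) is wrapped in try/finally so that static references and test system properties are cleared even when HDFS shutdown throws. A minimal sketch of that shape, assuming an illustrative class name (the cleared property mirrors the SharedFSAutoReplicaFailoverTest hunk, not every test here):

    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.apache.solr.cloud.hdfs.HdfsTestUtil;
    import org.junit.AfterClass;

    // Illustrative only: the common shape of the teardown changes in this patch.
    public class ExampleHdfsTestCase {
      private static MiniDFSCluster dfsCluster;

      @AfterClass
      public static void teardownClass() throws Exception {
        try {
          HdfsTestUtil.teardownClass(dfsCluster);  // may throw while shutting down the MiniDFSCluster
        } finally {
          dfsCluster = null;                       // always drop the static reference
          System.clearProperty("solr.hdfs.blockcache.blocksperbank");  // and reset any test properties
        }
      }
    }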
- private static MiniDFSCluster dfsCluster; private static String hdfsUri; private static FileSystem fs; @@ -102,7 +103,7 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 { HdfsTestUtil.teardownClass(dfsCluster); } finally { dfsCluster = null; - hdfsDataDir = null; + hdfsUri = null; System.clearProperty("solr.ulog.dir"); System.clearProperty("test.build.data"); System.clearProperty("test.cache.data"); @@ -142,7 +143,7 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 { UpdateLog.testing_logReplayHook = () -> { try { - assertTrue(logReplay.tryAcquire(timeout, TimeUnit.SECONDS)); + assertTrue(logReplay.tryAcquire(TIMEOUT, TimeUnit.SECONDS)); } catch (Exception e) { throw new RuntimeException(e); } @@ -183,7 +184,7 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 { assertJQ(req("qt","/get", "getVersions",""+versions.size()),"/versions==" + versions); // wait until recovery has finished - assertTrue(logReplayFinish.tryAcquire(timeout, TimeUnit.SECONDS)); + assertTrue(logReplayFinish.tryAcquire(TIMEOUT, TimeUnit.SECONDS)); assertJQ(req("q","*:*") ,"/response/numFound==3"); @@ -203,7 +204,7 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 { // h.getCore().getUpdateHandler().getUpdateLog().recoverFromLog(); // wait until recovery has finished - assertTrue(logReplayFinish.tryAcquire(timeout, TimeUnit.SECONDS)); + assertTrue(logReplayFinish.tryAcquire(TIMEOUT, TimeUnit.SECONDS)); assertJQ(req("q","*:*") ,"/response/numFound==5"); assertJQ(req("q","id:A2") ,"/response/numFound==0"); @@ -235,7 +236,7 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 { UpdateLog.testing_logReplayHook = () -> { try { - assertTrue(logReplay.tryAcquire(timeout, TimeUnit.SECONDS)); + assertTrue(logReplay.tryAcquire(TIMEOUT, TimeUnit.SECONDS)); } catch (Exception e) { throw new RuntimeException(e); } @@ -386,7 +387,7 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 { UpdateLog.testing_logReplayHook = () -> { try { - assertTrue(logReplay.tryAcquire(timeout, TimeUnit.SECONDS)); + assertTrue(logReplay.tryAcquire(TIMEOUT, TimeUnit.SECONDS)); } catch (Exception e) { throw new RuntimeException(e); } @@ -606,7 +607,7 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 { UpdateLog.testing_logReplayHook = () -> { try { - assertTrue(logReplay.tryAcquire(timeout, TimeUnit.SECONDS)); + assertTrue(logReplay.tryAcquire(TIMEOUT, TimeUnit.SECONDS)); } catch (Exception e) { throw new RuntimeException(e); } @@ -661,7 +662,7 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 { UpdateLog.testing_logReplayHook = () -> { try { - assertTrue(logReplay.tryAcquire(timeout, TimeUnit.SECONDS)); + assertTrue(logReplay.tryAcquire(TIMEOUT, TimeUnit.SECONDS)); } catch (Exception e) { throw new RuntimeException(e); } @@ -718,7 +719,7 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 { assertJQ(req("qt","/get", "getVersions",""+maxReq), "/versions==" + versions.subList(0,Math.min(maxReq,start))); logReplay.release(1000); - assertTrue(logReplayFinish.tryAcquire(timeout, TimeUnit.SECONDS)); + assertTrue(logReplayFinish.tryAcquire(TIMEOUT, TimeUnit.SECONDS)); assertJQ(req("qt","/get", "getVersions",""+maxReq), "/versions==" + versions.subList(0,Math.min(maxReq,start))); @@ -768,7 +769,7 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 { UpdateLog.testing_logReplayHook = () -> { try { - assertTrue(logReplay.tryAcquire(timeout, TimeUnit.SECONDS)); + assertTrue(logReplay.tryAcquire(TIMEOUT, TimeUnit.SECONDS)); } catch (Exception e) { throw new RuntimeException(e); } @@ -802,7 +803,7 @@ public class 
TestRecoveryHdfs extends SolrTestCaseJ4 { logReplayFinish.drainPermits(); ignoreException("OutOfBoundsException"); // this is what the corrupted log currently produces... subject to change. createCore(); - assertTrue(logReplayFinish.tryAcquire(timeout, TimeUnit.SECONDS)); + assertTrue(logReplayFinish.tryAcquire(TIMEOUT, TimeUnit.SECONDS)); resetExceptionIgnores(); assertJQ(req("q","*:*") ,"/response/numFound==3"); @@ -896,7 +897,7 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 { UpdateLog.testing_logReplayHook = () -> { try { - assertTrue(logReplay.tryAcquire(timeout, TimeUnit.SECONDS)); + assertTrue(logReplay.tryAcquire(TIMEOUT, TimeUnit.SECONDS)); } catch (Exception e) { throw new RuntimeException(e); } @@ -948,7 +949,7 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 { logReplayFinish.drainPermits(); ignoreException("OutOfBoundsException"); // this is what the corrupted log currently produces... subject to change. createCore(); - assertTrue(logReplayFinish.tryAcquire(timeout, TimeUnit.SECONDS)); + assertTrue(logReplayFinish.tryAcquire(TIMEOUT, TimeUnit.SECONDS)); resetExceptionIgnores(); assertJQ(req("q","*:*") ,"/response/numFound==6"); diff --git a/solr/core/src/test/org/apache/solr/store/hdfs/HdfsDirectoryTest.java b/solr/core/src/test/org/apache/solr/store/hdfs/HdfsDirectoryTest.java index 5e72b522204..12269262a3f 100644 --- a/solr/core/src/test/org/apache/solr/store/hdfs/HdfsDirectoryTest.java +++ b/solr/core/src/test/org/apache/solr/store/hdfs/HdfsDirectoryTest.java @@ -62,8 +62,11 @@ public class HdfsDirectoryTest extends SolrTestCaseJ4 { @AfterClass public static void afterClass() throws Exception { - HdfsTestUtil.teardownClass(dfsCluster); - dfsCluster = null; + try { + HdfsTestUtil.teardownClass(dfsCluster); + } finally { + dfsCluster = null; + } } @Before diff --git a/solr/core/src/test/org/apache/solr/store/hdfs/HdfsLockFactoryTest.java b/solr/core/src/test/org/apache/solr/store/hdfs/HdfsLockFactoryTest.java index 7a232408891..bf00016d164 100644 --- a/solr/core/src/test/org/apache/solr/store/hdfs/HdfsLockFactoryTest.java +++ b/solr/core/src/test/org/apache/solr/store/hdfs/HdfsLockFactoryTest.java @@ -48,8 +48,11 @@ public class HdfsLockFactoryTest extends SolrTestCaseJ4 { @AfterClass public static void afterClass() throws Exception { - HdfsTestUtil.teardownClass(dfsCluster); - dfsCluster = null; + try { + HdfsTestUtil.teardownClass(dfsCluster); + } finally { + dfsCluster = null; + } } @Test diff --git a/solr/core/src/test/org/apache/solr/update/TestHdfsUpdateLog.java b/solr/core/src/test/org/apache/solr/update/TestHdfsUpdateLog.java index 5ba7f58ec14..b9a0158b0ee 100644 --- a/solr/core/src/test/org/apache/solr/update/TestHdfsUpdateLog.java +++ b/solr/core/src/test/org/apache/solr/update/TestHdfsUpdateLog.java @@ -70,8 +70,8 @@ public class TestHdfsUpdateLog extends SolrTestCaseJ4 { try { HdfsTestUtil.teardownClass(dfsCluster); } finally { - hdfsDataDir = null; dfsCluster = null; + hdfsUri = null; System.clearProperty("solr.ulog.dir"); System.clearProperty("test.build.data"); System.clearProperty("test.cache.data"); diff --git a/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java b/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java index 26709518276..ac8deaaf35f 100644 --- a/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java +++ b/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java @@ -331,6 +331,7 @@ public abstract class SolrTestCaseJ4 extends SolrTestCase { if (null != testExecutor) { 
ExecutorUtil.shutdownAndAwaitTermination(testExecutor); + testExecutor = null; } resetExceptionIgnores(); @@ -489,6 +490,7 @@ public abstract class SolrTestCaseJ4 extends SolrTestCase { changedFactory = false; if (savedFactory != null) { System.setProperty("solr.directoryFactory", savedFactory); + savedFactory = null; } else { System.clearProperty("solr.directoryFactory"); } @@ -895,6 +897,7 @@ public abstract class SolrTestCaseJ4 extends SolrTestCase { lrf = null; configString = schemaString = null; initCoreDataDir = null; + hdfsDataDir = null; } /** Validates an update XML String is successful diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/SolrCloudTestCase.java b/solr/test-framework/src/java/org/apache/solr/cloud/SolrCloudTestCase.java index b8e0798dc0e..fccb4abdeec 100644 --- a/solr/test-framework/src/java/org/apache/solr/cloud/SolrCloudTestCase.java +++ b/solr/test-framework/src/java/org/apache/solr/cloud/SolrCloudTestCase.java @@ -275,9 +275,12 @@ public class SolrCloudTestCase extends SolrTestCaseJ4 { @AfterClass public static void shutdownCluster() throws Exception { if (cluster != null) { - cluster.shutdown(); + try { + cluster.shutdown(); + } finally { + cluster = null; + } } - cluster = null; } @Before diff --git a/solr/test-framework/src/java/org/apache/solr/util/SolrSecurityManager.java b/solr/test-framework/src/java/org/apache/solr/util/SolrSecurityManager.java index add572a2495..056f4f7a88f 100644 --- a/solr/test-framework/src/java/org/apache/solr/util/SolrSecurityManager.java +++ b/solr/test-framework/src/java/org/apache/solr/util/SolrSecurityManager.java @@ -20,8 +20,7 @@ import java.security.AccessController; import java.security.PrivilegedAction; /** - * A {@link SecurityManager} that prevents tests calling {@link System#exit(int)}, - * and implements some hacks for hadoop. + * A {@link SecurityManager} that prevents tests calling {@link System#exit(int)}. * Only the test runner itself is allowed to exit the JVM. * All other security checks are handled by the default security policy. *
<p>
@@ -43,93 +42,6 @@ public final class SolrSecurityManager extends SecurityManager { super(); } - // TODO: move this stuff into a Solr (non-test) SecurityManager! - /** - * {@inheritDoc} - *
<p>
This method implements hacks to workaround hadoop's garbage Shell and FileUtil code - */ - @Override - public void checkExec(String cmd) { - // NOTE: it would be tempting to just allow anything from hadoop's Shell class, but then - // that would just give an easy vector for RCE (use hadoop Shell instead of e.g. ProcessBuilder) - // so we whitelist actual caller impl methods instead. - for (StackTraceElement element : Thread.currentThread().getStackTrace()) { - // hadoop insists on shelling out to get the user's supplementary groups? - if ("org.apache.hadoop.security.ShellBasedUnixGroupsMapping".equals(element.getClassName()) && - "getGroups".equals(element.getMethodName())) { - return; - } - // hadoop insists on shelling out to parse 'df' command instead of using FileStore? - if ("org.apache.hadoop.fs.DF".equals(element.getClassName()) && - "getFilesystem".equals(element.getMethodName())) { - return; - } - // hadoop insists on shelling out to parse 'du' command instead of using FileStore? - if ("org.apache.hadoop.fs.DU".equals(element.getClassName()) && - "refresh".equals(element.getMethodName())) { - return; - } - // hadoop insists on shelling out to parse 'ls' command instead of java nio apis? - if ("org.apache.hadoop.util.DiskChecker".equals(element.getClassName()) && - "checkDir".equals(element.getMethodName())) { - return; - } - // hadoop insists on shelling out to parse 'stat' command instead of Files.getAttributes? - if ("org.apache.hadoop.fs.HardLink".equals(element.getClassName()) && - "getLinkCount".equals(element.getMethodName())) { - return; - } - // hadoop "canExecute" method doesn't handle securityexception and fails completely. - // so, lie to it, and tell it we will happily execute, so it does not crash. - if ("org.apache.hadoop.fs.FileUtil".equals(element.getClassName()) && - "canExecute".equals(element.getMethodName())) { - return; - } - } - super.checkExec(cmd); - } - - /** - * {@inheritDoc} - *
<p>
This method implements hacks to workaround hadoop's garbage FileUtil code - */ - @Override - public void checkWrite(String file) { - for (StackTraceElement element : Thread.currentThread().getStackTrace()) { - // hadoop "canWrite" method doesn't handle securityexception and fails completely. - // so, lie to it, and tell it we will happily write, so it does not crash. - if ("org.apache.hadoop.fs.FileUtil".equals(element.getClassName()) && - "canWrite".equals(element.getMethodName())) { - return; - } - } - super.checkWrite(file); - } - - /** - * {@inheritDoc} - *
<p>
This method implements hacks to workaround hadoop's garbage FileUtil code - */ - @Override - public void checkRead(String file) { - for (StackTraceElement element : Thread.currentThread().getStackTrace()) { - // hadoop "createPermissionsDiagnosisString" method doesn't handle securityexception and fails completely. - // it insists on climbing up full directory tree! - // so, lie to it, and tell it we will happily read, so it does not crash. - if ("org.apache.hadoop.hdfs.MiniDFSCluster".equals(element.getClassName()) && - "createPermissionsDiagnosisString".equals(element.getMethodName())) { - return; - } - // hadoop "canRead" method doesn't handle securityexception and fails completely. - // so, lie to it, and tell it we will happily read, so it does not crash. - if ("org.apache.hadoop.fs.FileUtil".equals(element.getClassName()) && - "canRead".equals(element.getMethodName())) { - return; - } - } - super.checkRead(file); - } - /** * {@inheritDoc} *
<p>
This method inspects the stack trace and checks who is calling