diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/JavaKeyStoreProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/JavaKeyStoreProvider.java
index 1827c272dcd..5beda0d2d2e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/JavaKeyStoreProvider.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/JavaKeyStoreProvider.java
@@ -36,6 +36,7 @@ import com.google.common.annotations.VisibleForTesting;
 
 import javax.crypto.spec.SecretKeySpec;
 
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.ObjectInputStream;
 import java.io.ObjectOutputStream;
@@ -167,9 +168,9 @@ public class JavaKeyStoreProvider extends KeyProvider {
       // rewrite the keystore in flush()
       permissions = perm;
     } catch (KeyStoreException e) {
-      throw new IOException("Can't create keystore", e);
+      throw new IOException("Can't create keystore: " + e, e);
     } catch (GeneralSecurityException e) {
-      throw new IOException("Can't load keystore " + path, e);
+      throw new IOException("Can't load keystore " + path + " : " + e, e);
     }
   }
 
@@ -190,9 +191,7 @@ public class JavaKeyStoreProvider extends KeyProvider {
     try {
       perm = loadFromPath(path, password);
       // Remove _OLD if exists
-      if (fs.exists(backupPath)) {
-        fs.delete(backupPath, true);
-      }
+      fs.delete(backupPath, true);
       LOG.debug("KeyStore loaded successfully !!");
     } catch (IOException ioe) {
       // If file is corrupted for some reason other than
@@ -260,9 +259,7 @@ public class JavaKeyStoreProvider extends KeyProvider {
         LOG.debug(String.format("KeyStore loaded successfully from '%s'!!",
             pathToLoad));
       }
-      if (fs.exists(pathToDelete)) {
-        fs.delete(pathToDelete, true);
-      }
+      fs.delete(pathToDelete, true);
     } catch (IOException e) {
       // Check for password issue : don't want to trash file due
       // to wrong password
@@ -539,13 +536,15 @@ public class JavaKeyStoreProvider extends KeyProvider {
       return;
     }
     // Might exist if a backup has been restored etc.
-    if (fs.exists(newPath)) {
+    try {
       renameOrFail(newPath, new Path(newPath.toString()
           + "_ORPHANED_" + System.currentTimeMillis()));
+    } catch (FileNotFoundException ignored) {
     }
-    if (fs.exists(oldPath)) {
+    try {
       renameOrFail(oldPath, new Path(oldPath.toString()
           + "_ORPHANED_" + System.currentTimeMillis()));
+    } catch (FileNotFoundException ignored) {
     }
     // put all of the updates into the keystore
     for(Map.Entry<String, Metadata> entry: cache.entrySet()) {
@@ -601,9 +600,7 @@ public class JavaKeyStoreProvider extends KeyProvider {
     // Rename _NEW to CURRENT
     renameOrFail(newPath, path);
     // Delete _OLD
-    if (fs.exists(oldPath)) {
-      fs.delete(oldPath, true);
-    }
+    fs.delete(oldPath, true);
   }
 
   protected void writeToNew(Path newPath) throws IOException {
@@ -623,12 +620,12 @@ public class JavaKeyStoreProvider extends KeyProvider {
 
   protected boolean backupToOld(Path oldPath)
       throws IOException {
-    boolean fileExisted = false;
-    if (fs.exists(path)) {
+    try {
       renameOrFail(path, oldPath);
-      fileExisted = true;
+      return true;
+    } catch (FileNotFoundException e) {
+      return false;
     }
-    return fileExisted;
   }
 
   private void revertFromOld(Path oldPath, boolean fileExisted)
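
The pattern established above recurs throughout this patch: `FileSystem.exists()` is itself implemented as `getFileStatus()` with the `FileNotFoundException` swallowed, so every `exists()` guard in front of a `getFileStatus()`, `open()`, `delete()` or rename pays an extra round trip to the namenode and opens a time-of-check/time-of-use window between the probe and the real call. A minimal sketch of the replacement idiom (class and method names here are illustrative, not part of the patch):

    import java.io.FileNotFoundException;
    import java.io.IOException;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class ExistenceProbe {
      private ExistenceProbe() {
      }

      // One guarded getFileStatus() call replaces the exists() +
      // getFileStatus() pair: same information, half the RPCs, and no
      // window between the probe and the status fetch in which another
      // client can create or delete the path.
      public static FileStatus statusOrNull(FileSystem fs, Path p)
          throws IOException {
        try {
          return fs.getFileStatus(p);
        } catch (FileNotFoundException e) {
          return null;
        }
      }
    }
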
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
index ea6249e47fd..bb70cbd26e6 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
@@ -22,6 +22,7 @@ import java.io.BufferedInputStream;
 import java.io.BufferedOutputStream;
 import java.io.File;
 import java.io.FileInputStream;
+import java.io.FileNotFoundException;
 import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
@@ -326,14 +327,15 @@ public class FileUtil {
       return copy(srcFS, srcs[0], dstFS, dst, deleteSource, overwrite, conf);
 
     // Check if dest is directory
-    if (!dstFS.exists(dst)) {
-      throw new IOException("`" + dst +"': specified destination directory " +
-                            "does not exist");
-    } else {
+    try {
       FileStatus sdst = dstFS.getFileStatus(dst);
       if (!sdst.isDirectory())
         throw new IOException("copying multiple files, but last argument `" +
                               dst + "' is not a directory");
+    } catch (FileNotFoundException e) {
+      throw new IOException(
+          "`" + dst + "': specified destination directory " +
+              "does not exist", e);
     }
 
     for (Path src : srcs) {
@@ -481,8 +483,13 @@ public class FileUtil {
 
   private static Path checkDest(String srcName, FileSystem dstFS, Path dst,
       boolean overwrite) throws IOException {
-    if (dstFS.exists(dst)) {
-      FileStatus sdst = dstFS.getFileStatus(dst);
+    FileStatus sdst;
+    try {
+      sdst = dstFS.getFileStatus(dst);
+    } catch (FileNotFoundException e) {
+      sdst = null;
+    }
+    if (null != sdst) {
       if (sdst.isDirectory()) {
         if (null == srcName) {
           throw new IOException("Target " + dst + " is a directory");
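
When a missing destination should become an error, the `FileUtil` changes above keep the single-call probe but convert the `FileNotFoundException` into a more specific `IOException`, chaining the original as the cause. A hedged sketch of that shape, with an invented helper name:

    import java.io.FileNotFoundException;
    import java.io.IOException;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class DestinationCheck {
      private DestinationCheck() {
      }

      // "Must be an existing directory" in one RPC; the cause chain keeps
      // the original stack trace for debugging.
      public static FileStatus requireDirectory(FileSystem fs, Path dir)
          throws IOException {
        FileStatus st;
        try {
          st = fs.getFileStatus(dir);
        } catch (FileNotFoundException e) {
          throw new IOException("`" + dir
              + "': specified destination directory does not exist", e);
        }
        if (!st.isDirectory()) {
          throw new IOException("`" + dir + "' is not a directory");
        }
        return st;
      }
    }
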
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
index 5e6cb059627..7bf429eed48 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
@@ -208,9 +208,7 @@ public class RawLocalFileSystem extends FileSystem {
 
   @Override
   public FSDataInputStream open(Path f, int bufferSize) throws IOException {
-    if (!exists(f)) {
-      throw new FileNotFoundException(f.toString());
-    }
+    getFileStatus(f);
     return new FSDataInputStream(new BufferedFSInputStream(
         new LocalFSFileInputStream(f), bufferSize));
   }
@@ -278,9 +276,6 @@ public class RawLocalFileSystem extends FileSystem {
   @Override
   public FSDataOutputStream append(Path f, int bufferSize,
       Progressable progress) throws IOException {
-    if (!exists(f)) {
-      throw new FileNotFoundException("File " + f + " not found");
-    }
     FileStatus status = getFileStatus(f);
     if (status.isDirectory()) {
       throw new IOException("Cannot append to a diretory (=" + f + " )");
@@ -387,17 +382,18 @@ public class RawLocalFileSystem extends FileSystem {
     // platforms (notably Windows) do not provide this behavior, so the Java API
     // call renameTo(dstFile) fails. Delete destination and attempt rename
     // again.
-    if (this.exists(dst)) {
+    try {
       FileStatus sdst = this.getFileStatus(dst);
       if (sdst.isDirectory() && dstFile.list().length == 0) {
         if (LOG.isDebugEnabled()) {
           LOG.debug("Deleting empty destination and renaming " + src + " to " +
-            dst);
+              dst);
         }
         if (this.delete(dst, false) && srcFile.renameTo(dstFile)) {
           return true;
         }
       }
+    } catch (FileNotFoundException ignored) {
     }
     return false;
   }
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
index 4f4c937b440..c65e16ae5fb 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
@@ -121,9 +121,8 @@ public class TrashPolicyDefault extends TrashPolicy {
     if (!path.isAbsolute())                   // make path absolute
       path = new Path(fs.getWorkingDirectory(), path);
 
-    if (!fs.exists(path))                     // check that path exists
-      throw new FileNotFoundException(path.toString());
-
+    // check that path exists
+    fs.getFileStatus(path);
     String qpath = fs.makeQualified(path).toString();
 
     Path trashRoot = fs.getTrashRoot(path);
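
`RawLocalFileSystem.open()` and `TrashPolicyDefault` above use the barest form of the idiom: a `getFileStatus()` call kept only for its side effect, since it already throws a `FileNotFoundException` naming the path. The manual throws the patch removes were duplicating that contract. Roughly:

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class ExistenceAssertion {
      private ExistenceAssertion() {
      }

      // The returned status is deliberately discarded; the call either
      // succeeds or throws FileNotFoundException (an IOException subclass)
      // carrying the path in its message.
      public static void assertExists(FileSystem fs, Path p)
          throws IOException {
        fs.getFileStatus(p);
      }
    }
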
FileNotFoundException("File " + tmp + " does not exist."); - } + localFs.getFileStatus(path); if (isWildcard) { expandWildcard(finalPaths, path, localFs); } else { @@ -452,9 +449,8 @@ public class GenericOptionsParser { // these files to the file system ResourceManager is running // on. FileSystem fs = path.getFileSystem(conf); - if (!fs.exists(path)) { - throw new FileNotFoundException("File " + tmp + " does not exist."); - } + // existence check + fs.getFileStatus(path); if (isWildcard) { expandWildcard(finalPaths, path, fs); } else { @@ -476,7 +472,8 @@ public class GenericOptionsParser { private void expandWildcard(List finalPaths, Path path, FileSystem fs) throws IOException { - if (!fs.isDirectory(path)) { + FileStatus status = fs.getFileStatus(path); + if (!status.isDirectory()) { throw new FileNotFoundException(path + " is not a directory."); } // get all the jars in the directory diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java index 18a29e8b09f..4f97896a199 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java @@ -2530,8 +2530,9 @@ public class DistributedFileSystem extends FileSystem { } else { Path userTrash = new Path(ezTrashRoot, System.getProperty( "user.name")); - if (exists(userTrash)) { + try { ret.add(getFileStatus(userTrash)); + } catch (FileNotFoundException ignored) { } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java index b12fe01b377..550806441a2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java @@ -29,6 +29,7 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.BlockStoragePolicySpi; import org.apache.hadoop.fs.CacheFlag; +import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.fs.FileEncryptionInfo; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; @@ -512,10 +513,10 @@ public class HdfsAdmin { Path trashPath = new Path(ez.getPath(), FileSystem.TRASH_PREFIX); - if (dfs.exists(trashPath)) { + try { + FileStatus trashFileStatus = dfs.getFileStatus(trashPath); String errMessage = "Will not provision new trash directory for " + "encryption zone " + ez.getPath() + ". 
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
index b12fe01b377..550806441a2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockStoragePolicySpi;
 import org.apache.hadoop.fs.CacheFlag;
+import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileEncryptionInfo;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -512,10 +513,10 @@ public class HdfsAdmin {
     Path trashPath = new Path(ez.getPath(), FileSystem.TRASH_PREFIX);
 
-    if (dfs.exists(trashPath)) {
+    try {
+      FileStatus trashFileStatus = dfs.getFileStatus(trashPath);
       String errMessage = "Will not provision new trash directory for " +
           "encryption zone " + ez.getPath() + ". Path already exists.";
-      FileStatus trashFileStatus = dfs.getFileStatus(trashPath);
       if (!trashFileStatus.isDirectory()) {
         errMessage += "\r\n" +
             "Warning: " + trashPath.toString() + " is not a directory";
@@ -525,7 +526,9 @@ public class HdfsAdmin {
             "Warning: the permission of " +
             trashPath.toString() + " is not " + TRASH_PERMISSION;
       }
-      throw new IOException(errMessage);
+      throw new FileAlreadyExistsException(errMessage);
+    } catch (FileNotFoundException ignored) {
+      // no trash path
     }
 
     // Update the permission bits
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
index 04033564a87..0cc605c528b 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
@@ -1622,9 +1622,7 @@ public class JobHistoryEventHandler extends AbstractService
     if (stagingDirFS.exists(fromPath)) {
       LOG.info("Copying " + fromPath.toString() + " to " + toPath.toString());
       // TODO temporarily removing the existing dst
-      if (doneDirFS.exists(toPath)) {
-        doneDirFS.delete(toPath, true);
-      }
+      doneDirFS.delete(toPath, true);
       boolean copied = FileUtil.copy(stagingDirFS, fromPath, doneDirFS, toPath,
           false, getConfig());
 
@@ -1637,10 +1635,6 @@ public class JobHistoryEventHandler extends AbstractService
     }
   }
 
-  boolean pathExists(FileSystem fileSys, Path path) throws IOException {
-    return fileSys.exists(path);
-  }
-
   private String getTempFileName(String srcFile) {
     return srcFile + "_tmp";
   }
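
The `JobHistoryEventHandler` hunk relies on `FileSystem.delete()` being a no-op that returns false for a path that is already absent, so the `exists()` guard was pure overhead. If the caller cares whether anything was removed, the boolean result already says so:

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class Deletions {
      private Deletions() {
      }

      // delete() returns true when something was removed and false when
      // there was nothing to remove; it does not throw for a missing path.
      public static boolean deleteIfPresent(FileSystem fs, Path p,
          boolean recursive) throws IOException {
        return fs.delete(p, recursive);
      }
    }
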
" - + "It should just be created, so something wrong here."); + + "It should just be created, so something wrong here.", e); } Path fileDir = JobSubmissionFiles.getJobLog4jFile(submitJobDir); @@ -488,9 +490,7 @@ class JobResourceUploader { if (pathURI.getScheme() == null) { // default to the local file system // check if the file exists or not first - if (!localFs.exists(path)) { - throw new FileNotFoundException("File " + file + " does not exist."); - } + localFs.getFileStatus(path); finalPath = path.makeQualified(localFs.getUri(), localFs.getWorkingDirectory()) .toString(); @@ -500,9 +500,7 @@ class JobResourceUploader { // these files to the file system ResourceManager is running // on. FileSystem fs = path.getFileSystem(conf); - if (!fs.exists(path)) { - throw new FileNotFoundException("File " + file + " does not exist."); - } + fs.getFileStatus(path); finalPath = path.makeQualified(fs.getUri(), fs.getWorkingDirectory()).toString(); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmissionFiles.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmissionFiles.java index 9dd45c3af62..ae914c3e534 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmissionFiles.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmissionFiles.java @@ -18,6 +18,7 @@ package org.apache.hadoop.mapreduce; +import java.io.FileNotFoundException; import java.io.IOException; import com.google.common.annotations.VisibleForTesting; @@ -130,7 +131,7 @@ public class JobSubmissionFiles { Path stagingArea = cluster.getStagingAreaDir(); FileSystem fs = stagingArea.getFileSystem(conf); UserGroupInformation currentUser = realUser.getCurrentUser(); - if (fs.exists(stagingArea)) { + try { FileStatus fsStatus = fs.getFileStatus(stagingArea); String fileOwner = fsStatus.getOwner(); if (!(fileOwner.equals(currentUser.getShortUserName()) || fileOwner @@ -156,7 +157,7 @@ public class JobSubmissionFiles { "to correct value " + JOB_DIR_PERMISSION); fs.setPermission(stagingArea, JOB_DIR_PERMISSION); } - } else { + } catch (FileNotFoundException e) { fs.mkdirs(stagingArea, new FsPermission(JOB_DIR_PERMISSION)); } return stagingArea; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java index d6610c4be10..9e750be2feb 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java @@ -674,10 +674,9 @@ public class FileOutputCommitter extends OutputCommitter { if (algorithmVersion == 1) { if (fs.exists(previousCommittedTaskPath)) { Path committedTaskPath = getCommittedTaskPath(context); - if (fs.exists(committedTaskPath)) { - if (!fs.delete(committedTaskPath, true)) { - throw new IOException("Could not delete "+committedTaskPath); - } + if (!fs.delete(committedTaskPath, 
true) && + fs.exists(committedTaskPath)) { + throw new IOException("Could not delete " + committedTaskPath); } //Rename can fail if the parent directory does not yet exist. Path committedParent = committedTaskPath.getParent(); @@ -693,11 +692,12 @@ public class FileOutputCommitter extends OutputCommitter { // essentially a no-op, but for backwards compatibility // after upgrade to the new fileOutputCommitter, // check if there are any output left in committedTaskPath - if (fs.exists(previousCommittedTaskPath)) { + try { + FileStatus from = fs.getFileStatus(previousCommittedTaskPath); LOG.info("Recovering task for upgrading scenario, moving files from " + previousCommittedTaskPath + " to " + outputPath); - FileStatus from = fs.getFileStatus(previousCommittedTaskPath); mergePaths(fs, from, outputPath); + } catch (FileNotFoundException ignored) { } LOG.info("Done recovering task " + attemptId); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/PartialFileOutputCommitter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/PartialFileOutputCommitter.java index 1d15370ea42..238a2eacd4f 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/PartialFileOutputCommitter.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/PartialFileOutputCommitter.java @@ -97,7 +97,7 @@ public class PartialFileOutputCommitter for (int i = 0; i < taid.getId(); ++i) { TaskAttemptID oldId = new TaskAttemptID(tid, i); Path pTask = new Path(pCommit, oldId.toString()); - if (fs.exists(pTask) && !fs.delete(pTask, true)) { + if (!fs.delete(pTask, true) && fs.exists(pTask)) { throw new IOException("Failed to delete " + pTask); } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/InputSampler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/InputSampler.java index cce9f378382..df4e9195ec2 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/InputSampler.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/InputSampler.java @@ -322,10 +322,8 @@ public class InputSampler extends Configured implements Tool { Arrays.sort(samples, comparator); Path dst = new Path(TotalOrderPartitioner.getPartitionFile(conf)); FileSystem fs = dst.getFileSystem(conf); - if (fs.exists(dst)) { - fs.delete(dst, false); - } - SequenceFile.Writer writer = SequenceFile.createWriter(fs, + fs.delete(dst, false); + SequenceFile.Writer writer = SequenceFile.createWriter(fs, conf, dst, job.getMapOutputKeyClass(), NullWritable.class); NullWritable nullValue = NullWritable.get(); float stepSize = samples.length / (float) numPartitions; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestPreemptableFileOutputCommitter.java 
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestPreemptableFileOutputCommitter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestPreemptableFileOutputCommitter.java
index 09ac286ef2f..e989bf4c38c 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestPreemptableFileOutputCommitter.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestPreemptableFileOutputCommitter.java
@@ -80,7 +80,7 @@ public class TestPreemptableFileOutputCommitter {
     foc.cleanUpPartialOutputForTask(context);
     verify(fs).delete(eq(p0), eq(true));
     verify(fs).delete(eq(p1), eq(true));
-    verify(fs, never()).delete(eq(p3), eq(true));
+    verify(fs, times(1)).delete(eq(p3), eq(true));
     verify(fs, never()).delete(eq(p2), eq(true));
   }
 
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryServerFileSystemStateStoreService.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryServerFileSystemStateStoreService.java
index 9902f5ea8f7..47d6583338b 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryServerFileSystemStateStoreService.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryServerFileSystemStateStoreService.java
@@ -182,7 +182,7 @@ public class HistoryServerFileSystemStateStoreService
     Path keyPath = new Path(tokenKeysStatePath,
         TOKEN_MASTER_KEY_FILE_PREFIX + key.getKeyId());
     if (fs.exists(keyPath)) {
-      throw new IOException(keyPath + " already exists");
+      throw new FileAlreadyExistsException(keyPath + " already exists");
     }
 
     ByteArrayOutputStream memStream = new ByteArrayOutputStream();
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraOutputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraOutputFormat.java
index 73c446d7e7e..96580b1e75a 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraOutputFormat.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraOutputFormat.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.examples.terasort;
 
+import java.io.FileNotFoundException;
 import java.io.IOException;
 
 import org.apache.commons.logging.Log;
@@ -115,7 +116,7 @@ public class TeraOutputFormat extends FileOutputFormat<Text,Text> {
     final FileSystem fs = outDir.getFileSystem(jobConf);
 
-    if (fs.exists(outDir)) {
+    try {
       // existing output dir is considered empty iff its only content is the
       // partition file.
       //
@@ -131,6 +132,7 @@ public class TeraOutputFormat extends FileOutputFormat<Text,Text> {
         throw new FileAlreadyExistsException("Output directory " + outDir
             + " already exists");
       }
+    } catch (FileNotFoundException ignored) {
     }
   }
 
diff --git a/hadoop-tools/hadoop-archive-logs/src/main/java/org/apache/hadoop/tools/HadoopArchiveLogs.java b/hadoop-tools/hadoop-archive-logs/src/main/java/org/apache/hadoop/tools/HadoopArchiveLogs.java
index 2e440701367..4a7aad99f82 100644
--- a/hadoop-tools/hadoop-archive-logs/src/main/java/org/apache/hadoop/tools/HadoopArchiveLogs.java
+++ b/hadoop-tools/hadoop-archive-logs/src/main/java/org/apache/hadoop/tools/HadoopArchiveLogs.java
@@ -178,9 +178,7 @@ public class HadoopArchiveLogs implements Tool {
     } finally {
       if (fs != null) {
         // Cleanup working directory
-        if (fs.exists(workingDir)) {
-          fs.delete(workingDir, true);
-        }
+        fs.delete(workingDir, true);
         fs.close();
       }
     }
diff --git a/hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java b/hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java
index ee148503f10..c2097dc673e 100644
--- a/hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java
+++ b/hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.tools;
 
 import java.io.DataInput;
 import java.io.DataOutput;
-import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.UnsupportedEncodingException;
 import java.net.URLEncoder;
@@ -149,9 +148,7 @@ public class HadoopArchives implements Tool {
       IOException {
     for (Path p : paths) {
       FileSystem fs = p.getFileSystem(conf);
-      if (!fs.exists(p)) {
-        throw new FileNotFoundException("Source " + p + " does not exist.");
-      }
+      fs.getFileStatus(p);
     }
   }
 
@@ -619,9 +616,7 @@ public class HadoopArchives implements Tool {
       try {
         destFs = tmpOutput.getFileSystem(conf);
         //this was a stale copy
-        if (destFs.exists(tmpOutput)) {
-          destFs.delete(tmpOutput, false);
-        }
+        destFs.delete(tmpOutput, false);
         partStream = destFs.create(tmpOutput, false, conf.getInt("io.file.buffer.size", 4096),
             destFs.getDefaultReplication(tmpOutput), blockSize);
       } catch(IOException ie) {
@@ -747,12 +742,8 @@ public class HadoopArchives implements Tool {
       replication = conf.getInt(HAR_REPLICATION_LABEL, 3);
       try {
         fs = masterIndex.getFileSystem(conf);
-        if (fs.exists(masterIndex)) {
-          fs.delete(masterIndex, false);
-        }
-        if (fs.exists(index)) {
-          fs.delete(index, false);
-        }
+        fs.delete(masterIndex, false);
+        fs.delete(index, false);
         indexStream = fs.create(index);
         outStream = fs.create(masterIndex);
         String version = VERSION + " \n";
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/WasbFsck.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/WasbFsck.java
index d3115505b57..f512489a8ae 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/WasbFsck.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/WasbFsck.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.fs.azure;
 
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.List;
@@ -139,12 +140,15 @@ public class WasbFsck extends Configured implements Tool {
     if (p == null) {
       return true;
     }
-    if (!fs.exists(p)) {
+    FileStatus status;
+    try {
+      status = fs.getFileStatus(p);
+    } catch (FileNotFoundException e) {
       System.out.println("Path " + p + " does not exist!");
       return true;
     }
 
-    if (fs.isFile(p)) {
+    if (status.isFile()) {
       if (containsColon(p)) {
         System.out.println("Warning: file " + p + " has a colon in its name.");
         return false;
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpSync.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpSync.java
index f1fae11bd22..bcae96a8d83 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpSync.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpSync.java
@@ -328,9 +328,7 @@ class DistCpSync {
     Arrays.sort(diffs, DiffInfo.targetComparator);
     for (DiffInfo diff : diffs) {
       if (diff.getTarget() != null) {
-        if (!targetFs.exists(diff.getTarget().getParent())) {
-          targetFs.mkdirs(diff.getTarget().getParent());
-        }
+        targetFs.mkdirs(diff.getTarget().getParent());
         targetFs.rename(diff.getTmp(), diff.getTarget());
       }
     }
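
`DistCpSync` above drops the guard in front of `mkdirs()` for the same reason `delete()` lost its guards: `mkdirs()` is effectively idempotent for directories, returning normally when the directory already exists (and failing only when a non-directory is in the way). The reduced form is simply:

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class Parents {
      private Parents() {
      }

      // No exists() probe: creating an already-existing directory is not
      // an error for FileSystem.mkdirs().
      public static void ensureParent(FileSystem fs, Path target)
          throws IOException {
        fs.mkdirs(target.getParent());
      }
    }
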
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java
index 0002d4f56b0..105e4f2fe17 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java
@@ -126,7 +126,13 @@ public class SimpleCopyListing extends CopyListing {
     Path targetPath = options.getTargetPath();
     FileSystem targetFS = targetPath.getFileSystem(getConf());
-    boolean targetIsFile = targetFS.isFile(targetPath);
+    boolean targetExists = false;
+    boolean targetIsFile = false;
+    try {
+      targetIsFile = targetFS.getFileStatus(targetPath).isFile();
+      targetExists = true;
+    } catch (FileNotFoundException ignored) {
+    }
     targetPath = targetFS.makeQualified(targetPath);
     final boolean targetIsReservedRaw =
         Path.getPathWithoutSchemeAndAuthority(targetPath).toString().
@@ -147,7 +153,7 @@ public class SimpleCopyListing extends CopyListing {
       }
     }
 
-    if (options.shouldAtomicCommit() && targetFS.exists(targetPath)) {
+    if (options.shouldAtomicCommit() && targetExists) {
       throw new InvalidInputException("Target path for atomic-commit already exists: "
           + targetPath + ". Cannot atomic-commit to pre-existing target-path.");
     }
@@ -448,7 +454,7 @@ public class SimpleCopyListing extends CopyListing {
         && !sourceStatus.isDirectory();
 
     if (solitaryFile) {
-      if (targetFS.isFile(target) || !targetPathExists) {
+      if (!targetPathExists || targetFS.isFile(target)) {
         return sourceStatus.getPath();
       } else {
         return sourceStatus.getPath().getParent();
@@ -495,9 +501,7 @@ public class SimpleCopyListing extends CopyListing {
   private SequenceFile.Writer getWriter(Path pathToListFile)
       throws IOException {
     FileSystem fs = pathToListFile.getFileSystem(getConf());
-    if (fs.exists(pathToListFile)) {
-      fs.delete(pathToListFile, false);
-    }
+    fs.delete(pathToListFile, false);
     return SequenceFile.createWriter(getConf(),
         SequenceFile.Writer.file(pathToListFile),
         SequenceFile.Writer.keyClass(Text.class),
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
index dd653b297df..75cefb488ae 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
@@ -279,8 +279,8 @@ public class CopyCommitter extends FileOutputCommitter {
       if (srcAvailable && trgtRelPath.equals(srcRelPath)) continue;
 
       // Target doesn't exist at source. Delete.
-      boolean result = (!targetFS.exists(trgtFileStatus.getPath()) ||
-          targetFS.delete(trgtFileStatus.getPath(), true));
+      boolean result = targetFS.delete(trgtFileStatus.getPath(), true)
+          || !targetFS.exists(trgtFileStatus.getPath());
       if (result) {
         LOG.info("Deleted " + trgtFileStatus.getPath() + " - Missing at source");
         deletedEntries++;
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyMapper.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyMapper.java
index c6f6052d3a6..e1873f17e41 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyMapper.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyMapper.java
@@ -113,8 +113,9 @@ public class CopyMapper extends Mapper<Text, CopyListingFileStatus, Text, Text>
         DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH));
     targetFS = targetFinalPath.getFileSystem(conf);
 
-    if (targetFS.exists(targetFinalPath) && targetFS.isFile(targetFinalPath)) {
-      overWrite = true; // When target is an existing file, overwrite it.
+    try {
+      overWrite = overWrite || targetFS.getFileStatus(targetFinalPath).isFile();
+    } catch (FileNotFoundException ignored) {
     }
 
     startEpoch = System.currentTimeMillis();
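
`SimpleCopyListing` and `CopyMapper` above collapse existence and file-ness into one `getFileStatus()` call and then branch on cheap local booleans; the `!targetPathExists || targetFS.isFile(target)` reordering additionally lets the already-known boolean short-circuit the RPC-backed check. A sketch of the probe with an invented holder class:

    import java.io.FileNotFoundException;
    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class TargetProbe {
      public final boolean exists;
      public final boolean isFile;

      private TargetProbe(boolean exists, boolean isFile) {
        this.exists = exists;
        this.isFile = isFile;
      }

      // Both facts from one round trip; callers reuse the booleans
      // instead of issuing further isFile()/exists() RPCs.
      public static TargetProbe of(FileSystem fs, Path p)
          throws IOException {
        try {
          return new TargetProbe(true, fs.getFileStatus(p).isFile());
        } catch (FileNotFoundException e) {
          return new TargetProbe(false, false);
        }
      }
    }
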
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java
index 4ad530d02c3..82b3b621177 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java
@@ -140,7 +140,7 @@ public class RetriableFileCopyCommand extends RetriableCommand {
       // note that for append case, it is possible that we append partial data
       // and then fail. In that case, for the next retry, we either reuse the
       // partial appended data if it is good or we overwrite the whole file
-      if (!toAppend && targetFS.exists(targetPath)) {
+      if (!toAppend) {
         targetFS.delete(targetPath, false);
       }
     }
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java
index 1784c5de512..c308e6f1f90 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java
@@ -356,9 +356,7 @@ public class DistCpUtils {
         CopyListingFileStatus.class, conf);
     Path output = new Path(sourceListing.toString() + "_sorted");
 
-    if (fs.exists(output)) {
-      fs.delete(output, false);
-    }
+    fs.delete(output, false);
 
     sorter.sort(sourceListing, output);
     return output;
diff --git a/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/DistTool.java b/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/DistTool.java
index 2c89cb084d5..cdd7caceaa5 100644
--- a/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/DistTool.java
+++ b/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/DistTool.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.tools;
 
 import java.io.BufferedReader;
 import java.io.DataInput;
 import java.io.DataOutput;
-import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InputStreamReader;
 import java.nio.charset.Charset;
@@ -68,11 +67,10 @@ abstract class DistTool implements org.apache.hadoop.util.Tool {
     List<IOException> ioes = new ArrayList<IOException>();
     for(Path p : srcs) {
       try {
-        if (!p.getFileSystem(conf).exists(p)) {
-          ioes.add(new FileNotFoundException("Source "+p+" does not exist."));
-        }
+        p.getFileSystem(conf).getFileStatus(p);
+      } catch(IOException e) {
+        ioes.add(e);
       }
-      catch(IOException e) {ioes.add(e);}
     }
     if (!ioes.isEmpty()) {
       throw new InvalidInputException(ioes);
@@ -113,4 +111,4 @@ abstract class DistTool implements org.apache.hadoop.util.Tool {
     public static final int ERROR_CODE = -2;
     DuplicationException(String message) {super(message);}
   }
-}
\ No newline at end of file
+}
diff --git a/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/util/SwiftTestUtils.java b/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/util/SwiftTestUtils.java
index b4dc5eb5a23..f91ba3013ce 100644
--- a/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/util/SwiftTestUtils.java
+++ b/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/util/SwiftTestUtils.java
@@ -489,10 +489,13 @@ public class SwiftTestUtils extends org.junit.Assert {
    */
   public static void assertPathExists(FileSystem fileSystem, String message,
       Path path) throws IOException {
-    if (!fileSystem.exists(path)) {
+    try {
+      fileSystem.getFileStatus(path);
+    } catch (FileNotFoundException e) {
       //failure, report it
-      fail(message + ": not found " + path + " in " + path.getParent());
-      ls(fileSystem, path.getParent());
+      throw (IOException) new FileNotFoundException(message + ": not found "
+          + path + " in " + path.getParent() + ": " + e + " -- "
+          + ls(fileSystem, path.getParent())).initCause(e);
     }
   }
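
The `SwiftTestUtils` rewrite also fixes a latent bug: the old `ls()` call sat after JUnit's `fail()`, which throws, so the diagnostic listing never ran. Rethrowing a single enriched `FileNotFoundException` keeps the listing and preserves the original cause. Roughly (listing() is a stand-in formatter, not a Hadoop API):

    import java.io.FileNotFoundException;
    import java.io.IOException;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class PathAssert {
      private PathAssert() {
      }

      public static void assertPathExists(FileSystem fs, String message,
          Path p) throws IOException {
        try {
          fs.getFileStatus(p);
        } catch (FileNotFoundException e) {
          throw (IOException) new FileNotFoundException(message
              + ": not found " + p + " in " + p.getParent() + " -- "
              + listing(fs, p.getParent())).initCause(e);
        }
      }

      private static String listing(FileSystem fs, Path dir)
          throws IOException {
        StringBuilder sb = new StringBuilder();
        for (FileStatus st : fs.listStatus(dir)) {
          sb.append(st.getPath().getName()).append(' ');
        }
        return sb.toString().trim();
      }
    }
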
--- a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/state/StatePool.java
+++ b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/state/StatePool.java
@@ -21,6 +21,7 @@ import java.io.DataInput;
 import java.io.DataInputStream;
 import java.io.DataOutput;
 import java.io.DataOutputStream;
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.text.DateFormat;
 import java.text.SimpleDateFormat;
@@ -184,27 +185,24 @@ public class StatePool {
     if (reload) {
       // Reload persisted entries
       Path stateFilename = new Path(persistDirPath, COMMIT_STATE_FILENAME);
-      FileSystem fs = stateFilename.getFileSystem(conf);
-      if (fs.exists(stateFilename)) {
-        reloadState(stateFilename, conf);
-      } else {
-        throw new RuntimeException("No latest state persist directory found!"
+      if (!reloadState(stateFilename, conf)) {
+        throw new RuntimeException("No latest state persist directory found!"
             + " Disable persistence and run.");
       }
     }
   }
 
-  private void reloadState(Path stateFile, Configuration conf)
-      throws Exception {
-    FileSystem fs = stateFile.getFileSystem(conf);
-    if (fs.exists(stateFile)) {
+  private boolean reloadState(Path stateFile, Configuration configuration)
+      throws Exception {
+    FileSystem fs = stateFile.getFileSystem(configuration);
+    try (FSDataInputStream in = fs.open(stateFile)) {
       System.out.println("Reading state from " + stateFile.toString());
-      FSDataInputStream in = fs.open(stateFile);
       read(in);
-      in.close();
-    } else {
+      return true;
+    } catch (FileNotFoundException e) {
       System.out.println("No state information found for " + stateFile);
+      return false;
     }
   }
 
@@ -334,4 +331,4 @@ public class StatePool {
     //TODO Should we do a clone?
     this.pool = states;
   }
-}
\ No newline at end of file
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/FileSystemBasedConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/FileSystemBasedConfigurationProvider.java
index bf50cadd3d1..cef03b9b052 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/FileSystemBasedConfigurationProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/FileSystemBasedConfigurationProvider.java
@@ -74,9 +74,7 @@ public class FileSystemBasedConfigurationProvider
         new Path(bootstrapConf.get(YarnConfiguration.FS_BASED_RM_CONF_STORE,
             YarnConfiguration.DEFAULT_FS_BASED_RM_CONF_STORE));
     fs = configDir.getFileSystem(bootstrapConf);
-    if (!fs.exists(configDir)) {
-      fs.mkdirs(configDir);
-    }
+    fs.mkdirs(configDir);
   }
 
   @Override
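
`StatePool.reloadState()` above leans on `open()` doing its own existence check: the `FileNotFoundException` it raises replaces the `exists()` probe, try-with-resources replaces the manual `in.close()`, and the boolean return lets the caller decide whether absence is fatal. A generic sketch with a stand-in consumer interface:

    import java.io.FileNotFoundException;
    import java.io.IOException;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class OptionalRead {
      private OptionalRead() {
      }

      public interface StreamConsumer {
        void accept(FSDataInputStream in) throws IOException;
      }

      // Returns false instead of throwing when the file is absent; the
      // stream is closed on every path out of the try block.
      public static boolean readIfPresent(FileSystem fs, Path file,
          StreamConsumer consumer) throws IOException {
        try (FSDataInputStream in = fs.open(file)) {
          consumer.accept(in);
          return true;
        } catch (FileNotFoundException e) {
          return false;
        }
      }
    }
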
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/FileSystemTimelineWriter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/FileSystemTimelineWriter.java
index b1284e146cb..3070cc64afc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/FileSystemTimelineWriter.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/FileSystemTimelineWriter.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.yarn.client.api.impl;
 
 import java.io.Closeable;
-import java.io.FileNotFoundException;
 import java.io.Flushable;
 import java.io.IOException;
 import java.net.URI;
@@ -114,10 +113,8 @@ public class FileSystemTimelineWriter extends TimelineWriter{
             .TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_ACTIVE_DIR_DEFAULT));
     fs = FileSystem.newInstance(activePath.toUri(), fsConf);
 
-    if (!fs.exists(activePath)) {
-      throw new FileNotFoundException(activePath + " does not exist");
-    }
-
+    // raise FileNotFoundException if the path is not found
+    fs.getFileStatus(activePath);
     summaryEntityTypes = new HashSet<String>(
         conf.getStringCollection(YarnConfiguration
             .TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_SUMMARY_ENTITY_TYPES));
@@ -985,9 +982,8 @@ public class FileSystemTimelineWriter extends TimelineWriter{
       Path appDir = createApplicationDir(appAttemptId.getApplicationId());
 
       Path attemptDir = new Path(appDir, appAttemptId.toString());
-      if (!fs.exists(attemptDir)) {
-        FileSystem.mkdirs(fs, attemptDir, new FsPermission(
-            APP_LOG_DIR_PERMISSIONS));
+      if (FileSystem.mkdirs(fs, attemptDir,
+          new FsPermission(APP_LOG_DIR_PERMISSIONS))) {
         if (LOG.isDebugEnabled()) {
           LOG.debug("New attempt directory created - " + attemptDir);
         }
@@ -998,9 +994,8 @@ public class FileSystemTimelineWriter extends TimelineWriter{
 
     private Path createApplicationDir(ApplicationId appId) throws IOException {
       Path appDir = new Path(activePath, appId.toString());
-      if (!fs.exists(appDir)) {
-        FileSystem.mkdirs(fs, appDir,
-            new FsPermission(APP_LOG_DIR_PERMISSIONS));
+      if (FileSystem.mkdirs(fs, appDir,
+          new FsPermission(APP_LOG_DIR_PERMISSIONS))) {
         if (LOG.isDebugEnabled()) {
           LOG.debug("New app directory created - " + appDir);
         }
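
`FileSystemTimelineWriter` above replaces exists()-then-mkdirs with an unconditional `FileSystem.mkdirs(fs, dir, perm)` and gates the debug log on its boolean result. One caveat worth keeping in mind: on some filesystems `mkdirs()` also returns true when the directory already existed, so the "newly created" message is best-effort rather than exact. Sketch:

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsPermission;

    public final class LoggedMkdirs {
      private LoggedMkdirs() {
      }

      // The static FileSystem.mkdirs() overload creates the directory and
      // then applies the supplied permission via setPermission().
      public static boolean ensure(FileSystem fs, Path dir,
          FsPermission perm) throws IOException {
        boolean result = FileSystem.mkdirs(fs, dir, perm);
        if (result) {
          System.out.println("New directory ensured - " + dir);
        }
        return result;
      }
    }
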
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/FileSystemNodeLabelsStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/FileSystemNodeLabelsStore.java
index 9a2b8bec3b8..0ec4ea42f73 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/FileSystemNodeLabelsStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/FileSystemNodeLabelsStore.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.yarn.nodelabels;
 
 import java.io.EOFException;
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.Collection;
 import java.util.List;
@@ -83,9 +84,7 @@ public class FileSystemNodeLabelsStore extends NodeLabelsStore {
     setFileSystem(conf);
 
     // mkdir of root dir path
-    if (!fs.exists(fsWorkingPath)) {
-      fs.mkdirs(fsWorkingPath);
-    }
+    fs.mkdirs(fsWorkingPath);
   }
 
   @Override
@@ -155,12 +154,15 @@ public class FileSystemNodeLabelsStore extends NodeLabelsStore {
       throws IOException {
     // If mirror.new exists, read from mirror.new,
     FSDataInputStream is = null;
-    if (fs.exists(newMirrorPath)) {
+    try {
       is = fs.open(newMirrorPath);
-    } else if (fs.exists(oldMirrorPath)) {
-      is = fs.open(oldMirrorPath);
-    }
+    } catch (FileNotFoundException e) {
+      try {
+        is = fs.open(oldMirrorPath);
+      } catch (FileNotFoundException ignored) {
+      }
+    }
     if (null != is) {
       List<NodeLabel> labels = new AddToClusterNodeLabelsRequestPBImpl(
           AddToClusterNodeLabelsRequestProto.parseDelimitedFrom(is))
@@ -204,8 +206,13 @@ public class FileSystemNodeLabelsStore extends NodeLabelsStore {
 
     // Open and process editlog
     editLogPath = new Path(fsWorkingPath, EDITLOG_FILENAME);
-    if (fs.exists(editLogPath)) {
-      FSDataInputStream is = fs.open(editLogPath);
+    FSDataInputStream is;
+    try {
+      is = fs.open(editLogPath);
+    } catch (FileNotFoundException e) {
+      is = null;
+    }
+    if (null != is) {
       while (true) {
         try {
@@ -250,6 +257,7 @@ public class FileSystemNodeLabelsStore extends NodeLabelsStore {
           break;
         }
       }
+      is.close();
     }
 
     // Serialize current mirror to mirror.writing
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NonAppendableFSNodeLabelStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NonAppendableFSNodeLabelStore.java
index 6be571589d4..989f0279403 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NonAppendableFSNodeLabelStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NonAppendableFSNodeLabelStore.java
@@ -116,16 +116,12 @@ public class NonAppendableFSNodeLabelStore extends FileSystemNodeLabelsStore {
       // Rename mirror.new.tmp to mirror.new (will remove .new if it's existed)
       Path newPath = new Path(fsWorkingPath, MIRROR_FILENAME + ".new");
-      if (fs.exists(newPath)) {
-        fs.delete(newPath, false);
-      }
+      fs.delete(newPath, false);
       fs.rename(newTmpPath, newPath);
 
       // Remove existing mirror and rename mirror.new to mirror
       Path mirrorPath = new Path(fsWorkingPath, MIRROR_FILENAME);
-      if (fs.exists(mirrorPath)) {
-        fs.delete(mirrorPath, false);
-      }
+      fs.delete(mirrorPath, false);
       fs.rename(newPath, mirrorPath);
     } finally {
       readLock.unlock();
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestFileSystemNodeLabelsStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestFileSystemNodeLabelsStore.java
index 82e4e117854..ed2f4aa6c74 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestFileSystemNodeLabelsStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestFileSystemNodeLabelsStore.java
@@ -348,10 +348,10 @@ public class TestFileSystemNodeLabelsStore extends NodeLabelTestBase {
     };
     mockStore.setNodeLabelsManager(mgr);
     mockStore.fs = mockFs;
-    verifyMkdirsCount(mockStore, true, 0);
-    verifyMkdirsCount(mockStore, false, 1);
     verifyMkdirsCount(mockStore, true, 1);
     verifyMkdirsCount(mockStore, false, 2);
+    verifyMkdirsCount(mockStore, true, 3);
+    verifyMkdirsCount(mockStore, false, 4);
   }
 
   private void verifyMkdirsCount(FileSystemNodeLabelsStore store,
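
`FileSystemNodeLabelsStore.loadFromMirror()` (above, before the test diff) chains two open attempts: read mirror.new if present, fall back to the old mirror, and treat both missing as "nothing to load". The nested-catch shape generalizes to any primary/fallback pair:

    import java.io.FileNotFoundException;
    import java.io.IOException;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class FallbackOpen {
      private FallbackOpen() {
      }

      // Try the primary, fall back to the secondary, return null when
      // neither exists; other IOExceptions still propagate.
      public static FSDataInputStream openFirstAvailable(FileSystem fs,
          Path primary, Path fallback) throws IOException {
        try {
          return fs.open(primary);
        } catch (FileNotFoundException e) {
          try {
            return fs.open(fallback);
          } catch (FileNotFoundException ignored) {
            return null;
          }
        }
      }
    }
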
@@ -123,12 +124,7 @@ public class FileSystemApplicationHistoryStore extends AbstractService
     rootDirPath = new Path(fsWorkingPath, ROOT_DIR_NAME);
     try {
       fs = getFileSystem(fsWorkingPath, conf);
-
-      if (!fs.isDirectory(rootDirPath)) {
-        fs.mkdirs(rootDirPath);
-        fs.setPermission(rootDirPath, ROOT_DIR_UMASK);
-      }
-
+      fs.mkdirs(rootDirPath, ROOT_DIR_UMASK);
     } catch (IOException e) {
       LOG.error("Error when initializing FileSystemHistoryStorage", e);
       throw e;
@@ -659,9 +655,11 @@ public class FileSystemApplicationHistoryStore extends AbstractService
   private HistoryFileReader getHistoryFileReader(ApplicationId appId)
       throws IOException {
     Path applicationHistoryFile = new Path(rootDirPath, appId.toString());
-    if (!fs.exists(applicationHistoryFile)) {
-      throw new IOException("History file for application " + appId
-          + " is not found");
+    try {
+      fs.getFileStatus(applicationHistoryFile);
+    } catch (FileNotFoundException e) {
+      throw (FileNotFoundException) new FileNotFoundException("History file for"
+          + " application " + appId + " is not found: " + e).initCause(e);
     }
     // The history file is still under writing
     if (outstandingWriters.containsKey(appId)) {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java
index c91d9f5a6d5..bd6bea39984 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java
@@ -27,6 +27,7 @@ import org.junit.Assert;
 import static org.mockito.Mockito.any;
 import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.never;
 import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
@@ -279,8 +280,8 @@ public class TestFileSystemApplicationHistoryStore extends
     }
 
     // Make sure that directory creation was not attempted
-    verify(fs, times(1)).isDirectory(any(Path.class));
-    verify(fs, times(0)).mkdirs(any(Path.class));
+    verify(fs, never()).isDirectory(any(Path.class));
+    verify(fs, times(1)).mkdirs(any(Path.class));
   }
 
   @Test
@@ -301,7 +302,7 @@ public class TestFileSystemApplicationHistoryStore extends
     }
 
     // Make sure that directory creation was attempted
-    verify(fs, times(1)).isDirectory(any(Path.class));
+    verify(fs, never()).isDirectory(any(Path.class));
     verify(fs, times(1)).mkdirs(any(Path.class));
   }
 }
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/sharedcache/SharedCacheUploader.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/sharedcache/SharedCacheUploader.java
@@ -192,10 +192,12 @@ class SharedCacheUploader implements Callable<Boolean> {
 
   private void deleteTempFile(Path tempPath) {
     try {
-      if (tempPath != null && fs.exists(tempPath)) {
+      if (tempPath != null) {
         fs.delete(tempPath, false);
       }
-    } catch (IOException ignore) {}
+    } catch (IOException ioe) {
+      LOG.debug("Exception received while deleting temp files", ioe);
+    }
   }
 
   /**
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/store/InMemorySCMStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/store/InMemorySCMStore.java
index 54d736f5372..7b769a72e04 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/store/InMemorySCMStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/store/InMemorySCMStore.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.yarn.server.sharedcachemanager.store;
 
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
@@ -189,11 +190,14 @@ public class InMemorySCMStore extends SCMStore {
         conf.get(YarnConfiguration.SHARED_CACHE_ROOT,
             YarnConfiguration.DEFAULT_SHARED_CACHE_ROOT);
     Path root = new Path(location);
-    if (!fs.exists(root)) {
+    try {
+      fs.getFileStatus(root);
+    } catch (FileNotFoundException e) {
       String message =
           "The shared cache root directory " + location + " was not found";
       LOG.error(message);
-      throw new IOException(message);
+      throw (IOException) new FileNotFoundException(message)
+          .initCause(e);
     }
 
     int nestedLevel = SharedCacheUtil.getCacheDepth(conf);