From 1a895a3fd6c01dc7366e5e1bc9758a1dfab07af6 Mon Sep 17 00:00:00 2001
From: Suresh Srinivas
Date: Fri, 24 May 2013 22:29:49 +0000
Subject: [PATCH] HADOOP-8562. Merge r1477427 for HDFS-4610, r1477408 for YARN-506, r1477403 for MAPREDUCE-5177

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1486241 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  3 +++
 .../hadoop/hdfs/server/common/Storage.java    |  2 +-
 ...ImagePreTransactionalStorageInspector.java |  3 ++-
 .../hdfs/server/namenode/NNStorage.java       |  9 ++++---
 .../hdfs/server/namenode/TransferFsImage.java |  3 ++-
 .../apache/hadoop/hdfs/MiniDFSCluster.java    |  6 ++---
 .../datanode/TestDataNodeVolumeFailure.java   |  5 ++--
 .../TestDataNodeVolumeFailureReporting.java   | 25 ++++++++++---------
 .../TestDataNodeVolumeFailureToleration.java  |  8 +++---
 .../hdfs/server/datanode/TestDiskError.java   |  5 ++--
 .../hdfs/server/namenode/TestCheckpoint.java  | 16 ++++++------
 .../hdfs/server/namenode/TestEditLog.java     |  4 +--
 .../TestNNStorageRetentionFunctional.java     |  7 +++---
 hadoop-mapreduce-project/CHANGES.txt          |  3 +++
 .../hadoop/mapred/pipes/Application.java      |  2 +-
 .../apache/hadoop/streaming/PathFinder.java   |  3 ++-
 .../apache/hadoop/streaming/StreamJob.java    |  3 ++-
 hadoop-yarn-project/CHANGES.txt               |  3 +++
 .../nodemanager/NodeHealthScriptRunner.java   |  3 ++-
 .../util/CgroupsLCEResourcesHandler.java      |  3 ++-
 .../TestLinuxContainerExecutor.java           |  5 ++--
 .../TestLinuxContainerExecutorWithMocks.java  | 13 +++++-----
 .../nodemanager/TestNodeHealthService.java    |  3 ++-
 .../launcher/TestContainerLaunch.java         |  7 +++---
 24 files changed, 83 insertions(+), 61 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 763ea37af13..ef27cedd46c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -403,6 +403,9 @@ Release 2.0.5-beta - UNRELEASED
     HDFS-4734. HDFS Tests that use ShellCommandFencer are broken on Windows.
     (Arpit Agarwal via suresh)
 
+    HDFS-4610. Use common utils FileUtil#setReadable/Writable/Executable and
+    FileUtil#canRead/Write/Execute. (Ivan Mitic via suresh)
+
 Release 2.0.4-alpha - 2013-04-25
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
index 00551c071f2..0889f66ab08 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
@@ -448,7 +448,7 @@ public abstract class Storage extends StorageInfo {
         LOG.warn(rootPath + "is not a directory");
         return StorageState.NON_EXISTENT;
       }
-      if (!root.canWrite()) {
+      if (!FileUtil.canWrite(root)) {
         LOG.warn("Cannot access storage directory " + rootPath);
         return StorageState.NON_EXISTENT;
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImagePreTransactionalStorageInspector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImagePreTransactionalStorageInspector.java
index 1a637cc32bc..d0554f62048 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImagePreTransactionalStorageInspector.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImagePreTransactionalStorageInspector.java
@@ -33,6 +33,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
@@ -128,7 +129,7 @@ class FSImagePreTransactionalStorageInspector extends FSImageStorageInspector {
   static long readCheckpointTime(StorageDirectory sd) throws IOException {
     File timeFile = NNStorage.getStorageFile(sd, NameNodeFile.TIME);
     long timeStamp = 0L;
-    if (timeFile.exists() && timeFile.canRead()) {
+    if (timeFile.exists() && FileUtil.canRead(timeFile)) {
       DataInputStream in = new DataInputStream(new FileInputStream(timeFile));
       try {
         timeStamp = in.readLong();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
index 1a35fc9f281..01d505728e3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
@@ -34,6 +34,7 @@ import java.util.concurrent.CopyOnWriteArrayList;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
@@ -231,8 +232,8 @@ public class NNStorage extends Storage implements Closeable,
       File root = sd.getRoot();
       LOG.info("currently disabled dir " + root.getAbsolutePath()
                + "; type="+sd.getStorageDirType()
-               + ";canwrite="+root.canWrite());
-      if(root.exists() && root.canWrite()) {
+               + ";canwrite="+FileUtil.canWrite(root));
+      if(root.exists() && FileUtil.canWrite(root)) {
         LOG.info("restoring dir " + sd.getRoot().getAbsolutePath());
         this.addStorageDir(sd); // restore
         this.removedStorageDirs.remove(sd);
@@ -506,7 +507,7 @@ public class NNStorage extends Storage implements Closeable,
           dirIterator(NameNodeDirType.IMAGE); it.hasNext();) {
       sd = it.next();
       File fsImage = getStorageFile(sd, NameNodeFile.IMAGE, txid);
-      if(sd.getRoot().canRead() && fsImage.exists())
+      if(FileUtil.canRead(sd.getRoot()) && fsImage.exists())
         return fsImage;
     }
     return null;
@@ -735,7 +736,7 @@ public class NNStorage extends Storage implements Closeable,
   private File findFile(NameNodeDirType dirType, String name) {
     for (StorageDirectory sd : dirIterable(dirType)) {
       File candidate = new File(sd.getCurrentDir(), name);
-      if (sd.getCurrentDir().canRead() &&
+      if (FileUtil.canRead(sd.getCurrentDir()) &&
           candidate.exists()) {
         return candidate;
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
index eecc64e058e..da442a59e9a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
@@ -33,6 +33,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.util.Time;
@@ -104,7 +105,7 @@ public class TransferFsImage {
     assert !finalFiles.isEmpty() : "No checkpoint targets.";
 
     for (File f : finalFiles) {
-      if (f.exists() && f.canRead()) {
+      if (f.exists() && FileUtil.canRead(f)) {
         LOG.info("Skipping download of remote edit log " +
             log + " since it already is stored locally at " + f);
         return;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index 198fac16dde..1092d67449f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -679,9 +679,9 @@ public class MiniDFSCluster {
       sb.append("\tabsolute:").append(path.getAbsolutePath()).append("\n");
       sb.append("\tpermissions: ");
       sb.append(path.isDirectory() ? "d": "-");
-      sb.append(path.canRead() ? "r" : "-");
-      sb.append(path.canWrite() ? "w" : "-");
-      sb.append(path.canExecute() ? "x" : "-");
+      sb.append(FileUtil.canRead(path) ? "r" : "-");
+      sb.append(FileUtil.canWrite(path) ? "w" : "-");
+      sb.append(FileUtil.canExecute(path) ? "x" : "-");
       sb.append("\n");
       path = path.getParentFile();
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
index 3697ae25804..0a69c072399 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
@@ -31,6 +31,7 @@ import java.util.Map;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.BlockReader;
 import org.apache.hadoop.hdfs.BlockReaderFactory;
@@ -91,10 +92,10 @@ public class TestDataNodeVolumeFailure {
   @After
   public void tearDown() throws Exception {
     if(data_fail != null) {
-      data_fail.setWritable(true);
+      FileUtil.setWritable(data_fail, true);
     }
     if(failedDir != null) {
-      failedDir.setWritable(true);
+      FileUtil.setWritable(failedDir, true);
     }
     if(cluster != null) {
       cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
index e53933539e4..d07bb45075b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
@@ -31,6 +31,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
@@ -88,8 +89,8 @@ public class TestDataNodeVolumeFailureReporting {
   @After
   public void tearDown() throws Exception {
     for (int i = 0; i < 3; i++) {
-      new File(dataDir, "data"+(2*i+1)).setExecutable(true);
-      new File(dataDir, "data"+(2*i+2)).setExecutable(true);
+      FileUtil.setExecutable(new File(dataDir, "data"+(2*i+1)), true);
+      FileUtil.setExecutable(new File(dataDir, "data"+(2*i+2)), true);
     }
     cluster.shutdown();
   }
@@ -131,8 +132,8 @@ public class TestDataNodeVolumeFailureReporting {
      * fail. The client does not retry failed nodes even though
      * perhaps they could succeed because just a single volume failed.
      */
-    assertTrue("Couldn't chmod local vol", dn1Vol1.setExecutable(false));
-    assertTrue("Couldn't chmod local vol", dn2Vol1.setExecutable(false));
+    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn1Vol1, false));
+    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn2Vol1, false));
 
     /*
      * Create file1 and wait for 3 replicas (ie all DNs can still
@@ -168,7 +169,7 @@ public class TestDataNodeVolumeFailureReporting {
      * Now fail a volume on the third datanode. We should be able to get
      * three replicas since we've already identified the other failures.
      */
-    assertTrue("Couldn't chmod local vol", dn3Vol1.setExecutable(false));
+    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn3Vol1, false));
     Path file2 = new Path("/test2");
     DFSTestUtil.createFile(fs, file2, 1024, (short)3, 1L);
     DFSTestUtil.waitReplication(fs, file2, (short)3);
@@ -200,7 +201,7 @@ public class TestDataNodeVolumeFailureReporting {
      * and that it's no longer up. Only wait for two replicas since
     * we'll never get a third.
      */
-    assertTrue("Couldn't chmod local vol", dn3Vol2.setExecutable(false));
+    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn3Vol2, false));
     Path file3 = new Path("/test3");
     DFSTestUtil.createFile(fs, file3, 1024, (short)3, 1L);
     DFSTestUtil.waitReplication(fs, file3, (short)2);
@@ -222,10 +223,10 @@ public class TestDataNodeVolumeFailureReporting {
      * restart, so file creation should be able to succeed after
     * restoring the data directories and restarting the datanodes.
      */
-    assertTrue("Couldn't chmod local vol", dn1Vol1.setExecutable(true));
-    assertTrue("Couldn't chmod local vol", dn2Vol1.setExecutable(true));
-    assertTrue("Couldn't chmod local vol", dn3Vol1.setExecutable(true));
-    assertTrue("Couldn't chmod local vol", dn3Vol2.setExecutable(true));
+    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn1Vol1, true));
+    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn2Vol1, true));
+    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn3Vol1, true));
+    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn3Vol2, true));
     cluster.restartDataNodes();
     cluster.waitActive();
     Path file4 = new Path("/test4");
@@ -261,8 +262,8 @@ public class TestDataNodeVolumeFailureReporting {
     // third healthy so one node in the pipeline will not fail).
     File dn1Vol1 = new File(dataDir, "data"+(2*0+1));
     File dn2Vol1 = new File(dataDir, "data"+(2*1+1));
-    assertTrue("Couldn't chmod local vol", dn1Vol1.setExecutable(false));
-    assertTrue("Couldn't chmod local vol", dn2Vol1.setExecutable(false));
+    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn1Vol1, false));
+    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn2Vol1, false));
 
     Path file1 = new Path("/test1");
     DFSTestUtil.createFile(fs, file1, 1024, (short)2, 1L);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java
index ea256853f98..73dc77c3b2f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java
@@ -77,8 +77,8 @@ public class TestDataNodeVolumeFailureToleration {
   @After
   public void tearDown() throws Exception {
     for (int i = 0; i < 3; i++) {
-      new File(dataDir, "data"+(2*i+1)).setExecutable(true);
-      new File(dataDir, "data"+(2*i+2)).setExecutable(true);
+      FileUtil.setExecutable(new File(dataDir, "data"+(2*i+1)), true);
+      FileUtil.setExecutable(new File(dataDir, "data"+(2*i+2)), true);
     }
     cluster.shutdown();
   }
@@ -152,7 +152,7 @@ public class TestDataNodeVolumeFailureToleration {
 
     // Fail a volume on the 2nd DN
     File dn2Vol1 = new File(dataDir, "data"+(2*1+1));
-    assertTrue("Couldn't chmod local vol", dn2Vol1.setExecutable(false));
+    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn2Vol1, false));
 
     // Should only get two replicas (the first DN and the 3rd)
     Path file1 = new Path("/test1");
@@ -165,7 +165,7 @@ public class TestDataNodeVolumeFailureToleration {
 
     // If we restore the volume we should still only be able to get
     // two replicas since the DN is still considered dead.
-    assertTrue("Couldn't chmod local vol", dn2Vol1.setExecutable(true));
+    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn2Vol1, true));
     Path file2 = new Path("/test2");
     DFSTestUtil.createFile(fs, file2, 1024, (short)3, 1L);
     DFSTestUtil.waitReplication(fs, file2, (short)2);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
index 3c10a7ae12a..ac268a2a4ee 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
@@ -27,6 +27,7 @@ import java.net.Socket;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -106,8 +107,8 @@ public class TestDiskError {
       }
     } finally {
       // restore its old permission
-      dir1.setWritable(true);
-      dir2.setWritable(true);
+      FileUtil.setWritable(dir1, true);
+      FileUtil.setWritable(dir2, true);
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
index e7c3b3cb797..b57f0cea7fb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
@@ -181,7 +181,7 @@ public class TestCheckpoint {
 
       try {
         // Simulate the mount going read-only
-        dir.setWritable(false);
+        FileUtil.setWritable(dir, false);
         cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
             .format(false).build();
         fail("NN should have failed to start with " + dir + " set unreadable");
@@ -191,7 +191,7 @@ public class TestCheckpoint {
       } finally {
         cleanup(cluster);
         cluster = null;
-        dir.setWritable(true);
+        FileUtil.setWritable(dir, true);
       }
     }
   }
@@ -1967,7 +1967,7 @@ public class TestCheckpoint {
       StorageDirectory sd1 = storage.getStorageDir(1);
 
       currentDir = sd0.getCurrentDir();
-      currentDir.setExecutable(false);
+      FileUtil.setExecutable(currentDir, false);
 
       // Upload checkpoint when NN has a bad storage dir. This should
       // succeed and create the checkpoint in the good dir.
@@ -1977,7 +1977,7 @@ public class TestCheckpoint {
           new File(sd1.getCurrentDir(), NNStorage.getImageFileName(2)));
 
       // Restore the good dir
-      currentDir.setExecutable(true);
+      FileUtil.setExecutable(currentDir, true);
       nn.restoreFailedStorage("true");
       nn.rollEditLog();
 
@@ -1988,7 +1988,7 @@ public class TestCheckpoint {
       assertParallelFilesInvariant(cluster, ImmutableList.of(secondary));
     } finally {
       if (currentDir != null) {
-        currentDir.setExecutable(true);
+        FileUtil.setExecutable(currentDir, true);
       }
       cleanup(secondary);
       secondary = null;
@@ -2038,7 +2038,7 @@ public class TestCheckpoint {
       StorageDirectory sd0 = storage.getStorageDir(0);
       assertEquals(NameNodeDirType.IMAGE, sd0.getStorageDirType());
       currentDir = sd0.getCurrentDir();
-      currentDir.setExecutable(false);
+      FileUtil.setExecutable(currentDir, false);
 
       // Try to upload checkpoint -- this should fail since there are no
       // valid storage dirs
@@ -2051,7 +2051,7 @@ public class TestCheckpoint {
       }
 
       // Restore the good dir
-      currentDir.setExecutable(true);
+      FileUtil.setExecutable(currentDir, true);
       nn.restoreFailedStorage("true");
       nn.rollEditLog();
 
@@ -2062,7 +2062,7 @@ public class TestCheckpoint {
       assertParallelFilesInvariant(cluster, ImmutableList.of(secondary));
     } finally {
       if (currentDir != null) {
-        currentDir.setExecutable(true);
+        FileUtil.setExecutable(currentDir, true);
       }
       cleanup(secondary);
       secondary = null;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
index 80f449e1f2b..0cabf0818dc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
@@ -881,14 +881,14 @@ public class TestEditLog {
     logDir.mkdirs();
     FSEditLog log = FSImageTestUtil.createStandaloneEditLog(logDir);
     try {
-      logDir.setWritable(false);
+      FileUtil.setWritable(logDir, false);
       log.openForWrite();
       fail("Did no throw exception on only having a bad dir");
     } catch (IOException ioe) {
       GenericTestUtils.assertExceptionContains(
           "too few journals successfully started", ioe);
     } finally {
-      logDir.setWritable(true);
+      FileUtil.setWritable(logDir, true);
       log.close();
     }
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionFunctional.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionFunctional.java
index dcc6b2da4a0..d27b91d2247 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionFunctional.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionFunctional.java
@@ -28,6 +28,7 @@ import java.io.IOException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -106,10 +107,10 @@ public class TestNNStorageRetentionFunctional {
         getInProgressEditsFileName(5));
 
     LOG.info("Failing first storage dir by chmodding it");
-    sd0.setExecutable(false);
+    FileUtil.setExecutable(sd0, false);
     doSaveNamespace(nn);
     LOG.info("Restoring accessibility of first storage dir");
-    sd0.setExecutable(true);
+    FileUtil.setExecutable(sd0, true);
 
     LOG.info("nothing should have been purged in first storage dir");
     assertGlobEquals(cd0, "fsimage_\\d*",
@@ -138,7 +139,7 @@ public class TestNNStorageRetentionFunctional {
     assertGlobEquals(cd0, "edits_.*",
         getInProgressEditsFileName(9));
   } finally {
-    sd0.setExecutable(true);
+    FileUtil.setExecutable(sd0, true);
 
     LOG.info("Shutting down...");
     if (cluster != null) {
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index de98617992d..e58e06edba9 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -168,6 +168,9 @@ Release 2.0.5-beta - UNRELEASED
     HADOOP-9372. Fix bad timeout annotations on tests. (Arpit Agarwal via
     suresh)
 
+    MAPREDUCE-5177. Use common utils FileUtil#setReadable/Writable/Executable &
+    FileUtil#canRead/Write/Execute. (Ivan Mitic via suresh)
+
 Release 2.0.5-beta - UNRELEASED
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/Application.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/Application.java
index 8d4d2592373..733ead48e0a 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/Application.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/Application.java
@@ -113,7 +113,7 @@ class Application