diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 63f43189c8c..e6fe6e5c47e 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -169,6 +169,10 @@ Release 0.23.2 - UNRELEASED
     HADOOP-8071. Avoid an extra packet in client code when nagling is
     disabled. (todd)
 
+    HADOOP-6502. Improve the performance of Configuration.getClassByName,
+    when the class is not found, by caching negative results.
+    (sharad, todd via todd)
+
   BUG FIXES
 
     HADOOP-8042. When copying a file out of HDFS, modifying it, and uploading
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index 84a31871195..d71aaf58106 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -1146,6 +1146,22 @@ public void setStrings(String name, String... values) {
    * @throws ClassNotFoundException if the class is not found.
    */
   public Class<?> getClassByName(String name) throws ClassNotFoundException {
+    Class<?> ret = getClassByNameOrNull(name);
+    if (ret == null) {
+      throw new ClassNotFoundException("Class " + name + " not found");
+    }
+    return ret;
+  }
+
+  /**
+   * Load a class by name, returning null rather than throwing an exception
+   * if it couldn't be loaded. This is to avoid the overhead of creating
+   * an exception.
+   *
+   * @param name the class name
+   * @return the class object, or null if it could not be found.
+   */
+  public Class<?> getClassByNameOrNull(String name) {
     Map<String, Class<?>> map;
 
     synchronized (CACHE_CLASSES) {
@@ -1157,12 +1173,20 @@ public Class<?> getClassByName(String name) throws ClassNotFoundException {
       }
     }
 
-    Class<?> clazz = map.get(name);
-    if (clazz == null) {
-      clazz = Class.forName(name, true, classLoader);
-      if (clazz != null) {
-        // two putters can race here, but they'll put the same class
-        map.put(name, clazz);
+    Class<?> clazz = null;
+    if (!map.containsKey(name)) {
+      try {
+        clazz = Class.forName(name, true, classLoader);
+      } catch (ClassNotFoundException e) {
+        map.put(name, null); // cache the negative result: class not found
+        return null;
+      }
+      // two putters can race here, but they'll put the same class
+      map.put(name, clazz);
+    } else { // a lookup was already performed for this class name
+      clazz = map.get(name);
+      if (clazz == null) { // found the cached negative result
+        return null;
       }
     }
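The HADOOP-6502 change above caches misses as well as hits: once a class name has failed to resolve, later lookups skip both the classloader search and the cost of constructing a ClassNotFoundException. A minimal, self-contained sketch of the same negative-caching idea follows. The ClassCache name is invented, and Optional stands in for the literal null the patch stores (ConcurrentHashMap rejects null values); the real cache is also keyed per ClassLoader, which this sketch ignores.

    import java.util.Map;
    import java.util.Optional;
    import java.util.concurrent.ConcurrentHashMap;

    // Illustrative only: negative-result caching in the style of
    // Configuration.getClassByNameOrNull.
    final class ClassCache {
      private final Map<String, Optional<Class<?>>> cache =
          new ConcurrentHashMap<>();

      Class<?> getByNameOrNull(String name) {
        return cache.computeIfAbsent(name, n -> {
          try {
            Class<?> found = Class.forName(n);
            return Optional.of(found);
          } catch (ClassNotFoundException e) {
            return Optional.empty(); // cache the miss, too
          }
        }).orElse(null);
      }

      public static void main(String[] args) {
        ClassCache c = new ClassCache();
        System.out.println(c.getByNameOrNull("java.lang.String")); // class java.lang.String
        System.out.println(c.getByNameOrNull("no.such.Clazz"));    // null: resolved once...
        System.out.println(c.getByNameOrNull("no.such.Clazz"));    // ...now answered from cache
      }
    }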
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReflectionUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReflectionUtils.java
index 0387c7e8b8e..fc5289779e6 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReflectionUtils.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReflectionUtils.java
@@ -86,17 +86,22 @@ private static void setJobConf(Object theObject, Configuration conf) {
     //invoke configure on theObject
     try {
       Class<?> jobConfClass = 
-        conf.getClassByName("org.apache.hadoop.mapred.JobConf");
+        conf.getClassByNameOrNull("org.apache.hadoop.mapred.JobConf");
+      if (jobConfClass == null) {
+        return;
+      }
+
       Class<?> jobConfigurableClass = 
-        conf.getClassByName("org.apache.hadoop.mapred.JobConfigurable");
-      if (jobConfClass.isAssignableFrom(conf.getClass()) &&
+        conf.getClassByNameOrNull("org.apache.hadoop.mapred.JobConfigurable");
+      if (jobConfigurableClass == null) {
+        return;
+      }
+      if (jobConfClass.isAssignableFrom(conf.getClass()) &&
             jobConfigurableClass.isAssignableFrom(theObject.getClass())) {
         Method configureMethod = 
           jobConfigurableClass.getMethod("configure", jobConfClass);
         configureMethod.invoke(theObject, conf);
       }
-    } catch (ClassNotFoundException e) {
-      //JobConf/JobConfigurable not in classpath. no need to configure
     } catch (Exception e) {
       throw new RuntimeException("Error in configuring object", e);
     }
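setJobConf() deliberately reaches JobConf and JobConfigurable through a runtime class lookup rather than compile-time references, so hadoop-common keeps working when the mapred classes are absent; with getClassByNameOrNull() it can now bail out early instead of catching a freshly constructed ClassNotFoundException on every call. A rough sketch of this "optional hook" pattern, with hypothetical class names (OptionalHook, com.example.JobConf, com.example.Configurable):

    import java.lang.reflect.Method;

    // Illustrative only: the general shape of ReflectionUtils.setJobConf.
    // Both classes are resolved at runtime; absence simply means
    // "nothing to configure".
    final class OptionalHook {
      static void configureIfPossible(Object target, Object conf) {
        Class<?> paramClass = loadOrNull("com.example.JobConf");     // hypothetical
        if (paramClass == null) {
          return;
        }
        Class<?> hookClass = loadOrNull("com.example.Configurable"); // hypothetical
        if (hookClass == null) {
          return;
        }
        try {
          if (paramClass.isAssignableFrom(conf.getClass()) &&
              hookClass.isAssignableFrom(target.getClass())) {
            Method configure = hookClass.getMethod("configure", paramClass);
            configure.invoke(target, conf);
          }
        } catch (Exception e) {
          throw new RuntimeException("Error in configuring object", e);
        }
      }

      private static Class<?> loadOrNull(String name) {
        try {
          return Class.forName(name);
        } catch (ClassNotFoundException e) {
          return null; // mirrors Configuration.getClassByNameOrNull
        }
      }
    }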
diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 8e08568b5a5..9390d1093ff 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -216,6 +216,13 @@
   determine the host, port, etc. for a filesystem.</description>
 </property>
 
+<property>
+  <name>fs.default.name</name>
+  <value>file:///</value>
+  <description>Deprecated. Use (fs.defaultFS) property
+  instead</description>
+</property>
+
 <property>
   <name>fs.trash.interval</name>
   <value>0</value>
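The new core-default.xml entry documents fs.default.name as a deprecated spelling of fs.defaultFS, which is why the test and benchmark files later in this patch switch keys. Assuming a 0.23-era hadoop-common on the classpath (which registers this deprecation mapping itself), the translation can be observed directly:

    import org.apache.hadoop.conf.Configuration;

    // Illustrative only: the deprecated key is transparently mapped to
    // the new one by Configuration's deprecation layer.
    public class DefaultFsDemo {
      public static void main(String[] args) {
        Configuration conf = new Configuration(false);
        conf.set("fs.default.name", "file:///");      // old, deprecated key
        System.out.println(conf.get("fs.defaultFS")); // resolves to file:///
      }
    }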
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f936c4612c6..0ab243cc285 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -210,6 +210,9 @@ Trunk (unreleased changes)
     dfs.client.block.write.replace-datanode-on-failure.enable to be mistakenly
     disabled. (atm)
 
+    HDFS-2525. Race between BlockPoolSliceScanner and append. (Brandon Li
+    via jitendra)
+
 Release 0.23.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -245,6 +248,9 @@ Release 0.23.2 - UNRELEASED
     HDFS-2815. Namenode sometimes does not come out of safemode during NN crash
     + restart. (Uma Maheswara Rao via suresh)
 
+    HDFS-2950. Secondary NN HTTPS address should be listed as a
+    NAMESERVICE_SPECIFIC_KEY. (todd)
+
 Release 0.23.1 - 2012-02-08
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index b655b6ebd45..35590eaff28 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -99,6 +99,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final int DFS_NAMENODE_SAFEMODE_MIN_DATANODES_DEFAULT = 0;
   public static final String DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY = "dfs.namenode.secondary.http-address";
   public static final String DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_DEFAULT = "0.0.0.0:50090";
+  public static final String DFS_NAMENODE_SECONDARY_HTTPS_PORT_KEY = "dfs.namenode.secondary.https-port";
+  public static final int DFS_NAMENODE_SECONDARY_HTTPS_PORT_DEFAULT = 50490;
   public static final String DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY = "dfs.namenode.checkpoint.check.period";
   public static final long DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_DEFAULT = 60;
   public static final String DFS_NAMENODE_CHECKPOINT_PERIOD_KEY = "dfs.namenode.checkpoint.period";
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java
index 294e5852cbd..44533f13a12 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java
@@ -81,6 +81,7 @@ private static void addDeprecatedKeys() {
     deprecate("dfs.safemode.extension", DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY);
     deprecate("dfs.safemode.threshold.pct", DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY);
     deprecate("dfs.secondary.http.address", DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY);
+    deprecate("dfs.secondary.https.port", DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTPS_PORT_KEY);
     deprecate("dfs.socket.timeout", DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY);
     deprecate("fs.checkpoint.dir", DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY);
     deprecate("fs.checkpoint.edits.dir", DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
index e3709463b41..54c1b6f3952 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
@@ -51,11 +51,8 @@
 import org.apache.hadoop.io.IOUtils;
 
 /**
- * Performs two types of scanning:
- * <li> Gets block files from the data directories and reconciles the
- * difference between the blocks on the disk and in memory.</li>
- * <li> Scans the data directories for block files under a block pool
- * and verifies that the files are not corrupt.</li>
+ * Scans the block files under a block pool and verifies that the
+ * files are not corrupt.
  * This keeps track of blocks and their last verification times.
  * Currently it does not modify the metadata for the block.
 */
@@ -430,6 +427,19 @@ private void verifyBlock(ExtendedBlock block) {
         return;
       }
 
+      // If the block exists, the exception may be due to a race with a
+      // concurrent write: BlockSender got an old block path in rbw, and
+      // BlockReceiver moved the block from rbw to finalized before
+      // BlockSender could open the file via the updated VolumeMap. The
+      // block's state can change again now, so ignore this error here. If a
+      // block really was deleted by mistake, the DirectoryScanner should catch it.
+      if (e instanceof FileNotFoundException) {
+        LOG.info("Verification failed for " + block +
+            ". It may be due to a race with a concurrent write.");
+        deleteBlock(block.getLocalBlock());
+        return;
+      }
+
       LOG.warn((second ? "Second " : "First ") + "Verification failed for "
           + block, e);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index f1bfa74888f..fec017664b8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -146,6 +146,7 @@ public static enum OperationCategory {
     DFS_NAMENODE_HTTPS_ADDRESS_KEY,
     DFS_NAMENODE_KEYTAB_FILE_KEY,
     DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
+    DFS_NAMENODE_SECONDARY_HTTPS_PORT_KEY,
     DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY,
     DFS_NAMENODE_BACKUP_ADDRESS_KEY,
     DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
index 61b533d3442..3846e806642 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
@@ -254,7 +254,8 @@ public HttpServer run() throws IOException, InterruptedException {
               Krb5AndCertsSslSocketConnector.KRB5_CIPHER_SUITES.get(0));
           InetSocketAddress secInfoSocAddr = 
             NetUtils.createSocketAddr(infoBindAddress + ":"+ conf.getInt(
-              "dfs.secondary.https.port", 443));
+              DFS_NAMENODE_SECONDARY_HTTPS_PORT_KEY,
+              DFS_NAMENODE_SECONDARY_HTTPS_PORT_DEFAULT));
           imagePort = secInfoSocAddr.getPort();
           infoServer.addSslListener(secInfoSocAddr, conf, false, true);
         }
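Listing DFS_NAMENODE_SECONDARY_HTTPS_PORT_KEY in NAMESERVICE_SPECIFIC_KEYS lets a federated configuration carry a different value per nameservice. As I understand the 0.23 federation convention (the actual resolution lives in DFSUtil), the generic key is consulted with the nameservice id appended; the sketch below only exercises that naming scheme, and the ns1 id and port values are invented:

    import org.apache.hadoop.conf.Configuration;

    // Illustrative only: per-nameservice override of a generic key,
    // assuming the "<key>.<nsId>" suffixing convention.
    public class NameserviceKeyDemo {
      public static void main(String[] args) {
        Configuration conf = new Configuration(false);
        String key = "dfs.namenode.secondary.https-port";
        conf.set(key + ".ns1", "50491");  // per-nameservice override
        String nsId = "ns1";              // normally taken from the federation config
        int port = conf.getInt(key + "." + nsId, 50490);
        System.out.println(port);         // 50491
      }
    }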
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendDifferentChecksum.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendDifferentChecksum.java
index f296419bde5..9fbb7605d44 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendDifferentChecksum.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendDifferentChecksum.java
@@ -47,12 +47,6 @@ public class TestAppendDifferentChecksum {
   public static void setupCluster() throws IOException {
     Configuration conf = new HdfsConfiguration();
     conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096);
-
-    // disable block scanner, since otherwise this test can trigger
-    // HDFS-2525, which is a different bug than the one we're trying to unit
-    // test here! When HDFS-2525 is fixed, this can be removed.
-    conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1);
-
     conf.set("fs.hdfs.impl.disable.cache", "true");
     cluster = new MiniDFSCluster.Builder(conf)
       .numDataNodes(1)
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index e0504b90481..22324a79f2c 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -94,6 +94,9 @@ Release 0.23.2 - UNRELEASED
 
   IMPROVEMENTS
 
+    MAPREDUCE-3854. Fixed and reenabled tests related to MR child JVM's
+    environment variables in TestMiniMRChildTask. (Tom White via vinodkv)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -103,7 +106,10 @@ Release 0.23.2 - UNRELEASED
 
     MAPREDUCE-3852. Test TestLinuxResourceCalculatorPlugin failing. (Thomas
     Graves via mahadev)
-    
+
+    MAPREDUCE-3736. Variable substitution depth too large for fs.default.name
+    causes jobs to fail (ahmed via tucu).
+
 Release 0.23.1 - 2012-02-08
 
   INCOMPATIBLE CHANGES
@@ -812,6 +818,9 @@ Release 0.23.1 - 2012-02-08
     MAPREDUCE-3802. Added test to validate that AM can crash multiple times and
     still can recover successfully after MAPREDUCE-3846. (vinodkv)
 
+    MAPREDUCE-3858. Task attempt failure during commit results in task never
+    completing. (Tom White via mahadev)
+
 Release 0.23.0 - 2011-11-01
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java
index e472e99cd21..0d7a2d8caee 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java
@@ -832,6 +832,9 @@ private static class AttemptFailedTransition implements
     public TaskState transition(TaskImpl task, TaskEvent event) {
       task.failedAttempts++;
       TaskTAttemptEvent castEvent = (TaskTAttemptEvent) event;
+      if (castEvent.getTaskAttemptID().equals(task.commitAttempt)) {
+        task.commitAttempt = null;
+      }
       TaskAttempt attempt = task.attempts.get(castEvent.getTaskAttemptID());
       if (attempt.getAssignedContainerMgrAddress() != null) {
         //container was assigned
@@ -877,6 +880,7 @@ protected TaskState getDefaultState(Task task) {
 
     protected void unSucceed(TaskImpl task) {
       ++task.numberUncompletedAttempts;
+      task.commitAttempt = null;
       task.successfulAttempt = null;
     }
   }
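The TaskImpl changes fix MAPREDUCE-3858: the task grants the commit to one attempt, and if that attempt then fails, the grant must be revoked; otherwise no retry can ever commit and the task hangs. A toy reduction of that invariant (CommitTracker and the attempt ids are made up for this sketch, and the real grant is driven by state-machine events rather than a first-asker rule):

    // Illustrative only: release the commit grant when its holder fails,
    // which is what the two TaskImpl hunks above add.
    final class CommitTracker {
      private String commitAttempt; // id of the attempt allowed to commit, if any

      synchronized boolean canCommit(String attemptId) {
        if (commitAttempt == null) {
          commitAttempt = attemptId; // grant the commit to this attempt
        }
        return attemptId.equals(commitAttempt);
      }

      synchronized void onAttemptFailed(String attemptId) {
        if (attemptId.equals(commitAttempt)) {
          commitAttempt = null; // failed holder releases the grant
        }
      }

      public static void main(String[] args) {
        CommitTracker t = new CommitTracker();
        System.out.println(t.canCommit("attempt_0")); // true: granted
        t.onAttemptFailed("attempt_0");               // grant released on failure
        System.out.println(t.canCommit("attempt_1")); // true: the retry can commit
      }
    }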
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskImpl.java
index dcc9b07cc38..0033528347e 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskImpl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskImpl.java
@@ -18,6 +18,8 @@
 package org.apache.hadoop.mapreduce.v2.app.job.impl;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
@@ -261,6 +263,12 @@ private void launchTaskAttempt(TaskAttemptId attemptId) {
     assertTaskRunningState();
   }
 
+  private void commitTaskAttempt(TaskAttemptId attemptId) {
+    mockTask.handle(new TaskTAttemptEvent(attemptId, 
+        TaskEventType.T_ATTEMPT_COMMIT_PENDING));
+    assertTaskRunningState();
+  }
+
   private MockTaskAttemptImpl getLastAttempt() {
     return taskAttempts.get(taskAttempts.size()-1);
   }
@@ -279,32 +287,45 @@ private void killRunningTaskAttempt(TaskAttemptId attemptId) {
     assertTaskRunningState();
   }
 
+  private void failRunningTaskAttempt(TaskAttemptId attemptId) {
+    mockTask.handle(new TaskTAttemptEvent(attemptId, 
+        TaskEventType.T_ATTEMPT_FAILED));
+    assertTaskRunningState();
+  }
+
   /**
    * {@link TaskState#NEW}
    */
   private void assertTaskNewState() {
-    assertEquals(mockTask.getState(), TaskState.NEW);
+    assertEquals(TaskState.NEW, mockTask.getState());
   }
 
   /**
    * {@link TaskState#SCHEDULED}
    */
   private void assertTaskScheduledState() {
-    assertEquals(mockTask.getState(), TaskState.SCHEDULED);
+    assertEquals(TaskState.SCHEDULED, mockTask.getState());
   }
 
   /**
    * {@link TaskState#RUNNING}
    */
   private void assertTaskRunningState() {
-    assertEquals(mockTask.getState(), TaskState.RUNNING);
+    assertEquals(TaskState.RUNNING, mockTask.getState());
   }
 
   /**
    * {@link TaskState#KILL_WAIT}
    */
   private void assertTaskKillWaitState() {
-    assertEquals(mockTask.getState(), TaskState.KILL_WAIT);
+    assertEquals(TaskState.KILL_WAIT, mockTask.getState());
+  }
+
+  /**
+   * {@link TaskState#SUCCEEDED}
+   */
+  private void assertTaskSucceededState() {
+    assertEquals(TaskState.SUCCEEDED, mockTask.getState());
   }
 
   @Test
@@ -409,5 +430,32 @@ public void testTaskProgress() {
     assert(mockTask.getProgress() == progress);
   }
+
+  @Test
+  public void testFailureDuringTaskAttemptCommit() {
+    TaskId taskId = getNewTaskID();
+    scheduleTaskAttempt(taskId);
+    launchTaskAttempt(getLastAttempt().getAttemptId());
+    updateLastAttemptState(TaskAttemptState.COMMIT_PENDING);
+    commitTaskAttempt(getLastAttempt().getAttemptId());
+
+    // During the task attempt commit there is an exception which causes
+    // the attempt to fail
+    updateLastAttemptState(TaskAttemptState.FAILED);
+    failRunningTaskAttempt(getLastAttempt().getAttemptId());
+
+    assertEquals(2, taskAttempts.size());
+    updateLastAttemptState(TaskAttemptState.SUCCEEDED);
+    commitTaskAttempt(getLastAttempt().getAttemptId());
+    mockTask.handle(new TaskTAttemptEvent(getLastAttempt().getAttemptId(), 
+        TaskEventType.T_ATTEMPT_SUCCEEDED));
+
+    assertFalse("First attempt should not commit",
+        mockTask.canCommit(taskAttempts.get(0).getAttemptId()));
+    assertTrue("Second attempt should commit",
+        mockTask.canCommit(getLastAttempt().getAttemptId()));
+
+    assertTaskSucceededState();
+  }
 }
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapred/TestMRWithDistributedCache.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapred/TestMRWithDistributedCache.java
index ed89bf9fd4f..6798831ba73 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapred/TestMRWithDistributedCache.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapred/TestMRWithDistributedCache.java
@@ -159,7 +159,7 @@ private void testWithConf(Configuration conf) throws IOException,
   public void testLocalJobRunner() throws Exception {
     Configuration c = new Configuration();
     c.set(JTConfig.JT_IPC_ADDRESS, "local");
-    c.set("fs.default.name", "file:///");
+    c.set("fs.defaultFS", "file:///");
     testWithConf(c);
   }
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/conf/TestNoDefaultsJobConf.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/conf/TestNoDefaultsJobConf.java
index d91754d71d7..81c474e9cb6 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/conf/TestNoDefaultsJobConf.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/conf/TestNoDefaultsJobConf.java
@@ -59,7 +59,7 @@ public void testNoDefaults() throws Exception {
 
     JobConf conf = new JobConf(false);
 
-    conf.set("fs.default.name", createJobConf().get("fs.default.name"));
+    conf.set("fs.defaultFS", createJobConf().get("fs.defaultFS"));
 
     conf.setJobName("mr");
 
@@ -100,4 +100,4 @@ public void testNoDefaults() throws Exception {
 
   }
 
-}
\ No newline at end of file
+}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/JHLogAnalyzer.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/JHLogAnalyzer.java
index 540f15bffa2..347dd066a98 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/JHLogAnalyzer.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/JHLogAnalyzer.java
@@ -1024,7 +1024,7 @@ public static void main(String[] args) {
     if(testFile != null) {
       LOG.info("Start JHLA test ============ ");
       LocalFileSystem lfs = FileSystem.getLocal(conf);
-      conf.set("fs.default.name", "file:///");
+      conf.set("fs.defaultFS", "file:///");
       JHLAMapper map = new JHLAMapper(conf);
       map.parseLogFile(lfs, new Path(testFile), 0L,
                        new LoggingCollector(), Reporter.NULL);
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/io/FileBench.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/io/FileBench.java
index fb2a4168959..f155daef141 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/io/FileBench.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/io/FileBench.java
@@ -53,7 +53,7 @@ static int printUsage() {
 "unless they are also explicitly included, as in \"-pln -zip\"\n" +
 "Note that CompressionType params only apply to SequenceFiles\n\n" +
 "Useful options to set:\n" +
-"-D fs.default.name=\"file:///\" \\\n" +
+"-D fs.defaultFS=\"file:///\" \\\n" +
 "-D fs.file.impl=org.apache.hadoop.fs.RawLocalFileSystem \\\n" +
 "-D filebench.file.bytes=$((10*1024*1024*1024)) \\\n" +
 "-D filebench.key.words=5 \\\n" +
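The fs.default.name to fs.defaultFS renames in these tests matter beyond naming hygiene: values can cross-reference keys through Configuration's ${var} substitution, and the yarn-default.xml change at the end of this patch points mapreduce.job.hdfs-servers at ${fs.defaultFS}. Per MAPREDUCE-3736, resolving through the deprecated ${fs.default.name} indirection could exceed the substitution depth limit, while the new reference expands in one step. A small sketch of the expansion (the host name is invented):

    import org.apache.hadoop.conf.Configuration;

    // Illustrative only: Configuration.get() expands ${fs.defaultFS}
    // when the referencing value is read.
    public class SubstitutionDemo {
      public static void main(String[] args) {
        Configuration conf = new Configuration(false);
        conf.set("fs.defaultFS", "hdfs://nn.example.com:8020");
        conf.set("mapreduce.job.hdfs-servers", "${fs.defaultFS}");
        System.out.println(conf.get("mapreduce.job.hdfs-servers"));
        // -> hdfs://nn.example.com:8020
      }
    }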
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestCombineFileInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestCombineFileInputFormat.java
index e07577e1c41..218ef85a3ab 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestCombineFileInputFormat.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestCombineFileInputFormat.java
@@ -41,7 +41,7 @@ public class TestCombineFileInputFormat {
   private static FileSystem localFs = null;
   static {
     try {
-      defaultConf.set("fs.default.name", "file:///");
+      defaultConf.set("fs.defaultFS", "file:///");
       localFs = FileSystem.getLocal(defaultConf);
     } catch (IOException e) {
       throw new RuntimeException("init failure", e);
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestConcatenatedCompressedInput.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestConcatenatedCompressedInput.java
index 1192ee70ce5..df409c10be8 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestConcatenatedCompressedInput.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestConcatenatedCompressedInput.java
@@ -69,7 +69,7 @@ public class TestConcatenatedCompressedInput {
   static {
     try {
-      defaultConf.set("fs.default.name", "file:///");
+      defaultConf.set("fs.defaultFS", "file:///");
       localFs = FileSystem.getLocal(defaultConf);
     } catch (IOException e) {
       throw new RuntimeException("init failure", e);
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMiniMRChildTask.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMiniMRChildTask.java
index 49c899cf1d5..2b5ffd4f0db 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMiniMRChildTask.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMiniMRChildTask.java
@@ -219,8 +219,8 @@ public void configure(JobConf job) {
       // check if X=$X:/abc works for LD_LIBRARY_PATH
       checkEnv("LD_LIBRARY_PATH", "/tmp", "append");
-      // check if X=/tmp works for an already existing parameter
-      checkEnv("HOME", "/tmp", "noappend");
+      // check if X=y works for an already existing parameter
+      checkEnv("LANG", "en_us_8859_1", "noappend");
       // check if X=/tmp for a new env variable
       checkEnv("MY_PATH", "/tmp", "noappend");
       // check if X=$X:/tmp works for a new env var and results into :/tmp
@@ -269,8 +269,8 @@ public void configure(JobConf job) {
       // check if X=$X:/abc works for LD_LIBRARY_PATH
       checkEnv("LD_LIBRARY_PATH", "/tmp", "append");
-      // check if X=/tmp works for an already existing parameter
-      checkEnv("HOME", "/tmp", "noappend");
+      // check if X=y works for an already existing parameter
+      checkEnv("LANG", "en_us_8859_1", "noappend");
       // check if X=/tmp for a new env variable
       checkEnv("MY_PATH", "/tmp", "noappend");
       // check if X=$X:/tmp works for a new env var and results into :/tmp
@@ -369,7 +369,7 @@ public void testTaskTempDir(){
    *   - x=y (x can be an already existing env variable or a new variable)
    *   - x=$x:y (replace $x with the current value of x)
    */
-  
+  @Test
   public void testTaskEnv(){
     try {
       JobConf conf = new JobConf(mr.getConfig());
@@ -392,6 +392,7 @@ public void testTaskEnv(){
    *   - x=y (x can be an already existing env variable or a new variable)
    *   - x=$x:y (replace $x with the current value of x)
    */
+  @Test
   public void testTaskOldEnv(){
     try {
       JobConf conf = new JobConf(mr.getConfig());
@@ -415,7 +416,7 @@ void runTestTaskEnv(JobConf conf, Path inDir, Path outDir, boolean oldConfigs)
                    EnvCheckMapper.class, EnvCheckReducer.class);
     // test
     //  - new SET of new var (MY_PATH)
-    //  - set of old var (HOME)
+    //  - set of old var (LANG)
     //  - append to an old var from modified env (LD_LIBRARY_PATH)
     //  - append to an old var from tt's env (PATH)
     //  - append to a new var (NEW_PATH)
@@ -432,10 +433,10 @@ void runTestTaskEnv(JobConf conf, Path inDir, Path outDir, boolean oldConfigs)
       mapTaskJavaOpts = reduceTaskJavaOpts = TASK_OPTS_VAL;
     }
     conf.set(mapTaskEnvKey,
-        "MY_PATH=/tmp,HOME=/tmp,LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/tmp," +
+        "MY_PATH=/tmp,LANG=en_us_8859_1,LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/tmp," +
         "PATH=$PATH:/tmp,NEW_PATH=$NEW_PATH:/tmp");
     conf.set(reduceTaskEnvKey,
-        "MY_PATH=/tmp,HOME=/tmp,LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/tmp," +
+        "MY_PATH=/tmp,LANG=en_us_8859_1,LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/tmp," +
         "PATH=$PATH:/tmp,NEW_PATH=$NEW_PATH:/tmp");
     conf.set("path", System.getenv("PATH"));
     conf.set(mapTaskJavaOptsKey, mapTaskJavaOpts);
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestTextInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestTextInputFormat.java
index faadf795942..4066a8e55f8 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestTextInputFormat.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestTextInputFormat.java
@@ -55,7 +55,7 @@ public class TestTextInputFormat {
   private static FileSystem localFs = null;
   static {
     try {
-      defaultConf.set("fs.default.name", "file:///");
+      defaultConf.set("fs.defaultFS", "file:///");
       localFs = FileSystem.getLocal(defaultConf);
     } catch (IOException e) {
       throw new RuntimeException("init failure", e);
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMapCollection.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMapCollection.java
index 813bf9f1cc8..ecc01dbd02f 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMapCollection.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMapCollection.java
@@ -314,7 +314,7 @@ private static void runTest(String name, Job job) throws Exception {
     job.setNumReduceTasks(1);
     job.getConfiguration().set(MRConfig.FRAMEWORK_NAME, MRConfig.LOCAL_FRAMEWORK_NAME);
     job.getConfiguration().setInt(MRJobConfig.IO_SORT_FACTOR, 1000);
-    job.getConfiguration().set("fs.default.name", "file:///");
+    job.getConfiguration().set("fs.defaultFS", "file:///");
     job.getConfiguration().setInt("test.mapcollection.num.maps", 1);
     job.setInputFormatClass(FakeIF.class);
     job.setOutputFormatClass(NullOutputFormat.class);
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestFileInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestFileInputFormat.java
index 824e6842cff..28359585a2d 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestFileInputFormat.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestFileInputFormat.java
@@ -45,9 +45,9 @@ public class TestFileInputFormat {
   @Test
   public void testAddInputPath() throws IOException {
     final Configuration conf = new Configuration();
-    conf.set("fs.default.name", "s3://abc:xyz@hostname/");
+    conf.set("fs.defaultFS", "s3://abc:xyz@hostname/");
     final Job j = Job.getInstance(conf);
-    j.getConfiguration().set("fs.default.name", "s3://abc:xyz@hostname/");
+    j.getConfiguration().set("fs.defaultFS", "s3://abc:xyz@hostname/");
 
     //setup default fs
     final FileSystem defaultfs = FileSystem.get(conf);
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestMRKeyValueTextInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestMRKeyValueTextInputFormat.java
index ea3b83807ce..54ad498dd48 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestMRKeyValueTextInputFormat.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestMRKeyValueTextInputFormat.java
@@ -57,7 +57,7 @@ public class TestMRKeyValueTextInputFormat {
   private static FileSystem localFs = null;
   static {
     try {
-      defaultConf.set("fs.default.name", "file:///");
+      defaultConf.set("fs.defaultFS", "file:///");
       localFs = FileSystem.getLocal(defaultConf);
     } catch (IOException e) {
       throw new RuntimeException("init failure", e);
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index cea45798a71..217d6e1c775 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -457,7 +457,7 @@
   <property>
     <name>mapreduce.job.hdfs-servers</name>
-    <value>${fs.default.name}</value>
+    <value>${fs.defaultFS}</value>
   </property>