Merge trunk into HA branch.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-1623@1244645 13f79535-47bb-0310-9956-ffa450edef68
Aaron Myers 2012-02-15 19:04:17 +00:00
commit 1fb0ab92f8
26 changed files with 166 additions and 49 deletions

View File

@@ -169,6 +169,10 @@ Release 0.23.2 - UNRELEASED
HADOOP-8071. Avoid an extra packet in client code when nagling is
disabled. (todd)
HADOOP-6502. Improve the performance of Configuration.getClassByName when
the class is not found by caching negative results.
(sharad, todd via todd)
BUG FIXES
HADOOP-8042 When copying a file out of HDFS, modifying it, and uploading

View File

@@ -1146,6 +1146,22 @@ public void setStrings(String name, String... values) {
* @throws ClassNotFoundException if the class is not found.
*/
public Class<?> getClassByName(String name) throws ClassNotFoundException {
Class<?> ret = getClassByNameOrNull(name);
if (ret == null) {
throw new ClassNotFoundException("Class " + name + " not found");
}
return ret;
}
/**
* Load a class by name, returning null rather than throwing an exception
* if it couldn't be loaded. This is to avoid the overhead of creating
* an exception.
*
* @param name the class name
* @return the class object, or null if it could not be found.
*/
public Class<?> getClassByNameOrNull(String name) {
Map<String, Class<?>> map;
synchronized (CACHE_CLASSES) {
@@ -1157,12 +1173,20 @@ public Class<?> getClassByName(String name) throws ClassNotFoundException {
}
}
Class<?> clazz = map.get(name);
if (clazz == null) {
clazz = Class.forName(name, true, classLoader);
if (clazz != null) {
// two putters can race here, but they'll put the same class
map.put(name, clazz);
Class<?> clazz = null;
if (!map.containsKey(name)) {
try {
clazz = Class.forName(name, true, classLoader);
} catch (ClassNotFoundException e) {
map.put(name, null); // cache the negative result: class not found
return null;
}
// two putters can race here, but they'll put the same class
map.put(name, clazz);
} else { // check already performed on this class name
clazz = map.get(name);
if (clazz == null) { // found the negative
return null;
}
}

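The new getClassByNameOrNull above caches lookup misses so that repeated requests for a missing class skip the cost of filling in a ClassNotFoundException stack trace. A minimal self-contained sketch of the same idea (hypothetical class, not the Hadoop API; the patch stores null under the key and tests containsKey, whereas this sketch uses a sentinel value so a null-rejecting ConcurrentHashMap can be used):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class NegativeCachingClassLookup {
  // Sentinel standing in for "lookup already failed"; ConcurrentHashMap
  // cannot store null values, so a marker class is cached instead.
  private static final Class<?> NEGATIVE = NegativeCachingClassLookup.class;

  private final Map<String, Class<?>> cache =
      new ConcurrentHashMap<String, Class<?>>();
  private final ClassLoader loader = getClass().getClassLoader();

  public Class<?> getClassByNameOrNull(String name) {
    Class<?> cached = cache.get(name);
    if (cached != null) {
      return cached == NEGATIVE ? null : cached;
    }
    try {
      Class<?> clazz = Class.forName(name, true, loader);
      cache.put(name, clazz); // two putters can race; they store the same class
      return clazz;
    } catch (ClassNotFoundException e) {
      cache.put(name, NEGATIVE); // cache the miss
      return null;
    }
  }
}
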
View File

@@ -86,17 +86,22 @@ private static void setJobConf(Object theObject, Configuration conf) {
//invoke configure on theObject
try {
Class<?> jobConfClass =
conf.getClassByName("org.apache.hadoop.mapred.JobConf");
conf.getClassByNameOrNull("org.apache.hadoop.mapred.JobConf");
if (jobConfClass == null) {
return;
}
Class<?> jobConfigurableClass =
conf.getClassByName("org.apache.hadoop.mapred.JobConfigurable");
if (jobConfClass.isAssignableFrom(conf.getClass()) &&
conf.getClassByNameOrNull("org.apache.hadoop.mapred.JobConfigurable");
if (jobConfigurableClass == null) {
return;
}
if (jobConfClass.isAssignableFrom(conf.getClass()) &&
jobConfigurableClass.isAssignableFrom(theObject.getClass())) {
Method configureMethod =
jobConfigurableClass.getMethod("configure", jobConfClass);
configureMethod.invoke(theObject, conf);
}
} catch (ClassNotFoundException e) {
//JobConf/JobConfigurable not in classpath. no need to configure
} catch (Exception e) {
throw new RuntimeException("Error in configuring object", e);
}

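The rewritten setJobConf treats org.apache.hadoop.mapred.JobConf and JobConfigurable as optional classpath dependencies: a missing class now returns early instead of throwing. A standalone sketch of that probe-then-reflect pattern (hypothetical helper names; only the two Hadoop class names are taken from the hunk):

import java.lang.reflect.Method;

public class OptionalConfigurer {
  /** Calls configure(conf) on target only if the mapred classes exist. */
  public static void maybeConfigure(Object target, Object conf) {
    ClassLoader cl = target.getClass().getClassLoader();
    Class<?> jobConfClass = loadOrNull("org.apache.hadoop.mapred.JobConf", cl);
    if (jobConfClass == null) {
      return; // mapred is not on the classpath; nothing to configure
    }
    Class<?> configurable =
        loadOrNull("org.apache.hadoop.mapred.JobConfigurable", cl);
    if (configurable == null) {
      return;
    }
    if (jobConfClass.isInstance(conf) && configurable.isInstance(target)) {
      try {
        Method configure = configurable.getMethod("configure", jobConfClass);
        configure.invoke(target, conf);
      } catch (ReflectiveOperationException e) {
        throw new RuntimeException("Error in configuring object", e);
      }
    }
  }

  private static Class<?> loadOrNull(String name, ClassLoader cl) {
    try {
      return Class.forName(name, false, cl); // false: no static init yet
    } catch (ClassNotFoundException e) {
      return null;
    }
  }
}
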
View File

@@ -216,6 +216,13 @@
determine the host, port, etc. for a filesystem.</description>
</property>
<property>
<name>fs.default.name</name>
<value>file:///</value>
<description>Deprecated. Use (fs.defaultFS) property
instead</description>
</property>
<property>
<name>fs.trash.interval</name>
<value>0</value>

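The core-default.xml entry above keeps the old fs.default.name key resolvable while documenting fs.defaultFS as its replacement. A hedged sketch of the fallback a client could apply by hand (plain java.util.Properties; Hadoop's own Configuration performs this mapping internally):

import java.util.Properties;

public class DefaultFsResolver {
  /** Prefer fs.defaultFS, fall back to the deprecated fs.default.name. */
  public static String resolveDefaultFs(Properties conf) {
    String fs = conf.getProperty("fs.defaultFS");
    if (fs == null) {
      fs = conf.getProperty("fs.default.name"); // deprecated spelling
    }
    return fs != null ? fs : "file:///"; // shipped default
  }

  public static void main(String[] args) {
    Properties old = new Properties();
    old.setProperty("fs.default.name", "hdfs://namenode:8020");
    System.out.println(resolveDefaultFs(old)); // prints hdfs://namenode:8020
  }
}
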
View File

@@ -210,6 +210,9 @@ Trunk (unreleased changes)
dfs.client.block.write.replace-datanode-on-failure.enable to be mistakenly
disabled. (atm)
HDFS-2525. Race between BlockPoolSliceScanner and append. (Brandon Li
via jitendra)
Release 0.23.2 - UNRELEASED
INCOMPATIBLE CHANGES
@@ -245,6 +248,9 @@ Release 0.23.2 - UNRELEASED
HDFS-2815. Namenode sometimes does not come out of safemode during
NN crash + restart. (Uma Maheswara Rao via suresh)
HDFS-2950. Secondary NN HTTPS address should be listed as a
NAMESERVICE_SPECIFIC_KEY. (todd)
Release 0.23.1 - 2012-02-08
INCOMPATIBLE CHANGES

View File

@@ -99,6 +99,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final int DFS_NAMENODE_SAFEMODE_MIN_DATANODES_DEFAULT = 0;
public static final String DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY = "dfs.namenode.secondary.http-address";
public static final String DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_DEFAULT = "0.0.0.0:50090";
public static final String DFS_NAMENODE_SECONDARY_HTTPS_PORT_KEY = "dfs.namenode.secondary.https-port";
public static final int DFS_NAMENODE_SECONDARY_HTTPS_PORT_DEFAULT = 50490;
public static final String DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY = "dfs.namenode.checkpoint.check.period";
public static final long DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_DEFAULT = 60;
public static final String DFS_NAMENODE_CHECKPOINT_PERIOD_KEY = "dfs.namenode.checkpoint.period";

View File

@@ -81,6 +81,7 @@ private static void addDeprecatedKeys() {
deprecate("dfs.safemode.extension", DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY);
deprecate("dfs.safemode.threshold.pct", DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY);
deprecate("dfs.secondary.http.address", DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY);
deprecate("dfs.secondary.https.port", DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTPS_PORT_KEY);
deprecate("dfs.socket.timeout", DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY);
deprecate("fs.checkpoint.dir", DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY);
deprecate("fs.checkpoint.edits.dir", DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY);

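The deprecate(...) calls above extend a table that rewrites legacy key names to their dfs.namenode.* replacements before lookup. A minimal sketch of such a table (hypothetical helper, not Hadoop's internal deprecation machinery; the two key pairs are taken from the hunk):

import java.util.HashMap;
import java.util.Map;

public class DeprecatedKeys {
  private static final Map<String, String> DEPRECATIONS =
      new HashMap<String, String>();

  private static void deprecate(String oldKey, String newKey) {
    DEPRECATIONS.put(oldKey, newKey);
  }

  static {
    deprecate("dfs.secondary.http.address",
        "dfs.namenode.secondary.http-address");
    deprecate("dfs.secondary.https.port",
        "dfs.namenode.secondary.https-port");
  }

  /** Rewrites a possibly-deprecated key to its canonical name. */
  public static String canonicalize(String key) {
    String replacement = DEPRECATIONS.get(key);
    return replacement != null ? replacement : key;
  }
}
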
View File

@@ -51,11 +51,8 @@
import org.apache.hadoop.io.IOUtils;
/**
* Performs two types of scanning:
* <li> Gets block files from the data directories and reconciles the
* difference between the blocks on the disk and in memory.</li>
* <li> Scans the data directories for block files under a block pool
* and verifies that the files are not corrupt</li>
* Scans the block files under a block pool and verifies that the
* files are not corrupt.
* This keeps track of blocks and their last verification times.
Currently it does not modify the metadata for the block.
*/
@@ -430,6 +427,19 @@ private void verifyBlock(ExtendedBlock block) {
return;
}
// If the block exists, the exception may be due to a race with a write:
// the BlockSender got an old block path in rbw. BlockReceiver moved the
// block from rbw to finalized, but BlockSender tried to open the file
// before BlockReceiver updated the VolumeMap. The state of the block can
// change again now, so ignore this error here. If a block really was
// deleted by mistake, DirectoryScan should catch it.
if (e instanceof FileNotFoundException) {
LOG.info("Verification failed for " + block +
". It may be due to a race with a write.");
deleteBlock(block.getLocalBlock());
return;
}
LOG.warn((second ? "Second " : "First ") + "Verification failed for "
+ block, e);

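The FileNotFoundException branch above encodes a policy: a block file that vanishes mid-scan is more likely a finalize/append race than corruption, so the scanner logs, drops the stale entry, and moves on. A simplified sketch of that policy (hypothetical scanner; a real CRC check would replace the placeholder):

import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;

public class RaceTolerantScanner {
  /** Returns true if the block verified clean or vanished benignly. */
  public boolean verify(String blockFile) throws IOException {
    try (FileInputStream in = new FileInputStream(blockFile)) {
      int b;
      long sum = 0;
      while ((b = in.read()) != -1) {
        sum += b; // placeholder for a real checksum verification
      }
      return sum >= 0;
    } catch (FileNotFoundException e) {
      // A concurrent writer may have moved the file (e.g. rbw -> finalized)
      // between listing and open; a genuinely deleted block is caught by a
      // later directory scan, so do not flag corruption here.
      return true;
    }
  }
}
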
View File

@@ -146,6 +146,7 @@ public static enum OperationCategory {
DFS_NAMENODE_HTTPS_ADDRESS_KEY,
DFS_NAMENODE_KEYTAB_FILE_KEY,
DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
DFS_NAMENODE_SECONDARY_HTTPS_PORT_KEY,
DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY,
DFS_NAMENODE_BACKUP_ADDRESS_KEY,
DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY,

View File

@@ -254,7 +254,8 @@ public HttpServer run() throws IOException, InterruptedException {
Krb5AndCertsSslSocketConnector.KRB5_CIPHER_SUITES.get(0));
InetSocketAddress secInfoSocAddr =
NetUtils.createSocketAddr(infoBindAddress + ":"+ conf.getInt(
"dfs.secondary.https.port", 443));
DFS_NAMENODE_SECONDARY_HTTPS_PORT_KEY,
DFS_NAMENODE_SECONDARY_HTTPS_PORT_DEFAULT));
imagePort = secInfoSocAddr.getPort();
infoServer.addSslListener(secInfoSocAddr, conf, false, true);
}

View File

@@ -47,12 +47,6 @@ public class TestAppendDifferentChecksum {
public static void setupCluster() throws IOException {
Configuration conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096);
// disable block scanner, since otherwise this test can trigger
// HDFS-2525, which is a different bug than we're trying to unit test
// here! When HDFS-2525 is fixed, this can be removed.
conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1);
conf.set("fs.hdfs.impl.disable.cache", "true");
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(1)

View File

@@ -94,6 +94,9 @@ Release 0.23.2 - UNRELEASED
IMPROVEMENTS
MAPREDUCE-3854. Fixed and reenabled tests related to MR child JVM's
environment variables in TestMiniMRChildTask. (Tom White via vinodkv)
OPTIMIZATIONS
BUG FIXES
@@ -103,7 +106,10 @@ Release 0.23.2 - UNRELEASED
MAPREDUCE-3852. Test TestLinuxResourceCalculatorPlugin failing. (Thomas
Graves via mahadev)
MAPREDUCE-3736. Variable substitution depth too large for fs.default.name
causes jobs to fail (ahmed via tucu).
Release 0.23.1 - 2012-02-08
INCOMPATIBLE CHANGES
@@ -812,6 +818,9 @@ Release 0.23.1 - 2012-02-08
MAPREDUCE-3802. Added test to validate that AM can crash multiple times and
still can recover successfully after MAPREDUCE-3846. (vinodkv)
MAPREDUCE-3858. Task attempt failure during commit results in task never completing.
(Tom White via mahadev)
Release 0.23.0 - 2011-11-01
INCOMPATIBLE CHANGES

View File

@@ -832,6 +832,9 @@ private static class AttemptFailedTransition implements
public TaskState transition(TaskImpl task, TaskEvent event) {
task.failedAttempts++;
TaskTAttemptEvent castEvent = (TaskTAttemptEvent) event;
if (castEvent.getTaskAttemptID().equals(task.commitAttempt)) {
task.commitAttempt = null;
}
TaskAttempt attempt = task.attempts.get(castEvent.getTaskAttemptID());
if (attempt.getAssignedContainerMgrAddress() != null) {
//container was assigned
@@ -877,6 +880,7 @@ protected TaskState getDefaultState(Task task) {
protected void unSucceed(TaskImpl task) {
++task.numberUncompletedAttempts;
task.commitAttempt = null;
task.successfulAttempt = null;
}
}

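The TaskImpl change above fixes a stuck-task bug: a failing attempt that had been granted the commit slot must release it (and unSucceed must likewise clear successfulAttempt), or no retry can ever commit. The invariant in isolation, as a hedged sketch (hypothetical class; MapReduce tracks attempts with much richer state):

public class CommitSlot {
  private String commitAttempt; // attempt currently allowed to commit

  /** First asker reserves the slot; only the holder may commit. */
  public synchronized boolean canCommit(String attemptId) {
    if (commitAttempt == null) {
      commitAttempt = attemptId;
    }
    return attemptId.equals(commitAttempt);
  }

  /** Without this release, a failed committer blocks every retry. */
  public synchronized void onAttemptFailed(String attemptId) {
    if (attemptId.equals(commitAttempt)) {
      commitAttempt = null;
    }
  }
}
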
View File

@@ -18,6 +18,8 @@
package org.apache.hadoop.mapreduce.v2.app.job.impl;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@@ -261,6 +263,12 @@ private void launchTaskAttempt(TaskAttemptId attemptId) {
assertTaskRunningState();
}
private void commitTaskAttempt(TaskAttemptId attemptId) {
mockTask.handle(new TaskTAttemptEvent(attemptId,
TaskEventType.T_ATTEMPT_COMMIT_PENDING));
assertTaskRunningState();
}
private MockTaskAttemptImpl getLastAttempt() {
return taskAttempts.get(taskAttempts.size()-1);
}
@@ -279,32 +287,45 @@ private void killRunningTaskAttempt(TaskAttemptId attemptId) {
assertTaskRunningState();
}
private void failRunningTaskAttempt(TaskAttemptId attemptId) {
mockTask.handle(new TaskTAttemptEvent(attemptId,
TaskEventType.T_ATTEMPT_FAILED));
assertTaskRunningState();
}
/**
* {@link TaskState#NEW}
*/
private void assertTaskNewState() {
assertEquals(mockTask.getState(), TaskState.NEW);
assertEquals(TaskState.NEW, mockTask.getState());
}
/**
* {@link TaskState#SCHEDULED}
*/
private void assertTaskScheduledState() {
assertEquals(mockTask.getState(), TaskState.SCHEDULED);
assertEquals(TaskState.SCHEDULED, mockTask.getState());
}
/**
* {@link TaskState#RUNNING}
*/
private void assertTaskRunningState() {
assertEquals(mockTask.getState(), TaskState.RUNNING);
assertEquals(TaskState.RUNNING, mockTask.getState());
}
/**
* {@link TaskState#KILL_WAIT}
*/
private void assertTaskKillWaitState() {
assertEquals(mockTask.getState(), TaskState.KILL_WAIT);
assertEquals(TaskState.KILL_WAIT, mockTask.getState());
}
/**
* {@link TaskState#SUCCEEDED}
*/
private void assertTaskSucceededState() {
assertEquals(TaskState.SUCCEEDED, mockTask.getState());
}
@Test
@@ -409,5 +430,32 @@ public void testTaskProgress() {
assert(mockTask.getProgress() == progress);
}
@Test
public void testFailureDuringTaskAttemptCommit() {
TaskId taskId = getNewTaskID();
scheduleTaskAttempt(taskId);
launchTaskAttempt(getLastAttempt().getAttemptId());
updateLastAttemptState(TaskAttemptState.COMMIT_PENDING);
commitTaskAttempt(getLastAttempt().getAttemptId());
// During the task attempt commit there is an exception which causes
// the attempt to fail
updateLastAttemptState(TaskAttemptState.FAILED);
failRunningTaskAttempt(getLastAttempt().getAttemptId());
assertEquals(2, taskAttempts.size());
updateLastAttemptState(TaskAttemptState.SUCCEEDED);
commitTaskAttempt(getLastAttempt().getAttemptId());
mockTask.handle(new TaskTAttemptEvent(getLastAttempt().getAttemptId(),
TaskEventType.T_ATTEMPT_SUCCEEDED));
assertFalse("First attempt should not commit",
mockTask.canCommit(taskAttempts.get(0).getAttemptId()));
assertTrue("Second attempt should commit",
mockTask.canCommit(getLastAttempt().getAttemptId()));
assertTaskSucceededState();
}
}

View File

@@ -159,7 +159,7 @@ private void testWithConf(Configuration conf) throws IOException,
public void testLocalJobRunner() throws Exception {
Configuration c = new Configuration();
c.set(JTConfig.JT_IPC_ADDRESS, "local");
c.set("fs.default.name", "file:///");
c.set("fs.defaultFS", "file:///");
testWithConf(c);
}

View File

@@ -59,7 +59,7 @@ public void testNoDefaults() throws Exception {
JobConf conf = new JobConf(false);
conf.set("fs.default.name", createJobConf().get("fs.default.name"));
conf.set("fs.defaultFS", createJobConf().get("fs.defaultFS"));
conf.setJobName("mr");
@@ -100,4 +100,4 @@ public void testNoDefaults() throws Exception {
}
}
}

View File

@@ -1024,7 +1024,7 @@ public static void main(String[] args) {
if(testFile != null) {
LOG.info("Start JHLA test ============ ");
LocalFileSystem lfs = FileSystem.getLocal(conf);
conf.set("fs.default.name", "file:///");
conf.set("fs.defaultFS", "file:///");
JHLAMapper map = new JHLAMapper(conf);
map.parseLogFile(lfs, new Path(testFile), 0L,
new LoggingCollector(), Reporter.NULL);

View File

@@ -53,7 +53,7 @@ static int printUsage() {
"unless they are also explicitly included, as in \"-pln -zip\"\n" +
"Note that CompressionType params only apply to SequenceFiles\n\n" +
"Useful options to set:\n" +
"-D fs.default.name=\"file:///\" \\\n" +
"-D fs.defaultFS=\"file:///\" \\\n" +
"-D fs.file.impl=org.apache.hadoop.fs.RawLocalFileSystem \\\n" +
"-D filebench.file.bytes=$((10*1024*1024*1024)) \\\n" +
"-D filebench.key.words=5 \\\n" +

View File

@@ -41,7 +41,7 @@ public class TestCombineFileInputFormat {
private static FileSystem localFs = null;
static {
try {
defaultConf.set("fs.default.name", "file:///");
defaultConf.set("fs.defaultFS", "file:///");
localFs = FileSystem.getLocal(defaultConf);
} catch (IOException e) {
throw new RuntimeException("init failure", e);

View File

@@ -69,7 +69,7 @@ public class TestConcatenatedCompressedInput {
static {
try {
defaultConf.set("fs.default.name", "file:///");
defaultConf.set("fs.defaultFS", "file:///");
localFs = FileSystem.getLocal(defaultConf);
} catch (IOException e) {
throw new RuntimeException("init failure", e);

View File

@@ -219,8 +219,8 @@ public void configure(JobConf job) {
// check if X=$X:/abc works for LD_LIBRARY_PATH
checkEnv("LD_LIBRARY_PATH", "/tmp", "append");
// check if X=/tmp works for an already existing parameter
checkEnv("HOME", "/tmp", "noappend");
// check if X=y works for an already existing parameter
checkEnv("LANG", "en_us_8859_1", "noappend");
// check if X=/tmp for a new env variable
checkEnv("MY_PATH", "/tmp", "noappend");
// check if X=$X:/tmp works for a new env var and results into :/tmp
@@ -269,8 +269,8 @@ public void configure(JobConf job) {
// check if X=$X:/abc works for LD_LIBRARY_PATH
checkEnv("LD_LIBRARY_PATH", "/tmp", "append");
// check if X=/tmp works for an already existing parameter
checkEnv("HOME", "/tmp", "noappend");
// check if X=y works for an already existing parameter
checkEnv("LANG", "en_us_8859_1", "noappend");
// check if X=/tmp for a new env variable
checkEnv("MY_PATH", "/tmp", "noappend");
// check if X=$X:/tmp works for a new env var and results into :/tmp
@@ -369,7 +369,7 @@ public void testTaskTempDir(){
* - x=y (x can be an already existing env variable or a new variable)
* - x=$x:y (replace $x with the current value of x)
*/
@Test
public void testTaskEnv(){
try {
JobConf conf = new JobConf(mr.getConfig());
@@ -392,6 +392,7 @@ public void testTaskEnv(){
* - x=y (x can be an already existing env variable or a new variable)
* - x=$x:y (replace $x with the current value of x)
*/
@Test
public void testTaskOldEnv(){
try {
JobConf conf = new JobConf(mr.getConfig());
@@ -415,7 +416,7 @@ void runTestTaskEnv(JobConf conf, Path inDir, Path outDir, boolean oldConfigs)
EnvCheckMapper.class, EnvCheckReducer.class);
// test
// - new SET of new var (MY_PATH)
// - set of old var (HOME)
// - set of old var (LANG)
// - append to an old var from modified env (LD_LIBRARY_PATH)
// - append to an old var from tt's env (PATH)
// - append to a new var (NEW_PATH)
@@ -432,10 +433,10 @@ void runTestTaskEnv(JobConf conf, Path inDir, Path outDir, boolean oldConfigs)
mapTaskJavaOpts = reduceTaskJavaOpts = TASK_OPTS_VAL;
}
conf.set(mapTaskEnvKey,
"MY_PATH=/tmp,HOME=/tmp,LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/tmp," +
"MY_PATH=/tmp,LANG=en_us_8859_1,LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/tmp," +
"PATH=$PATH:/tmp,NEW_PATH=$NEW_PATH:/tmp");
conf.set(reduceTaskEnvKey,
"MY_PATH=/tmp,HOME=/tmp,LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/tmp," +
"MY_PATH=/tmp,LANG=en_us_8859_1,LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/tmp," +
"PATH=$PATH:/tmp,NEW_PATH=$NEW_PATH:/tmp");
conf.set("path", System.getenv("PATH"));
conf.set(mapTaskJavaOptsKey, mapTaskJavaOpts);

View File

@@ -55,7 +55,7 @@ public class TestTextInputFormat {
private static FileSystem localFs = null;
static {
try {
defaultConf.set("fs.default.name", "file:///");
defaultConf.set("fs.defaultFS", "file:///");
localFs = FileSystem.getLocal(defaultConf);
} catch (IOException e) {
throw new RuntimeException("init failure", e);

View File

@@ -314,7 +314,7 @@ private static void runTest(String name, Job job) throws Exception {
job.setNumReduceTasks(1);
job.getConfiguration().set(MRConfig.FRAMEWORK_NAME, MRConfig.LOCAL_FRAMEWORK_NAME);
job.getConfiguration().setInt(MRJobConfig.IO_SORT_FACTOR, 1000);
job.getConfiguration().set("fs.default.name", "file:///");
job.getConfiguration().set("fs.defaultFS", "file:///");
job.getConfiguration().setInt("test.mapcollection.num.maps", 1);
job.setInputFormatClass(FakeIF.class);
job.setOutputFormatClass(NullOutputFormat.class);

View File

@@ -45,9 +45,9 @@ public class TestFileInputFormat {
@Test
public void testAddInputPath() throws IOException {
final Configuration conf = new Configuration();
conf.set("fs.default.name", "s3://abc:xyz@hostname/");
conf.set("fs.defaultFS", "s3://abc:xyz@hostname/");
final Job j = Job.getInstance(conf);
j.getConfiguration().set("fs.default.name", "s3://abc:xyz@hostname/");
j.getConfiguration().set("fs.defaultFS", "s3://abc:xyz@hostname/");
//setup default fs
final FileSystem defaultfs = FileSystem.get(conf);

View File

@@ -57,7 +57,7 @@ public class TestMRKeyValueTextInputFormat {
private static FileSystem localFs = null;
static {
try {
defaultConf.set("fs.default.name", "file:///");
defaultConf.set("fs.defaultFS", "file:///");
localFs = FileSystem.getLocal(defaultConf);
} catch (IOException e) {
throw new RuntimeException("init failure", e);

View File

@@ -457,7 +457,7 @@
<property>
<name>mapreduce.job.hdfs-servers</name>
<value>${fs.default.name}</value>
<value>${fs.defaultFS}</value>
</property>
<!-- WebAppProxy Configuration-->