From bc2340e8b5994cbac5cdc788d8616c0ed3da7a81 Mon Sep 17 00:00:00 2001 From: Thomas Graves Date: Wed, 11 Apr 2012 15:24:19 +0000 Subject: [PATCH 01/57] HADOOP-8147. test-patch should run tests with -fn to avoid masking test failures (Robert Evans via tgraves) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1324816 13f79535-47bb-0310-9956-ffa450edef68 --- dev-support/test-patch.sh | 21 +++++++------------ .../hadoop-common/CHANGES.txt | 3 +++ 2 files changed, 11 insertions(+), 13 deletions(-) diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh index 673f8ddd48d..acec313de3c 100755 --- a/dev-support/test-patch.sh +++ b/dev-support/test-patch.sh @@ -627,22 +627,17 @@ runTests () { echo "" echo "" - echo "$MVN clean install -Pnative -D${PROJECT_NAME}PatchProcess" - $MVN clean install -Pnative -D${PROJECT_NAME}PatchProcess - if [[ $? != 0 ]] ; then - ### Find and format names of failed tests - failed_tests=`find . -name 'TEST*.xml' | xargs $GREP -l -E " createJobHistoryHandler( return this.jobHistoryEventHandler; } + protected AbstractService createStagingDirCleaningService() { + return new StagingDirCleaningService(); + } + protected Speculator createSpeculator(Configuration conf, AppContext context) { Class speculatorClass; @@ -710,6 +712,22 @@ public synchronized void stop() { } } + private final class StagingDirCleaningService extends AbstractService { + StagingDirCleaningService() { + super(StagingDirCleaningService.class.getName()); + } + + @Override + public synchronized void stop() { + try { + cleanupStagingDir(); + } catch (IOException io) { + LOG.error("Failed to cleanup staging dir: ", io); + } + super.stop(); + } + } + private class RunningAppContext implements AppContext { private final Map jobs = new ConcurrentHashMap(); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java index feb62724da6..5bf26fed0f9 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java @@ -428,9 +428,13 @@ protected void attemptLaunched(TaskAttemptId attemptID) { @Override protected ContainerAllocator createContainerAllocator( ClientService clientService, final AppContext context) { - return new ContainerAllocator(){ - private int containerCount; - @Override + return new MRAppContainerAllocator(); + } + + protected class MRAppContainerAllocator implements ContainerAllocator { + private int containerCount; + + @Override public void handle(ContainerAllocatorEvent event) { ContainerId cId = recordFactory.newRecordInstance(ContainerId.class); cId.setApplicationAttemptId(getContext().getApplicationAttemptId()); @@ -452,7 +456,6 @@ public void handle(ContainerAllocatorEvent event) { new TaskAttemptContainerAssignedEvent(event.getAttemptID(), container, null)); } - }; } @Override diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRApp.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRApp.java index 68d07a7ef2e..3ca9c24bad4 100644 --- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRApp.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRApp.java @@ -18,11 +18,10 @@ package org.apache.hadoop.mapreduce.v2.app; +import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.spy; -import java.io.IOException; import java.util.Iterator; import junit.framework.Assert; @@ -36,14 +35,11 @@ import org.apache.hadoop.mapreduce.v2.app.job.Job; import org.apache.hadoop.mapreduce.v2.app.job.Task; import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt; -import org.apache.hadoop.mapreduce.v2.app.job.event.JobFinishEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEventType; import org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.yarn.YarnException; import org.junit.Test; /** @@ -237,71 +233,6 @@ public void checkTaskStateTypeConversion() { } } - private final class MRAppTestCleanup extends MRApp { - boolean hasStopped; - boolean cleanedBeforeStopped; - - public MRAppTestCleanup(int maps, int reduces, boolean autoComplete, - String testName, boolean cleanOnStart) { - super(maps, reduces, autoComplete, testName, cleanOnStart); - hasStopped = false; - cleanedBeforeStopped = false; - } - - @Override - protected Job createJob(Configuration conf) { - UserGroupInformation currentUser = null; - try { - currentUser = UserGroupInformation.getCurrentUser(); - } catch (IOException e) { - throw new YarnException(e); - } - Job newJob = new TestJob(getJobId(), getAttemptID(), conf, - getDispatcher().getEventHandler(), - getTaskAttemptListener(), getContext().getClock(), - getCommitter(), isNewApiCommitter(), - currentUser.getUserName(), getContext()); - ((AppContext) getContext()).getAllJobs().put(newJob.getID(), newJob); - - getDispatcher().register(JobFinishEvent.Type.class, - createJobFinishEventHandler()); - - return newJob; - } - - @Override - public void cleanupStagingDir() throws IOException { - cleanedBeforeStopped = !hasStopped; - } - - @Override - public synchronized void stop() { - hasStopped = true; - super.stop(); - } - - @Override - protected void sysexit() { - } - } - - @Test - public void testStagingCleanupOrder() throws Exception { - MRAppTestCleanup app = new MRAppTestCleanup(1, 1, true, - this.getClass().getName(), true); - JobImpl job = (JobImpl)app.submit(new Configuration()); - app.waitForState(job, JobState.SUCCEEDED); - app.verifyCompleted(); - - int waitTime = 20 * 1000; - while (waitTime > 0 && !app.cleanedBeforeStopped) { - Thread.sleep(100); - waitTime -= 100; - } - Assert.assertTrue("Staging directory not cleaned before notifying RM", - app.cleanedBeforeStopped); - } - public static void main(String[] args) throws Exception { TestMRApp t = new TestMRApp(); t.testMapReduce(); @@ -310,6 +241,5 @@ public static void main(String[] args) throws Exception { t.testCompletedMapsForReduceSlowstart(); t.testJobError(); t.testCountersOnJobFinish(); - t.testStagingCleanupOrder(); } } diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java index e0dbac97b6f..063fcfa2cfd 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java @@ -26,6 +26,7 @@ import java.io.IOException; +import junit.framework.Assert; import junit.framework.TestCase; import org.apache.commons.logging.Log; @@ -35,12 +36,21 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.mapreduce.MRJobConfig; import org.apache.hadoop.mapreduce.v2.api.records.JobId; +import org.apache.hadoop.mapreduce.v2.api.records.JobState; +import org.apache.hadoop.mapreduce.v2.app.client.ClientService; +import org.apache.hadoop.mapreduce.v2.app.job.Job; import org.apache.hadoop.mapreduce.v2.app.job.event.JobFinishEvent; +import org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl; +import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocator; +import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocatorEvent; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.yarn.YarnException; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; +import org.apache.hadoop.yarn.service.AbstractService; import org.apache.hadoop.yarn.util.BuilderUtils; import org.junit.Test; @@ -103,4 +113,89 @@ public Configuration getConfig() { } } + private final class MRAppTestCleanup extends MRApp { + boolean stoppedContainerAllocator; + boolean cleanedBeforeContainerAllocatorStopped; + + public MRAppTestCleanup(int maps, int reduces, boolean autoComplete, + String testName, boolean cleanOnStart) { + super(maps, reduces, autoComplete, testName, cleanOnStart); + stoppedContainerAllocator = false; + cleanedBeforeContainerAllocatorStopped = false; + } + + @Override + protected Job createJob(Configuration conf) { + UserGroupInformation currentUser = null; + try { + currentUser = UserGroupInformation.getCurrentUser(); + } catch (IOException e) { + throw new YarnException(e); + } + Job newJob = new TestJob(getJobId(), getAttemptID(), conf, + getDispatcher().getEventHandler(), + getTaskAttemptListener(), getContext().getClock(), + getCommitter(), isNewApiCommitter(), + currentUser.getUserName(), getContext()); + ((AppContext) getContext()).getAllJobs().put(newJob.getID(), newJob); + + getDispatcher().register(JobFinishEvent.Type.class, + createJobFinishEventHandler()); + + return newJob; + } + + @Override + protected ContainerAllocator createContainerAllocator( + ClientService clientService, AppContext context) { + return new TestCleanupContainerAllocator(); + } + + private class TestCleanupContainerAllocator extends AbstractService + implements ContainerAllocator { + private MRAppContainerAllocator allocator; + + TestCleanupContainerAllocator() { + super(TestCleanupContainerAllocator.class.getName()); + allocator = new MRAppContainerAllocator(); + } + + @Override + public void 
handle(ContainerAllocatorEvent event) { + allocator.handle(event); + } + + @Override + public synchronized void stop() { + stoppedContainerAllocator = true; + super.stop(); + } + } + + @Override + public void cleanupStagingDir() throws IOException { + cleanedBeforeContainerAllocatorStopped = !stoppedContainerAllocator; + } + + @Override + protected void sysexit() { + } + } + + @Test + public void testStagingCleanupOrder() throws Exception { + MRAppTestCleanup app = new MRAppTestCleanup(1, 1, true, + this.getClass().getName(), true); + JobImpl job = (JobImpl)app.submit(new Configuration()); + app.waitForState(job, JobState.SUCCEEDED); + app.verifyCompleted(); + + int waitTime = 20 * 1000; + while (waitTime > 0 && !app.cleanedBeforeContainerAllocatorStopped) { + Thread.sleep(100); + waitTime -= 100; + } + Assert.assertTrue("Staging directory not cleaned before notifying RM", + app.cleanedBeforeContainerAllocatorStopped); + } } \ No newline at end of file From ed678e52ce2c46e092ae4a99afd2f0901d7cf12f Mon Sep 17 00:00:00 2001 From: Tsz-wo Sze Date: Wed, 11 Apr 2012 17:49:28 +0000 Subject: [PATCH 03/57] HDFS-3179. Improve the exception message thrown by DataStreamer when it failed to add a datanode. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1324892 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 ++ .../apache/hadoop/hdfs/DFSOutputStream.java | 12 +++-- .../hdfs/TestReplaceDatanodeOnFailure.java | 53 +++++++++++++++++++ 3 files changed, 64 insertions(+), 4 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index b82704cfe51..f95ecd50a94 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -367,6 +367,9 @@ Release 2.0.0 - UNRELEASED HDFS-3249. Use ToolRunner.confirmPrompt in NameNode (todd) + HDFS-3179. Improve the exception message thrown by DataStreamer when + it failed to add a datanode. (szetszwo) + OPTIMIZATIONS HDFS-3024. Improve performance of stringification in addStoredBlock (todd) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java index 9322143a9ec..40de4cf202d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java @@ -128,7 +128,7 @@ class DFSOutputStream extends FSOutputSummer implements Syncable { private volatile boolean appendChunk = false; // appending to existing partial block private long initialFileSize = 0; // at time of file open private Progressable progress; - private short blockReplication; // replication factor of file + private final short blockReplication; // replication factor of file private class Packet { long seqno; // sequencenumber of buffer in block @@ -775,9 +775,13 @@ private void setHflush() { private int findNewDatanode(final DatanodeInfo[] original ) throws IOException { if (nodes.length != original.length + 1) { - throw new IOException("Failed to add a datanode:" - + " nodes.length != original.length + 1, nodes=" - + Arrays.asList(nodes) + ", original=" + Arrays.asList(original)); + throw new IOException("Failed to add a datanode. 
" + + "User may turn off this feature by setting " + + DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_POLICY_KEY + + " in configuration, where the current policy is " + + dfsClient.dtpReplaceDatanodeOnFailure + + ". (Nodes: current=" + Arrays.asList(nodes) + + ", original=" + Arrays.asList(original) + ")"); } for(int i = 0; i < nodes.length; i++) { int j = 0; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java index 31b44c447d9..86ca9ab73f0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java @@ -25,6 +25,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; @@ -234,4 +235,56 @@ void checkReplication() throws IOException { Assert.assertEquals(REPLICATION, dfsout.getNumCurrentReplicas()); } } + + @Test + public void testAppend() throws Exception { + final Configuration conf = new HdfsConfiguration(); + final short REPLICATION = (short)3; + + Assert.assertEquals(ReplaceDatanodeOnFailure.DEFAULT, ReplaceDatanodeOnFailure.get(conf)); + + final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf + ).numDataNodes(1).build(); + + try { + final DistributedFileSystem fs = (DistributedFileSystem)cluster.getFileSystem(); + final Path f = new Path(DIR, "testAppend"); + + { + LOG.info("create an empty file " + f); + fs.create(f, REPLICATION).close(); + final FileStatus status = fs.getFileStatus(f); + Assert.assertEquals(REPLICATION, status.getReplication()); + Assert.assertEquals(0L, status.getLen()); + } + + + final byte[] bytes = new byte[1000]; + { + LOG.info("append " + bytes.length + " bytes to " + f); + final FSDataOutputStream out = fs.append(f); + out.write(bytes); + out.close(); + + final FileStatus status = fs.getFileStatus(f); + Assert.assertEquals(REPLICATION, status.getReplication()); + Assert.assertEquals(bytes.length, status.getLen()); + } + + { + LOG.info("append another " + bytes.length + " bytes to " + f); + try { + final FSDataOutputStream out = fs.append(f); + out.write(bytes); + out.close(); + + Assert.fail(); + } catch(IOException ioe) { + LOG.info("This exception is expected", ioe); + } + } + } finally { + if (cluster != null) {cluster.shutdown();} + } + } } From 7809acd67b0e1c73cf9d07f2da690c2902576db5 Mon Sep 17 00:00:00 2001 From: Siddharth Seth Date: Wed, 11 Apr 2012 18:18:40 +0000 Subject: [PATCH 04/57] MAPREDUCE-3932. Fix the TaskAttempt state machine to handle CONTIANER_LAUNCHED and CONTIANER_LAUNCH_FAILED events in additional states. 
(Contributed by Robert Joseph Evans) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1324902 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 4 + .../mapreduce/v2/app/job/impl/JobImpl.java | 2 + .../v2/app/job/impl/TaskAttemptImpl.java | 17 +++- .../v2/app/job/impl/TestTaskAttempt.java | 90 ++++++++++++++++--- 4 files changed, 101 insertions(+), 12 deletions(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 17caf5a0de5..473ea7dd579 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -313,6 +313,10 @@ Release 0.23.3 - UNRELEASED MAPREDUCE-4099 amendment. ApplicationMaster will remove staging directory after the history service is stopped. (Jason Lowe via sseth) + MAPREDUCE-3932. Fix the TaskAttempt state machine to handle + CONTIANER_LAUNCHED and CONTIANER_LAUNCH_FAILED events in additional + states. (Robert Joseph Evans via sseth) + Release 0.23.2 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java index 98472d33cf1..f2f7a6c848c 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java @@ -347,6 +347,8 @@ JobEventType.JOB_KILL, new KillTasksTransition()) JobEventType.JOB_DIAGNOSTIC_UPDATE, JobEventType.JOB_TASK_ATTEMPT_FETCH_FAILURE, JobEventType.INTERNAL_ERROR)) + .addTransition(JobState.ERROR, JobState.ERROR, + JobEventType.JOB_COUNTER_UPDATE, COUNTER_UPDATE_TRANSITION) // create the topology tables .installTopology(); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java index 53c7211d62d..7ac334c8ffc 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java @@ -316,7 +316,9 @@ TaskAttemptEventType.TA_CONTAINER_CLEANED, new TaskCleanupTransition()) TaskAttemptEventType.TA_CONTAINER_COMPLETED, TaskAttemptEventType.TA_UPDATE, TaskAttemptEventType.TA_COMMIT_PENDING, + // Container launch events can arrive late TaskAttemptEventType.TA_CONTAINER_LAUNCHED, + TaskAttemptEventType.TA_CONTAINER_LAUNCH_FAILED, TaskAttemptEventType.TA_DONE, TaskAttemptEventType.TA_FAILMSG, TaskAttemptEventType.TA_TIMED_OUT)) @@ -338,6 +340,7 @@ TaskAttemptEventType.TA_CONTAINER_CLEANED, new TaskCleanupTransition()) TaskAttemptEventType.TA_UPDATE, TaskAttemptEventType.TA_COMMIT_PENDING, TaskAttemptEventType.TA_CONTAINER_LAUNCHED, + TaskAttemptEventType.TA_CONTAINER_LAUNCH_FAILED, TaskAttemptEventType.TA_DONE, TaskAttemptEventType.TA_FAILMSG, TaskAttemptEventType.TA_TIMED_OUT)) @@ -359,7 +362,10 @@ 
TaskAttemptEventType.TA_CONTAINER_CLEANED, new TaskCleanupTransition()) TaskAttemptEventType.TA_UPDATE, TaskAttemptEventType.TA_COMMIT_PENDING, TaskAttemptEventType.TA_DONE, - TaskAttemptEventType.TA_FAILMSG)) + TaskAttemptEventType.TA_FAILMSG, + // Container launch events can arrive late + TaskAttemptEventType.TA_CONTAINER_LAUNCHED, + TaskAttemptEventType.TA_CONTAINER_LAUNCH_FAILED)) // Transitions from KILL_TASK_CLEANUP .addTransition(TaskAttemptState.KILL_TASK_CLEANUP, @@ -377,7 +383,10 @@ TaskAttemptEventType.TA_CONTAINER_CLEANED, new TaskCleanupTransition()) TaskAttemptEventType.TA_UPDATE, TaskAttemptEventType.TA_COMMIT_PENDING, TaskAttemptEventType.TA_DONE, - TaskAttemptEventType.TA_FAILMSG)) + TaskAttemptEventType.TA_FAILMSG, + // Container launch events can arrive late + TaskAttemptEventType.TA_CONTAINER_LAUNCHED, + TaskAttemptEventType.TA_CONTAINER_LAUNCH_FAILED)) // Transitions from SUCCEEDED .addTransition(TaskAttemptState.SUCCEEDED, //only possible for map attempts @@ -405,7 +414,9 @@ TaskAttemptEventType.TA_CONTAINER_CLEANED, new TaskCleanupTransition()) TaskAttemptEventType.TA_ASSIGNED, TaskAttemptEventType.TA_CONTAINER_COMPLETED, TaskAttemptEventType.TA_UPDATE, + // Container launch events can arrive late TaskAttemptEventType.TA_CONTAINER_LAUNCHED, + TaskAttemptEventType.TA_CONTAINER_LAUNCH_FAILED, TaskAttemptEventType.TA_COMMIT_PENDING, TaskAttemptEventType.TA_DONE, TaskAttemptEventType.TA_FAILMSG)) @@ -420,7 +431,9 @@ TaskAttemptEventType.TA_CONTAINER_CLEANED, new TaskCleanupTransition()) TaskAttemptEventType.TA_ASSIGNED, TaskAttemptEventType.TA_CONTAINER_COMPLETED, TaskAttemptEventType.TA_UPDATE, + // Container launch events can arrive late TaskAttemptEventType.TA_CONTAINER_LAUNCHED, + TaskAttemptEventType.TA_CONTAINER_LAUNCH_FAILED, TaskAttemptEventType.TA_COMMIT_PENDING, TaskAttemptEventType.TA_DONE, TaskAttemptEventType.TA_FAILMSG)) diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java index 1b54a1ae667..e5ad3fd8226 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java @@ -19,6 +19,7 @@ package org.apache.hadoop.mapreduce.v2.app.job.impl; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; @@ -68,6 +69,9 @@ import org.apache.hadoop.mapreduce.v2.app.job.Job; import org.apache.hadoop.mapreduce.v2.app.job.Task; import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt; +import org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent; +import org.apache.hadoop.mapreduce.v2.app.job.event.JobEventType; +import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerAssignedEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptDiagnosticsUpdateEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType; @@ -81,9 +85,12 @@ import org.apache.hadoop.yarn.ClusterInfo; import 
org.apache.hadoop.yarn.SystemClock; import org.apache.hadoop.yarn.api.records.ApplicationAccessType; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; +import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.event.Event; import org.apache.hadoop.yarn.event.EventHandler; @@ -91,12 +98,16 @@ import org.junit.Test; import org.mockito.ArgumentCaptor; -@SuppressWarnings("unchecked") +@SuppressWarnings({"unchecked", "rawtypes"}) public class TestTaskAttempt{ - - @SuppressWarnings("rawtypes") @Test public void testAttemptContainerRequest() throws Exception { + //WARNING: This test must run first. This is because there is an + // optimization where the credentials passed in are cached statically so + // they do not need to be recomputed when creating a new + // ContainerLaunchContext. if other tests run first this code will cache + // their credentials and this test will fail trying to look for the + // credentials it inserted in. final Text SECRET_KEY_ALIAS = new Text("secretkeyalias"); final byte[] SECRET_KEY = ("secretkey").getBytes(); Map acls = @@ -125,7 +136,7 @@ public void testAttemptContainerRequest() throws Exception { Token jobToken = new Token( ("tokenid").getBytes(), ("tokenpw").getBytes(), new Text("tokenkind"), new Text("tokenservice")); - + TaskAttemptImpl taImpl = new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1, mock(TaskSplitMetaInfo.class), jobConf, taListener, @@ -134,7 +145,7 @@ public void testAttemptContainerRequest() throws Exception { jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, taImpl.getID().toString()); ContainerId containerId = BuilderUtils.newContainerId(1, 1, 1, 1); - + ContainerLaunchContext launchCtx = TaskAttemptImpl.createContainerLaunchContext(acls, containerId, jobConf, jobToken, taImpl.createRemoteTask(), @@ -185,7 +196,6 @@ public void testMRAppHistoryForReduce() throws Exception { testMRAppHistory(app); } - @SuppressWarnings("rawtypes") @Test public void testSingleRackRequest() throws Exception { TaskAttemptImpl.RequestContainerTransition rct = @@ -213,11 +223,10 @@ public void testSingleRackRequest() throws Exception { ContainerRequestEvent cre = (ContainerRequestEvent) arg.getAllValues().get(1); String[] requestedRacks = cre.getRacks(); - //Only a single occurance of /DefaultRack + //Only a single occurrence of /DefaultRack assertEquals(1, requestedRacks.length); } - @SuppressWarnings("rawtypes") @Test public void testHostResolveAttempt() throws Exception { TaskAttemptImpl.RequestContainerTransition rct = @@ -316,14 +325,12 @@ public void verifySlotMillis(int mapMemMb, int reduceMemMb, .getValue()); } - @SuppressWarnings("rawtypes") private TaskAttemptImpl createMapTaskAttemptImplForTest( EventHandler eventHandler, TaskSplitMetaInfo taskSplitMetaInfo) { Clock clock = new SystemClock(); return createMapTaskAttemptImplForTest(eventHandler, taskSplitMetaInfo, clock); } - @SuppressWarnings("rawtypes") private TaskAttemptImpl createMapTaskAttemptImplForTest( EventHandler eventHandler, TaskSplitMetaInfo taskSplitMetaInfo, Clock clock) { ApplicationId appId = BuilderUtils.newApplicationId(1, 1); @@ -394,4 +401,67 @@ public void handle(JobHistoryEvent event) { }; } } + + @Test + public void testLaunchFailedWhileKilling() throws 
Exception { + ApplicationId appId = BuilderUtils.newApplicationId(1, 2); + ApplicationAttemptId appAttemptId = + BuilderUtils.newApplicationAttemptId(appId, 0); + JobId jobId = MRBuilderUtils.newJobId(appId, 1); + TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP); + TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0); + Path jobFile = mock(Path.class); + + MockEventHandler eventHandler = new MockEventHandler(); + TaskAttemptListener taListener = mock(TaskAttemptListener.class); + when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0)); + + JobConf jobConf = new JobConf(); + jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class); + jobConf.setBoolean("fs.file.impl.disable.cache", true); + jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, ""); + jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10"); + + TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class); + when(splits.getLocations()).thenReturn(new String[] {"127.0.0.1"}); + + TaskAttemptImpl taImpl = + new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1, + splits, jobConf, taListener, + mock(OutputCommitter.class), mock(Token.class), new Credentials(), + new SystemClock(), null); + + NodeId nid = BuilderUtils.newNodeId("127.0.0.1", 0); + ContainerId contId = BuilderUtils.newContainerId(appAttemptId, 3); + Container container = mock(Container.class); + when(container.getId()).thenReturn(contId); + when(container.getNodeId()).thenReturn(nid); + + taImpl.handle(new TaskAttemptEvent(attemptId, + TaskAttemptEventType.TA_SCHEDULE)); + taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId, + container, mock(Map.class))); + taImpl.handle(new TaskAttemptEvent(attemptId, + TaskAttemptEventType.TA_KILL)); + taImpl.handle(new TaskAttemptEvent(attemptId, + TaskAttemptEventType.TA_CONTAINER_CLEANED)); + taImpl.handle(new TaskAttemptEvent(attemptId, + TaskAttemptEventType.TA_CONTAINER_LAUNCH_FAILED)); + assertFalse(eventHandler.internalError); + } + + public static class MockEventHandler implements EventHandler { + public boolean internalError; + + @Override + public void handle(Event event) { + if (event instanceof JobEvent) { + JobEvent je = ((JobEvent) event); + if (JobEventType.INTERNAL_ERROR == je.getType()) { + internalError = true; + } + } + } + + }; } From aca6ca0059424a242e21a87da79dbe01d15d8202 Mon Sep 17 00:00:00 2001 From: Thomas Graves Date: Wed, 11 Apr 2012 21:13:49 +0000 Subject: [PATCH 05/57] MAPREDUCE-4107. Fix tests in org.apache.hadoop.ipc.TestSocketFactory (Devaraj K via tgraves) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1325010 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 3 + .../apache/hadoop/ipc/TestSocketFactory.java | 199 ++++++++++-------- 2 files changed, 109 insertions(+), 93 deletions(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 473ea7dd579..14e61473178 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -233,6 +233,9 @@ Release 2.0.0 - UNRELEASED MAPREDUCE-4108. Fix tests in org.apache.hadoop.util.TestRunJar (Devaraj K via tgraves) + MAPREDUCE-4107. 
Fix tests in org.apache.hadoop.ipc.TestSocketFactory + (Devaraj K via tgraves) + Release 0.23.3 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/ipc/TestSocketFactory.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/ipc/TestSocketFactory.java index 87ab4e0cfd2..48e76f42ed4 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/ipc/TestSocketFactory.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/ipc/TestSocketFactory.java @@ -22,42 +22,96 @@ import java.net.Socket; import java.net.SocketAddress; -import junit.framework.TestCase; - import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdfs.DistributedFileSystem; -import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.mapred.JobClient; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.JobStatus; -import org.apache.hadoop.mapred.MiniMRCluster; import org.apache.hadoop.mapreduce.MRConfig; +import org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster; import org.apache.hadoop.net.StandardSocketFactory; -import org.junit.Ignore; +import org.junit.Assert; +import org.junit.Test; /** * This class checks that RPCs can use specialized socket factories. */ -@Ignore -public class TestSocketFactory extends TestCase { +public class TestSocketFactory { /** - * Check that we can reach a NameNode or a JobTracker using a specific + * Check that we can reach a NameNode or Resource Manager using a specific * socket factory */ + @Test public void testSocketFactory() throws IOException { // Create a standard mini-cluster Configuration sconf = new Configuration(); - MiniDFSCluster cluster = new MiniDFSCluster(sconf, 1, true, null); + MiniDFSCluster cluster = new MiniDFSCluster.Builder(sconf).numDataNodes(1) + .build(); final int nameNodePort = cluster.getNameNodePort(); // Get a reference to its DFS directly FileSystem fs = cluster.getFileSystem(); - assertTrue(fs instanceof DistributedFileSystem); + Assert.assertTrue(fs instanceof DistributedFileSystem); DistributedFileSystem directDfs = (DistributedFileSystem) fs; + Configuration cconf = getCustomSocketConfigs(nameNodePort); + + fs = FileSystem.get(cconf); + Assert.assertTrue(fs instanceof DistributedFileSystem); + DistributedFileSystem dfs = (DistributedFileSystem) fs; + + JobClient client = null; + MiniMRYarnCluster miniMRYarnCluster = null; + try { + // This will test RPC to the NameNode only. + // could we test Client-DataNode connections? 
+ Path filePath = new Path("/dir"); + + Assert.assertFalse(directDfs.exists(filePath)); + Assert.assertFalse(dfs.exists(filePath)); + + directDfs.mkdirs(filePath); + Assert.assertTrue(directDfs.exists(filePath)); + Assert.assertTrue(dfs.exists(filePath)); + + // This will test RPC to a Resource Manager + fs = FileSystem.get(sconf); + JobConf jobConf = new JobConf(); + FileSystem.setDefaultUri(jobConf, fs.getUri().toString()); + miniMRYarnCluster = initAndStartMiniMRYarnCluster(jobConf); + JobConf jconf = new JobConf(cconf); + jconf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME); + String rmAddress = jconf.get("yarn.resourcemanager.address"); + String[] split = rmAddress.split(":"); + jconf.set("yarn.resourcemanager.address", split[0] + ':' + + (Integer.parseInt(split[1]) + 10)); + client = new JobClient(jconf); + + JobStatus[] jobs = client.jobsToComplete(); + Assert.assertTrue(jobs.length == 0); + + } finally { + closeClient(client); + closeDfs(dfs); + closeDfs(directDfs); + stopMiniMRYarnCluster(miniMRYarnCluster); + shutdownDFSCluster(cluster); + } + } + + private MiniMRYarnCluster initAndStartMiniMRYarnCluster(JobConf jobConf) { + MiniMRYarnCluster miniMRYarnCluster; + miniMRYarnCluster = new MiniMRYarnCluster(this.getClass().getName(), 1); + miniMRYarnCluster.init(jobConf); + miniMRYarnCluster.start(); + return miniMRYarnCluster; + } + + private Configuration getCustomSocketConfigs(final int nameNodePort) { // Get another reference via network using a specific socket factory Configuration cconf = new Configuration(); FileSystem.setDefaultUri(cconf, String.format("hdfs://localhost:%s/", @@ -68,78 +122,49 @@ public void testSocketFactory() throws IOException { "org.apache.hadoop.ipc.DummySocketFactory"); cconf.set("hadoop.rpc.socket.factory.class.JobSubmissionProtocol", "org.apache.hadoop.ipc.DummySocketFactory"); + return cconf; + } - fs = FileSystem.get(cconf); - assertTrue(fs instanceof DistributedFileSystem); - DistributedFileSystem dfs = (DistributedFileSystem) fs; - - JobClient client = null; - MiniMRCluster mr = null; + private void shutdownDFSCluster(MiniDFSCluster cluster) { try { - // This will test RPC to the NameNode only. - // could we test Client-DataNode connections? 
- Path filePath = new Path("/dir"); + if (cluster != null) + cluster.shutdown(); - assertFalse(directDfs.exists(filePath)); - assertFalse(dfs.exists(filePath)); + } catch (Exception ignored) { + // nothing we can do + ignored.printStackTrace(); + } + } - directDfs.mkdirs(filePath); - assertTrue(directDfs.exists(filePath)); - assertTrue(dfs.exists(filePath)); + private void stopMiniMRYarnCluster(MiniMRYarnCluster miniMRYarnCluster) { + try { + if (miniMRYarnCluster != null) + miniMRYarnCluster.stop(); - // This will test TPC to a JobTracker - fs = FileSystem.get(sconf); - mr = new MiniMRCluster(1, fs.getUri().toString(), 1); - final int jobTrackerPort = mr.getJobTrackerPort(); + } catch (Exception ignored) { + // nothing we can do + ignored.printStackTrace(); + } + } - JobConf jconf = new JobConf(cconf); - jconf.set("mapred.job.tracker", String.format("localhost:%d", - jobTrackerPort + 10)); - jconf.set(MRConfig.FRAMEWORK_NAME, MRConfig.CLASSIC_FRAMEWORK_NAME); - client = new JobClient(jconf); + private void closeDfs(DistributedFileSystem dfs) { + try { + if (dfs != null) + dfs.close(); - JobStatus[] jobs = client.jobsToComplete(); - assertTrue(jobs.length == 0); + } catch (Exception ignored) { + // nothing we can do + ignored.printStackTrace(); + } + } - } finally { - try { - if (client != null) - client.close(); - } catch (Exception ignored) { - // nothing we can do - ignored.printStackTrace(); - } - try { - if (dfs != null) - dfs.close(); - - } catch (Exception ignored) { - // nothing we can do - ignored.printStackTrace(); - } - try { - if (directDfs != null) - directDfs.close(); - - } catch (Exception ignored) { - // nothing we can do - ignored.printStackTrace(); - } - try { - if (cluster != null) - cluster.shutdown(); - - } catch (Exception ignored) { - // nothing we can do - ignored.printStackTrace(); - } - if (mr != null) { - try { - mr.shutdown(); - } catch (Exception ignored) { - ignored.printStackTrace(); - } - } + private void closeClient(JobClient client) { + try { + if (client != null) + client.close(); + } catch (Exception ignored) { + // nothing we can do + ignored.printStackTrace(); } } } @@ -155,32 +180,27 @@ class DummySocketFactory extends StandardSocketFactory { public DummySocketFactory() { } - /* @inheritDoc */ @Override public Socket createSocket() throws IOException { return new Socket() { @Override - public void connect(SocketAddress addr, int timeout) - throws IOException { + public void connect(SocketAddress addr, int timeout) throws IOException { assert (addr instanceof InetSocketAddress); InetSocketAddress iaddr = (InetSocketAddress) addr; SocketAddress newAddr = null; if (iaddr.isUnresolved()) - newAddr = - new InetSocketAddress(iaddr.getHostName(), - iaddr.getPort() - 10); + newAddr = new InetSocketAddress(iaddr.getHostName(), + iaddr.getPort() - 10); else - newAddr = - new InetSocketAddress(iaddr.getAddress(), iaddr.getPort() - 10); - System.out.printf("Test socket: rerouting %s to %s\n", iaddr, - newAddr); + newAddr = new InetSocketAddress(iaddr.getAddress(), + iaddr.getPort() - 10); + System.out.printf("Test socket: rerouting %s to %s\n", iaddr, newAddr); super.connect(newAddr, timeout); } }; } - /* @inheritDoc */ @Override public boolean equals(Object obj) { if (this == obj) @@ -191,11 +211,4 @@ public boolean equals(Object obj) { return false; return true; } - - /* @inheritDoc */ - @Override - public int hashCode() { - // Dummy hash code (to make find bugs happy) - return 53; - } } From b31d9d9d86b596cbbf86d5831538003fd383c92c Mon Sep 17 00:00:00 2001 
From: Eli Collins Date: Wed, 11 Apr 2012 23:57:12 +0000 Subject: [PATCH 06/57] HADOOP-8270. hadoop-daemon.sh stop action should return 0 for an already stopped service. Contributed by Roman Shaposhnik git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1325069 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++ .../hadoop-common/src/main/bin/hadoop-daemon.sh | 2 -- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index bad03fc0cb2..383bc284a70 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -346,6 +346,9 @@ Release 2.0.0 - UNRELEASED HADOOP-8264. Remove irritating double double quotes in front of hostname (Bernd Fondermann via bobby) + HADOOP-8270. hadoop-daemon.sh stop action should return 0 for an + already stopped service. (Roman Shaposhnik via eli) + BREAKDOWN OF HADOOP-7454 SUBTASKS HADOOP-7455. HA: Introduce HA Service Protocol Interface. (suresh) diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemon.sh b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemon.sh index 42d32cf9a0f..1a4d6446fb5 100755 --- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemon.sh +++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemon.sh @@ -167,11 +167,9 @@ case $startStop in kill `cat $pid` else echo no $command to stop - exit 1 fi else echo no $command to stop - exit 1 fi ;; From 7f427646dfe80f9a4dfac0a979709f367e74a7e7 Mon Sep 17 00:00:00 2001 From: Aaron Myers Date: Thu, 12 Apr 2012 03:36:13 +0000 Subject: [PATCH 07/57] HDFS-2983. Relax the build version check to permit rolling upgrades within a release. Contributed by Aaron T. Myers. 
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1325110 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + .../org/apache/hadoop/hdfs/DFSConfigKeys.java | 4 + .../hadoop/hdfs/protocolPB/PBHelper.java | 10 +- .../common/IncorrectVersionException.java | 25 ++-- .../hdfs/server/datanode/BPServiceActor.java | 25 ++-- .../hadoop/hdfs/server/datanode/DNConf.java | 11 ++ .../hadoop/hdfs/server/datanode/DataNode.java | 1 + .../hdfs/server/namenode/BackupNode.java | 2 +- .../server/namenode/NameNodeRpcServer.java | 51 +++++++- .../server/protocol/DatanodeRegistration.java | 12 +- .../hdfs/server/protocol/NamespaceInfo.java | 12 +- .../apache/hadoop/hdfs/util/VersionUtil.java | 101 +++++++++++++++ .../src/main/proto/DatanodeProtocol.proto | 1 + .../hadoop-hdfs/src/main/proto/hdfs.proto | 5 +- .../hadoop/hdfs/TestDatanodeRegistration.java | 121 +++++++++++++++++- .../hadoop/hdfs/protocolPB/TestPBHelper.java | 3 +- .../server/datanode/TestDatanodeRegister.java | 105 +++++++++++---- .../namenode/NNThroughputBenchmark.java | 2 + .../hadoop/hdfs/util/TestVersionUtil.java | 62 +++++++++ 19 files changed, 492 insertions(+), 64 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/VersionUtil.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestVersionUtil.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index f95ecd50a94..71d3c5be24f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -370,6 +370,9 @@ Release 2.0.0 - UNRELEASED HDFS-3179. Improve the exception message thrown by DataStreamer when it failed to add a datanode. (szetszwo) + HDFS-2983. Relax the build version check to permit rolling upgrades within + a release. (atm) + OPTIMIZATIONS HDFS-3024. 
Improve performance of stringification in addStoredBlock (todd) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index 7deb7eb7b5e..1b42351bc14 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -146,6 +146,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final int DFS_NAMENODE_NUM_CHECKPOINTS_RETAINED_DEFAULT = 2; public static final String DFS_NAMENODE_NUM_EXTRA_EDITS_RETAINED_KEY = "dfs.namenode.num.extra.edits.retained"; public static final int DFS_NAMENODE_NUM_EXTRA_EDITS_RETAINED_DEFAULT = 1000000; //1M + public static final String DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_KEY = "dfs.namenode.min.supported.datanode.version"; + public static final String DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_DEFAULT = "3.0.0"; public static final String DFS_NAMENODE_EDITS_DIR_MINIMUM_KEY = "dfs.namenode.edits.dir.minimum"; public static final int DFS_NAMENODE_EDITS_DIR_MINIMUM_DEFAULT = 1; @@ -262,6 +264,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final String DFS_DATANODE_IPC_ADDRESS_KEY = "dfs.datanode.ipc.address"; public static final int DFS_DATANODE_IPC_DEFAULT_PORT = 50020; public static final String DFS_DATANODE_IPC_ADDRESS_DEFAULT = "0.0.0.0" + DFS_DATANODE_IPC_DEFAULT_PORT; + public static final String DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_KEY = "dfs.datanode.min.supported.namenode.version"; + public static final String DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_DEFAULT = "3.0.0"; public static final String DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY = "dfs.block.access.token.enable"; public static final boolean DFS_BLOCK_ACCESS_TOKEN_ENABLE_DEFAULT = false; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java index fc50606f4d1..92b7858c007 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java @@ -386,7 +386,7 @@ public static NamespaceInfo convert(NamespaceInfoProto info) { StorageInfoProto storage = info.getStorageInfo(); return new NamespaceInfo(storage.getNamespceID(), storage.getClusterID(), info.getBlockPoolID(), storage.getCTime(), info.getDistUpgradeVersion(), - info.getBuildVersion()); + info.getBuildVersion(), info.getSoftwareVersion()); } public static NamenodeCommand convert(NamenodeCommandProto cmd) { @@ -612,13 +612,14 @@ public static DatanodeRegistrationProto convert( .newBuilder(); return builder.setDatanodeID(PBHelper.convert((DatanodeID) registration)) .setStorageInfo(PBHelper.convert(registration.getStorageInfo())) - .setKeys(PBHelper.convert(registration.getExportedKeys())).build(); + .setKeys(PBHelper.convert(registration.getExportedKeys())) + .setSoftwareVersion(registration.getSoftwareVersion()).build(); } public static DatanodeRegistration convert(DatanodeRegistrationProto proto) { return new DatanodeRegistration(PBHelper.convert(proto.getDatanodeID()), PBHelper.convert(proto.getStorageInfo()), PBHelper.convert(proto - .getKeys())); + .getKeys()), proto.getSoftwareVersion()); } public static DatanodeCommand 
convert(DatanodeCommandProto proto) { @@ -894,7 +895,8 @@ public static NamespaceInfoProto convert(NamespaceInfo info) { .setBlockPoolID(info.getBlockPoolID()) .setBuildVersion(info.getBuildVersion()) .setDistUpgradeVersion(info.getDistributedUpgradeVersion()) - .setStorageInfo(PBHelper.convert((StorageInfo)info)).build(); + .setStorageInfo(PBHelper.convert((StorageInfo)info)) + .setSoftwareVersion(info.getSoftwareVersion()).build(); } // Located Block Arrays and Lists diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/IncorrectVersionException.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/IncorrectVersionException.java index 5f0b2604b0f..57bd214fb57 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/IncorrectVersionException.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/IncorrectVersionException.java @@ -32,7 +32,19 @@ @InterfaceStability.Evolving public class IncorrectVersionException extends IOException { private static final long serialVersionUID = 1L; + + public IncorrectVersionException(String message) { + super(message); + } + public IncorrectVersionException(String minimumVersion, String reportedVersion, + String remoteDaemon, String thisDaemon) { + this("The reported " + remoteDaemon + " version is too low to communicate" + + " with this " + thisDaemon + ". " + remoteDaemon + " version: '" + + reportedVersion + "' Minimum " + remoteDaemon + " version: '" + + minimumVersion + "'"); + } + public IncorrectVersionException(int versionReported, String ofWhat) { this(versionReported, ofWhat, HdfsConstants.LAYOUT_VERSION); } @@ -40,16 +52,9 @@ public IncorrectVersionException(int versionReported, String ofWhat) { public IncorrectVersionException(int versionReported, String ofWhat, int versionExpected) { - super("Unexpected version " - + (ofWhat==null ? "" : "of " + ofWhat) + ". Reported: " - + versionReported + ". Expecting = " + versionExpected + "."); + this("Unexpected version " + + (ofWhat==null ? "" : "of " + ofWhat) + ". Reported: " + + versionReported + ". Expecting = " + versionExpected + "."); } - public IncorrectVersionException(String versionReported, - String ofWhat, - String versionExpected) { - super("Unexpected version " - + (ofWhat==null ? "" : "of " + ofWhat) + ". Reported: " - + versionReported + ". 
Expecting = " + versionExpected + "."); - } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java index 25e3a6781ef..f5d09b1fef2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java @@ -37,7 +37,6 @@ import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException; import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB; import org.apache.hadoop.hdfs.server.common.IncorrectVersionException; -import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; @@ -49,9 +48,11 @@ import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport; import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks; import org.apache.hadoop.hdfs.server.protocol.StorageReport; +import org.apache.hadoop.hdfs.util.VersionUtil; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.VersionInfo; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.Maps; @@ -178,17 +179,23 @@ NamespaceInfo retrieveNamespaceInfo() throws IOException { private void checkNNVersion(NamespaceInfo nsInfo) throws IncorrectVersionException { // build and layout versions should match - String nsBuildVer = nsInfo.getBuildVersion(); - String stBuildVer = Storage.getBuildVersion(); - if (!nsBuildVer.equals(stBuildVer)) { - LOG.warn("Data-node and name-node Build versions must be the same. " + - "Namenode build version: " + nsBuildVer + "Datanode " + - "build version: " + stBuildVer); - throw new IncorrectVersionException(nsBuildVer, "namenode", stBuildVer); + String nnVersion = nsInfo.getSoftwareVersion(); + String minimumNameNodeVersion = dnConf.getMinimumNameNodeVersion(); + if (VersionUtil.compareVersions(nnVersion, minimumNameNodeVersion) < 0) { + IncorrectVersionException ive = new IncorrectVersionException( + minimumNameNodeVersion, nnVersion, "NameNode", "DataNode"); + LOG.warn(ive.getMessage()); + throw ive; + } + String dnVersion = VersionInfo.getVersion(); + if (!nnVersion.equals(dnVersion)) { + LOG.info("Reported NameNode version '" + nnVersion + "' does not match " + + "DataNode version '" + dnVersion + "' but is within acceptable " + + "limits. Note: This is normal during a rolling upgrade."); } if (HdfsConstants.LAYOUT_VERSION != nsInfo.getLayoutVersion()) { - LOG.warn("Data-node and name-node layout versions must be the same." + + LOG.warn("DataNode and NameNode layout versions must be the same." 
+ " Expected: "+ HdfsConstants.LAYOUT_VERSION + " actual "+ nsInfo.getLayoutVersion()); throw new IncorrectVersionException( diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java index e4bf9a676dc..92f1edc2fca 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java @@ -31,6 +31,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_TRANSFERTO_ALLOWED_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_DEFAULT; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; @@ -58,6 +60,8 @@ class DNConf { final long deleteReportInterval; final long initialBlockReportDelay; final int writePacketSize; + + final String minimumNameNodeVersion; public DNConf(Configuration conf) { socketTimeout = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, @@ -111,5 +115,12 @@ public DNConf(Configuration conf) { this.syncOnClose = conf.getBoolean(DFS_DATANODE_SYNCONCLOSE_KEY, DFS_DATANODE_SYNCONCLOSE_DEFAULT); + this.minimumNameNodeVersion = conf.get(DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_KEY, + DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_DEFAULT); + } + + // We get minimumNameNodeVersion via a method so it can be mocked out in tests. 
+ String getMinimumNameNodeVersion() { + return this.minimumNameNodeVersion; } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java index db74ca40b96..f04fea82048 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java @@ -673,6 +673,7 @@ DatanodeRegistration createBPRegistration(NamespaceInfo nsInfo) { bpRegistration.setIpcPort(getIpcPort()); bpRegistration.setHostName(hostName); bpRegistration.setStorageID(getStorageId()); + bpRegistration.setSoftwareVersion(VersionInfo.getVersion()); StorageInfo storageInfo = storage.getBPStorage(nsInfo.getBlockPoolID()); if (storageInfo == null) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java index 1f005b016f4..cb826f6e089 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java @@ -242,7 +242,7 @@ private BackupNodeRpcServer(Configuration conf, BackupNode nn) */ private void verifyJournalRequest(JournalInfo journalInfo) throws IOException { - verifyVersion(journalInfo.getLayoutVersion()); + verifyLayoutVersion(journalInfo.getLayoutVersion()); String errorMsg = null; int expectedNamespaceID = namesystem.getNamespaceInfo().getNamespaceID(); if (journalInfo.getNamespaceId() != expectedNamespaceID) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index ca4ab24c210..4c891d3d4d9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -47,6 +47,7 @@ import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAServiceProtocolService; import org.apache.hadoop.ha.protocolPB.HAServiceProtocolPB; import org.apache.hadoop.ha.protocolPB.HAServiceProtocolServerSideTranslatorPB; +import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.HDFSPolicyProvider; import org.apache.hadoop.hdfs.HdfsConfiguration; @@ -107,6 +108,7 @@ import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks; import org.apache.hadoop.hdfs.server.protocol.StorageReport; import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand; +import org.apache.hadoop.hdfs.util.VersionUtil; import org.apache.hadoop.io.EnumSetWritable; import org.apache.hadoop.io.Text; import org.apache.hadoop.ipc.ProtobufRpcEngine; @@ -121,6 +123,7 @@ import org.apache.hadoop.security.authorize.ProxyUsers; import org.apache.hadoop.security.token.SecretManager.InvalidToken; import org.apache.hadoop.security.token.Token; +import org.apache.hadoop.util.VersionInfo; import com.google.protobuf.BlockingService; @@ -147,6 +150,8 @@ class NameNodeRpcServer implements NamenodeProtocols { /** The RPC server that listens to requests from clients */ protected 
final RPC.Server clientRpcServer; protected final InetSocketAddress clientRpcAddress; + + private final String minimumDataNodeVersion; public NameNodeRpcServer(Configuration conf, NameNode nn) throws IOException { @@ -261,6 +266,10 @@ public NameNodeRpcServer(Configuration conf, NameNode nn) // The rpc-server port can be ephemeral... ensure we have the correct info this.clientRpcAddress = this.clientRpcServer.getListenerAddress(); nn.setRpcServerAddress(conf, clientRpcAddress); + + this.minimumDataNodeVersion = conf.get( + DFSConfigKeys.DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_KEY, + DFSConfigKeys.DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_DEFAULT); } /** @@ -326,7 +335,7 @@ public void errorReport(NamenodeRegistration registration, @Override // NamenodeProtocol public NamenodeRegistration register(NamenodeRegistration registration) throws IOException { - verifyVersion(registration.getVersion()); + verifyLayoutVersion(registration.getVersion()); NamenodeRegistration myRegistration = nn.setRegistration(); namesystem.registerBackupNode(registration, myRegistration); return myRegistration; @@ -829,9 +838,10 @@ public String getLinkTarget(String path) throws IOException { @Override // DatanodeProtocol - public DatanodeRegistration registerDatanode(DatanodeRegistration nodeReg - ) throws IOException { - verifyVersion(nodeReg.getVersion()); + public DatanodeRegistration registerDatanode(DatanodeRegistration nodeReg) + throws IOException { + verifyLayoutVersion(nodeReg.getVersion()); + verifySoftwareVersion(nodeReg); namesystem.registerDatanode(nodeReg); return nodeReg; } @@ -916,7 +926,7 @@ public UpgradeCommand processUpgradeCommand(UpgradeCommand comm) throws IOExcept * @throws UnregisteredNodeException if the registration is invalid */ void verifyRequest(NodeRegistration nodeReg) throws IOException { - verifyVersion(nodeReg.getVersion()); + verifyLayoutVersion(nodeReg.getVersion()); if (!namesystem.getRegistrationID().equals(nodeReg.getRegistrationID())) { LOG.warn("Invalid registrationID - expected: " + namesystem.getRegistrationID() + " received: " @@ -989,10 +999,39 @@ public synchronized HAServiceStatus getServiceStatus() * @param version * @throws IOException */ - void verifyVersion(int version) throws IOException { + void verifyLayoutVersion(int version) throws IOException { if (version != HdfsConstants.LAYOUT_VERSION) throw new IncorrectVersionException(version, "data node"); } + + private void verifySoftwareVersion(DatanodeRegistration dnReg) + throws IncorrectVersionException { + String dnVersion = dnReg.getSoftwareVersion(); + if (VersionUtil.compareVersions(dnVersion, minimumDataNodeVersion) < 0) { + IncorrectVersionException ive = new IncorrectVersionException( + minimumDataNodeVersion, dnVersion, "DataNode", "NameNode"); + LOG.warn(ive.getMessage() + " DN: " + dnReg); + throw ive; + } + String nnVersion = VersionInfo.getVersion(); + if (!dnVersion.equals(nnVersion)) { + String messagePrefix = "Reported DataNode version '" + dnVersion + + "' of DN " + dnReg + " does not match NameNode version '" + + nnVersion + "'"; + long nnCTime = nn.getFSImage().getStorage().getCTime(); + long dnCTime = dnReg.getStorageInfo().getCTime(); + if (nnCTime != dnCTime) { + IncorrectVersionException ive = new IncorrectVersionException( + messagePrefix + " and CTime of DN ('" + dnCTime + + "') does not match CTime of NN ('" + nnCTime + "')"); + LOG.warn(ive); + throw ive; + } else { + LOG.info(messagePrefix + + ". 
Note: This is normal during a rolling upgrade."); + } + } + } private static String getClientMachine() { String clientMachine = NamenodeWebHdfsMethods.getRemoteAddress(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java index a5522ced519..dda0a6fbee0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java @@ -37,12 +37,14 @@ public class DatanodeRegistration extends DatanodeID private StorageInfo storageInfo; private ExportedBlockKeys exportedKeys; + private String softwareVersion; public DatanodeRegistration(DatanodeID dn, StorageInfo info, - ExportedBlockKeys keys) { + ExportedBlockKeys keys, String softwareVersion) { super(dn); this.storageInfo = info; this.exportedKeys = keys; + this.softwareVersion = softwareVersion; } public DatanodeRegistration(String ipAddr, int xferPort) { @@ -71,6 +73,14 @@ public void setExportedKeys(ExportedBlockKeys keys) { public ExportedBlockKeys getExportedKeys() { return exportedKeys; } + + public void setSoftwareVersion(String softwareVersion) { + this.softwareVersion = softwareVersion; + } + + public String getSoftwareVersion() { + return softwareVersion; + } @Override // NodeRegistration public int getVersion() { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java index f0d46b25b70..eb91a178619 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.common.StorageInfo; import org.apache.hadoop.hdfs.server.namenode.NNStorage; +import org.apache.hadoop.util.VersionInfo; /** * NamespaceInfo is returned by the name-node in reply @@ -38,6 +39,7 @@ public class NamespaceInfo extends StorageInfo { String buildVersion; int distributedUpgradeVersion; String blockPoolID = ""; // id of the block pool + String softwareVersion; public NamespaceInfo() { super(); @@ -45,16 +47,18 @@ public NamespaceInfo() { } public NamespaceInfo(int nsID, String clusterID, String bpID, - long cT, int duVersion, String buildVersion) { + long cT, int duVersion, String buildVersion, String softwareVersion) { super(HdfsConstants.LAYOUT_VERSION, nsID, clusterID, cT); blockPoolID = bpID; this.buildVersion = buildVersion; this.distributedUpgradeVersion = duVersion; + this.softwareVersion = softwareVersion; } public NamespaceInfo(int nsID, String clusterID, String bpID, long cT, int duVersion) { - this(nsID, clusterID, bpID, cT, duVersion, Storage.getBuildVersion()); + this(nsID, clusterID, bpID, cT, duVersion, Storage.getBuildVersion(), + VersionInfo.getVersion()); } public String getBuildVersion() { @@ -68,6 +72,10 @@ public int getDistributedUpgradeVersion() { public String getBlockPoolID() { return blockPoolID; } + + public String getSoftwareVersion() { + return softwareVersion; + } public String toString(){ return super.toString() + ";bpid=" + blockPoolID; diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/VersionUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/VersionUtil.java new file mode 100644 index 00000000000..59aa5e128ed --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/VersionUtil.java @@ -0,0 +1,101 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.util; + +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import org.apache.hadoop.classification.InterfaceAudience; + +@InterfaceAudience.Private +public abstract class VersionUtil { + + private static final Pattern COMPONENT_GROUPS = Pattern.compile("(\\d+)|(\\D+)"); + + /** + * This function splits the two versions on "." and performs a + * naturally-ordered comparison of the resulting components. For example, the + * version string "0.3" is considered to precede "0.20", despite the fact that + * lexical comparison would consider "0.20" to precede "0.3". This method of + * comparison is similar to the method used by package versioning systems like + * deb and RPM. + * + * Version components are compared numerically whenever possible, however a + * version component can contain non-numeric characters. When a non-numeric + * group of characters is found in a version component, this group is compared + * with the similarly-indexed group in the other version component. If the + * other group is numeric, then the numeric group is considered to precede the + * non-numeric group. If both groups are non-numeric, then a lexical + * comparison is performed. + * + * If two versions have a different number of components, then only the lower + * number of components are compared. If those components are identical + * between the two versions, then the version with fewer components is + * considered to precede the version with more components. + * + * This function returns a negative integer if version1 precedes version2, a + * positive integer if version2 precedes version1, and 0 if and only if the + * two versions' components are identical in value and cardinality. + * + * @param version1 + * the first version to compare + * @param version2 + * the second version to compare + * @return a negative integer if version1 precedes version2, a positive + * integer if version2 precedes version1, and 0 if and only if the two + * versions are equal. 
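+   *
+   * Illustrative examples, consistent with the behavior described above:
+   * compareVersions("0.3", "0.20") returns a negative value,
+   * compareVersions("1.0.2a", "1.0.10") returns a negative value, and
+   * compareVersions("2.0.0", "2.0.0") returns 0.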
+ */ + public static int compareVersions(String version1, String version2) { + String[] version1Parts = version1.split("\\."); + String[] version2Parts = version2.split("\\."); + + for (int i = 0; i < version1Parts.length && i < version2Parts.length; i++) { + String component1 = version1Parts[i]; + String component2 = version2Parts[i]; + if (!component1.equals(component2)) { + Matcher matcher1 = COMPONENT_GROUPS.matcher(component1); + Matcher matcher2 = COMPONENT_GROUPS.matcher(component2); + + while (matcher1.find() && matcher2.find()) { + String group1 = matcher1.group(); + String group2 = matcher2.group(); + if (!group1.equals(group2)) { + if (isNumeric(group1) && isNumeric(group2)) { + return Integer.parseInt(group1) - Integer.parseInt(group2); + } else if (!isNumeric(group1) && !isNumeric(group2)) { + return group1.compareTo(group2); + } else { + return isNumeric(group1) ? -1 : 1; + } + } + } + return component1.length() - component2.length(); + } + } + return version1Parts.length - version2Parts.length; + } + + private static boolean isNumeric(String s) { + try { + Integer.parseInt(s); + return true; + } catch (NumberFormatException nfe) { + return false; + } + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto index 046e07f7fa6..f5f36e85bf6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto @@ -33,6 +33,7 @@ message DatanodeRegistrationProto { required DatanodeIDProto datanodeID = 1; // Datanode information required StorageInfoProto storageInfo = 2; // Node information required ExportedBlockKeysProto keys = 3; // Block keys + required string softwareVersion = 4; // Software version of the DN, e.g. "2.0.0" } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto index a0b055642f3..09b72b62bfd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto @@ -303,10 +303,11 @@ message RemoteEditLogManifestProto { * Namespace information that describes namespace on a namenode */ message NamespaceInfoProto { - required string buildVersion = 1; // Software build version + required string buildVersion = 1; // Software revision version (e.g. an svn or git revision) required uint32 distUpgradeVersion = 2; // Distributed upgrade version required string blockPoolID = 3; // block pool used by the namespace - required StorageInfoProto storageInfo = 4;// Noe information + required StorageInfoProto storageInfo = 4;// Node information + required string softwareVersion = 5; // Software version number (e.g. 
2.0.0) } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java index 2cde7ed4760..ca894a8ef04 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java @@ -17,24 +17,40 @@ */ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; +import static org.mockito.Mockito.*; + import java.net.InetSocketAddress; -import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; -import org.apache.hadoop.hdfs.DFSClient; -import junit.framework.TestCase; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; +import org.apache.hadoop.hdfs.server.common.IncorrectVersionException; +import org.apache.hadoop.hdfs.server.common.StorageInfo; +import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; +import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; +import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.VersionInfo; +import org.junit.Test; /** * This class tests that a file need not be closed before its * data can be read by another client. */ -public class TestDatanodeRegistration extends TestCase { +public class TestDatanodeRegistration { + + public static final Log LOG = LogFactory.getLog(TestDatanodeRegistration.class); /** * Regression test for HDFS-894 ensures that, when datanodes * are restarted, the new IPC port is registered with the * namenode. */ + @Test public void testChangeIpcPort() throws Exception { HdfsConfiguration conf = new HdfsConfiguration(); MiniDFSCluster cluster = null; @@ -74,4 +90,101 @@ public void testChangeIpcPort() throws Exception { } } } + + @Test + public void testRegistrationWithDifferentSoftwareVersions() throws Exception { + Configuration conf = new HdfsConfiguration(); + conf.set(DFSConfigKeys.DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_KEY, "3.0.0"); + MiniDFSCluster cluster = null; + try { + cluster = new MiniDFSCluster.Builder(conf) + .numDataNodes(0) + .build(); + + NamenodeProtocols rpcServer = cluster.getNameNodeRpc(); + + long nnCTime = cluster.getNamesystem().getFSImage().getStorage().getCTime(); + StorageInfo mockStorageInfo = mock(StorageInfo.class); + doReturn(nnCTime).when(mockStorageInfo).getCTime(); + + DatanodeRegistration mockDnReg = mock(DatanodeRegistration.class); + doReturn(HdfsConstants.LAYOUT_VERSION).when(mockDnReg).getVersion(); + doReturn("fake-storage-id").when(mockDnReg).getStorageID(); + doReturn(mockStorageInfo).when(mockDnReg).getStorageInfo(); + + // Should succeed when software versions are the same. + doReturn("3.0.0").when(mockDnReg).getSoftwareVersion(); + rpcServer.registerDatanode(mockDnReg); + + // Should succeed when software version of DN is above minimum required by NN. + doReturn("4.0.0").when(mockDnReg).getSoftwareVersion(); + rpcServer.registerDatanode(mockDnReg); + + // Should fail when software version of DN is below minimum required by NN. 
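+     // (A reported version below the NameNode's configured minimum is rejected with an IncorrectVersionException.)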
+ doReturn("2.0.0").when(mockDnReg).getSoftwareVersion(); + try { + rpcServer.registerDatanode(mockDnReg); + fail("Should not have been able to register DN with too-low version."); + } catch (IncorrectVersionException ive) { + GenericTestUtils.assertExceptionContains( + "The reported DataNode version is too low", ive); + LOG.info("Got expected exception", ive); + } + } finally { + if (cluster != null) { + cluster.shutdown(); + } + } + } + + @Test + public void testRegistrationWithDifferentSoftwareVersionsDuringUpgrade() + throws Exception { + Configuration conf = new HdfsConfiguration(); + conf.set(DFSConfigKeys.DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_KEY, "1.0.0"); + MiniDFSCluster cluster = null; + try { + cluster = new MiniDFSCluster.Builder(conf) + .numDataNodes(0) + .build(); + + NamenodeProtocols rpcServer = cluster.getNameNodeRpc(); + + long nnCTime = cluster.getNamesystem().getFSImage().getStorage().getCTime(); + StorageInfo mockStorageInfo = mock(StorageInfo.class); + doReturn(nnCTime).when(mockStorageInfo).getCTime(); + + DatanodeRegistration mockDnReg = mock(DatanodeRegistration.class); + doReturn(HdfsConstants.LAYOUT_VERSION).when(mockDnReg).getVersion(); + doReturn("fake-storage-id").when(mockDnReg).getStorageID(); + doReturn(mockStorageInfo).when(mockDnReg).getStorageInfo(); + + // Should succeed when software versions are the same and CTimes are the + // same. + doReturn(VersionInfo.getVersion()).when(mockDnReg).getSoftwareVersion(); + rpcServer.registerDatanode(mockDnReg); + + // Should succeed when software versions are the same and CTimes are + // different. + doReturn(nnCTime + 1).when(mockStorageInfo).getCTime(); + rpcServer.registerDatanode(mockDnReg); + + // Should fail when software version of DN is different from NN and CTimes + // are different. 
+ doReturn(VersionInfo.getVersion() + ".1").when(mockDnReg).getSoftwareVersion(); + try { + rpcServer.registerDatanode(mockDnReg); + fail("Should not have been able to register DN with different software" + + " versions and CTimes"); + } catch (IncorrectVersionException ive) { + GenericTestUtils.assertExceptionContains( + "does not match CTime of NN", ive); + LOG.info("Got expected exception", ive); + } + } finally { + if (cluster != null) { + cluster.shutdown(); + } + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java index 652aaf8ae00..a6280d319aa 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java @@ -429,12 +429,13 @@ public void testConvertDatanodeRegistration() { ExportedBlockKeys expKeys = new ExportedBlockKeys(true, 9, 10, getBlockKey(1), keys); DatanodeRegistration reg = new DatanodeRegistration(dnId, - new StorageInfo(), expKeys); + new StorageInfo(), expKeys, "3.0.0"); DatanodeRegistrationProto proto = PBHelper.convert(reg); DatanodeRegistration reg2 = PBHelper.convert(proto); compare(reg.getStorageInfo(), reg2.getStorageInfo()); compare(reg.getExportedKeys(), reg2.getExportedKeys()); compare((DatanodeID)reg, (DatanodeID)reg2); + assertEquals(reg.getSoftwareVersion(), reg2.getSoftwareVersion()); } @Test diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java index dbbaedd6f86..a55ca0b0a4d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java @@ -18,48 +18,105 @@ package org.apache.hadoop.hdfs.server.datanode; -import java.net.InetSocketAddress; +import static org.junit.Assert.assertEquals; import static org.junit.Assert.fail; -import static org.mockito.Mockito.*; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import java.io.IOException; +import java.net.InetSocketAddress; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB; import org.apache.hadoop.hdfs.server.common.IncorrectVersionException; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; +import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.VersionInfo; +import org.junit.Before; import org.junit.Test; -import org.mockito.Mockito; - public class TestDatanodeRegister { public static final Log LOG = LogFactory.getLog(TestDatanodeRegister.class); // Invalid address - static final InetSocketAddress INVALID_ADDR = + private static final InetSocketAddress INVALID_ADDR = new InetSocketAddress("127.0.0.1", 1); + + private BPServiceActor actor; + NamespaceInfo fakeNsInfo; + DNConf mockDnConf; + + @Before + public void setUp() throws IOException { + mockDnConf = mock(DNConf.class); + doReturn(VersionInfo.getVersion()).when(mockDnConf).getMinimumNameNodeVersion(); + 
+ DataNode mockDN = mock(DataNode.class); + doReturn(true).when(mockDN).shouldRun(); + doReturn(mockDnConf).when(mockDN).getDnConf(); + + BPOfferService mockBPOS = mock(BPOfferService.class); + doReturn(mockDN).when(mockBPOS).getDataNode(); + + actor = new BPServiceActor(INVALID_ADDR, mockBPOS); + + fakeNsInfo = mock(NamespaceInfo.class); + // Return a a good software version. + doReturn(VersionInfo.getVersion()).when(fakeNsInfo).getSoftwareVersion(); + // Return a good layout version for now. + doReturn(HdfsConstants.LAYOUT_VERSION).when(fakeNsInfo).getLayoutVersion(); + + DatanodeProtocolClientSideTranslatorPB fakeDnProt = + mock(DatanodeProtocolClientSideTranslatorPB.class); + when(fakeDnProt.versionRequest()).thenReturn(fakeNsInfo); + actor.setNameNode(fakeDnProt); + } @Test - public void testDataNodeRegister() throws Exception { - DataNode mockDN = mock(DataNode.class); - Mockito.doReturn(true).when(mockDN).shouldRun(); + public void testSoftwareVersionDifferences() throws Exception { + // We expect no exception to be thrown when the software versions match. + assertEquals(VersionInfo.getVersion(), + actor.retrieveNamespaceInfo().getSoftwareVersion()); - BPOfferService mockBPOS = Mockito.mock(BPOfferService.class); - Mockito.doReturn(mockDN).when(mockBPOS).getDataNode(); + // We expect no exception to be thrown when the min NN version is below the + // reported NN version. + doReturn("4.0.0").when(fakeNsInfo).getSoftwareVersion(); + doReturn("3.0.0").when(mockDnConf).getMinimumNameNodeVersion(); + assertEquals("4.0.0", actor.retrieveNamespaceInfo().getSoftwareVersion()); - BPServiceActor actor = new BPServiceActor(INVALID_ADDR, mockBPOS); - - NamespaceInfo fakeNSInfo = mock(NamespaceInfo.class); - when(fakeNSInfo.getBuildVersion()).thenReturn("NSBuildVersion"); - DatanodeProtocolClientSideTranslatorPB fakeDNProt = - mock(DatanodeProtocolClientSideTranslatorPB.class); - when(fakeDNProt.versionRequest()).thenReturn(fakeNSInfo); - - actor.setNameNode( fakeDNProt ); - try { + // When the NN reports a version that's too low, throw an exception. + doReturn("3.0.0").when(fakeNsInfo).getSoftwareVersion(); + doReturn("4.0.0").when(mockDnConf).getMinimumNameNodeVersion(); + try { actor.retrieveNamespaceInfo(); - fail("register() did not throw exception! " + - "Expected: IncorrectVersionException"); - } catch (IncorrectVersionException ie) { - LOG.info("register() returned correct Exception: IncorrectVersionException"); + fail("Should have thrown an exception for NN with too-low version"); + } catch (IncorrectVersionException ive) { + GenericTestUtils.assertExceptionContains( + "The reported NameNode version is too low", ive); + LOG.info("Got expected exception", ive); + } + } + + @Test + public void testDifferentLayoutVersions() throws Exception { + // We expect no exceptions to be thrown when the layout versions match. + assertEquals(HdfsConstants.LAYOUT_VERSION, + actor.retrieveNamespaceInfo().getLayoutVersion()); + + // We expect an exception to be thrown when the NN reports a layout version + // different from that of the DN. 
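+     // (The check is for exact equality of layout versions, so any differing value should trigger the failure.)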
+ doReturn(HdfsConstants.LAYOUT_VERSION * 1000).when(fakeNsInfo) + .getLayoutVersion(); + try { + actor.retrieveNamespaceInfo(); + fail("Should have failed to retrieve NS info from DN with bad layout version"); + } catch (IncorrectVersionException ive) { + GenericTestUtils.assertExceptionContains( + "Unexpected version of namenode", ive); + LOG.info("Got expected exception", ive); } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java index 4b8f225f1e2..ec5b8a72e2f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java @@ -58,6 +58,7 @@ import org.apache.hadoop.net.NetworkTopology; import org.apache.hadoop.security.Groups; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.VersionInfo; import org.apache.log4j.Level; import org.apache.log4j.LogManager; @@ -783,6 +784,7 @@ private static int getNodePort(int num) throws IOException { String hostName = DNS.getDefaultHost("default", "default"); dnRegistration = new DatanodeRegistration(ipAddr, getNodePort(dnIdx)); dnRegistration.setHostName(hostName); + dnRegistration.setSoftwareVersion(VersionInfo.getVersion()); this.blocks = new ArrayList(blockCapacity); this.nrBlocks = 0; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestVersionUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestVersionUtil.java new file mode 100644 index 00000000000..c2537fd515a --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestVersionUtil.java @@ -0,0 +1,62 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.util; + +import static org.junit.Assert.*; + +import org.apache.hadoop.test.GenericTestUtils; +import org.junit.Test; + +public class TestVersionUtil { + + @Test + public void testCompareVersions() { + // Equal versions are equal. + assertEquals(0, VersionUtil.compareVersions("2.0.0", "2.0.0")); + assertEquals(0, VersionUtil.compareVersions("2.0.0a", "2.0.0a")); + assertEquals(0, VersionUtil.compareVersions("1", "1")); + + // Assert that lower versions are lower, and higher versions are higher. 
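+     // (assertExpectedValues, defined below, checks each pair in both directions:
+     // compareVersions(lower, higher) < 0 and compareVersions(higher, lower) > 0.)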
+ assertExpectedValues("1", "2.0.0"); + assertExpectedValues("1.0.0", "2"); + assertExpectedValues("1.0.0", "2.0.0"); + assertExpectedValues("1.0", "2.0.0"); + assertExpectedValues("1.0.0", "2.0.0"); + assertExpectedValues("1.0.0", "1.0.0a"); + assertExpectedValues("1.0.0.0", "2.0.0"); + assertExpectedValues("1.0.0", "1.0.0-dev"); + assertExpectedValues("1.0.0", "1.0.1"); + assertExpectedValues("1.0.0", "1.0.2"); + assertExpectedValues("1.0.0", "1.1.0"); + assertExpectedValues("2.0.0", "10.0.0"); + assertExpectedValues("1.0.0", "1.0.0a"); + assertExpectedValues("1.0.2a", "1.0.10"); + assertExpectedValues("1.0.2a", "1.0.2b"); + assertExpectedValues("1.0.2a", "1.0.2ab"); + assertExpectedValues("1.0.0a1", "1.0.0a2"); + assertExpectedValues("1.0.0a2", "1.0.0a10"); + assertExpectedValues("1.0", "1.a"); + assertExpectedValues("1.0", "1.a0"); + } + + private static void assertExpectedValues(String lower, String higher) { + assertTrue(VersionUtil.compareVersions(lower, higher) < 0); + assertTrue(VersionUtil.compareVersions(higher, lower) > 0); + } + +} From 4ea042666c4d7997bd5fac893a27152dddfbd957 Mon Sep 17 00:00:00 2001 From: Aaron Myers Date: Thu, 12 Apr 2012 05:27:03 +0000 Subject: [PATCH 08/57] HDFS-3260. TestDatanodeRegistration should set minimum DN version in addition to minimum NN version. Contributed by Aaron T. Myers. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1325119 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ .../java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java | 1 + 2 files changed, 4 insertions(+) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 71d3c5be24f..442a446aa74 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -498,6 +498,9 @@ Release 2.0.0 - UNRELEASED HDFS-2696. Fix the fuse-fds build. (Bruno Mahé via eli) + HDFS-3260. TestDatanodeRegistration should set minimum DN version in + addition to minimum NN version. (atm) + BREAKDOWN OF HDFS-1623 SUBTASKS HDFS-2179. Add fencing framework and mechanisms for NameNode HA. (todd) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java index ca894a8ef04..6bb7b456666 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java @@ -95,6 +95,7 @@ public void testChangeIpcPort() throws Exception { public void testRegistrationWithDifferentSoftwareVersions() throws Exception { Configuration conf = new HdfsConfiguration(); conf.set(DFSConfigKeys.DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_KEY, "3.0.0"); + conf.set(DFSConfigKeys.DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_KEY, "3.0.0"); MiniDFSCluster cluster = null; try { cluster = new MiniDFSCluster.Builder(conf) From d310c48ce4dc3fc24506455ed5addf1d24f441ee Mon Sep 17 00:00:00 2001 From: Amar Kamat Date: Thu, 12 Apr 2012 07:17:34 +0000 Subject: [PATCH 09/57] MAPREDUCE-4083. [Gridmix] NPE in cpu emulation. 
(amarrk) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1325145 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 2 ++ .../resourceusage/CumulativeCpuUsageEmulatorPlugin.java | 9 ++++++--- .../resourceusage/TotalHeapUsageEmulatorPlugin.java | 7 +++++-- .../mapred/gridmix/TestGridmixMemoryEmulation.java | 5 +++++ .../mapred/gridmix/TestResourceUsageEmulators.java | 6 +++++- 5 files changed, 23 insertions(+), 6 deletions(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 14e61473178..b8921ae2d5d 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -52,6 +52,8 @@ Trunk (unreleased changes) BUG FIXES + MAPREDUCE-4083. [Gridmix] NPE in cpu emulation. (amarrk) + MAPREDUCE-4087. [Gridmix] GenerateDistCacheData job of Gridmix can become slow in some cases (ravigummadi). diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/emulators/resourceusage/CumulativeCpuUsageEmulatorPlugin.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/emulators/resourceusage/CumulativeCpuUsageEmulatorPlugin.java index 22acb42728b..c2b2a018ff3 100644 --- a/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/emulators/resourceusage/CumulativeCpuUsageEmulatorPlugin.java +++ b/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/emulators/resourceusage/CumulativeCpuUsageEmulatorPlugin.java @@ -235,7 +235,9 @@ private synchronized long getCurrentCPUUsage() { @Override public float getProgress() { - return Math.min(1f, ((float)getCurrentCPUUsage())/targetCpuUsage); + return enabled + ? Math.min(1f, ((float)getCurrentCPUUsage())/targetCpuUsage) + : 1.0f; } @Override @@ -297,6 +299,9 @@ public void emulate() throws IOException, InterruptedException { public void initialize(Configuration conf, ResourceUsageMetrics metrics, ResourceCalculatorPlugin monitor, Progressive progress) { + this.monitor = monitor; + this.progress = progress; + // get the target CPU usage targetCpuUsage = metrics.getCumulativeCpuUsage(); if (targetCpuUsage <= 0 ) { @@ -306,8 +311,6 @@ public void initialize(Configuration conf, ResourceUsageMetrics metrics, enabled = true; } - this.monitor = monitor; - this.progress = progress; emulationInterval = conf.getFloat(CPU_EMULATION_PROGRESS_INTERVAL, DEFAULT_EMULATION_FREQUENCY); diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/emulators/resourceusage/TotalHeapUsageEmulatorPlugin.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/emulators/resourceusage/TotalHeapUsageEmulatorPlugin.java index 3af1f3558fa..47941ccfffb 100644 --- a/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/emulators/resourceusage/TotalHeapUsageEmulatorPlugin.java +++ b/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/emulators/resourceusage/TotalHeapUsageEmulatorPlugin.java @@ -188,7 +188,9 @@ protected long getMaxHeapUsageInMB() { @Override public float getProgress() { - return Math.min(1f, ((float)getTotalHeapUsageInMB())/targetHeapUsageInMB); + return enabled + ? 
Math.min(1f, ((float)getTotalHeapUsageInMB())/targetHeapUsageInMB) + : 1.0f; } @Override @@ -237,6 +239,8 @@ public void emulate() throws IOException, InterruptedException { public void initialize(Configuration conf, ResourceUsageMetrics metrics, ResourceCalculatorPlugin monitor, Progressive progress) { + this.progress = progress; + // get the target heap usage targetHeapUsageInMB = metrics.getHeapUsage() / ONE_MB; if (targetHeapUsageInMB <= 0 ) { @@ -248,7 +252,6 @@ public void initialize(Configuration conf, ResourceUsageMetrics metrics, enabled = true; } - this.progress = progress; emulationInterval = conf.getFloat(HEAP_EMULATION_PROGRESS_INTERVAL, DEFAULT_EMULATION_PROGRESS_INTERVAL); diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestGridmixMemoryEmulation.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestGridmixMemoryEmulation.java index 422ec123d25..486165d9efa 100644 --- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestGridmixMemoryEmulation.java +++ b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestGridmixMemoryEmulation.java @@ -171,6 +171,11 @@ public void testTotalHeapUsageEmulatorPlugin() throws Exception { assertEquals("Disabled heap usage emulation plugin works!", heapUsagePre, heapUsagePost); + // test with get progress + float progress = heapPlugin.getProgress(); + assertEquals("Invalid progress of disabled cumulative heap usage emulation " + + "plugin!", 1.0f, progress, 0f); + // test with wrong/invalid configuration Boolean failed = null; invalidUsage = diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestResourceUsageEmulators.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestResourceUsageEmulators.java index f55e8ac9db6..9874be3229e 100644 --- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestResourceUsageEmulators.java +++ b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestResourceUsageEmulators.java @@ -32,7 +32,6 @@ import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig; import org.apache.hadoop.mapreduce.task.MapContextImpl; import org.apache.hadoop.mapreduce.util.ResourceCalculatorPlugin; -import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin.ProcResourceValues; import org.apache.hadoop.tools.rumen.ResourceUsageMetrics; import org.apache.hadoop.mapred.DummyResourceCalculatorPlugin; import org.apache.hadoop.mapred.gridmix.LoadJob.ResourceUsageMatcherRunner; @@ -484,6 +483,11 @@ public void testCumulativeCpuUsageEmulatorPlugin() throws Exception { assertEquals("Disabled cumulative CPU usage emulation plugin works!", cpuUsagePre, cpuUsagePost); + // test with get progress + float progress = cpuPlugin.getProgress(); + assertEquals("Invalid progress of disabled cumulative CPU usage emulation " + + "plugin!", 1.0f, progress, 0f); + // test with valid resource usage value ResourceUsageMetrics metrics = createMetrics(targetCpuUsage); From 346d50782ddca1c9eccf5a6af749a0b79e71807c Mon Sep 17 00:00:00 2001 From: Thomas White Date: Thu, 12 Apr 2012 16:22:25 +0000 Subject: [PATCH 10/57] MAPREDUCE-4140. mapreduce classes incorrectly importing "clover.org.apache.*" classes. 
Contributed by Patrick Hunt git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1325352 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 3 +++ .../java/org/apache/hadoop/mapreduce/v2/hs/PartialJob.java | 7 ++++--- .../api/protocolrecords/GetDelegationTokenRequest.java | 4 +--- .../resourcemanager/webapp/TestRMWebServicesNodes.java | 2 -- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index b8921ae2d5d..4dd2517fb08 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -322,6 +322,9 @@ Release 0.23.3 - UNRELEASED CONTIANER_LAUNCHED and CONTIANER_LAUNCH_FAILED events in additional states. (Robert Joseph Evans via sseth) + MAPREDUCE-4140. mapreduce classes incorrectly importing + "clover.org.apache.*" classes. (Patrick Hunt via tomwhite) + Release 0.23.2 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/PartialJob.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/PartialJob.java index f2acbe48a9c..199f77062dc 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/PartialJob.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/PartialJob.java @@ -21,6 +21,8 @@ import java.util.List; import java.util.Map; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.Path; import org.apache.hadoop.mapreduce.Counters; import org.apache.hadoop.mapreduce.JobACL; @@ -37,9 +39,9 @@ import org.apache.hadoop.security.authorize.AccessControlList; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; -import clover.org.apache.log4j.Logger; public class PartialJob implements org.apache.hadoop.mapreduce.v2.app.job.Job { + private static final Log LOG = LogFactory.getLog(PartialJob.class); private JobIndexInfo jobIndexInfo = null; private JobId jobId = null; @@ -78,8 +80,7 @@ public JobState getState() { } catch (Exception e) { // Meant for use by the display UI. Exception would prevent it from being // rendered.e Defaulting to KILLED - Logger.getLogger(this.getClass().getName()).warn( - "Exception while parsing job state. Defaulting to KILLED", e); + LOG.warn("Exception while parsing job state. 
Defaulting to KILLED", e); js = JobState.KILLED; } return js; diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetDelegationTokenRequest.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetDelegationTokenRequest.java index 2a1b4685682..ba4cbd643a8 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetDelegationTokenRequest.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetDelegationTokenRequest.java @@ -21,11 +21,9 @@ import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Evolving; -import clover.org.apache.velocity.runtime.resource.ResourceManager; - /** * The request issued by the client to get a delegation token from - * the {@link ResourceManager}. + * the {@code ResourceManager}. * for more information. */ @Public diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java index 04b4ad09241..828f5f6f252 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java @@ -55,8 +55,6 @@ import org.w3c.dom.NodeList; import org.xml.sax.InputSource; -import clover.org.jfree.util.Log; - import com.google.inject.Guice; import com.google.inject.Injector; import com.google.inject.servlet.GuiceServletContextListener; From f01ede227f0594388afb855234b0c4fbd250be26 Mon Sep 17 00:00:00 2001 From: Eli Collins Date: Thu, 12 Apr 2012 16:37:15 +0000 Subject: [PATCH 11/57] HADOOP-8144. pseudoSortByDistance in NetworkTopology doesn't work properly if no local node and first node is local rack node. Contributed by Junping Du git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1325367 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 4 ++++ .../src/main/java/org/apache/hadoop/net/NetworkTopology.java | 4 ++-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 383bc284a70..8c7c19e81a0 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -349,6 +349,10 @@ Release 2.0.0 - UNRELEASED HADOOP-8270. hadoop-daemon.sh stop action should return 0 for an already stopped service. (Roman Shaposhnik via eli) + HADOOP-8144. pseudoSortByDistance in NetworkTopology doesn't work + properly if no local node and first node is local rack node. + (Junping Du) + BREAKDOWN OF HADOOP-7454 SUBTASKS HADOOP-7455. HA: Introduce HA Service Protocol Interface. 
(suresh) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java index 67fddd7e583..da8fab2956c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java @@ -662,8 +662,8 @@ static private void swap(Node[] nodes, int i, int j) { */ public void pseudoSortByDistance( Node reader, Node[] nodes ) { int tempIndex = 0; + int localRackNode = -1; if (reader != null ) { - int localRackNode = -1; //scan the array to find the local node & local rack node for(int i=0; i Date: Thu, 12 Apr 2012 18:08:00 +0000 Subject: [PATCH 12/57] HDFS-3255. HA DFS returns wrong token service. Contributed by Daryn Sharp. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1325414 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 ++ .../apache/hadoop/hdfs/DistributedFileSystem.java | 5 +++-- .../namenode/ha/TestDelegationTokensWithHA.java | 15 +++++++++++---- 3 files changed, 16 insertions(+), 6 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 442a446aa74..be47dcf980c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -501,6 +501,8 @@ Release 2.0.0 - UNRELEASED HDFS-3260. TestDatanodeRegistration should set minimum DN version in addition to minimum NN version. (atm) + HDFS-3255. HA DFS returns wrong token service (Daryn Sharp via todd) + BREAKDOWN OF HDFS-1623 SUBTASKS HDFS-2179. Add fencing framework and mechanisms for NameNode HA. 
(todd) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java index 5c63d6a27fd..d335aa8b71b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java @@ -848,8 +848,9 @@ public void setBalancerBandwidth(long bandwidth) throws IOException { */ @Override public String getCanonicalServiceName() { - if (HAUtil.isLogicalUri(getConf(), getUri())) { - return getUri().getHost(); + URI uri = getUri(); + if (HAUtil.isLogicalUri(getConf(), uri)) { + return HAUtil.buildTokenServiceForLogicalUri(uri).toString(); } else { return super.getCanonicalServiceName(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java index 60481af5db3..f7755814c4d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java @@ -42,6 +42,7 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; import org.apache.hadoop.io.Text; +import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; @@ -196,8 +197,7 @@ public void testHAUtilClonesDelegationTokens() throws Exception { // check that the token selected for one of the physical IPC addresses // matches the one we received InetSocketAddress addr = nn0.getNameNodeAddress(); - Text ipcDtService = new Text( - addr.getAddress().getHostAddress() + ":" + addr.getPort()); + Text ipcDtService = SecurityUtil.buildTokenService(addr); Token token2 = DelegationTokenSelector.selectHdfsDelegationToken(ipcDtService, ugi); assertNotNull(token2); @@ -212,8 +212,15 @@ public void testHAUtilClonesDelegationTokens() throws Exception { */ @Test public void testDFSGetCanonicalServiceName() throws Exception { - assertEquals(fs.getCanonicalServiceName(), - HATestUtil.getLogicalUri(cluster).getHost()); + URI hAUri = HATestUtil.getLogicalUri(cluster); + String haService = HAUtil.buildTokenServiceForLogicalUri(hAUri).toString(); + assertEquals(haService, dfs.getCanonicalServiceName()); + Token token = dfs.getDelegationToken( + UserGroupInformation.getCurrentUser().getShortUserName()); + assertEquals(haService, token.getService().toString()); + // make sure the logical uri is handled correctly + token.renew(dfs.getConf()); + token.cancel(dfs.getConf()); } enum TokenTestAction { From ecb30a48c4533163ff5f3fdfcf3c4e71b5b2a45f Mon Sep 17 00:00:00 2001 From: Siddharth Seth Date: Thu, 12 Apr 2012 18:39:49 +0000 Subject: [PATCH 13/57] MAPREDUCE-4050. For tasks without assigned containers, changes the node text on the UI to N/A instead of a link to null. 
(Contributed by Bhallamudi Venkata Siva Kamesh) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1325435 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 4 ++++ .../hadoop/mapreduce/v2/app/webapp/TaskPage.java | 10 +++++++--- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 4dd2517fb08..e2238f8af89 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -325,6 +325,10 @@ Release 0.23.3 - UNRELEASED MAPREDUCE-4140. mapreduce classes incorrectly importing "clover.org.apache.*" classes. (Patrick Hunt via tomwhite) + MAPREDUCE-4050. For tasks without assigned containers, changes the node + text on the UI to N/A instead of a link to null. (Bhallamudi Venkata Siva + Kamesh via sseth) + Release 0.23.2 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TaskPage.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TaskPage.java index 68a5e807c7e..1e7917e8120 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TaskPage.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TaskPage.java @@ -87,9 +87,13 @@ protected void render(Block html) { tr(). td(".id", taid). td(".progress", progress). - td(".state", ta.getState()). - td(). - a(".nodelink", url("http://", nodeHttpAddr), nodeHttpAddr); + td(".state", ta.getState()).td(); + if (nodeHttpAddr == null) { + nodeTd._("N/A"); + } else { + nodeTd. + a(".nodelink", url("http://", nodeHttpAddr), nodeHttpAddr); + } if (containerId != null) { String containerIdStr = ta.getAssignedContainerIdStr(); nodeTd._(" "). From 047a7b276c497a4ebb896c93a24e2f0edf258a7b Mon Sep 17 00:00:00 2001 From: Robert Joseph Evans Date: Thu, 12 Apr 2012 20:43:18 +0000 Subject: [PATCH 14/57] HADOOP-7510. Tokens should use original hostname provided instead of ip (Daryn Sharp via bobby) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1325500 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++ .../main/java/org/apache/hadoop/ipc/Client.java | 15 ++++++--------- .../java/org/apache/hadoop/net/NetUtils.java | 13 +++++++++---- .../test/java/org/apache/hadoop/ipc/TestRPC.java | 16 ++++++++++++++++ 4 files changed, 34 insertions(+), 13 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 8c7c19e81a0..57bc9e0fb9d 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -432,6 +432,9 @@ Release 0.23.3 - UNRELEASED HADOOP-8014. ViewFileSystem does not correctly implement getDefaultBlockSize, getDefaultReplication, getContentSummary (John George via bobby) + HADOOP-7510. 
Tokens should use original hostname provided instead of ip + (Daryn Sharp via bobby) + Release 0.23.2 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java index e5a2d7f15a0..cb999f3c41c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java @@ -53,7 +53,6 @@ import org.apache.hadoop.ipc.RpcPayloadHeader.*; import org.apache.hadoop.ipc.protobuf.IpcConnectionContextProtos.IpcConnectionContextProto; import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.io.Text; import org.apache.hadoop.io.Writable; import org.apache.hadoop.io.WritableUtils; import org.apache.hadoop.io.DataOutputBuffer; @@ -243,8 +242,8 @@ public Connection(ConnectionId remoteId) throws IOException { this.remoteId = remoteId; this.server = remoteId.getAddress(); if (server.isUnresolved()) { - throw NetUtils.wrapException(remoteId.getAddress().getHostName(), - remoteId.getAddress().getPort(), + throw NetUtils.wrapException(server.getHostName(), + server.getPort(), null, 0, new UnknownHostException()); @@ -274,9 +273,8 @@ public Connection(ConnectionId remoteId) throws IOException { } catch (IllegalAccessException e) { throw new IOException(e.toString()); } - InetSocketAddress addr = remoteId.getAddress(); - token = tokenSelector.selectToken(new Text(addr.getAddress() - .getHostAddress() + ":" + addr.getPort()), + token = tokenSelector.selectToken( + SecurityUtil.buildTokenService(server), ticket.getTokens()); } KerberosInfo krbInfo = SecurityUtil.getKerberosInfo(protocol, conf); @@ -305,7 +303,7 @@ public Connection(ConnectionId remoteId) throws IOException { + protocol.getSimpleName()); this.setName("IPC Client (" + socketFactory.hashCode() +") connection to " + - remoteId.getAddress().toString() + + server.toString() + " from " + ((ticket==null)?"an unknown user":ticket.getUserName())); this.setDaemon(true); } @@ -751,7 +749,6 @@ private synchronized boolean waitForWork() { } } - @SuppressWarnings("unused") public InetSocketAddress getRemoteAddress() { return server; } @@ -1159,7 +1156,7 @@ public Writable call(RpcKind rpcKind, Writable rpcRequest, call.error.fillInStackTrace(); throw call.error; } else { // local exception - InetSocketAddress address = remoteId.getAddress(); + InetSocketAddress address = connection.getRemoteAddress(); throw NetUtils.wrapException(address.getHostName(), address.getPort(), NetUtils.getHostname(), diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java index b6a9eac1084..9bf4a88e42d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java @@ -344,8 +344,8 @@ public static List getAllStaticResolutions() { /** * Returns InetSocketAddress that a client can use to * connect to the server. Server.getListenerAddress() is not correct when - * the server binds to "0.0.0.0". This returns "127.0.0.1:port" when - * the getListenerAddress() returns "0.0.0.0:port". + * the server binds to "0.0.0.0". This returns "hostname:port" of the server, + * or "127.0.0.1:port" when the getListenerAddress() returns "0.0.0.0:port". 
* * @param server * @return socket address that a client can use to connect to the server. @@ -353,7 +353,12 @@ public static List getAllStaticResolutions() { public static InetSocketAddress getConnectAddress(Server server) { InetSocketAddress addr = server.getListenerAddress(); if (addr.getAddress().isAnyLocalAddress()) { - addr = createSocketAddrForHost("127.0.0.1", addr.getPort()); + try { + addr = new InetSocketAddress(InetAddress.getLocalHost(), addr.getPort()); + } catch (UnknownHostException uhe) { + // shouldn't get here unless the host doesn't have a loopback iface + addr = createSocketAddrForHost("127.0.0.1", addr.getPort()); + } } return addr; } @@ -655,7 +660,7 @@ public static InetAddress getLocalInetAddress(String host) } InetAddress addr = null; try { - addr = InetAddress.getByName(host); + addr = SecurityUtil.getByName(host); if (NetworkInterface.getByInetAddress(addr) == null) { addr = null; // Not a local address } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java index 5b2c6d58848..f22cd614100 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java @@ -21,6 +21,7 @@ import java.io.Closeable; import java.io.IOException; import java.net.ConnectException; +import java.net.InetAddress; import java.net.InetSocketAddress; import java.lang.management.ManagementFactory; import java.lang.management.ThreadInfo; @@ -41,6 +42,8 @@ import org.apache.hadoop.io.retry.RetryPolicies; import org.apache.hadoop.io.retry.RetryProxy; import org.apache.hadoop.ipc.Client.ConnectionId; +import org.apache.hadoop.ipc.TestSaslRPC.TestSaslImpl; +import org.apache.hadoop.ipc.TestSaslRPC.TestSaslProtocol; import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.authorize.AuthorizationException; @@ -542,6 +545,19 @@ private void doRPCs(Configuration conf, boolean expectFailure) throws Exception } } + @Test + public void testServerAddress() throws IOException { + Server server = RPC.getServer(TestProtocol.class, + new TestImpl(), ADDRESS, 0, 5, true, conf, null); + InetSocketAddress bindAddr = null; + try { + bindAddr = NetUtils.getConnectAddress(server); + } finally { + server.stop(); + } + assertEquals(bindAddr.getAddress(), InetAddress.getLocalHost()); + } + @Test public void testAuthorization() throws Exception { Configuration conf = new Configuration(); From 07a436744588d131d8ef31abab3093aa59b4d531 Mon Sep 17 00:00:00 2001 From: Aaron Myers Date: Thu, 12 Apr 2012 21:11:32 +0000 Subject: [PATCH 15/57] HDFS-3259. NameNode#initializeSharedEdits should populate shared edits dir with edit log segments. Contributed by Aaron T. Myers. 
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1325518 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + .../hdfs/server/namenode/FSEditLog.java | 13 ++-- .../server/namenode/FileJournalManager.java | 2 +- .../hadoop/hdfs/server/namenode/NameNode.java | 69 ++++++++++++++++-- .../ha/TestInitializeSharedEdits.java | 71 ++++++++++++++----- 5 files changed, 128 insertions(+), 30 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index be47dcf980c..387368b880b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -373,6 +373,9 @@ Release 2.0.0 - UNRELEASED HDFS-2983. Relax the build version check to permit rolling upgrades within a release. (atm) + HDFS-3259. NameNode#initializeSharedEdits should populate shared edits dir + with edit log segments. (atm) + OPTIMIZATIONS HDFS-3024. Improve performance of stringification in addStoredBlock (todd) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java index 7f6435e778b..d96af1ee226 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java @@ -311,10 +311,12 @@ synchronized void close() { endCurrentLogSegment(true); } - try { - journalSet.close(); - } catch (IOException ioe) { - LOG.warn("Error closing journalSet", ioe); + if (!journalSet.isEmpty()) { + try { + journalSet.close(); + } catch (IOException ioe) { + LOG.warn("Error closing journalSet", ioe); + } } state = State.CLOSED; @@ -813,9 +815,8 @@ void logReassignLease(String leaseHolder, String src, String newHolder) { } /** - * Used only by unit tests. + * Get all the journals this edit log is currently operating on. 
*/ - @VisibleForTesting synchronized List getJournals() { return journalSet.getAllJournalStreams(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java index c2281700478..3767111c058 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java @@ -344,7 +344,7 @@ synchronized public void recoverUnfinalizedSegments() throws IOException { } } - private List getLogFiles(long fromTxId) throws IOException { + List getLogFiles(long fromTxId) throws IOException { File currentDir = sd.getCurrentDir(); List allLogFiles = matchEditLogs(currentDir); List logFiles = Lists.newArrayList(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java index dec7452e27f..fd1160516eb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java @@ -18,14 +18,17 @@ package org.apache.hadoop.hdfs.server.namenode; import java.io.File; +import java.io.FileInputStream; +import java.io.FileNotFoundException; import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; import java.net.InetSocketAddress; import java.net.URI; import java.util.Arrays; import java.util.Collection; import java.util.Iterator; import java.util.List; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.HadoopIllegalArgumentException; @@ -41,7 +44,6 @@ import org.apache.hadoop.fs.Trash; import static org.apache.hadoop.hdfs.DFSConfigKeys.*; -import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.HAUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; @@ -49,6 +51,9 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; +import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; +import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile; +import org.apache.hadoop.hdfs.server.namenode.JournalSet.JournalAndStream; import org.apache.hadoop.hdfs.server.namenode.ha.ActiveState; import org.apache.hadoop.hdfs.server.namenode.ha.BootstrapStandby; import org.apache.hadoop.hdfs.server.namenode.ha.HAContext; @@ -61,6 +66,8 @@ import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; +import org.apache.hadoop.hdfs.util.AtomicFileOutputStream; +import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.ipc.StandbyException; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.net.NetUtils; @@ -749,9 +756,10 @@ public static boolean initializeSharedEdits(Configuration conf, boolean force) { return initializeSharedEdits(conf, force, false); } - + /** - * Format a new shared edits dir. 
+ * Format a new shared edits dir and copy in enough edit log segments so that + * the standby NN can start up. * * @param conf configuration * @param force format regardless of whether or not the shared edits dir exists @@ -785,8 +793,19 @@ private static boolean initializeSharedEdits(Configuration conf, existingStorage.getBlockPoolID(), existingStorage.getCTime(), existingStorage.getDistributedUpgradeVersion())); - } catch (Exception e) { - LOG.error("Could not format shared edits dir", e); + + // Need to make sure the edit log segments are in good shape to initialize + // the shared edits dir. + fsns.getFSImage().getEditLog().close(); + fsns.getFSImage().getEditLog().initJournalsForWrite(); + fsns.getFSImage().getEditLog().recoverUnclosedStreams(); + + if (copyEditLogSegmentsToSharedDir(fsns, sharedEditsDirs, + newSharedStorage, conf)) { + return true; // aborted + } + } catch (IOException ioe) { + LOG.error("Could not initialize shared edits dir", ioe); return true; // aborted } finally { // Have to unlock storage explicitly for the case when we're running in a @@ -802,6 +821,44 @@ private static boolean initializeSharedEdits(Configuration conf, } return false; // did not abort } + + private static boolean copyEditLogSegmentsToSharedDir(FSNamesystem fsns, + Collection sharedEditsDirs, NNStorage newSharedStorage, + Configuration conf) throws FileNotFoundException, IOException { + // Copy edit log segments into the new shared edits dir. + for (JournalAndStream jas : fsns.getFSImage().getEditLog().getJournals()) { + FileJournalManager fjm = null; + if (!(jas.getManager() instanceof FileJournalManager)) { + LOG.error("Cannot populate shared edits dir from non-file " + + "journal manager: " + jas.getManager()); + return true; // aborted + } else { + fjm = (FileJournalManager) jas.getManager(); + } + for (EditLogFile elf : fjm.getLogFiles(fsns.getFSImage() + .getMostRecentCheckpointTxId())) { + File editLogSegment = elf.getFile(); + for (URI sharedEditsUri : sharedEditsDirs) { + StorageDirectory sharedEditsDir = newSharedStorage + .getStorageDirectory(sharedEditsUri); + File targetFile = new File(sharedEditsDir.getCurrentDir(), + editLogSegment.getName()); + if (!targetFile.exists()) { + InputStream in = null; + OutputStream out = null; + try { + in = new FileInputStream(editLogSegment); + out = new AtomicFileOutputStream(targetFile); + IOUtils.copyBytes(in, out, conf); + } finally { + IOUtils.cleanup(LOG, in, out); + } + } + } + } + } + return false; // did not abort + } private static boolean finalize(Configuration conf, boolean isConfirmationNeeded diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestInitializeSharedEdits.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestInitializeSharedEdits.java index 3415b5eff9b..b976a9c395c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestInitializeSharedEdits.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestInitializeSharedEdits.java @@ -19,17 +19,22 @@ import java.io.File; import java.io.IOException; +import java.net.URISyntaxException; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.ha.ServiceFailedException; import 
org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSUtil; +import org.apache.hadoop.hdfs.HAUtil; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.MiniDFSNNTopology; import org.apache.hadoop.hdfs.server.namenode.NameNode; +import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; import org.apache.hadoop.test.GenericTestUtils; import org.junit.After; import org.junit.Before; @@ -48,7 +53,10 @@ public class TestInitializeSharedEdits { @Before public void setupCluster() throws IOException { conf = new Configuration(); - + conf.setInt(DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY, 1); + conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1); + HAUtil.setAllowStandbyReads(conf, true); + MiniDFSNNTopology topology = MiniDFSNNTopology.simpleHATopology(); cluster = new MiniDFSCluster.Builder(conf) @@ -56,11 +64,8 @@ public void setupCluster() throws IOException { .numDataNodes(0) .build(); cluster.waitActive(); - - cluster.shutdownNameNode(0); - cluster.shutdownNameNode(1); - File sharedEditsDir = new File(cluster.getSharedEditsDir(0, 1)); - assertTrue(FileUtil.fullyDelete(sharedEditsDir)); + + shutdownClusterAndRemoveSharedEditsDir(); } @After @@ -70,8 +75,14 @@ public void shutdownCluster() throws IOException { } } - @Test - public void testInitializeSharedEdits() throws Exception { + private void shutdownClusterAndRemoveSharedEditsDir() throws IOException { + cluster.shutdownNameNode(0); + cluster.shutdownNameNode(1); + File sharedEditsDir = new File(cluster.getSharedEditsDir(0, 1)); + assertTrue(FileUtil.fullyDelete(sharedEditsDir)); + } + + private void assertCannotStartNameNodes() { // Make sure we can't currently start either NN. try { cluster.restartNameNode(0, false); @@ -89,24 +100,27 @@ public void testInitializeSharedEdits() throws Exception { GenericTestUtils.assertExceptionContains( "Cannot start an HA namenode with name dirs that need recovery", ioe); } - - // Initialize the shared edits dir. - assertFalse(NameNode.initializeSharedEdits(conf)); - + } + + private void assertCanStartHaNameNodes(String pathSuffix) + throws ServiceFailedException, IOException, URISyntaxException, + InterruptedException { // Now should be able to start both NNs. Pass "false" here so that we don't // try to waitActive on all NNs, since the second NN doesn't exist yet. cluster.restartNameNode(0, false); cluster.restartNameNode(1, true); // Make sure HA is working. - cluster.transitionToActive(0); + cluster.getNameNode(0).getRpcServer().transitionToActive(); FileSystem fs = null; try { + Path newPath = new Path(TEST_PATH, pathSuffix); fs = HATestUtil.configureFailoverFs(cluster, conf); - assertTrue(fs.mkdirs(TEST_PATH)); - cluster.transitionToStandby(0); - cluster.transitionToActive(1); - assertTrue(fs.isDirectory(TEST_PATH)); + assertTrue(fs.mkdirs(newPath)); + HATestUtil.waitForStandbyToCatchUp(cluster.getNameNode(0), + cluster.getNameNode(1)); + assertTrue(NameNodeAdapter.getFileInfo(cluster.getNameNode(1), + newPath.toString(), false).isDir()); } finally { if (fs != null) { fs.close(); @@ -114,6 +128,29 @@ public void testInitializeSharedEdits() throws Exception { } } + @Test + public void testInitializeSharedEdits() throws Exception { + assertCannotStartNameNodes(); + + // Initialize the shared edits dir. 
+ assertFalse(NameNode.initializeSharedEdits(cluster.getConfiguration(0))); + + assertCanStartHaNameNodes("1"); + + // Now that we've done a metadata operation, make sure that deleting and + // re-initializing the shared edits dir will let the standby still start. + + shutdownClusterAndRemoveSharedEditsDir(); + + assertCannotStartNameNodes(); + + // Re-initialize the shared edits dir. + assertFalse(NameNode.initializeSharedEdits(cluster.getConfiguration(0))); + + // Should *still* be able to start both NNs + assertCanStartHaNameNodes("2"); + } + @Test public void testDontOverWriteExistingDir() { assertFalse(NameNode.initializeSharedEdits(conf, false)); From 4f230adc13c70b09083a928b9dc65fa404e6d177 Mon Sep 17 00:00:00 2001 From: Aaron Myers Date: Thu, 12 Apr 2012 21:28:44 +0000 Subject: [PATCH 16/57] HDFS-3256. HDFS considers blocks under-replicated if topology script is configured with only 1 rack. Contributed by Aaron T. Myers. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1325531 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 ++ .../server/blockmanagement/BlockManager.java | 7 +-- .../blockmanagement/DatanodeManager.java | 44 +++++++++++++++++++ .../blockmanagement/TestBlockManager.java | 27 ++++++++++++ .../TestBlocksWithNotEnoughRacks.java | 4 +- 5 files changed, 80 insertions(+), 5 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 387368b880b..0dceb8c7137 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -506,6 +506,9 @@ Release 2.0.0 - UNRELEASED HDFS-3255. HA DFS returns wrong token service (Daryn Sharp via todd) + HDFS-3256. HDFS considers blocks under-replicated if topology script is + configured with only 1 rack. (atm) + BREAKDOWN OF HDFS-1623 SUBTASKS HDFS-2179. Add fencing framework and mechanisms for NameNode HA. (todd) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java index a3f432d46a3..52042b45e8f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java @@ -247,8 +247,7 @@ public BlockManager(final Namesystem namesystem, final FSClusterStats stats, this.maxReplicationStreams = conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_DEFAULT); - this.shouldCheckForEnoughRacks = conf.get(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY) == null ? 
false - : true; + this.shouldCheckForEnoughRacks = conf.get(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY) != null; this.replicationRecheckInterval = conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, @@ -2829,7 +2828,9 @@ boolean blockHasEnoughRacks(Block b) { DatanodeDescriptor cur = it.next(); if (!cur.isDecommissionInProgress() && !cur.isDecommissioned()) { if ((corruptNodes == null ) || !corruptNodes.contains(cur)) { - if (numExpectedReplicas == 1) { + if (numExpectedReplicas == 1 || + (numExpectedReplicas > 1 && + !datanodeManager.hasClusterEverBeenMultiRack())) { enoughRacks = true; break; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java index fe2b34d109a..0c16c7f5d1b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java @@ -71,6 +71,7 @@ import org.apache.hadoop.util.HostsFileReader; import org.apache.hadoop.util.ReflectionUtils; +import com.google.common.annotations.VisibleForTesting; import com.google.common.net.InetAddresses; /** @@ -126,6 +127,12 @@ public class DatanodeManager { /** Ask Datanode only up to this many blocks to delete. */ final int blockInvalidateLimit; + /** + * Whether or not this cluster has ever consisted of more than 1 rack, + * according to the NetworkTopology. + */ + private boolean hasClusterEverBeenMultiRack = false; + DatanodeManager(final BlockManager blockManager, final Namesystem namesystem, final Configuration conf ) throws IOException { @@ -331,6 +338,7 @@ private void addDatanode(final DatanodeDescriptor node) { host2DatanodeMap.add(node); networktopology.add(node); + checkIfClusterIsNowMultiRack(node); if (LOG.isDebugEnabled()) { LOG.debug(getClass().getSimpleName() + ".addDatanode: " @@ -768,6 +776,42 @@ public void fetchDatanodes(final List live, } } + /** + * @return true if this cluster has ever consisted of multiple racks, even if + * it is not now a multi-rack cluster. + */ + boolean hasClusterEverBeenMultiRack() { + return hasClusterEverBeenMultiRack; + } + + /** + * Check if the cluster now consists of multiple racks. If it does, and this + * is the first time it's consisted of multiple racks, then process blocks + * that may now be misreplicated. + * + * @param node DN which caused cluster to become multi-rack. Used for logging. + */ + @VisibleForTesting + void checkIfClusterIsNowMultiRack(DatanodeDescriptor node) { + if (!hasClusterEverBeenMultiRack && networktopology.getNumOfRacks() > 1) { + String message = "DN " + node + " joining cluster has expanded a formerly " + + "single-rack cluster to be multi-rack. "; + if (namesystem.isPopulatingReplQueues()) { + message += "Re-checking all blocks for replication, since they should " + + "now be replicated cross-rack"; + LOG.info(message); + } else { + message += "Not checking for mis-replicated blocks because this NN is " + + "not yet processing repl queues."; + LOG.debug(message); + } + hasClusterEverBeenMultiRack = true; + if (namesystem.isPopulatingReplQueues()) { + blockManager.processMisReplicatedBlocks(); + } + } + } + /** * Parse a DatanodeID from a hosts file entry * @param hostLine of form [hostname|ip][:port]? 
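Taken together with the BlockManager change earlier in this patch, the effect is that replica placement is only judged rack-insufficient once the cluster has, at some point, contained more than one rack, and the first datanode that expands the cluster to a second rack triggers a re-scan for mis-replicated blocks. A deliberately simplified sketch of the resulting check (the real blockHasEnoughRacks() iterates live replicas and excludes decommissioned and corrupt nodes; this compressed form and its parameters are illustrative only):

  // Simplified illustration, not the method's real signature.
  boolean hasEnoughRacks(int expectedReplicas, boolean replicasSpanTwoRacks) {
    if (expectedReplicas == 1 || !datanodeManager.hasClusterEverBeenMultiRack()) {
      return true;   // single replica, or the cluster has only ever had one rack
    }
    return replicasSpanTwoRacks;
  }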
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java index 0be519dd46c..a6e8c4f05b8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java @@ -92,6 +92,7 @@ private void addNodes(Iterable nodesToAdd) { dn.updateHeartbeat( 2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0, 0); + bm.getDatanodeManager().checkIfClusterIsNowMultiRack(dn); } } @@ -310,6 +311,32 @@ private void doTestSufficientlyReplBlocksUsesNewRack(int testIndex) { rackB.contains(pipeline[1])); } + @Test + public void testBlocksAreNotUnderreplicatedInSingleRack() throws Exception { + List nodes = ImmutableList.of( + new DatanodeDescriptor(new DatanodeID("h1", 5020), "/rackA"), + new DatanodeDescriptor(new DatanodeID("h2", 5020), "/rackA"), + new DatanodeDescriptor(new DatanodeID("h3", 5020), "/rackA"), + new DatanodeDescriptor(new DatanodeID("h4", 5020), "/rackA"), + new DatanodeDescriptor(new DatanodeID("h5", 5020), "/rackA"), + new DatanodeDescriptor(new DatanodeID("h6", 5020), "/rackA") + ); + addNodes(nodes); + List origNodes = nodes.subList(0, 3);; + for (int i = 0; i < NUM_TEST_ITERS; i++) { + doTestSingleRackClusterIsSufficientlyReplicated(i, origNodes); + } + } + + private void doTestSingleRackClusterIsSufficientlyReplicated(int testIndex, + List origNodes) + throws Exception { + assertEquals(0, bm.numOfUnderReplicatedBlocks()); + addBlockOnNodes((long)testIndex, origNodes); + bm.processMisReplicatedBlocks(); + assertEquals(0, bm.numOfUnderReplicatedBlocks()); + } + /** * Tell the block manager that replication is completed for the given diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java index df007287644..3161124e9cb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java @@ -97,7 +97,7 @@ public void testSufficientlyReplBlocksUsesNewRack() throws Exception { final FileSystem fs = cluster.getFileSystem(); DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L); ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath); - DFSTestUtil.waitForReplication(cluster, b, 1, REPLICATION_FACTOR, 1); + DFSTestUtil.waitForReplication(cluster, b, 1, REPLICATION_FACTOR, 0); // Add a new datanode on a different rack String newRacks[] = {"/rack2"}; @@ -165,7 +165,7 @@ public void testUnderReplicatedUsesNewRacks() throws Exception { final FileSystem fs = cluster.getFileSystem(); DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L); ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath); - DFSTestUtil.waitForReplication(cluster, b, 1, REPLICATION_FACTOR, 1); + DFSTestUtil.waitForReplication(cluster, b, 1, REPLICATION_FACTOR, 0); // Add new datanodes on a different rack and increase the // replication factor so the block is underreplicated and make From 
10a0fcb62d37feae6a3cd625cbfc08e5adac8061 Mon Sep 17 00:00:00 2001 From: Tsz-wo Sze Date: Thu, 12 Apr 2012 23:18:37 +0000 Subject: [PATCH 17/57] Add .classpath, .project and .settings to svn:ignore. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1325569 13f79535-47bb-0310-9956-ffa450edef68 From 02db5b7ef794bb34f8b12a1a16a65d07782c3583 Mon Sep 17 00:00:00 2001 From: Thomas White Date: Fri, 13 Apr 2012 00:10:30 +0000 Subject: [PATCH 18/57] MAPREDUCE-4147. YARN should not have a compile-time dependency on HDFS. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1325573 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 3 +++ hadoop-mapreduce-project/hadoop-yarn/pom.xml | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index e2238f8af89..51e01b55949 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -238,6 +238,9 @@ Release 2.0.0 - UNRELEASED MAPREDUCE-4107. Fix tests in org.apache.hadoop.ipc.TestSocketFactory (Devaraj K via tgraves) + MAPREDUCE-4147. YARN should not have a compile-time dependency on HDFS. + (tomwhite) + Release 0.23.3 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-mapreduce-project/hadoop-yarn/pom.xml b/hadoop-mapreduce-project/hadoop-yarn/pom.xml index fc0f8f895ae..438d034c237 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/pom.xml +++ b/hadoop-mapreduce-project/hadoop-yarn/pom.xml @@ -109,7 +109,7 @@ org.apache.hadoop hadoop-hdfs - provided + test com.google.inject From 27ea3ab6ba125bfb2061a772649788747375f557 Mon Sep 17 00:00:00 2001 From: Robert Joseph Evans Date: Fri, 13 Apr 2012 13:45:03 +0000 Subject: [PATCH 19/57] MAPREDUCE-4128. AM Recovery expects all attempts of a completed task to also be completed. (Bikas Saha via bobby) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1325765 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 3 + .../mapreduce/v2/app/job/impl/TaskImpl.java | 1 + .../TestJobHistoryEventHandler.java | 8 +- .../mapreduce/v2/app/TestFetchFailure.java | 120 ++++++++++++++++++ .../src/main/avro/Events.avpr | 3 +- .../jobhistory/JobHistoryParser.java | 17 +++ .../jobhistory/TaskFinishedEvent.java | 22 +++- .../rumen/Task20LineHistoryEventEmitter.java | 2 +- 8 files changed, 169 insertions(+), 7 deletions(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 51e01b55949..901aa9b0f5a 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -332,6 +332,9 @@ Release 0.23.3 - UNRELEASED text on the UI to N/A instead of a link to null. (Bhallamudi Venkata Siva Kamesh via sseth) + MAPREDUCE-4128. AM Recovery expects all attempts of a completed task to + also be completed. 
(Bikas Saha via bobby) + Release 0.23.2 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java index 174e9d1f443..58edd1690c4 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java @@ -656,6 +656,7 @@ private void handleTaskAttemptCompletion(TaskAttemptId attemptId, private static TaskFinishedEvent createTaskFinishedEvent(TaskImpl task, TaskState taskState) { TaskFinishedEvent tfe = new TaskFinishedEvent(TypeConverter.fromYarn(task.taskId), + TypeConverter.fromYarn(task.successfulAttempt), task.getFinishTime(task.successfulAttempt), TypeConverter.fromYarn(task.taskId.getTaskType()), taskState.toString(), diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java index fce41d6086c..c1c227064eb 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java @@ -93,7 +93,7 @@ public void testFirstFlushOnCompletionEvent() throws Exception { // First completion event, but min-queue-size for batching flushes is 10 handleEvent(jheh, new JobHistoryEvent(t.jobId, new TaskFinishedEvent( - t.taskID, 0, TaskType.MAP, "", null))); + t.taskID, null, 0, TaskType.MAP, "", null))); verify(mockWriter).flush(); } finally { @@ -129,7 +129,7 @@ public void testMaxUnflushedCompletionEvents() throws Exception { for (int i = 0 ; i < 100 ; i++) { queueEvent(jheh, new JobHistoryEvent(t.jobId, new TaskFinishedEvent( - t.taskID, 0, TaskType.MAP, "", null))); + t.taskID, null, 0, TaskType.MAP, "", null))); } handleNextNEvents(jheh, 9); @@ -174,7 +174,7 @@ public void testUnflushedTimer() throws Exception { for (int i = 0 ; i < 100 ; i++) { queueEvent(jheh, new JobHistoryEvent(t.jobId, new TaskFinishedEvent( - t.taskID, 0, TaskType.MAP, "", null))); + t.taskID, null, 0, TaskType.MAP, "", null))); } handleNextNEvents(jheh, 9); @@ -215,7 +215,7 @@ public void testBatchedFlushJobEndMultiplier() throws Exception { for (int i = 0 ; i < 100 ; i++) { queueEvent(jheh, new JobHistoryEvent(t.jobId, new TaskFinishedEvent( - t.taskID, 0, TaskType.MAP, "", null))); + t.taskID, null, 0, TaskType.MAP, "", null))); } queueEvent(jheh, new JobHistoryEvent(t.jobId, new JobFinishedEvent( TypeConverter.fromYarn(t.jobId), 0, 10, 10, 0, 0, null, null, new Counters()))); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestFetchFailure.java 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestFetchFailure.java index 827e727e564..bc895a4ff10 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestFetchFailure.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestFetchFailure.java @@ -25,6 +25,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.mapreduce.MRJobConfig; +import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent; +import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEventHandler; import org.apache.hadoop.mapreduce.v2.api.records.JobState; import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent; import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEventStatus; @@ -37,6 +39,7 @@ import org.apache.hadoop.mapreduce.v2.app.job.event.JobTaskAttemptFetchFailureEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType; +import org.apache.hadoop.yarn.event.EventHandler; import org.junit.Test; public class TestFetchFailure { @@ -142,6 +145,107 @@ public void testFetchFailure() throws Exception { Assert.assertEquals("Event status not correct for reduce attempt1", TaskAttemptCompletionEventStatus.SUCCEEDED, events[3].getStatus()); } + + /** + * This tests that if a map attempt was failed (say due to fetch failures), + * then it gets re-run. When the next map attempt is running, if the AM dies, + * then, on AM re-run, the AM does not incorrectly remember the first failed + * attempt. Currently recovery does not recover running tasks. Effectively, + * the AM re-runs the maps from scratch. 
+ */ + @Test + public void testFetchFailureWithRecovery() throws Exception { + int runCount = 0; + MRApp app = new MRAppWithHistory(1, 1, false, this.getClass().getName(), true, ++runCount); + Configuration conf = new Configuration(); + // map -> reduce -> fetch-failure -> map retry is incompatible with + // sequential, single-task-attempt approach in uber-AM, so disable: + conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false); + Job job = app.submit(conf); + app.waitForState(job, JobState.RUNNING); + //all maps would be running + Assert.assertEquals("Num tasks not correct", + 2, job.getTasks().size()); + Iterator it = job.getTasks().values().iterator(); + Task mapTask = it.next(); + Task reduceTask = it.next(); + + //wait for Task state move to RUNNING + app.waitForState(mapTask, TaskState.RUNNING); + TaskAttempt mapAttempt1 = mapTask.getAttempts().values().iterator().next(); + app.waitForState(mapAttempt1, TaskAttemptState.RUNNING); + + //send the done signal to the map attempt + app.getContext().getEventHandler().handle( + new TaskAttemptEvent(mapAttempt1.getID(), + TaskAttemptEventType.TA_DONE)); + + // wait for map success + app.waitForState(mapTask, TaskState.SUCCEEDED); + + TaskAttemptCompletionEvent[] events = + job.getTaskAttemptCompletionEvents(0, 100); + Assert.assertEquals("Num completion events not correct", + 1, events.length); + Assert.assertEquals("Event status not correct", + TaskAttemptCompletionEventStatus.SUCCEEDED, events[0].getStatus()); + + // wait for reduce to start running + app.waitForState(reduceTask, TaskState.RUNNING); + TaskAttempt reduceAttempt = + reduceTask.getAttempts().values().iterator().next(); + app.waitForState(reduceAttempt, TaskAttemptState.RUNNING); + + //send 3 fetch failures from reduce to trigger map re execution + sendFetchFailure(app, reduceAttempt, mapAttempt1); + sendFetchFailure(app, reduceAttempt, mapAttempt1); + sendFetchFailure(app, reduceAttempt, mapAttempt1); + + //wait for map Task state move back to RUNNING + app.waitForState(mapTask, TaskState.RUNNING); + + // Crash the app again. 
+ app.stop(); + + //rerun + app = + new MRAppWithHistory(1, 1, false, this.getClass().getName(), false, + ++runCount); + conf = new Configuration(); + conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE, true); + conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false); + job = app.submit(conf); + app.waitForState(job, JobState.RUNNING); + //all maps would be running + Assert.assertEquals("Num tasks not correct", + 2, job.getTasks().size()); + it = job.getTasks().values().iterator(); + mapTask = it.next(); + reduceTask = it.next(); + + // the map is not in a SUCCEEDED state after restart of AM + app.waitForState(mapTask, TaskState.RUNNING); + mapAttempt1 = mapTask.getAttempts().values().iterator().next(); + app.waitForState(mapAttempt1, TaskAttemptState.RUNNING); + + //send the done signal to the map attempt + app.getContext().getEventHandler().handle( + new TaskAttemptEvent(mapAttempt1.getID(), + TaskAttemptEventType.TA_DONE)); + + // wait for map success + app.waitForState(mapTask, TaskState.SUCCEEDED); + + reduceAttempt = reduceTask.getAttempts().values().iterator().next(); + //send done to reduce + app.getContext().getEventHandler().handle( + new TaskAttemptEvent(reduceAttempt.getID(), + TaskAttemptEventType.TA_DONE)); + + app.waitForState(job, JobState.SUCCEEDED); + events = job.getTaskAttemptCompletionEvents(0, 100); + Assert.assertEquals("Num completion events not correct", 2, events.length); + } private void sendFetchFailure(MRApp app, TaskAttempt reduceAttempt, TaskAttempt mapAttempt) { @@ -150,4 +254,20 @@ private void sendFetchFailure(MRApp app, TaskAttempt reduceAttempt, reduceAttempt.getID(), Arrays.asList(new TaskAttemptId[] {mapAttempt.getID()}))); } + + static class MRAppWithHistory extends MRApp { + public MRAppWithHistory(int maps, int reduces, boolean autoComplete, + String testName, boolean cleanOnStart, int startCount) { + super(maps, reduces, autoComplete, testName, cleanOnStart, startCount); + } + + @Override + protected EventHandler createJobHistoryHandler( + AppContext context) { + JobHistoryEventHandler eventHandler = new JobHistoryEventHandler(context, + getStartCount()); + return eventHandler; + } + } + } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/avro/Events.avpr b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/avro/Events.avpr index 22864a6fc2f..050433a4887 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/avro/Events.avpr +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/avro/Events.avpr @@ -230,7 +230,8 @@ {"name": "taskType", "type": "string"}, {"name": "finishTime", "type": "long"}, {"name": "status", "type": "string"}, - {"name": "counters", "type": "JhCounters"} + {"name": "counters", "type": "JhCounters"}, + {"name": "successfulAttemptId", "type": "string"} ] }, diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryParser.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryParser.java index aa1089f1db1..34eb59449cd 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryParser.java +++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryParser.java @@ -276,6 +276,17 @@ private void handleTaskAttemptFailedEvent( attemptInfo.shuffleFinishTime = event.getFinishTime(); attemptInfo.sortFinishTime = event.getFinishTime(); attemptInfo.mapFinishTime = event.getFinishTime(); + if(TaskStatus.State.SUCCEEDED.toString().equals(taskInfo.status)) + { + //this is a successful task + if(attemptInfo.getAttemptId().equals(taskInfo.getSuccessfulAttemptId())) + { + // the failed attempt is the one that made this task successful + // so its no longer successful + taskInfo.status = null; + // not resetting the other fields set in handleTaskFinishedEvent() + } + } } private void handleTaskAttemptStartedEvent(TaskAttemptStartedEvent event) { @@ -299,6 +310,7 @@ private void handleTaskFinishedEvent(TaskFinishedEvent event) { taskInfo.counters = event.getCounters(); taskInfo.finishTime = event.getFinishTime(); taskInfo.status = TaskStatus.State.SUCCEEDED.toString(); + taskInfo.successfulAttemptId = event.getSuccessfulTaskAttemptId(); } private void handleTaskUpdatedEvent(TaskUpdatedEvent event) { @@ -514,6 +526,7 @@ public static class TaskInfo { String status; String error; TaskAttemptID failedDueToAttemptId; + TaskAttemptID successfulAttemptId; Map attemptsMap; public TaskInfo() { @@ -554,6 +567,10 @@ public void printAll() { public TaskAttemptID getFailedDueToAttemptId() { return failedDueToAttemptId; } + /** @return the attempt Id that caused this task to succeed */ + public TaskAttemptID getSuccessfulAttemptId() { + return successfulAttemptId; + } /** @return the error */ public String getError() { return error; } /** @return the map of all attempts for this task */ diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskFinishedEvent.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskFinishedEvent.java index 35399709bfa..55de80ca63f 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskFinishedEvent.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskFinishedEvent.java @@ -22,6 +22,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.mapreduce.Counters; +import org.apache.hadoop.mapreduce.TaskAttemptID; import org.apache.hadoop.mapreduce.TaskID; import org.apache.hadoop.mapreduce.TaskType; @@ -36,6 +37,7 @@ public class TaskFinishedEvent implements HistoryEvent { private TaskFinished datum = null; private TaskID taskid; + private TaskAttemptID successfulAttemptId; private long finishTime; private TaskType taskType; private String status; @@ -44,15 +46,17 @@ public class TaskFinishedEvent implements HistoryEvent { /** * Create an event to record the successful completion of a task * @param id Task ID + * @param attemptId Task Attempt ID of the successful attempt for this task * @param finishTime Finish time of the task * @param taskType Type of the task * @param status Status string * @param counters Counters for the task */ - public TaskFinishedEvent(TaskID id, long finishTime, + public TaskFinishedEvent(TaskID id, TaskAttemptID attemptId, 
long finishTime, TaskType taskType, String status, Counters counters) { this.taskid = id; + this.successfulAttemptId = attemptId; this.finishTime = finishTime; this.taskType = taskType; this.status = status; @@ -65,6 +69,10 @@ public Object getDatum() { if (datum == null) { datum = new TaskFinished(); datum.taskid = new Utf8(taskid.toString()); + if(successfulAttemptId != null) + { + datum.successfulAttemptId = new Utf8(successfulAttemptId.toString()); + } datum.finishTime = finishTime; datum.counters = EventWriter.toAvro(counters); datum.taskType = new Utf8(taskType.name()); @@ -76,6 +84,10 @@ public Object getDatum() { public void setDatum(Object oDatum) { this.datum = (TaskFinished)oDatum; this.taskid = TaskID.forName(datum.taskid.toString()); + if (datum.successfulAttemptId != null) { + this.successfulAttemptId = TaskAttemptID + .forName(datum.successfulAttemptId.toString()); + } this.finishTime = datum.finishTime; this.taskType = TaskType.valueOf(datum.taskType.toString()); this.status = datum.status.toString(); @@ -84,6 +96,14 @@ public void setDatum(Object oDatum) { /** Get task id */ public TaskID getTaskId() { return TaskID.forName(taskid.toString()); } + /** Get successful task attempt id */ + public TaskAttemptID getSuccessfulTaskAttemptId() { + if(successfulAttemptId != null) + { + return TaskAttemptID.forName(successfulAttemptId.toString()); + } + return null; + } /** Get the task finish time */ public long getFinishTime() { return finishTime; } /** Get task counters */ diff --git a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/Task20LineHistoryEventEmitter.java b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/Task20LineHistoryEventEmitter.java index 6ed9130c27c..dd002a4342a 100644 --- a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/Task20LineHistoryEventEmitter.java +++ b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/Task20LineHistoryEventEmitter.java @@ -128,7 +128,7 @@ HistoryEvent maybeEmitEvent(ParsedLine line, String taskIDName, return null; } - return new TaskFinishedEvent(taskID, Long.parseLong(finishTime), + return new TaskFinishedEvent(taskID, null, Long.parseLong(finishTime), that.originalTaskType, status, eventCounters); } From 5a20d446cf2a947b37fd5856a7e1fe6c21547557 Mon Sep 17 00:00:00 2001 From: Eli Collins Date: Fri, 13 Apr 2012 21:31:27 +0000 Subject: [PATCH 20/57] HDFS-2799. Trim fs.checkpoint.dir values. Contributed by Amith D K git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1325963 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 + .../hadoop/hdfs/server/namenode/FSImage.java | 7 ++-- .../server/namenode/TestNameEditsConfigs.java | 42 +++++++++++++++++++ 3 files changed, 48 insertions(+), 3 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 0dceb8c7137..e3ec5a464ba 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -509,6 +509,8 @@ Release 2.0.0 - UNRELEASED HDFS-3256. HDFS considers blocks under-replicated if topology script is configured with only 1 rack. (atm) + HDFS-2799. Trim fs.checkpoint.dir values. (Amith D K via eli) + BREAKDOWN OF HDFS-1623 SUBTASKS HDFS-2179. Add fencing framework and mechanisms for NameNode HA. 
(todd) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java index a9bf5c70667..70d184d9142 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java @@ -1076,7 +1076,8 @@ synchronized public void close() throws IOException { */ static Collection getCheckpointDirs(Configuration conf, String defaultValue) { - Collection dirNames = conf.getStringCollection(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY); + Collection dirNames = conf.getTrimmedStringCollection( + DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY); if (dirNames.size() == 0 && defaultValue != null) { dirNames.add(defaultValue); } @@ -1085,8 +1086,8 @@ static Collection getCheckpointDirs(Configuration conf, static List getCheckpointEditsDirs(Configuration conf, String defaultName) { - Collection dirNames = - conf.getStringCollection(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY); + Collection dirNames = conf.getTrimmedStringCollection( + DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY); if (dirNames.size() == 0 && defaultName != null) { dirNames.add(defaultName); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java index 5a188152c64..70f5b577851 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java @@ -490,4 +490,46 @@ public void testNameEditsConfigsFailure() throws IOException { cluster.shutdown(); } } + + /** + * Test dfs.namenode.checkpoint.dir and dfs.namenode.checkpoint.edits.dir + * should tolerate white space between values. 
+ */ + @Test + public void testCheckPointDirsAreTrimmed() throws Exception { + MiniDFSCluster cluster = null; + SecondaryNameNode secondary = null; + File checkpointNameDir1 = new File(base_dir, "chkptName1"); + File checkpointEditsDir1 = new File(base_dir, "chkptEdits1"); + File checkpointNameDir2 = new File(base_dir, "chkptName2"); + File checkpointEditsDir2 = new File(base_dir, "chkptEdits2"); + File nameDir = new File(base_dir, "name1"); + String whiteSpace = " \n \n "; + Configuration conf = new HdfsConfiguration(); + conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameDir.getPath()); + conf.setStrings(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY, whiteSpace + + checkpointNameDir1.getPath() + whiteSpace, whiteSpace + + checkpointNameDir2.getPath() + whiteSpace); + conf.setStrings(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY, + whiteSpace + checkpointEditsDir1.getPath() + whiteSpace, whiteSpace + + checkpointEditsDir2.getPath() + whiteSpace); + cluster = new MiniDFSCluster.Builder(conf).manageNameDfsDirs(false) + .numDataNodes(3).build(); + try { + cluster.waitActive(); + secondary = startSecondaryNameNode(conf); + secondary.doCheckpoint(); + assertTrue(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + " must be trimmed ", + checkpointNameDir1.exists()); + assertTrue(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + " must be trimmed ", + checkpointNameDir2.exists()); + assertTrue(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY + + " must be trimmed ", checkpointEditsDir1.exists()); + assertTrue(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY + + " must be trimmed ", checkpointEditsDir2.exists()); + } finally { + secondary.shutdown(); + cluster.shutdown(); + } + } } From 9a10b4e773ac937b59b458343457bbbd686d7f1e Mon Sep 17 00:00:00 2001 From: Siddharth Seth Date: Fri, 13 Apr 2012 22:24:16 +0000 Subject: [PATCH 21/57] MAPREDUCE-4144. Fix a NPE in the ResourceManager when handling node updates. (Contributed by Jason Lowe) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1325991 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 3 + .../scheduler/capacity/LeafQueue.java | 9 +- .../scheduler/capacity/TestLeafQueue.java | 97 +++++++++++++++++++ 3 files changed, 104 insertions(+), 5 deletions(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 901aa9b0f5a..d0eaa60a734 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -335,6 +335,9 @@ Release 0.23.3 - UNRELEASED MAPREDUCE-4128. AM Recovery expects all attempts of a completed task to also be completed. (Bikas Saha via bobby) + MAPREDUCE-4144. Fix a NPE in the ResourceManager when handling node + updates. 
(Jason Lowe via sseth) + Release 0.23.2 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java index c171fa1e2bd..2256799f9b5 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java @@ -1118,13 +1118,12 @@ private Resource assignOffSwitchContainers(Resource clusterResource, SchedulerNo boolean canAssign(SchedulerApp application, Priority priority, SchedulerNode node, NodeType type, RMContainer reservedContainer) { - // Reserved... - if (reservedContainer != null) { - return true; - } - // Clearly we need containers for this application... if (type == NodeType.OFF_SWITCH) { + if (reservedContainer != null) { + return true; + } + // 'Delay' off-switch ResourceRequest offSwitchRequest = application.getResourceRequest(priority, RMNode.ANY); diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java index 29aaaa4761f..8be9b20193e 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java @@ -926,6 +926,103 @@ public void testReservation() throws Exception { assertEquals(4*GB, a.getMetrics().getAllocatedMB()); } + @Test + public void testStolenReservedContainer() throws Exception { + // Manipulate queue 'a' + LeafQueue a = stubLeafQueue((LeafQueue)queues.get(A)); + //unset maxCapacity + a.setMaxCapacity(1.0f); + + // Users + final String user_0 = "user_0"; + final String user_1 = "user_1"; + + // Submit applications + final ApplicationAttemptId appAttemptId_0 = + TestUtils.getMockApplicationAttemptId(0, 0); + SchedulerApp app_0 = + new SchedulerApp(appAttemptId_0, user_0, a, + mock(ActiveUsersManager.class), rmContext, null); + a.submitApplication(app_0, user_0, A); + + final ApplicationAttemptId appAttemptId_1 = + TestUtils.getMockApplicationAttemptId(1, 0); + SchedulerApp app_1 = + new SchedulerApp(appAttemptId_1, user_1, a, + mock(ActiveUsersManager.class), rmContext, null); + a.submitApplication(app_1, user_1, A); + + // Setup some nodes + String host_0 = "host_0"; + SchedulerNode node_0 = TestUtils.getMockNode(host_0, DEFAULT_RACK, 0, 4*GB); + String host_1 = "host_1"; + SchedulerNode node_1 = TestUtils.getMockNode(host_1, DEFAULT_RACK, 0, 4*GB); + + final int numNodes = 3; + Resource clusterResource = Resources.createResource(numNodes * (4*GB)); + 
when(csContext.getNumClusterNodes()).thenReturn(numNodes); + + // Setup resource-requests + Priority priority = TestUtils.createMockPriority(1); + app_0.updateResourceRequests(Collections.singletonList( + TestUtils.createResourceRequest(RMNodeImpl.ANY, 2*GB, 1, priority, + recordFactory))); + + // Setup app_1 to request a 4GB container on host_0 and + // another 4GB container anywhere. + ArrayList appRequests_1 = + new ArrayList(4); + appRequests_1.add(TestUtils.createResourceRequest(host_0, 4*GB, 1, + priority, recordFactory)); + appRequests_1.add(TestUtils.createResourceRequest(DEFAULT_RACK, 4*GB, 1, + priority, recordFactory)); + appRequests_1.add(TestUtils.createResourceRequest(RMNodeImpl.ANY, 4*GB, 2, + priority, recordFactory)); + app_1.updateResourceRequests(appRequests_1); + + // Start testing... + + a.assignContainers(clusterResource, node_0); + assertEquals(2*GB, a.getUsedResources().getMemory()); + assertEquals(2*GB, app_0.getCurrentConsumption().getMemory()); + assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); + assertEquals(0*GB, a.getMetrics().getReservedMB()); + assertEquals(2*GB, a.getMetrics().getAllocatedMB()); + assertEquals(0*GB, a.getMetrics().getAvailableMB()); + + // Now, reservation should kick in for app_1 + a.assignContainers(clusterResource, node_0); + assertEquals(6*GB, a.getUsedResources().getMemory()); + assertEquals(2*GB, app_0.getCurrentConsumption().getMemory()); + assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); + assertEquals(4*GB, app_1.getCurrentReservation().getMemory()); + assertEquals(2*GB, node_0.getUsedResource().getMemory()); + assertEquals(4*GB, a.getMetrics().getReservedMB()); + assertEquals(2*GB, a.getMetrics().getAllocatedMB()); + + // node_1 heartbeats in and gets the DEFAULT_RACK request for app_1 + a.assignContainers(clusterResource, node_1); + assertEquals(10*GB, a.getUsedResources().getMemory()); + assertEquals(2*GB, app_0.getCurrentConsumption().getMemory()); + assertEquals(4*GB, app_1.getCurrentConsumption().getMemory()); + assertEquals(4*GB, app_1.getCurrentReservation().getMemory()); + assertEquals(4*GB, node_1.getUsedResource().getMemory()); + assertEquals(4*GB, a.getMetrics().getReservedMB()); + assertEquals(6*GB, a.getMetrics().getAllocatedMB()); + + // Now free 1 container from app_0 and try to assign to node_0 + a.completedContainer(clusterResource, app_0, node_0, + app_0.getLiveContainers().iterator().next(), null, RMContainerEventType.KILL); + a.assignContainers(clusterResource, node_0); + assertEquals(8*GB, a.getUsedResources().getMemory()); + assertEquals(0*GB, app_0.getCurrentConsumption().getMemory()); + assertEquals(8*GB, app_1.getCurrentConsumption().getMemory()); + assertEquals(0*GB, app_1.getCurrentReservation().getMemory()); + assertEquals(4*GB, node_0.getUsedResource().getMemory()); + assertEquals(0*GB, a.getMetrics().getReservedMB()); + assertEquals(8*GB, a.getMetrics().getAllocatedMB()); + } + @Test public void testReservationExchange() throws Exception { From d6533cc3219da932844abb0d6ab9b95da4876cf1 Mon Sep 17 00:00:00 2001 From: Aaron Myers Date: Fri, 13 Apr 2012 23:32:17 +0000 Subject: [PATCH 22/57] Move CHANGES.txt message for HDFS-2765 to be under 2.0.0 instead of trunk. 
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1326013 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index e3ec5a464ba..d0de5f39d2f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -91,8 +91,6 @@ Trunk (unreleased changes) HDFS-2373. Commands using WebHDFS and hftp print unnecessary debug info on the console with security enabled. (Arpit Gupta via suresh) - HDFS-2765. TestNameEditsConfigs is incorrectly swallowing IOE. (atm) - HDFS-2776. Missing interface annotation on JournalSet. (Brandon Li via jitendra) @@ -511,6 +509,8 @@ Release 2.0.0 - UNRELEASED HDFS-2799. Trim fs.checkpoint.dir values. (Amith D K via eli) + HDFS-2765. TestNameEditsConfigs is incorrectly swallowing IOE. (atm) + BREAKDOWN OF HDFS-1623 SUBTASKS HDFS-2179. Add fencing framework and mechanisms for NameNode HA. (todd) From f694ecdd9376d8df9a3b6a942c2cce2beede9675 Mon Sep 17 00:00:00 2001 From: Tsz-wo Sze Date: Fri, 13 Apr 2012 23:38:59 +0000 Subject: [PATCH 23/57] Add .classpath, .project and .settings to svn:ignore. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1326015 13f79535-47bb-0310-9956-ffa450edef68 From 841fdc5628fbba341efe0bfc6763fe12e7fca7f4 Mon Sep 17 00:00:00 2001 From: Tsz-wo Sze Date: Fri, 13 Apr 2012 23:41:01 +0000 Subject: [PATCH 24/57] HDFS-3273. Refactor BackupImage and FSEditLog, and rename JournalListener.rollLogs(..) to startLogSegment(..). git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1326016 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + .../journalservice/JournalListener.java | 2 +- .../server/journalservice/JournalService.java | 2 +- .../hdfs/server/namenode/BackupImage.java | 40 +------- .../hdfs/server/namenode/BackupNode.java | 2 +- .../hdfs/server/namenode/FSEditLog.java | 92 ++++++++++++++++--- .../hadoop/hdfs/server/namenode/FSImage.java | 2 +- .../journalservice/TestJournalService.java | 4 +- 8 files changed, 93 insertions(+), 54 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index d0de5f39d2f..3f361a0e7ab 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -62,6 +62,9 @@ Trunk (unreleased changes) HDFS-3178. Add states and state handler for journal synchronization in JournalService. (szetszwo) + HDFS-3273. Refactor BackupImage and FSEditLog, and rename + JournalListener.rollLogs(..) to startLogSegment(..). (szetszwo) + OPTIMIZATIONS HDFS-2834. Add a ByteBuffer-based read API to DFSInputStream. 
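For code implementing the journal callback interface, the only source change required by the rename below is the method name; a minimal sketch (any other JournalListener methods are unchanged and omitted here):

  // Before this patch the callback was:
  //   public void rollLogs(JournalService service, long txid) throws IOException;
  // After this patch it is:
  public void startLogSegment(JournalService service, long txid) throws IOException {
    // invoked when the active NameNode starts a new edit log segment at txid
  }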
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/journalservice/JournalListener.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/journalservice/JournalListener.java index 5d93a4cbaea..2d5ec9e9860 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/journalservice/JournalListener.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/journalservice/JournalListener.java @@ -60,5 +60,5 @@ public void journal(JournalService service, long firstTxnId, int numTxns, * Any IOException thrown from the listener is thrown back in * {@link JournalProtocol#startLogSegment} */ - public void rollLogs(JournalService service, long txid) throws IOException; + public void startLogSegment(JournalService service, long txid) throws IOException; } \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/journalservice/JournalService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/journalservice/JournalService.java index 4e25eea3135..e8d7073670b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/journalservice/JournalService.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/journalservice/JournalService.java @@ -256,7 +256,7 @@ public void startLogSegment(JournalInfo journalInfo, long epoch, long txid) } stateHandler.isStartLogSegmentAllowed(); verify(epoch, journalInfo); - listener.rollLogs(this, txid); + listener.startLogSegment(this, txid); stateHandler.startLogSegment(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java index 85f0245928c..a9aa20d4d2b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java @@ -21,6 +21,7 @@ import java.util.Collection; import java.util.Iterator; import java.util.List; + import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; @@ -183,21 +184,9 @@ synchronized void journal(long firstTxId, int numTxns, byte[] data) throws IOExc } // write to BN's local edit log. - logEditsLocally(firstTxId, numTxns, data); + editLog.journal(firstTxId, numTxns, data); } - /** - * Write the batch of edits to the local copy of the edit logs. - */ - private void logEditsLocally(long firstTxId, int numTxns, byte[] data) { - long expectedTxId = editLog.getLastWrittenTxId() + 1; - Preconditions.checkState(firstTxId == expectedTxId, - "received txid batch starting at %s but expected txn %s", - firstTxId, expectedTxId); - editLog.setNextTxId(firstTxId + numTxns - 1); - editLog.logEdit(data.length, data); - editLog.logSync(); - } /** * Apply the batch of edits to the local namespace. @@ -342,28 +331,9 @@ private synchronized void setState(BNState newState) { * This causes the BN to also start the new edit log in its local * directories. 
*/ - synchronized void namenodeStartedLogSegment(long txid) - throws IOException { - LOG.info("NameNode started a new log segment at txid " + txid); - if (editLog.isSegmentOpen()) { - if (editLog.getLastWrittenTxId() == txid - 1) { - // We are in sync with the NN, so end and finalize the current segment - editLog.endCurrentLogSegment(false); - } else { - // We appear to have missed some transactions -- the NN probably - // lost contact with us temporarily. So, mark the current segment - // as aborted. - LOG.warn("NN started new log segment at txid " + txid + - ", but BN had only written up to txid " + - editLog.getLastWrittenTxId() + - "in the log segment starting at " + - editLog.getCurSegmentTxId() + ". Aborting this " + - "log segment."); - editLog.abortCurrentLogSegment(); - } - } - editLog.setNextTxId(txid); - editLog.startLogSegment(txid, false); + synchronized void namenodeStartedLogSegment(long txid) throws IOException { + editLog.startLogSegment(txid, true); + if (bnState == BNState.DROP_UNTIL_NEXT_ROLL) { setState(BNState.JOURNAL_ONLY); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java index cb826f6e089..fea05e91354 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java @@ -260,7 +260,7 @@ private void verifyJournalRequest(JournalInfo journalInfo) } ///////////////////////////////////////////////////// - // BackupNodeProtocol implementation for backup node. + // JournalProtocol implementation for backup node. ///////////////////////////////////////////////////// @Override public void startLogSegment(JournalInfo journalInfo, long epoch, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java index d96af1ee226..c90998a6924 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java @@ -18,18 +18,20 @@ package org.apache.hadoop.hdfs.server.namenode; import static org.apache.hadoop.hdfs.server.common.Util.now; -import java.net.URI; + import java.io.IOException; +import java.lang.reflect.Constructor; +import java.net.URI; import java.util.ArrayList; import java.util.Collection; import java.util.List; -import java.lang.reflect.Constructor; import org.apache.commons.lang.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Options; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSConfigKeys; @@ -37,14 +39,34 @@ import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; -import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.*; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddOp; +import 
org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.CancelDelegationTokenOp; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.CloseOp; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ConcatDeleteOp; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.DeleteOp; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.GetDelegationTokenOp; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.LogSegmentOp; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.MkdirOp; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.OpInstanceCache; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ReassignLeaseOp; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOldOp; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOp; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenewDelegationTokenOp; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetGenstampOp; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetOwnerOp; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetPermissionsOp; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetQuotaOp; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetReplicationOp; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SymlinkOp; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.TimesOp; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.UpdateBlocksOp; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.UpdateMasterKeyOp; import org.apache.hadoop.hdfs.server.namenode.JournalSet.JournalAndStream; import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics; import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration; import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.security.token.delegation.DelegationKey; -import org.apache.hadoop.conf.Configuration; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; @@ -269,7 +291,7 @@ synchronized void openForWrite() throws IOException { IOUtils.closeStream(s); } - startLogSegment(segmentTxId, true); + startLogSegmentAndWriteHeaderTxn(segmentTxId); assert state == State.IN_SEGMENT : "Bad state: " + state; } @@ -864,18 +886,48 @@ synchronized long rollEditLog() throws IOException { endCurrentLogSegment(true); long nextTxId = getLastWrittenTxId() + 1; - startLogSegment(nextTxId, true); + startLogSegmentAndWriteHeaderTxn(nextTxId); assert curSegmentTxId == nextTxId; return nextTxId; } + + /** + * Remote namenode just has started a log segment, start log segment locally. + */ + public synchronized void startLogSegment(long txid, + boolean abortCurrentLogSegment) throws IOException { + LOG.info("Namenode started a new log segment at txid " + txid); + if (isSegmentOpen()) { + if (getLastWrittenTxId() == txid - 1) { + //In sync with the NN, so end and finalize the current segment` + endCurrentLogSegment(false); + } else { + //Missed some transactions: probably lost contact with NN temporarily. + final String mess = "Cannot start a new log segment at txid " + txid + + " since only up to txid " + getLastWrittenTxId() + + " have been written in the log segment starting at " + + getCurSegmentTxId() + "."; + if (abortCurrentLogSegment) { + //Mark the current segment as aborted. 
+ LOG.warn(mess); + abortCurrentLogSegment(); + } else { + throw new IOException(mess); + } + } + } + setNextTxId(txid); + startLogSegment(txid); + } /** * Start writing to the log segment with the given txid. * Transitions from BETWEEN_LOG_SEGMENTS state to IN_LOG_SEGMENT state. */ - synchronized void startLogSegment(final long segmentTxId, - boolean writeHeaderTxn) throws IOException { + private void startLogSegment(final long segmentTxId) throws IOException { + assert Thread.holdsLock(this); + LOG.info("Starting log segment at " + segmentTxId); Preconditions.checkArgument(segmentTxId > 0, "Bad txid: %s", segmentTxId); @@ -903,12 +955,15 @@ synchronized void startLogSegment(final long segmentTxId, curSegmentTxId = segmentTxId; state = State.IN_SEGMENT; + } - if (writeHeaderTxn) { - logEdit(LogSegmentOp.getInstance(cache.get(), - FSEditLogOpCodes.OP_START_LOG_SEGMENT)); - logSync(); - } + synchronized void startLogSegmentAndWriteHeaderTxn(final long segmentTxId + ) throws IOException { + startLogSegment(segmentTxId); + + logEdit(LogSegmentOp.getInstance(cache.get(), + FSEditLogOpCodes.OP_START_LOG_SEGMENT)); + logSync(); } /** @@ -1057,6 +1112,17 @@ private synchronized BackupJournalManager findBackupJournal( return null; } + /** Write the batch of edits to edit log. */ + public synchronized void journal(long firstTxId, int numTxns, byte[] data) { + final long expectedTxId = getLastWrittenTxId() + 1; + Preconditions.checkState(firstTxId == expectedTxId, + "received txid batch starting at %s but expected txid %s", + firstTxId, expectedTxId); + setNextTxId(firstTxId + numTxns - 1); + logEdit(data.length, data); + logSync(); + } + /** * Write an operation to the edit log. Do not sync to persistent * store yet. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java index 70d184d9142..0279337b5cc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java @@ -823,7 +823,7 @@ public synchronized void saveNamespace(FSNamesystem source) throws IOException { storage.writeAll(); } finally { if (editLogWasOpen) { - editLog.startLogSegment(imageTxId + 1, true); + editLog.startLogSegmentAndWriteHeaderTxn(imageTxId + 1); // Take this opportunity to note the current transaction. // Even if the namespace save was cancelled, this marker // is only used to determine what transaction ID is required diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/journalservice/TestJournalService.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/journalservice/TestJournalService.java index ab3ce9fee37..b2cb080066b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/journalservice/TestJournalService.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/journalservice/TestJournalService.java @@ -43,7 +43,7 @@ public class TestJournalService { private Configuration conf = new HdfsConfiguration(); /** - * Test calls backs {@link JournalListener#rollLogs(JournalService, long)} and + * Test calls backs {@link JournalListener#startLogSegment(JournalService, long)} and * {@link JournalListener#journal(JournalService, long, int, byte[])} are * called. 
*/ @@ -85,7 +85,7 @@ private JournalService startJournalService(JournalListener listener) */ private void verifyRollLogsCallback(JournalService s, JournalListener l) throws IOException { - Mockito.verify(l, Mockito.times(1)).rollLogs(Mockito.eq(s), Mockito.anyLong()); + Mockito.verify(l, Mockito.times(1)).startLogSegment(Mockito.eq(s), Mockito.anyLong()); } /** From 15fe3ae61b9931bdd24fbc6e4d3181132fcfffce Mon Sep 17 00:00:00 2001 From: Aaron Myers Date: Sat, 14 Apr 2012 01:51:03 +0000 Subject: [PATCH 25/57] HDFS-2708. Stats for the # of blocks per DN. Contributed by Aaron T. Myers. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1326039 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 ++ .../org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java | 1 + .../apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java | 2 ++ 3 files changed, 5 insertions(+) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 3f361a0e7ab..e0e82baae7e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -377,6 +377,8 @@ Release 2.0.0 - UNRELEASED HDFS-3259. NameNode#initializeSharedEdits should populate shared edits dir with edit log segments. (atm) + HDFS-2708. Stats for the # of blocks per DN. (atm) + OPTIMIZATIONS HDFS-3024. Improve performance of stringification in addStoredBlock (todd) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 1363c6cda05..beab87447c0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -5051,6 +5051,7 @@ public String getLiveNodes() { innerinfo.put("adminState", node.getAdminState().toString()); innerinfo.put("nonDfsUsedSpace", node.getNonDfsUsed()); innerinfo.put("capacity", node.getCapacity()); + innerinfo.put("numBlocks", node.numBlocks()); info.put(node.getHostName(), innerinfo); } return JSON.toString(info); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java index 97a81d3a774..fcbc4890178 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java @@ -101,6 +101,8 @@ public void testNameNodeMXBeanInfo() throws Exception { assertTrue(((Long)liveNode.get("nonDfsUsedSpace")) > 0); assertTrue(liveNode.containsKey("capacity")); assertTrue(((Long)liveNode.get("capacity")) > 0); + assertTrue(liveNode.containsKey("numBlocks")); + assertTrue(((Long)liveNode.get("numBlocks")) == 0); } Assert.assertEquals(fsn.getLiveNodes(), alivenodeinfo); // get attribute deadnodeinfo From fccbc53357d7387724f4468c9260b1942811b686 Mon Sep 17 00:00:00 2001 From: Robert Joseph Evans Date: Mon, 16 Apr 2012 16:08:44 +0000 Subject: [PATCH 26/57] HADOOP-8283. 
Allow tests to control token service value (Daryn Sharp via bobby) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1326668 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 3 ++ .../security/SecurityUtilTestHelper.java | 30 +++++++++++++++++++ 2 files changed, 33 insertions(+) create mode 100644 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/SecurityUtilTestHelper.java diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 57bc9e0fb9d..88ea20d4f34 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -435,6 +435,9 @@ Release 0.23.3 - UNRELEASED HADOOP-7510. Tokens should use original hostname provided instead of ip (Daryn Sharp via bobby) + HADOOP-8283. Allow tests to control token service value (Daryn Sharp via + bobby) + Release 0.23.2 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/SecurityUtilTestHelper.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/SecurityUtilTestHelper.java new file mode 100644 index 00000000000..7c5f5e1e146 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/SecurityUtilTestHelper.java @@ -0,0 +1,30 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.security; + +/** helper utils for tests */ +public class SecurityUtilTestHelper { + + /** + * Allow tests to change the resolver used for tokens + * @param flag boolean for whether token services use ips or hosts + */ + public static void setTokenServiceUseIp(boolean flag) { + SecurityUtil.setTokenServiceUseIp(flag); + } +} From 24d6a8b29ceac12337ab94d7d4c042094e107560 Mon Sep 17 00:00:00 2001 From: Thomas Graves Date: Mon, 16 Apr 2012 18:02:51 +0000 Subject: [PATCH 27/57] MAPREDUCE-4008. ResourceManager throws MetricsException on start up saying QueueMetrics MBean already exists (Devaraj K via tgraves) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1326707 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 3 +++ .../server/resourcemanager/scheduler/fifo/FifoScheduler.java | 5 +++-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index d0eaa60a734..65dae1e50e9 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -241,6 +241,9 @@ Release 2.0.0 - UNRELEASED MAPREDUCE-4147. YARN should not have a compile-time dependency on HDFS. (tomwhite) + MAPREDUCE-4008. 
ResourceManager throws MetricsException on start up + saying QueueMetrics MBean already exists (Devaraj K via tgraves) + Release 0.23.3 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java index eab2b6b8880..c64d21db1e8 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java @@ -185,8 +185,6 @@ public List getQueueUserAclInfo( @Override public synchronized void setConf(Configuration conf) { this.conf = conf; - metrics = QueueMetrics.forQueue(DEFAULT_QUEUE_NAME, null, false, conf); - activeUsersManager = new ActiveUsersManager(metrics); } @Override @@ -223,6 +221,9 @@ public synchronized void reinitialize(Configuration conf, Resources.createResource(conf.getInt(MINIMUM_ALLOCATION, MINIMUM_MEMORY)); this.maximumAllocation = Resources.createResource(conf.getInt(MAXIMUM_ALLOCATION, MAXIMUM_MEMORY)); + this.metrics = QueueMetrics.forQueue(DEFAULT_QUEUE_NAME, null, false, + conf); + this.activeUsersManager = new ActiveUsersManager(metrics); this.initialized = true; } } From 551468385cb4522ef99dab6595a8dfc8b5a617ec Mon Sep 17 00:00:00 2001 From: Aaron Myers Date: Mon, 16 Apr 2012 18:28:36 +0000 Subject: [PATCH 28/57] HADOOP-8280. Move VersionUtil/TestVersionUtil and GenericTestUtils from HDFS into Common. Contributed by Ahmed Radwan. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1326727 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++ .../src/main/java/org/apache/hadoop}/util/VersionUtil.java | 2 +- .../src/test/java/org/apache/hadoop/test/GenericTestUtils.java | 0 .../src/test/java/org/apache/hadoop}/util/TestVersionUtil.java | 3 ++- .../org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java | 2 +- .../apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java | 2 +- 6 files changed, 8 insertions(+), 4 deletions(-) rename {hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs => hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop}/util/VersionUtil.java (99%) rename {hadoop-hdfs-project/hadoop-hdfs => hadoop-common-project/hadoop-common}/src/test/java/org/apache/hadoop/test/GenericTestUtils.java (100%) rename {hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs => hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop}/util/TestVersionUtil.java (96%) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 88ea20d4f34..927e424572b 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -261,6 +261,9 @@ Release 2.0.0 - UNRELEASED HADOOP-8086. KerberosName silently sets defaultRealm to "" if the Kerberos config is not found, it should log a WARN (tucu) + HADOOP-8280. Move VersionUtil/TestVersionUtil and GenericTestUtils from + HDFS into Common. 
(Ahmed Radwan via atm) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/VersionUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionUtil.java similarity index 99% rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/VersionUtil.java rename to hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionUtil.java index 59aa5e128ed..dd68c4d74b1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/VersionUtil.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionUtil.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hdfs.util; +package org.apache.hadoop.util; import java.util.regex.Matcher; import java.util.regex.Pattern; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/test/GenericTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java similarity index 100% rename from hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/test/GenericTestUtils.java rename to hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestVersionUtil.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestVersionUtil.java similarity index 96% rename from hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestVersionUtil.java rename to hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestVersionUtil.java index c2537fd515a..b5b1ebf3de8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestVersionUtil.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestVersionUtil.java @@ -15,11 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.hdfs.util; +package org.apache.hadoop.util; import static org.junit.Assert.*; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.VersionUtil; import org.junit.Test; public class TestVersionUtil { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java index f5d09b1fef2..f018f53e731 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java @@ -48,11 +48,11 @@ import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport; import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks; import org.apache.hadoop.hdfs.server.protocol.StorageReport; -import org.apache.hadoop.hdfs.util.VersionUtil; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.VersionInfo; +import org.apache.hadoop.util.VersionUtil; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.Maps; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index 4c891d3d4d9..af3dd16d768 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -108,7 +108,6 @@ import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks; import org.apache.hadoop.hdfs.server.protocol.StorageReport; import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand; -import org.apache.hadoop.hdfs.util.VersionUtil; import org.apache.hadoop.io.EnumSetWritable; import org.apache.hadoop.io.Text; import org.apache.hadoop.ipc.ProtobufRpcEngine; @@ -124,6 +123,7 @@ import org.apache.hadoop.security.token.SecretManager.InvalidToken; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.VersionInfo; +import org.apache.hadoop.util.VersionUtil; import com.google.protobuf.BlockingService; From 258da66cc7c74e48fe4224ac8552bf8ce8c68e2c Mon Sep 17 00:00:00 2001 From: Todd Lipcon Date: Mon, 16 Apr 2012 19:08:29 +0000 Subject: [PATCH 29/57] HDFS-3268. FileContext API mishandles token service and incompatible with HA. Contributed by Daryn Sharp. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1326747 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ .../src/main/java/org/apache/hadoop/fs/Hdfs.java | 7 +++++-- .../java/org/apache/hadoop/hdfs/DFSClient.java | 10 ++++++++++ .../hadoop/hdfs/DistributedFileSystem.java | 7 +------ .../namenode/ha/TestDelegationTokensWithHA.java | 16 ++++++++++++++++ 5 files changed, 35 insertions(+), 8 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index e0e82baae7e..b99fd0438c2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -516,6 +516,9 @@ Release 2.0.0 - UNRELEASED HDFS-2765. TestNameEditsConfigs is incorrectly swallowing IOE. 
(atm) + HDFS-3268. FileContext API mishandles token service and incompatible with + HA (Daryn Sharp via todd) + BREAKDOWN OF HDFS-1623 SUBTASKS HDFS-2179. Add fencing framework and mechanisms for NameNode HA. (todd) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java index 82d0c3663cc..a3217aa7e93 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java @@ -20,7 +20,6 @@ import java.io.FileNotFoundException; import java.io.IOException; -import java.net.InetSocketAddress; import java.net.URI; import java.net.URISyntaxException; import java.util.ArrayList; @@ -391,11 +390,15 @@ public Path getLinkTarget(Path p) throws IOException { return new Path(dfs.getLinkTarget(getUriPath(p))); } + @Override + public String getCanonicalServiceName() { + return dfs.getCanonicalServiceName(); + } + @Override //AbstractFileSystem public List> getDelegationTokens(String renewer) throws IOException { Token result = dfs .getDelegationToken(renewer == null ? null : new Text(renewer)); - result.setService(new Text(this.getCanonicalServiceName())); List> tokenList = new ArrayList>(); tokenList.add(result); return tokenList; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java index 70285262bfe..5f9f6f7994a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java @@ -637,6 +637,16 @@ public FsServerDefaults getServerDefaults() throws IOException { return serverDefaults; } + /** + * Get a canonical token service name for this client's tokens. Null should + * be returned if the client is not using tokens. + * @return the token service for the client + */ + @InterfaceAudience.LimitedPrivate( { "HDFS" }) + public String getCanonicalServiceName() { + return (dtService != null) ? 
dtService.toString() : null; + } + /** * @see ClientProtocol#getDelegationToken(Text) */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java index d335aa8b71b..988a6e7ee3f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java @@ -848,12 +848,7 @@ public void setBalancerBandwidth(long bandwidth) throws IOException { */ @Override public String getCanonicalServiceName() { - URI uri = getUri(); - if (HAUtil.isLogicalUri(getConf(), uri)) { - return HAUtil.buildTokenServiceForLogicalUri(uri).toString(); - } else { - return super.getCanonicalServiceName(); - } + return dfs.getCanonicalServiceName(); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java index f7755814c4d..5c380915d04 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java @@ -30,6 +30,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.AbstractFileSystem; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DistributedFileSystem; @@ -223,6 +224,21 @@ public void testDFSGetCanonicalServiceName() throws Exception { token.cancel(dfs.getConf()); } + @Test + public void testHdfsGetCanonicalServiceName() throws Exception { + Configuration conf = dfs.getConf(); + URI haUri = HATestUtil.getLogicalUri(cluster); + AbstractFileSystem afs = AbstractFileSystem.createFileSystem(haUri, conf); + String haService = HAUtil.buildTokenServiceForLogicalUri(haUri).toString(); + assertEquals(haService, afs.getCanonicalServiceName()); + Token token = afs.getDelegationTokens( + UserGroupInformation.getCurrentUser().getShortUserName()).get(0); + assertEquals(haService, token.getService().toString()); + // make sure the logical uri is handled correctly + token.renew(conf); + token.cancel(conf); + } + enum TokenTestAction { RENEW, CANCEL; } From f1667dee146fe5e3edbb50e409ae96fa0777473a Mon Sep 17 00:00:00 2001 From: Tsz-wo Sze Date: Mon, 16 Apr 2012 19:31:34 +0000 Subject: [PATCH 30/57] HDFS-3279. Move the FSEditLog constructor with @VisibleForTesting to TestEditLog. Contributed by Arpit Gupta git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1326762 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 ++ .../hdfs/server/namenode/FSEditLog.java | 15 ---------- .../hdfs/server/namenode/TestEditLog.java | 30 ++++++++++++++----- 3 files changed, 25 insertions(+), 23 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index b99fd0438c2..67152967d36 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -379,6 +379,9 @@ Release 2.0.0 - UNRELEASED HDFS-2708. Stats for the # of blocks per DN. (atm) + HDFS-3279. 
Move the FSEditLog constructor with @VisibleForTesting to + TestEditLog. (Arpit Gupta via szetszwo) + OPTIMIZATIONS HDFS-3024. Improve performance of stringification in addStoredBlock (todd) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java index c90998a6924..c144906c5c8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java @@ -26,7 +26,6 @@ import java.util.Collection; import java.util.List; -import org.apache.commons.lang.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; @@ -178,20 +177,6 @@ protected synchronized TransactionId initialValue() { } }; - /** - * Construct FSEditLog with default configuration, taking editDirs from NNStorage - * - * @param storage Storage object used by namenode - */ - @VisibleForTesting - FSEditLog(NNStorage storage) throws IOException { - Configuration conf = new Configuration(); - // Make sure the edits dirs are set in the provided configuration object. - conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, - StringUtils.join(storage.getEditsDirectories(), ",")); - init(conf, storage, FSNamesystem.getNamespaceEditsDirs(conf)); - } - /** * Constructor for FSEditLog. Underlying journals are constructed, but * no streams are opened until open() is called. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java index 05df7fe9835..7f4872198bc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java @@ -141,6 +141,20 @@ public void run() { } } } + + /** + * Construct FSEditLog with default configuration, taking editDirs from NNStorage + * + * @param storage Storage object used by namenode + */ + private static FSEditLog getFSEditLog(NNStorage storage) throws IOException { + Configuration conf = new Configuration(); + // Make sure the edits dirs are set in the provided configuration object. + conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, + StringUtils.join(",", storage.getEditsDirectories())); + FSEditLog log = new FSEditLog(conf, storage, FSNamesystem.getNamespaceEditsDirs(conf)); + return log; + } /** * Test case for an empty edit log from a prior version of Hadoop. 
@@ -863,7 +877,7 @@ public void testEditLogManifestMocks() throws IOException { storage = mockStorageWithEdits( "[1,100]|[101,200]|[201,]", "[1,100]|[101,200]|[201,]"); - log = new FSEditLog(storage); + log = getFSEditLog(storage); log.initJournalsForWrite(); assertEquals("[[1,100], [101,200]]", log.getEditLogManifest(1).toString()); @@ -875,7 +889,7 @@ public void testEditLogManifestMocks() throws IOException { storage = mockStorageWithEdits( "[1,100]|[101,200]", "[1,100]|[201,300]|[301,400]"); // nothing starting at 101 - log = new FSEditLog(storage); + log = getFSEditLog(storage); log.initJournalsForWrite(); assertEquals("[[1,100], [101,200], [201,300], [301,400]]", log.getEditLogManifest(1).toString()); @@ -885,7 +899,7 @@ public void testEditLogManifestMocks() throws IOException { storage = mockStorageWithEdits( "[1,100]|[301,400]", // gap from 101 to 300 "[301,400]|[401,500]"); - log = new FSEditLog(storage); + log = getFSEditLog(storage); log.initJournalsForWrite(); assertEquals("[[301,400], [401,500]]", log.getEditLogManifest(1).toString()); @@ -895,7 +909,7 @@ public void testEditLogManifestMocks() throws IOException { storage = mockStorageWithEdits( "[1,100]|[101,150]", // short log at 101 "[1,50]|[101,200]"); // short log at 1 - log = new FSEditLog(storage); + log = getFSEditLog(storage); log.initJournalsForWrite(); assertEquals("[[1,100], [101,200]]", log.getEditLogManifest(1).toString()); @@ -908,7 +922,7 @@ public void testEditLogManifestMocks() throws IOException { storage = mockStorageWithEdits( "[1,100]|[101,]", "[1,100]|[101,200]"); - log = new FSEditLog(storage); + log = getFSEditLog(storage); log.initJournalsForWrite(); assertEquals("[[1,100], [101,200]]", log.getEditLogManifest(1).toString()); @@ -998,7 +1012,7 @@ public static NNStorage setupEdits(List editUris, int numrolls, Collections.emptyList(), editUris); storage.format(new NamespaceInfo()); - FSEditLog editlog = new FSEditLog(storage); + FSEditLog editlog = getFSEditLog(storage); // open the edit log and add two transactions // logGenerationStamp is used, simply because it doesn't // require complex arguments. @@ -1080,7 +1094,7 @@ public void testAlternatingJournalFailure() throws IOException { new AbortSpec(9, 0), new AbortSpec(10, 1)); long totaltxnread = 0; - FSEditLog editlog = new FSEditLog(storage); + FSEditLog editlog = getFSEditLog(storage); editlog.initJournalsForWrite(); long startTxId = 1; Iterable editStreams = editlog.selectInputStreams(startTxId, @@ -1130,7 +1144,7 @@ public boolean accept(File dir, String name) { assertEquals(1, files.length); assertTrue(files[0].delete()); - FSEditLog editlog = new FSEditLog(storage); + FSEditLog editlog = getFSEditLog(storage); editlog.initJournalsForWrite(); long startTxId = 1; try { From 159646f2a4907f4662e09d1a13f5e5210356f9f3 Mon Sep 17 00:00:00 2001 From: Thomas Graves Date: Mon, 16 Apr 2012 20:46:30 +0000 Subject: [PATCH 31/57] MAPREDUCE-4156. 
ant build fails compiling JobInProgress (tgraves) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1326795 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 2 ++ .../src/java/org/apache/hadoop/mapred/JobInProgress.java | 2 +- .../mapred/org/apache/hadoop/mapred/TestJobHistoryParsing.java | 2 +- 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 65dae1e50e9..353e3db4600 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -341,6 +341,8 @@ Release 0.23.3 - UNRELEASED MAPREDUCE-4144. Fix a NPE in the ResourceManager when handling node updates. (Jason Lowe via sseth) + MAPREDUCE-4156. ant build fails compiling JobInProgress (tgraves) + Release 0.23.2 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-mapreduce-project/src/java/org/apache/hadoop/mapred/JobInProgress.java b/hadoop-mapreduce-project/src/java/org/apache/hadoop/mapred/JobInProgress.java index 9f92707077b..59256b40d76 100644 --- a/hadoop-mapreduce-project/src/java/org/apache/hadoop/mapred/JobInProgress.java +++ b/hadoop-mapreduce-project/src/java/org/apache/hadoop/mapred/JobInProgress.java @@ -2731,7 +2731,7 @@ public synchronized boolean completedTask(TaskInProgress tip, } TaskFinishedEvent tfe = new TaskFinishedEvent(tip.getTIPId(), - tip.getExecFinishTime(), taskType, + null, tip.getExecFinishTime(), taskType, TaskStatus.State.SUCCEEDED.toString(), new org.apache.hadoop.mapreduce.Counters(status.getCounters())); diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJobHistoryParsing.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJobHistoryParsing.java index 7ef641ff3a9..35abd48d124 100644 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJobHistoryParsing.java +++ b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJobHistoryParsing.java @@ -97,7 +97,7 @@ public void testHistoryParsing() throws IOException { // Try to write one more event now, should not fail TaskID tid = TaskID.forName("task_200809171136_0001_m_000002"); TaskFinishedEvent tfe = - new TaskFinishedEvent(tid, 0, TaskType.MAP, "", null); + new TaskFinishedEvent(tid, null, 0, TaskType.MAP, "", null); boolean caughtException = false; try { From 2d370ef2bac5f3b1f29e1d0a4685c7ce556e60c9 Mon Sep 17 00:00:00 2001 From: Todd Lipcon Date: Mon, 16 Apr 2012 21:14:23 +0000 Subject: [PATCH 32/57] HADOOP-8117. Upgrade test build to Surefire 2.12. Contributed by Todd Lipcon. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1326802 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 2 ++ hadoop-project/pom.xml | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 927e424572b..c4096b31baf 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -61,6 +61,8 @@ Trunk (unreleased changes) HADOOP-8147. test-patch should run tests with -fn to avoid masking test failures (Robert Evans via tgraves) + HADOOP-8117. Upgrade test build to Surefire 2.12 (todd) + BUG FIXES HADOOP-8177. MBeans shouldn't try to register when it fails to create MBeanName. 
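In the MAPREDUCE-4156 change above, the ant-built JobInProgress stopped compiling because the TaskFinishedEvent constructor had gained an argument; the fix updates both old-API call sites to the new arity, passing null for the added second parameter while keeping the remaining arguments in their old order. A hedged sketch of that call shape is below; it is not part of the patch, the placeholder values are copied from the TestJobHistoryParsing hunk, and the package locations of the imported classes are as assumed here.

// Sketch only: the TaskFinishedEvent call shape used by the MAPREDUCE-4156
// call sites, with null supplied for the newly added second argument.
import org.apache.hadoop.mapreduce.TaskID;
import org.apache.hadoop.mapreduce.TaskType;
import org.apache.hadoop.mapreduce.jobhistory.TaskFinishedEvent;

class TaskFinishedEventCallShape {
  static TaskFinishedEvent example() {
    TaskID tid = TaskID.forName("task_200809171136_0001_m_000002");
    // Placeholder values: finish time 0, empty status string, no counters.
    return new TaskFinishedEvent(tid, null, 0, TaskType.MAP, "", null);
  }
}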
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index 3e1ac2a98e9..1281908e752 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -668,7 +668,7 @@ org.apache.maven.plugins maven-surefire-plugin - 2.10 + 2.12 org.apache.maven.plugins From 574f99bd6b596c39bd1accc7a134de3f5ad96bd2 Mon Sep 17 00:00:00 2001 From: Todd Lipcon Date: Mon, 16 Apr 2012 21:51:52 +0000 Subject: [PATCH 33/57] HDFS-3284. bootstrapStandby fails in secure cluster. Contributed by Todd Lipcon. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1326813 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 ++ .../server/namenode/ha/BootstrapStandby.java | 7 ++-- .../apache/hadoop/hdfs/tools/DFSHAAdmin.java | 33 ++++++++++++------- 3 files changed, 28 insertions(+), 14 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 67152967d36..1eb6d2c80d4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -522,6 +522,8 @@ Release 2.0.0 - UNRELEASED HDFS-3268. FileContext API mishandles token service and incompatible with HA (Daryn Sharp via todd) + HDFS-3284. bootstrapStandby fails in secure cluster (todd) + BREAKDOWN OF HDFS-1623 SUBTASKS HDFS-2179. Add fencing framework and mechanisms for NameNode HA. (todd) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java index 481dde3cd2d..1777ca6f6a2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java @@ -51,6 +51,7 @@ import org.apache.hadoop.hdfs.server.namenode.TransferFsImage; import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; +import org.apache.hadoop.hdfs.tools.DFSHAAdmin; import org.apache.hadoop.hdfs.tools.NNHAServiceTarget; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.MD5Hash; @@ -144,8 +145,8 @@ private NamenodeProtocol createNNProtocolProxy() private HAServiceProtocol createHAProtocolProxy() throws IOException { - return new NNHAServiceTarget(new HdfsConfiguration(conf), - nsId, otherNNId).getProxy(conf, 15000); + return new NNHAServiceTarget(new HdfsConfiguration(conf), nsId, otherNNId) + .getProxy(conf, 15000); } private int doRun() throws IOException { @@ -334,7 +335,7 @@ private void parseConfAndFindOtherNN() throws IOException { @Override public void setConf(Configuration conf) { - this.conf = conf; + this.conf = DFSHAAdmin.addSecurityConfiguration(conf); } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java index 4db5a86b93d..d4397276ea1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java @@ -46,21 +46,32 @@ protected void setErrOut(PrintStream errOut) { @Override public void setConf(Configuration conf) { if (conf != null) { - // Make a copy so we don't mutate it. 
Also use an HdfsConfiguration to - // force loading of hdfs-site.xml. - conf = new HdfsConfiguration(conf); - String nameNodePrincipal = conf.get( - DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY, ""); - if (LOG.isDebugEnabled()) { - LOG.debug("Using NN principal: " + nameNodePrincipal); - } - - conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY, - nameNodePrincipal); + conf = addSecurityConfiguration(conf); } super.setConf(conf); } + /** + * Add the requisite security principal settings to the given Configuration, + * returning a copy. + * @param conf the original config + * @return a copy with the security settings added + */ + public static Configuration addSecurityConfiguration(Configuration conf) { + // Make a copy so we don't mutate it. Also use an HdfsConfiguration to + // force loading of hdfs-site.xml. + conf = new HdfsConfiguration(conf); + String nameNodePrincipal = conf.get( + DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY, ""); + if (LOG.isDebugEnabled()) { + LOG.debug("Using NN principal: " + nameNodePrincipal); + } + + conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY, + nameNodePrincipal); + return conf; + } + /** * Try to map the given namenode ID to its service address. */ From 1d5861a8c45dae1f5c5ab80c037ae5795d5e651d Mon Sep 17 00:00:00 2001 From: Eli Collins Date: Tue, 17 Apr 2012 00:50:40 +0000 Subject: [PATCH 34/57] HDFS-3165. HDFS Balancer scripts are refering to wrong path of hadoop-daemon.sh. Contributed by Amith D K git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1326848 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-balancer.sh | 2 +- hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-balancer.sh | 2 +- 3 files changed, 5 insertions(+), 2 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 1eb6d2c80d4..60ee1cc64d4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -524,6 +524,9 @@ Release 2.0.0 - UNRELEASED HDFS-3284. bootstrapStandby fails in secure cluster (todd) + HDFS-3165. HDFS Balancer scripts are refering to wrong path of + hadoop-daemon.sh (Amith D K via eli) + BREAKDOWN OF HDFS-1623 SUBTASKS HDFS-2179. Add fencing framework and mechanisms for NameNode HA. (todd) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-balancer.sh b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-balancer.sh index 24c622764e2..2c14a59f8a0 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-balancer.sh +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-balancer.sh @@ -24,4 +24,4 @@ HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} # Start balancer daemon. -"$HADOOP_PREFIX"/bin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script "$bin"/hdfs start balancer $@ +"$HADOOP_PREFIX"/sbin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script "$bin"/hdfs start balancer $@ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-balancer.sh b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-balancer.sh index 5026c8c7cca..df824560cc3 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-balancer.sh +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-balancer.sh @@ -25,4 +25,4 @@ HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} # Stop balancer daemon. 
# Run this on the machine where the balancer is running -"$HADOOP_PREFIX"/bin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script "$bin"/hdfs stop balancer +"$HADOOP_PREFIX"/sbin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script "$bin"/hdfs stop balancer From 207a2ada0c26e581b7cc769618a20279b33004ac Mon Sep 17 00:00:00 2001 From: Eli Collins Date: Tue, 17 Apr 2012 02:44:06 +0000 Subject: [PATCH 35/57] HADOOP-8282. start-all.sh refers incorrectly start-dfs.sh existence for starting start-yarn.sh. Contributed by Devaraj K git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1326890 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++ hadoop-common-project/hadoop-common/src/main/bin/start-all.sh | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index c4096b31baf..79ccf477ced 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -358,6 +358,9 @@ Release 2.0.0 - UNRELEASED properly if no local node and first node is local rack node. (Junping Du) + HADOOP-8282. start-all.sh refers incorrectly start-dfs.sh + existence for starting start-yarn.sh. (Devaraj K via eli) + BREAKDOWN OF HADOOP-7454 SUBTASKS HADOOP-7455. HA: Introduce HA Service Protocol Interface. (suresh) diff --git a/hadoop-common-project/hadoop-common/src/main/bin/start-all.sh b/hadoop-common-project/hadoop-common/src/main/bin/start-all.sh index 9d579b29afa..f4047db4e22 100755 --- a/hadoop-common-project/hadoop-common/src/main/bin/start-all.sh +++ b/hadoop-common-project/hadoop-common/src/main/bin/start-all.sh @@ -33,6 +33,6 @@ if [ -f "${HADOOP_HDFS_HOME}"/sbin/start-dfs.sh ]; then fi # start yarn daemons if yarn is present -if [ -f "${YARN_HOME}"/sbin/start-dfs.sh ]; then +if [ -f "${YARN_HOME}"/sbin/start-yarn.sh ]; then "${YARN_HOME}"/sbin/start-yarn.sh --config $HADOOP_CONF_DIR fi From a98ba41092b8f40baa75105831f2abfe6ec62faf Mon Sep 17 00:00:00 2001 From: Robert Joseph Evans Date: Tue, 17 Apr 2012 14:21:11 +0000 Subject: [PATCH 36/57] HADOOP-8286. Simplify getting a socket address from conf (Daryn Sharp via bobby) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1327108 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 3 ++ .../org/apache/hadoop/conf/Configuration.java | 16 +++++++++ .../apache/hadoop/conf/TestConfiguration.java | 34 +++++++++++++++++++ 3 files changed, 53 insertions(+) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 79ccf477ced..9be76d829d0 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -446,6 +446,9 @@ Release 0.23.3 - UNRELEASED HADOOP-8283. Allow tests to control token service value (Daryn Sharp via bobby) + HADOOP-8286. 
Simplify getting a socket address from conf (Daryn Sharp via + bobby) + Release 0.23.2 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java index bfab319a3c0..bf0960dc212 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java @@ -30,6 +30,7 @@ import java.io.OutputStreamWriter; import java.io.Reader; import java.io.Writer; +import java.net.InetSocketAddress; import java.net.URL; import java.util.ArrayList; import java.util.Collection; @@ -68,6 +69,7 @@ import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.io.Writable; import org.apache.hadoop.io.WritableUtils; +import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.StringUtils; import org.codehaus.jackson.JsonFactory; @@ -1162,6 +1164,20 @@ public void setStrings(String name, String... values) { set(name, StringUtils.arrayToString(values)); } + /** + * Get the socket address for name property as a + * InetSocketAddress. + * @param name property name. + * @param defaultAddress the default value + * @param defaultPort the default port + * @return InetSocketAddress + */ + public InetSocketAddress getSocketAddr( + String name, String defaultAddress, int defaultPort) { + final String address = get(name, defaultAddress); + return NetUtils.createSocketAddr(address, defaultPort, name); + } + /** * Load a class by name. * diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java index bbe82914b21..4f1ec878bbe 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java @@ -23,6 +23,7 @@ import java.io.FileWriter; import java.io.IOException; import java.io.StringWriter; +import java.net.InetSocketAddress; import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; @@ -35,6 +36,7 @@ import org.apache.commons.lang.StringUtils; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.net.NetUtils; import org.codehaus.jackson.map.ObjectMapper; public class TestConfiguration extends TestCase { @@ -604,6 +606,38 @@ public void testPattern() throws IOException { conf.getPattern("test.pattern3", defaultPattern).pattern()); } + public void testSocketAddress() throws IOException { + Configuration conf = new Configuration(); + final String defaultAddr = "host:1"; + final int defaultPort = 2; + InetSocketAddress addr = null; + + addr = conf.getSocketAddr("myAddress", defaultAddr, defaultPort); + assertEquals(defaultAddr, NetUtils.getHostPortString(addr)); + + conf.set("myAddress", "host2"); + addr = conf.getSocketAddr("myAddress", defaultAddr, defaultPort); + assertEquals("host2:"+defaultPort, NetUtils.getHostPortString(addr)); + + conf.set("myAddress", "host2:3"); + addr = conf.getSocketAddr("myAddress", defaultAddr, defaultPort); + assertEquals("host2:3", NetUtils.getHostPortString(addr)); + + boolean threwException = false; + conf.set("myAddress", "bad:-port"); + try { + addr = conf.getSocketAddr("myAddress", defaultAddr, 
defaultPort); + } catch (IllegalArgumentException iae) { + threwException = true; + assertEquals("Does not contain a valid host:port authority: " + + "bad:-port (configuration property 'myAddress')", + iae.getMessage()); + + } finally { + assertTrue(threwException); + } + } + public void testReload() throws IOException { out=new BufferedWriter(new FileWriter(CONFIG)); startConfig(); From e8eed2f62d30e0bf2f915ee3ad6b9c9f6d2d97cb Mon Sep 17 00:00:00 2001 From: Robert Joseph Evans Date: Tue, 17 Apr 2012 15:04:20 +0000 Subject: [PATCH 37/57] HADOOP-8227. Allow RPC to limit ephemeral port range. (bobby) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1327127 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 2 + .../org/apache/hadoop/conf/Configuration.java | 61 ++++++++- .../apache/hadoop/ipc/ProtobufRpcEngine.java | 13 +- .../main/java/org/apache/hadoop/ipc/RPC.java | 30 +++-- .../java/org/apache/hadoop/ipc/RpcEngine.java | 22 +++- .../java/org/apache/hadoop/ipc/Server.java | 46 ++++++- .../apache/hadoop/ipc/WritableRpcEngine.java | 19 +-- .../apache/hadoop/conf/TestConfiguration.java | 33 +++++ .../java/org/apache/hadoop/ipc/TestRPC.java | 3 +- .../org/apache/hadoop/ipc/TestServer.java | 118 ++++++++++++++++++ 10 files changed, 320 insertions(+), 27 deletions(-) create mode 100644 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestServer.java diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 9be76d829d0..35acb90ca3b 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -449,6 +449,8 @@ Release 0.23.3 - UNRELEASED HADOOP-8286. Simplify getting a socket address from conf (Daryn Sharp via bobby) + HADOOP-8227. Allow RPC to limit ephemeral port range. (bobby) + Release 0.23.2 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java index bf0960dc212..aa738f5ddec 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java @@ -964,11 +964,57 @@ public void setPattern(String name, Pattern pattern) { * bound may be omitted meaning all values up to or over. So the string * above means 2, 3, 5, and 7, 8, 9, ... 
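Illustrative sketch only (not part of the patch series): how the Configuration.getSocketAddr helper added by HADOOP-8286 might be used by a caller. The property name "my.service.address", the example class name and the default host/port are invented for the example; the helper and NetUtils.getHostPortString are taken from the patch above.

    import java.net.InetSocketAddress;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.net.NetUtils;

    public class GetSocketAddrExample {            // hypothetical example class
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Unset property: falls back to the default address string, then the default port.
        InetSocketAddress addr =
            conf.getSocketAddr("my.service.address", "0.0.0.0:8020", 8020);
        System.out.println(NetUtils.getHostPortString(addr));
        // A bare hostname in the property picks up the default port (e.g. "host2:8020").
        conf.set("my.service.address", "host2");
        addr = conf.getSocketAddr("my.service.address", "0.0.0.0:8020", 8020);
        System.out.println(NetUtils.getHostPortString(addr));
      }
    }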
*/ - public static class IntegerRanges { + public static class IntegerRanges implements Iterable{ private static class Range { int start; int end; } + + private static class RangeNumberIterator implements Iterator { + Iterator internal; + int at; + int end; + + public RangeNumberIterator(List ranges) { + if (ranges != null) { + internal = ranges.iterator(); + } + at = -1; + end = -2; + } + + @Override + public boolean hasNext() { + if (at <= end) { + return true; + } else if (internal != null){ + return internal.hasNext(); + } + return false; + } + + @Override + public Integer next() { + if (at <= end) { + at++; + return at - 1; + } else if (internal != null){ + Range found = internal.next(); + if (found != null) { + at = found.start; + end = found.end; + at++; + return at - 1; + } + } + return null; + } + + @Override + public void remove() { + throw new UnsupportedOperationException(); + } + }; List ranges = new ArrayList(); @@ -1027,6 +1073,13 @@ public boolean isIncluded(int value) { return false; } + /** + * @return true if there are no values in this range, else false. + */ + public boolean isEmpty() { + return ranges == null || ranges.isEmpty(); + } + @Override public String toString() { StringBuilder result = new StringBuilder(); @@ -1043,6 +1096,12 @@ public String toString() { } return result.toString(); } + + @Override + public Iterator iterator() { + return new RangeNumberIterator(ranges); + } + } /** diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java index 556f7101a4e..befc8f70e03 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java @@ -319,10 +319,12 @@ static Client getClient(Configuration conf) { public RPC.Server getServer(Class protocol, Object protocolImpl, String bindAddress, int port, int numHandlers, int numReaders, int queueSizePerHandler, boolean verbose, Configuration conf, - SecretManager secretManager) + SecretManager secretManager, + String portRangeConfig) throws IOException { return new Server(protocol, protocolImpl, conf, bindAddress, port, - numHandlers, numReaders, queueSizePerHandler, verbose, secretManager); + numHandlers, numReaders, queueSizePerHandler, verbose, secretManager, + portRangeConfig); } public static class Server extends RPC.Server { @@ -336,15 +338,18 @@ public static class Server extends RPC.Server { * @param port the port to listen for connections on * @param numHandlers the number of method handler threads to run * @param verbose whether each call should be logged + * @param portRangeConfig A config parameter that can be used to restrict + * the range of ports used when port is 0 (an ephemeral port) */ public Server(Class protocolClass, Object protocolImpl, Configuration conf, String bindAddress, int port, int numHandlers, int numReaders, int queueSizePerHandler, boolean verbose, - SecretManager secretManager) + SecretManager secretManager, + String portRangeConfig) throws IOException { super(bindAddress, port, null, numHandlers, numReaders, queueSizePerHandler, conf, classNameBase(protocolImpl - .getClass().getName()), secretManager); + .getClass().getName()), secretManager, portRangeConfig); this.verbose = verbose; registerProtocolAndImpl(RpcKind.RPC_PROTOCOL_BUFFER, protocolClass, protocolImpl); diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java index 0c848bb40d9..d0f268ec5d2 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java @@ -654,7 +654,8 @@ public static Server getServer(final Object instance, final String bindAddress, final boolean verbose, Configuration conf) throws IOException { return getServer(instance.getClass(), // use impl class for protocol - instance, bindAddress, port, numHandlers, false, conf, null); + instance, bindAddress, port, numHandlers, false, conf, null, + null); } /** Construct a server for a protocol implementation instance. */ @@ -662,7 +663,8 @@ public static Server getServer(Class protocol, Object instance, String bindAddress, int port, Configuration conf) throws IOException { - return getServer(protocol, instance, bindAddress, port, 1, false, conf, null); + return getServer(protocol, instance, bindAddress, port, 1, false, conf, null, + null); } /** Construct a server for a protocol implementation instance. @@ -676,7 +678,7 @@ public static Server getServer(Class protocol, throws IOException { return getServer(protocol, instance, bindAddress, port, numHandlers, verbose, - conf, null); + conf, null, null); } /** Construct a server for a protocol implementation instance. */ @@ -686,10 +688,20 @@ public static Server getServer(Class protocol, boolean verbose, Configuration conf, SecretManager secretManager) throws IOException { - + return getServer(protocol, instance, bindAddress, port, numHandlers, verbose, + conf, secretManager, null); + } + + public static Server getServer(Class protocol, + Object instance, String bindAddress, int port, + int numHandlers, + boolean verbose, Configuration conf, + SecretManager secretManager, + String portRangeConfig) + throws IOException { return getProtocolEngine(protocol, conf) .getServer(protocol, instance, bindAddress, port, numHandlers, -1, -1, - verbose, conf, secretManager); + verbose, conf, secretManager, portRangeConfig); } /** Construct a server for a protocol implementation instance. */ @@ -704,7 +716,8 @@ Server getServer(Class protocol, return getProtocolEngine(protocol, conf) .getServer(protocol, instance, bindAddress, port, numHandlers, - numReaders, queueSizePerHandler, verbose, conf, secretManager); + numReaders, queueSizePerHandler, verbose, conf, secretManager, + null); } /** An RPC Server. 
*/ @@ -855,9 +868,10 @@ protected Server(String bindAddress, int port, Class paramClass, int handlerCount, int numReaders, int queueSizePerHandler, Configuration conf, String serverName, - SecretManager secretManager) throws IOException { + SecretManager secretManager, + String portRangeConfig) throws IOException { super(bindAddress, port, paramClass, handlerCount, numReaders, queueSizePerHandler, - conf, serverName, secretManager); + conf, serverName, secretManager, portRangeConfig); initProtocolMetaInfo(conf); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcEngine.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcEngine.java index 0fc7d60bd32..09980da452c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcEngine.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcEngine.java @@ -47,12 +47,30 @@ Object[] call(Method method, Object[][] params, InetSocketAddress[] addrs, UserGroupInformation ticket, Configuration conf) throws IOException, InterruptedException; - /** Construct a server for a protocol implementation instance. */ + /** + * Construct a server for a protocol implementation instance. + * + * @param protocol the class of protocol to use + * @param instance the instance of protocol whose methods will be called + * @param conf the configuration to use + * @param bindAddress the address to bind on to listen for connection + * @param port the port to listen for connections on + * @param numHandlers the number of method handler threads to run + * @param numReaders the number of reader threads to run + * @param queueSizePerHandler the size of the queue per hander thread + * @param verbose whether each call should be logged + * @param secretManager The secret manager to use to validate incoming requests. 
+ * @param portRangeConfig A config parameter that can be used to restrict + * the range of ports used when port is 0 (an ephemeral port) + * @return The Server instance + * @throws IOException on any error + */ RPC.Server getServer(Class protocol, Object instance, String bindAddress, int port, int numHandlers, int numReaders, int queueSizePerHandler, boolean verbose, Configuration conf, - SecretManager secretManager + SecretManager secretManager, + String portRangeConfig ) throws IOException; /** diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java index f11224c1d2d..d9ac47eb663 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java @@ -63,6 +63,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.conf.Configuration.IntegerRanges; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.io.BytesWritable; @@ -291,6 +292,7 @@ public static boolean isRpcInvocation() { protected RpcDetailedMetrics rpcDetailedMetrics; private Configuration conf; + private String portRangeConfig = null; private SecretManager secretManager; private ServiceAuthorizationManager serviceAuthorizationManager = new ServiceAuthorizationManager(); @@ -323,8 +325,33 @@ public static boolean isRpcInvocation() { */ public static void bind(ServerSocket socket, InetSocketAddress address, int backlog) throws IOException { + bind(socket, address, backlog, null, null); + } + + public static void bind(ServerSocket socket, InetSocketAddress address, + int backlog, Configuration conf, String rangeConf) throws IOException { try { - socket.bind(address, backlog); + IntegerRanges range = null; + if (rangeConf != null) { + range = conf.getRange(rangeConf, ""); + } + if (range == null || range.isEmpty() || (address.getPort() != 0)) { + socket.bind(address, backlog); + } else { + for (Integer port : range) { + if (socket.isBound()) break; + try { + InetSocketAddress temp = new InetSocketAddress(address.getAddress(), + port); + socket.bind(temp, backlog); + } catch(BindException e) { + //Ignored + } + } + if (!socket.isBound()) { + throw new BindException("Could not find a free port in "+range); + } + } } catch (SocketException e) { throw NetUtils.wrapException(null, 0, @@ -424,7 +451,7 @@ public Listener() throws IOException { acceptChannel.configureBlocking(false); // Bind the server socket to the local host and port - bind(acceptChannel.socket(), address, backlogLength); + bind(acceptChannel.socket(), address, backlogLength, conf, portRangeConfig); port = acceptChannel.socket().getLocalPort(); //Could be an ephemeral port // create a selector; selector= Selector.open(); @@ -1725,7 +1752,16 @@ protected Server(String bindAddress, int port, throws IOException { this(bindAddress, port, paramClass, handlerCount, -1, -1, conf, Integer - .toString(port), null); + .toString(port), null, null); + } + + protected Server(String bindAddress, int port, + Class rpcRequestClass, int handlerCount, + int numReaders, int queueSizePerHandler, Configuration conf, + String serverName, SecretManager secretManager) + throws IOException { + this(bindAddress, port, rpcRequestClass, handlerCount, 
numReaders, + queueSizePerHandler, conf, serverName, secretManager, null); } /** @@ -1745,10 +1781,12 @@ protected Server(String bindAddress, int port, protected Server(String bindAddress, int port, Class rpcRequestClass, int handlerCount, int numReaders, int queueSizePerHandler, Configuration conf, - String serverName, SecretManager secretManager) + String serverName, SecretManager secretManager, + String portRangeConfig) throws IOException { this.bindAddress = bindAddress; this.conf = conf; + this.portRangeConfig = portRangeConfig; this.port = port; this.rpcRequestClass = rpcRequestClass; this.handlerCount = handlerCount; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java index fc0da0cf90d..e4cd9b9e08f 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java @@ -299,16 +299,19 @@ public Object[] call(Method method, Object[][] params, } } - /** Construct a server for a protocol implementation instance listening on a + /* Construct a server for a protocol implementation instance listening on a * port and address. */ + @Override public RPC.Server getServer(Class protocolClass, Object protocolImpl, String bindAddress, int port, int numHandlers, int numReaders, int queueSizePerHandler, boolean verbose, Configuration conf, - SecretManager secretManager) + SecretManager secretManager, + String portRangeConfig) throws IOException { return new Server(protocolClass, protocolImpl, conf, bindAddress, port, - numHandlers, numReaders, queueSizePerHandler, verbose, secretManager); + numHandlers, numReaders, queueSizePerHandler, verbose, secretManager, + portRangeConfig); } @@ -341,7 +344,7 @@ public Server(Class protocolClass, Object protocolImpl, Configuration conf, String bindAddress, int port) throws IOException { this(protocolClass, protocolImpl, conf, bindAddress, port, 1, -1, -1, - false, null); + false, null, null); } /** @@ -363,7 +366,7 @@ public Server(Object protocolImpl, Configuration conf, String bindAddress, throws IOException { this(null, protocolImpl, conf, bindAddress, port, numHandlers, numReaders, queueSizePerHandler, verbose, - secretManager); + secretManager, null); } @@ -381,11 +384,13 @@ public Server(Object protocolImpl, Configuration conf, String bindAddress, public Server(Class protocolClass, Object protocolImpl, Configuration conf, String bindAddress, int port, int numHandlers, int numReaders, int queueSizePerHandler, - boolean verbose, SecretManager secretManager) + boolean verbose, SecretManager secretManager, + String portRangeConfig) throws IOException { super(bindAddress, port, null, numHandlers, numReaders, queueSizePerHandler, conf, - classNameBase(protocolImpl.getClass().getName()), secretManager); + classNameBase(protocolImpl.getClass().getName()), secretManager, + portRangeConfig); this.verbose = verbose; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java index 4f1ec878bbe..c48a25de183 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java @@ -25,16 +25,20 @@ 
import java.io.StringWriter; import java.net.InetSocketAddress; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.HashMap; +import java.util.HashSet; import java.util.Map; import java.util.Random; +import java.util.Set; import java.util.regex.Pattern; import junit.framework.TestCase; import static org.junit.Assert.assertArrayEquals; import org.apache.commons.lang.StringUtils; +import org.apache.hadoop.conf.Configuration.IntegerRanges; import org.apache.hadoop.fs.Path; import org.apache.hadoop.net.NetUtils; import org.codehaus.jackson.map.ObjectMapper; @@ -362,6 +366,35 @@ public void testIntegerRanges() { assertEquals(true, range.isIncluded(34)); assertEquals(true, range.isIncluded(100000000)); } + + public void testGetRangeIterator() throws Exception { + Configuration config = new Configuration(false); + IntegerRanges ranges = config.getRange("Test", ""); + assertFalse("Empty range has values", ranges.iterator().hasNext()); + ranges = config.getRange("Test", "5"); + Set expected = new HashSet(Arrays.asList(5)); + Set found = new HashSet(); + for(Integer i: ranges) { + found.add(i); + } + assertEquals(expected, found); + + ranges = config.getRange("Test", "5-10,13-14"); + expected = new HashSet(Arrays.asList(5,6,7,8,9,10,13,14)); + found = new HashSet(); + for(Integer i: ranges) { + found.add(i); + } + assertEquals(expected, found); + + ranges = config.getRange("Test", "8-12, 5- 7"); + expected = new HashSet(Arrays.asList(5,6,7,8,9,10,11,12)); + found = new HashSet(); + for(Integer i: ranges) { + found.add(i); + } + assertEquals(expected, found); + } public void testHexValues() throws IOException{ out=new BufferedWriter(new FileWriter(CONFIG)); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java index f22cd614100..56b2b2487ba 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java @@ -260,7 +260,8 @@ public ProtocolProxy getProxy(Class protocol, long clientVersion, public org.apache.hadoop.ipc.RPC.Server getServer(Class protocol, Object instance, String bindAddress, int port, int numHandlers, int numReaders, int queueSizePerHandler, boolean verbose, Configuration conf, - SecretManager secretManager) throws IOException { + SecretManager secretManager, + String portRangeConfig) throws IOException { return null; } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestServer.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestServer.java new file mode 100644 index 00000000000..db0d2ccc15a --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestServer.java @@ -0,0 +1,118 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
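Illustrative sketch only (not part of the patches): the ephemeral-port-range behaviour added by HADOOP-8227. When the requested port is 0 and the named range property is set, Server.bind walks the configured range until it finds a free port, or throws BindException if none is free. The property name "my.rpc.port.range" and the class name are invented; the Server.bind(socket, address, backlog, conf, rangeConf) overload mirrors the TestServer tests above.

    import java.net.InetSocketAddress;
    import java.net.ServerSocket;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.ipc.Server;

    public class PortRangeBindExample {            // hypothetical example class
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("my.rpc.port.range", "50000-50100");   // made-up range
        ServerSocket socket = new ServerSocket();
        // Port 0 would normally mean "any ephemeral port"; with the range config,
        // bind() tries 50000..50100 instead.
        Server.bind(socket, new InetSocketAddress("0.0.0.0", 0), 10, conf,
            "my.rpc.port.range");
        System.out.println("Bound to port " + socket.getLocalPort());
        socket.close();
      }
    }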
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ipc; + +import static org.junit.Assert.*; + +import java.net.BindException; +import java.net.InetSocketAddress; +import java.net.ServerSocket; + +import org.apache.hadoop.conf.Configuration; +import org.junit.Test; + +/** + * This is intended to be a set of unit tests for the + * org.apache.hadoop.ipc.Server class. + */ +public class TestServer { + + @Test + public void testBind() throws Exception { + Configuration conf = new Configuration(); + ServerSocket socket = new ServerSocket(); + InetSocketAddress address = new InetSocketAddress("0.0.0.0",0); + socket.bind(address); + try { + int min = socket.getLocalPort(); + int max = min + 100; + conf.set("TestRange", min+"-"+max); + + + ServerSocket socket2 = new ServerSocket(); + InetSocketAddress address2 = new InetSocketAddress("0.0.0.0", 0); + Server.bind(socket2, address2, 10, conf, "TestRange"); + try { + assertTrue(socket2.isBound()); + assertTrue(socket2.getLocalPort() > min); + assertTrue(socket2.getLocalPort() <= max); + } finally { + socket2.close(); + } + } finally { + socket.close(); + } + } + + @Test + public void testBindSimple() throws Exception { + ServerSocket socket = new ServerSocket(); + InetSocketAddress address = new InetSocketAddress("0.0.0.0",0); + Server.bind(socket, address, 10); + try { + assertTrue(socket.isBound()); + } finally { + socket.close(); + } + } + + @Test + public void testEmptyConfig() throws Exception { + Configuration conf = new Configuration(); + conf.set("TestRange", ""); + + + ServerSocket socket = new ServerSocket(); + InetSocketAddress address = new InetSocketAddress("0.0.0.0", 0); + try { + Server.bind(socket, address, 10, conf, "TestRange"); + assertTrue(socket.isBound()); + } finally { + socket.close(); + } + } + + + @Test + public void testBindError() throws Exception { + Configuration conf = new Configuration(); + ServerSocket socket = new ServerSocket(); + InetSocketAddress address = new InetSocketAddress("0.0.0.0",0); + socket.bind(address); + try { + int min = socket.getLocalPort(); + conf.set("TestRange", min+"-"+min); + + + ServerSocket socket2 = new ServerSocket(); + InetSocketAddress address2 = new InetSocketAddress("0.0.0.0", 0); + boolean caught = false; + try { + Server.bind(socket2, address2, 10, conf, "TestRange"); + } catch (BindException e) { + caught = true; + } finally { + socket2.close(); + } + assertTrue("Failed to catch the expected bind exception",caught); + } finally { + socket.close(); + } + } +} From c7fb5ad40e2e96f70a90cd78fea814b63274742d Mon Sep 17 00:00:00 2001 From: Robert Joseph Evans Date: Tue, 17 Apr 2012 15:23:04 +0000 Subject: [PATCH 38/57] Pulled HADOOP-8108 into branch-0.23 git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1327140 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 35acb90ca3b..51db2e5c329 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ 
b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -210,9 +210,6 @@ Release 2.0.0 - UNRELEASED HADOOP-7358. Improve log levels when exceptions caught in RPC handler (Todd Lipcon via shv) - HADOOP-8108. Move method getHostPortString() from NameNode to NetUtils. - (Brandon Li via jitendra) - HADOOP-7557 Make IPC header be extensible (sanjay radia) HADOOP-7806. Support binding to sub-interfaces (eli) @@ -421,6 +418,9 @@ Release 0.23.3 - UNRELEASED IMPROVEMENTS + HADOOP-8108. Move method getHostPortString() from NameNode to NetUtils. + (Brandon Li via jitendra) + OPTIMIZATIONS BUG FIXES From 1675c18ded4ae7a03c2236a773f4f4b6c8057af3 Mon Sep 17 00:00:00 2001 From: Aaron Myers Date: Tue, 17 Apr 2012 16:49:04 +0000 Subject: [PATCH 39/57] Revert HADOOP-8280 so it can be recommitted using `svn mv' git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1327179 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 3 --- .../org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java | 2 +- .../apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java | 2 +- .../main/java/org/apache/hadoop/hdfs}/util/VersionUtil.java | 2 +- .../java/org/apache/hadoop/hdfs}/util/TestVersionUtil.java | 3 +-- .../src/test/java/org/apache/hadoop/test/GenericTestUtils.java | 0 6 files changed, 4 insertions(+), 8 deletions(-) rename {hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop => hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs}/util/VersionUtil.java (99%) rename {hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop => hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs}/util/TestVersionUtil.java (96%) rename {hadoop-common-project/hadoop-common => hadoop-hdfs-project/hadoop-hdfs}/src/test/java/org/apache/hadoop/test/GenericTestUtils.java (100%) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 51db2e5c329..44a87234598 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -260,9 +260,6 @@ Release 2.0.0 - UNRELEASED HADOOP-8086. KerberosName silently sets defaultRealm to "" if the Kerberos config is not found, it should log a WARN (tucu) - HADOOP-8280. Move VersionUtil/TestVersionUtil and GenericTestUtils from - HDFS into Common. 
(Ahmed Radwan via atm) - OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java index f018f53e731..f5d09b1fef2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java @@ -48,11 +48,11 @@ import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport; import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks; import org.apache.hadoop.hdfs.server.protocol.StorageReport; +import org.apache.hadoop.hdfs.util.VersionUtil; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.VersionInfo; -import org.apache.hadoop.util.VersionUtil; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.Maps; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index af3dd16d768..4c891d3d4d9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -108,6 +108,7 @@ import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks; import org.apache.hadoop.hdfs.server.protocol.StorageReport; import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand; +import org.apache.hadoop.hdfs.util.VersionUtil; import org.apache.hadoop.io.EnumSetWritable; import org.apache.hadoop.io.Text; import org.apache.hadoop.ipc.ProtobufRpcEngine; @@ -123,7 +124,6 @@ import org.apache.hadoop.security.token.SecretManager.InvalidToken; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.VersionInfo; -import org.apache.hadoop.util.VersionUtil; import com.google.protobuf.BlockingService; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/VersionUtil.java similarity index 99% rename from hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionUtil.java rename to hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/VersionUtil.java index dd68c4d74b1..59aa5e128ed 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/VersionUtil.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.util; +package org.apache.hadoop.hdfs.util; import java.util.regex.Matcher; import java.util.regex.Pattern; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestVersionUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestVersionUtil.java similarity index 96% rename from hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestVersionUtil.java rename to hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestVersionUtil.java index b5b1ebf3de8..c2537fd515a 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestVersionUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestVersionUtil.java @@ -15,12 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.util; +package org.apache.hadoop.hdfs.util; import static org.junit.Assert.*; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.util.VersionUtil; import org.junit.Test; public class TestVersionUtil { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/test/GenericTestUtils.java similarity index 100% rename from hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java rename to hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/test/GenericTestUtils.java From c21bd72a2f1ff9de49dad102e43788f2667e9c49 Mon Sep 17 00:00:00 2001 From: Aaron Myers Date: Tue, 17 Apr 2012 17:06:38 +0000 Subject: [PATCH 40/57] HADOOP-8280. Move VersionUtil/TestVersionUtil and GenericTestUtils from HDFS into Common. Contributed by Ahmed Radwan. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1327182 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++ .../src/main/java/org/apache/hadoop}/util/VersionUtil.java | 2 +- .../src/test/java/org/apache/hadoop/test/GenericTestUtils.java | 0 .../src/test/java/org/apache/hadoop}/util/TestVersionUtil.java | 2 +- .../org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java | 2 +- .../apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java | 2 +- 6 files changed, 7 insertions(+), 4 deletions(-) rename {hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs => hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop}/util/VersionUtil.java (99%) rename {hadoop-hdfs-project/hadoop-hdfs => hadoop-common-project/hadoop-common}/src/test/java/org/apache/hadoop/test/GenericTestUtils.java (100%) rename {hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs => hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop}/util/TestVersionUtil.java (98%) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 44a87234598..51db2e5c329 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -260,6 +260,9 @@ Release 2.0.0 - UNRELEASED HADOOP-8086. KerberosName silently sets defaultRealm to "" if the Kerberos config is not found, it should log a WARN (tucu) + HADOOP-8280. Move VersionUtil/TestVersionUtil and GenericTestUtils from + HDFS into Common. 
(Ahmed Radwan via atm) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/VersionUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionUtil.java similarity index 99% rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/VersionUtil.java rename to hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionUtil.java index 59aa5e128ed..dd68c4d74b1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/VersionUtil.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionUtil.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hdfs.util; +package org.apache.hadoop.util; import java.util.regex.Matcher; import java.util.regex.Pattern; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/test/GenericTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java similarity index 100% rename from hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/test/GenericTestUtils.java rename to hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestVersionUtil.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestVersionUtil.java similarity index 98% rename from hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestVersionUtil.java rename to hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestVersionUtil.java index c2537fd515a..a300cd25fb7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestVersionUtil.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestVersionUtil.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.hdfs.util; +package org.apache.hadoop.util; import static org.junit.Assert.*; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java index f5d09b1fef2..f018f53e731 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java @@ -48,11 +48,11 @@ import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport; import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks; import org.apache.hadoop.hdfs.server.protocol.StorageReport; -import org.apache.hadoop.hdfs.util.VersionUtil; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.VersionInfo; +import org.apache.hadoop.util.VersionUtil; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.Maps; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index 4c891d3d4d9..af3dd16d768 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -108,7 +108,6 @@ import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks; import org.apache.hadoop.hdfs.server.protocol.StorageReport; import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand; -import org.apache.hadoop.hdfs.util.VersionUtil; import org.apache.hadoop.io.EnumSetWritable; import org.apache.hadoop.io.Text; import org.apache.hadoop.ipc.ProtobufRpcEngine; @@ -124,6 +123,7 @@ import org.apache.hadoop.security.token.SecretManager.InvalidToken; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.VersionInfo; +import org.apache.hadoop.util.VersionUtil; import com.google.protobuf.BlockingService; From fe7711df98b9dd16259f6534e8461a29f24caadc Mon Sep 17 00:00:00 2001 From: Siddharth Seth Date: Tue, 17 Apr 2012 18:48:42 +0000 Subject: [PATCH 41/57] MAPREDUCE-3942. Randomize master key generation for ApplicationTokenSecretManager and roll it every so often. 
(Contributed by Vinod Kumar Vavilapalli) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1327220 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 4 + .../pb/client/AMRMProtocolPBClientImpl.java | 7 +- .../hadoop/yarn/conf/YarnConfiguration.java | 6 + .../security/ApplicationTokenIdentifier.java | 30 ++- .../ApplicationTokenSecretManager.java | 78 ------ .../src/main/resources/yarn-default.xml | 8 + .../security/LocalizerSecurityInfo.java | 6 +- .../ApplicationMasterService.java | 41 +-- .../server/resourcemanager/RMContext.java | 3 + .../server/resourcemanager/RMContextImpl.java | 11 +- .../resourcemanager/ResourceManager.java | 54 ++-- .../amlauncher/AMLauncher.java | 7 +- .../amlauncher/ApplicationMasterLauncher.java | 12 +- .../resourcemanager/rmapp/RMAppImpl.java | 6 +- .../rmapp/attempt/RMAppAttemptImpl.java | 18 +- .../ApplicationTokenSecretManager.java | 155 ++++++++++++ .../yarn/server/resourcemanager/MockRM.java | 11 +- .../resourcemanager/TestAMAuthorization.java | 42 ++-- .../resourcemanager/TestAppManager.java | 17 +- .../TestApplicationMasterLauncher.java | 9 +- .../TestRMNodeTransitions.java | 2 +- .../TestAMLaunchFailure.java | 2 +- .../applicationsmanager/TestAMRestart.java | 2 +- .../resourcetracker/TestNMExpiry.java | 2 +- .../TestRMNMRPCResponseId.java | 2 +- .../rmapp/TestRMAppTransitions.java | 14 +- .../attempt/TestRMAppAttemptTransitions.java | 19 +- .../scheduler/capacity/TestUtils.java | 12 +- .../scheduler/fifo/TestFifoScheduler.java | 3 +- .../security/TestApplicationTokens.java | 234 ++++++++++++++++++ .../resourcemanager/webapp/TestRMWebApp.java | 2 +- .../server/TestContainerManagerSecurity.java | 25 +- 32 files changed, 620 insertions(+), 224 deletions(-) delete mode 100644 hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ApplicationTokenSecretManager.java create mode 100644 hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/ApplicationTokenSecretManager.java create mode 100644 hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestApplicationTokens.java diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 353e3db4600..3b63624de8d 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -258,6 +258,10 @@ Release 0.23.3 - UNRELEASED MAPREDUCE-4059. The history server should have a separate pluggable storage/query interface. (Robert Evans via tgraves) + MAPREDUCE-3942. Randomize master key generation for + ApplicationTokenSecretManager and roll it every so often. 
(Vinod Kumar + Vavilapalli via sseth) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/AMRMProtocolPBClientImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/AMRMProtocolPBClientImpl.java index c43863c57b6..29314300480 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/AMRMProtocolPBClientImpl.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/AMRMProtocolPBClientImpl.java @@ -56,7 +56,12 @@ public AMRMProtocolPBClientImpl(long clientVersion, InetSocketAddress addr, Conf AMRMProtocolPB.class, clientVersion, addr, conf); } - + public void close() { + if (this.proxy != null) { + RPC.stopProxy(this.proxy); + } + } + @Override public AllocateResponse allocate(AllocateRequest request) throws YarnRemoteException { diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java index 3bc81503ae5..c7747139f30 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -246,6 +246,12 @@ public class YarnConfiguration extends Configuration { public static final String DEFAULT_RM_METRICS_RUNTIME_BUCKETS = "60,300,1440"; + public static final String RM_APP_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS = RM_PREFIX + + "application-tokens.master-key-rolling-interval-secs"; + + public static final long DEFAULT_RM_APP_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS = + 24 * 60 * 60; + //////////////////////////////// // Node Manager Configs //////////////////////////////// diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ApplicationTokenIdentifier.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ApplicationTokenIdentifier.java index 1a63e107f28..2103bf65253 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ApplicationTokenIdentifier.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ApplicationTokenIdentifier.java @@ -23,34 +23,55 @@ import java.io.IOException; import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.io.Text; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.util.BuilderUtils; +/** + * ApplicationTokenIdentifier is the TokenIdentifier to be used by + * ApplicationMasters to authenticate to the ResourceManager. 
+ */ public class ApplicationTokenIdentifier extends TokenIdentifier { public static final Text KIND_NAME = new Text("YARN_APPLICATION_TOKEN"); - private String applicationAttemptId; + private ApplicationAttemptId applicationAttemptId; public ApplicationTokenIdentifier() { } public ApplicationTokenIdentifier(ApplicationAttemptId appAttemptId) { this(); - this.applicationAttemptId = appAttemptId.toString(); + this.applicationAttemptId = appAttemptId; + } + + @Private + public ApplicationAttemptId getApplicationAttemptId() { + return this.applicationAttemptId; } @Override public void write(DataOutput out) throws IOException { - Text.writeString(out, this.applicationAttemptId); + ApplicationId appId = this.applicationAttemptId.getApplicationId(); + out.writeLong(appId.getClusterTimestamp()); + out.writeInt(appId.getId()); + out.writeInt(this.applicationAttemptId.getAttemptId()); } @Override public void readFields(DataInput in) throws IOException { - this.applicationAttemptId = Text.readString(in); + long clusterTimeStamp = in.readLong(); + int appId = in.readInt(); + int attemptId = in.readInt(); + ApplicationId applicationId = + BuilderUtils.newApplicationId(clusterTimeStamp, appId); + this.applicationAttemptId = + BuilderUtils.newApplicationAttemptId(applicationId, attemptId); } @Override @@ -68,6 +89,7 @@ public UserGroupInformation getUser() { .toString()); } + // TODO: Needed? @InterfaceAudience.Private public static class Renewer extends Token.TrivialRenewer { @Override diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ApplicationTokenSecretManager.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ApplicationTokenSecretManager.java deleted file mode 100644 index 0d83f40e0ca..00000000000 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ApplicationTokenSecretManager.java +++ /dev/null @@ -1,78 +0,0 @@ -/** -* Licensed to the Apache Software Foundation (ASF) under one -* or more contributor license agreements. See the NOTICE file -* distributed with this work for additional information -* regarding copyright ownership. The ASF licenses this file -* to you under the Apache License, Version 2.0 (the -* "License"); you may not use this file except in compliance -* with the License. You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ - -package org.apache.hadoop.yarn.security; - -import javax.crypto.SecretKey; - -import org.apache.hadoop.security.token.SecretManager; - -public class ApplicationTokenSecretManager extends - SecretManager { - - // TODO: mark as final - private SecretKey masterKey; // For now only one masterKey, for ever. - - // TODO: add expiry for masterKey - // TODO: add logic to handle with multiple masterKeys, only one being used for - // creating new tokens at any time. - // TODO: Make he masterKey more secure, non-transferrable etc. - - /** - * Default constructor - */ - public ApplicationTokenSecretManager() { - this.masterKey = generateSecret(); - } - - // TODO: this should go away. 
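Illustrative sketch only (not part of the patch): the reworked ApplicationTokenIdentifier now serializes the attempt id as (clusterTimestamp, appId, attemptId) rather than a String, so it can be round-tripped as shown below. The class name is invented; the constructors, write/readFields and getApplicationAttemptId come from the hunk above, and BuilderUtils is used as in the patched readFields.

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
    import org.apache.hadoop.yarn.api.records.ApplicationId;
    import org.apache.hadoop.yarn.security.ApplicationTokenIdentifier;
    import org.apache.hadoop.yarn.util.BuilderUtils;

    public class TokenIdentifierRoundTrip {        // hypothetical example class
      public static void main(String[] args) throws IOException {
        ApplicationId appId = BuilderUtils.newApplicationId(System.currentTimeMillis(), 1);
        ApplicationAttemptId attemptId = BuilderUtils.newApplicationAttemptId(appId, 1);
        ApplicationTokenIdentifier id = new ApplicationTokenIdentifier(attemptId);

        // write() emits clusterTimestamp, appId and attemptId as primitives.
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        id.write(new DataOutputStream(bytes));

        ApplicationTokenIdentifier copy = new ApplicationTokenIdentifier();
        copy.readFields(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
        System.out.println(copy.getApplicationAttemptId());   // same attempt id as above
      }
    }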
- public void setMasterKey(SecretKey mk) { - this.masterKey = mk; - } - - // TODO: this should go away. - public SecretKey getMasterKey() { - return masterKey; - } - - /** - * Convert the byte[] to a secret key - * @param key the byte[] to create the secret key from - * @return the secret key - */ - public static SecretKey createSecretKey(byte[] key) { - return SecretManager.createSecretKey(key); - } - - @Override - public byte[] createPassword(ApplicationTokenIdentifier identifier) { - return createPassword(identifier.getBytes(), masterKey); - } - - @Override - public byte[] retrievePassword(ApplicationTokenIdentifier identifier) - throws SecretManager.InvalidToken { - return createPassword(identifier.getBytes(), masterKey); - } - - @Override - public ApplicationTokenIdentifier createIdentifier() { - return new ApplicationTokenIdentifier(); - } - -} diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml index 686cb492109..e10b359fcb9 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml @@ -215,6 +215,14 @@ 30000 + + Interval for the roll over for the master key used to generate + application tokens + + yarn.resourcemanager.application-tokens.master-key-rolling-interval-secs + 86400 + + address of node manager IPC. diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/security/LocalizerSecurityInfo.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/security/LocalizerSecurityInfo.java index 04fec512417..bd204c473d9 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/security/LocalizerSecurityInfo.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/security/LocalizerSecurityInfo.java @@ -20,6 +20,8 @@ import java.lang.annotation.Annotation; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.KerberosInfo; import org.apache.hadoop.security.SecurityInfo; @@ -30,6 +32,8 @@ public class LocalizerSecurityInfo extends SecurityInfo { + private static final Log LOG = LogFactory.getLog(LocalizerSecurityInfo.class); + @Override public KerberosInfo getKerberosInfo(Class protocol, Configuration conf) { return null; @@ -51,7 +55,7 @@ public Class annotationType() { @Override public Class> value() { - System.err.print("=========== Using localizerTokenSecurityInfo"); + LOG.debug("Using localizerTokenSecurityInfo"); return LocalizerTokenSelector.class; } }; diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java 
b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java index af0f84bb158..80de3660cf5 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java @@ -55,7 +55,6 @@ import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.ipc.RPCUtil; import org.apache.hadoop.yarn.ipc.YarnRPC; -import org.apache.hadoop.yarn.security.ApplicationTokenSecretManager; import org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger.AuditConstants; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeState; @@ -72,14 +71,14 @@ import org.apache.hadoop.yarn.service.AbstractService; import org.apache.hadoop.yarn.util.BuilderUtils; +@SuppressWarnings("unchecked") @Private public class ApplicationMasterService extends AbstractService implements AMRMProtocol { private static final Log LOG = LogFactory.getLog(ApplicationMasterService.class); private final AMLivelinessMonitor amLivelinessMonitor; private YarnScheduler rScheduler; - private ApplicationTokenSecretManager appTokenManager; - private InetSocketAddress masterServiceAddress; + private InetSocketAddress bindAddress; private Server server; private final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null); private final ConcurrentMap responseMap = @@ -87,35 +86,31 @@ public class ApplicationMasterService extends AbstractService implements private final AMResponse reboot = recordFactory.newRecordInstance(AMResponse.class); private final RMContext rmContext; - public ApplicationMasterService(RMContext rmContext, - ApplicationTokenSecretManager appTokenManager, YarnScheduler scheduler) { + public ApplicationMasterService(RMContext rmContext, YarnScheduler scheduler) { super(ApplicationMasterService.class.getName()); this.amLivelinessMonitor = rmContext.getAMLivelinessMonitor(); - this.appTokenManager = appTokenManager; this.rScheduler = scheduler; this.reboot.setReboot(true); // this.reboot.containers = new ArrayList(); this.rmContext = rmContext; } - @Override - public void init(Configuration conf) { - String bindAddress = - conf.get(YarnConfiguration.RM_SCHEDULER_ADDRESS, - YarnConfiguration.DEFAULT_RM_SCHEDULER_ADDRESS); - masterServiceAddress = NetUtils.createSocketAddr(bindAddress, - YarnConfiguration.DEFAULT_RM_SCHEDULER_PORT, - YarnConfiguration.RM_SCHEDULER_ADDRESS); - super.init(conf); - } - @Override public void start() { Configuration conf = getConfig(); YarnRPC rpc = YarnRPC.create(conf); + + String bindAddressStr = + conf.get(YarnConfiguration.RM_SCHEDULER_ADDRESS, + YarnConfiguration.DEFAULT_RM_SCHEDULER_ADDRESS); + InetSocketAddress masterServiceAddress = + NetUtils.createSocketAddr(bindAddressStr, + YarnConfiguration.DEFAULT_RM_SCHEDULER_PORT, + YarnConfiguration.RM_SCHEDULER_ADDRESS); + this.server = rpc.getServer(AMRMProtocol.class, this, masterServiceAddress, - conf, this.appTokenManager, + conf, this.rmContext.getApplicationTokenSecretManager(), conf.getInt(YarnConfiguration.RM_SCHEDULER_CLIENT_THREAD_COUNT, 
YarnConfiguration.DEFAULT_RM_SCHEDULER_CLIENT_THREAD_COUNT)); @@ -127,9 +122,19 @@ public void start() { } this.server.start(); + + this.bindAddress = + NetUtils.createSocketAddr(masterServiceAddress.getHostName(), + this.server.getPort()); + super.start(); } + @Private + public InetSocketAddress getBindAddress() { + return this.bindAddress; + } + private void authorizeRequest(ApplicationAttemptId appAttemptID) throws YarnRemoteException { diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java index 117e77cb77b..4f0158bb175 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java @@ -29,6 +29,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.AMLivelinessMonitor; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAllocationExpirer; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; +import org.apache.hadoop.yarn.server.resourcemanager.security.ApplicationTokenSecretManager; import org.apache.hadoop.yarn.server.resourcemanager.security.DelegationTokenRenewer; /** @@ -53,4 +54,6 @@ public interface RMContext { ContainerAllocationExpirer getContainerAllocationExpirer(); DelegationTokenRenewer getDelegationTokenRenewer(); + + ApplicationTokenSecretManager getApplicationTokenSecretManager(); } \ No newline at end of file diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java index 029a22c8fe3..56f53d134a2 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java @@ -31,6 +31,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.AMLivelinessMonitor; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAllocationExpirer; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; +import org.apache.hadoop.yarn.server.resourcemanager.security.ApplicationTokenSecretManager; import org.apache.hadoop.yarn.server.resourcemanager.security.DelegationTokenRenewer; public class RMContextImpl implements RMContext { @@ -50,16 +51,19 @@ public class RMContextImpl implements RMContext { private AMLivelinessMonitor amLivelinessMonitor; private ContainerAllocationExpirer containerAllocationExpirer; private final DelegationTokenRenewer tokenRenewer; + private final ApplicationTokenSecretManager appTokenSecretManager; public RMContextImpl(Store store, Dispatcher rmDispatcher, ContainerAllocationExpirer containerAllocationExpirer, 
AMLivelinessMonitor amLivelinessMonitor, - DelegationTokenRenewer tokenRenewer) { + DelegationTokenRenewer tokenRenewer, + ApplicationTokenSecretManager appTokenSecretManager) { this.store = store; this.rmDispatcher = rmDispatcher; this.containerAllocationExpirer = containerAllocationExpirer; this.amLivelinessMonitor = amLivelinessMonitor; this.tokenRenewer = tokenRenewer; + this.appTokenSecretManager = appTokenSecretManager; } @Override @@ -106,4 +110,9 @@ public AMLivelinessMonitor getAMLivelinessMonitor() { public DelegationTokenRenewer getDelegationTokenRenewer() { return tokenRenewer; } + + @Override + public ApplicationTokenSecretManager getApplicationTokenSecretManager() { + return this.appTokenSecretManager; + } } \ No newline at end of file diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java index 1df109e0995..22056065d0a 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java @@ -27,7 +27,6 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.metrics2.MetricsSystem; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.metrics2.source.JvmMetrics; import org.apache.hadoop.security.SecurityUtil; @@ -41,7 +40,6 @@ import org.apache.hadoop.yarn.event.AsyncDispatcher; import org.apache.hadoop.yarn.event.Dispatcher; import org.apache.hadoop.yarn.event.EventHandler; -import org.apache.hadoop.yarn.security.ApplicationTokenSecretManager; import org.apache.hadoop.yarn.security.client.ClientToAMSecretManager; import org.apache.hadoop.yarn.server.RMDelegationTokenSecretManager; import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEventType; @@ -65,6 +63,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEventType; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.security.ApplicationTokenSecretManager; import org.apache.hadoop.yarn.server.resourcemanager.security.DelegationTokenRenewer; import org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebApp; import org.apache.hadoop.yarn.server.security.ApplicationACLsManager; @@ -82,8 +81,10 @@ /** * The ResourceManager is the main class that is a set of components. + * "I am the ResourceManager. All your resources are belong to us..." 
* */ +@SuppressWarnings("unchecked") public class ResourceManager extends CompositeService implements Recoverable { private static final Log LOG = LogFactory.getLog(ResourceManager.class); public static final long clusterTimeStamp = System.currentTimeMillis(); @@ -94,8 +95,7 @@ public class ResourceManager extends CompositeService implements Recoverable { protected ContainerTokenSecretManager containerTokenSecretManager = new ContainerTokenSecretManager(); - protected ApplicationTokenSecretManager appTokenSecretManager = - new ApplicationTokenSecretManager(); + protected ApplicationTokenSecretManager appTokenSecretManager; private Dispatcher rmDispatcher; @@ -137,6 +137,8 @@ public synchronized void init(Configuration conf) { this.rmDispatcher = createDispatcher(); addIfService(this.rmDispatcher); + this.appTokenSecretManager = createApplicationTokenSecretManager(conf); + this.containerAllocationExpirer = new ContainerAllocationExpirer( this.rmDispatcher); addService(this.containerAllocationExpirer); @@ -147,8 +149,10 @@ public synchronized void init(Configuration conf) { DelegationTokenRenewer tokenRenewer = createDelegationTokenRenewer(); addService(tokenRenewer); - this.rmContext = new RMContextImpl(this.store, this.rmDispatcher, - this.containerAllocationExpirer, amLivelinessMonitor, tokenRenewer); + this.rmContext = + new RMContextImpl(this.store, this.rmDispatcher, + this.containerAllocationExpirer, amLivelinessMonitor, tokenRenewer, + this.appTokenSecretManager); // Register event handler for NodesListManager this.nodesListManager = new NodesListManager(this.rmContext); @@ -175,10 +179,6 @@ public synchronized void init(Configuration conf) { this.rmDispatcher.register(RMNodeEventType.class, new NodeEventDispatcher(this.rmContext)); - //TODO change this to be random - this.appTokenSecretManager.setMasterKey(ApplicationTokenSecretManager - .createSecretKey("Dummy".getBytes())); - this.nmLivelinessMonitor = createNMLivelinessMonitor(); addService(this.nmLivelinessMonitor); @@ -233,6 +233,11 @@ protected void addIfService(Object object) { } } + protected ApplicationTokenSecretManager createApplicationTokenSecretManager( + Configuration conf) { + return new ApplicationTokenSecretManager(conf); + } + protected ResourceScheduler createScheduler() { return ReflectionUtils.newInstance(this.conf.getClass( YarnConfiguration.RM_SCHEDULER, FifoScheduler.class, @@ -240,9 +245,8 @@ protected ResourceScheduler createScheduler() { } protected ApplicationMasterLauncher createAMLauncher() { - return new ApplicationMasterLauncher( - this.appTokenSecretManager, this.clientToAMSecretManager, - this.rmContext); + return new ApplicationMasterLauncher(this.clientToAMSecretManager, + this.rmContext); } private NMLivelinessMonitor createNMLivelinessMonitor() { @@ -273,6 +277,7 @@ public static class SchedulerEventDispatcher extends AbstractService new LinkedBlockingQueue(); private final Thread eventProcessor; private volatile boolean stopped = false; + private boolean shouldExitOnError = false; public SchedulerEventDispatcher(ResourceScheduler scheduler) { super(SchedulerEventDispatcher.class.getName()); @@ -281,6 +286,14 @@ public SchedulerEventDispatcher(ResourceScheduler scheduler) { this.eventProcessor.setName("ResourceManager Event Processor"); } + @Override + public synchronized void init(Configuration conf) { + this.shouldExitOnError = + conf.getBoolean(Dispatcher.DISPATCHER_EXIT_ON_ERROR_KEY, + Dispatcher.DEFAULT_DISPATCHER_EXIT_ON_ERROR); + super.init(conf); + } + @Override public synchronized void 
start() { this.eventProcessor.start(); @@ -306,8 +319,7 @@ public void run() { } catch (Throwable t) { LOG.fatal("Error in handling event type " + event.getType() + " to the scheduler", t); - if (getConfig().getBoolean(Dispatcher.DISPATCHER_EXIT_ON_ERROR_KEY, - Dispatcher.DEFAULT_DISPATCHER_EXIT_ON_ERROR)) { + if (shouldExitOnError) { LOG.info("Exiting, bbye.."); System.exit(-1); } @@ -453,6 +465,8 @@ public void start() { throw new YarnException("Failed to login", ie); } + this.appTokenSecretManager.start(); + startWepApp(); DefaultMetricsSystem.initialize("ResourceManager"); JvmMetrics.initSingleton("ResourceManager", null); @@ -487,6 +501,8 @@ public void stop() { } rmDTSecretManager.stopThreads(); + this.appTokenSecretManager.stop(); + /*synchronized(shutdown) { shutdown.set(true); shutdown.notifyAll(); @@ -524,8 +540,7 @@ protected ClientRMService createClientRMService() { } protected ApplicationMasterService createApplicationMasterService() { - return new ApplicationMasterService(this.rmContext, - this.appTokenSecretManager, scheduler); + return new ApplicationMasterService(this.rmContext, scheduler); } @@ -571,6 +586,11 @@ public ApplicationACLsManager getApplicationACLsManager() { return this.applicationACLsManager; } + @Private + public ApplicationTokenSecretManager getApplicationTokenSecretManager(){ + return this.appTokenSecretManager; + } + @Override public void recover(RMState state) throws Exception { resourceTracker.recover(state); diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java index f66d1466a47..45641d4d7b2 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java @@ -53,7 +53,6 @@ import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.ipc.YarnRPC; import org.apache.hadoop.yarn.security.ApplicationTokenIdentifier; -import org.apache.hadoop.yarn.security.ApplicationTokenSecretManager; import org.apache.hadoop.yarn.security.ContainerTokenIdentifier; import org.apache.hadoop.yarn.security.client.ClientToAMSecretManager; import org.apache.hadoop.yarn.security.client.ClientTokenIdentifier; @@ -76,7 +75,6 @@ public class AMLauncher implements Runnable { private final Configuration conf; private final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null); - private final ApplicationTokenSecretManager applicationTokenSecretManager; private final ClientToAMSecretManager clientToAMSecretManager; private final AMLauncherEventType eventType; private final RMContext rmContext; @@ -86,11 +84,9 @@ public class AMLauncher implements Runnable { public AMLauncher(RMContext rmContext, RMAppAttempt application, AMLauncherEventType eventType, - ApplicationTokenSecretManager applicationTokenSecretManager, ClientToAMSecretManager clientToAMSecretManager, Configuration conf) { this.application = application; this.conf = conf; - this.applicationTokenSecretManager = applicationTokenSecretManager; 
this.clientToAMSecretManager = clientToAMSecretManager; this.eventType = eventType; this.rmContext = rmContext; @@ -129,6 +125,7 @@ private void cleanup() throws IOException { containerMgrProxy.stopContainer(stopRequest); } + // Protected. For tests. protected ContainerManager getContainerMgrProxy( final ContainerId containerId) { @@ -220,7 +217,7 @@ private void setupTokensAndEnv( application.getAppAttemptId()); Token token = new Token(id, - this.applicationTokenSecretManager); + this.rmContext.getApplicationTokenSecretManager()); String schedulerAddressStr = this.conf.get(YarnConfiguration.RM_SCHEDULER_ADDRESS, YarnConfiguration.DEFAULT_RM_SCHEDULER_ADDRESS); diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/ApplicationMasterLauncher.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/ApplicationMasterLauncher.java index b618e65f8f9..52d201dba47 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/ApplicationMasterLauncher.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/ApplicationMasterLauncher.java @@ -25,7 +25,6 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.yarn.event.EventHandler; -import org.apache.hadoop.yarn.security.ApplicationTokenSecretManager; import org.apache.hadoop.yarn.security.client.ClientToAMSecretManager; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt; @@ -42,20 +41,16 @@ public class ApplicationMasterLauncher extends AbstractService implements private final BlockingQueue masterEvents = new LinkedBlockingQueue(); - protected ApplicationTokenSecretManager applicationTokenSecretManager; private ClientToAMSecretManager clientToAMSecretManager; protected final RMContext context; public ApplicationMasterLauncher( - ApplicationTokenSecretManager applicationTokenSecretManager, - ClientToAMSecretManager clientToAMSecretManager, - RMContext context) { + ClientToAMSecretManager clientToAMSecretManager, RMContext context) { super(ApplicationMasterLauncher.class.getName()); this.context = context; this.launcherPool = new ThreadPoolExecutor(10, 10, 1, TimeUnit.HOURS, new LinkedBlockingQueue()); this.launcherHandlingThread = new LauncherThread(); - this.applicationTokenSecretManager = applicationTokenSecretManager; this.clientToAMSecretManager = clientToAMSecretManager; } @@ -66,8 +61,9 @@ public void start() { protected Runnable createRunnableLauncher(RMAppAttempt application, AMLauncherEventType event) { - Runnable launcher = new AMLauncher(context, application, event, - applicationTokenSecretManager, clientToAMSecretManager, getConfig()); + Runnable launcher = + new AMLauncher(context, application, event, clientToAMSecretManager, + getConfig()); return launcher; } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java 
b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java index ddfe615d2a0..46e17b33b48 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java @@ -33,13 +33,13 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.YarnException; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; -import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport; -import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationReport; -import org.apache.hadoop.yarn.api.records.YarnApplicationState; +import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport; import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; +import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; import org.apache.hadoop.yarn.api.records.NodeId; +import org.apache.hadoop.yarn.api.records.YarnApplicationState; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.event.Dispatcher; import org.apache.hadoop.yarn.event.EventHandler; diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java index c58d514f3e3..334f7977a6c 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java @@ -77,6 +77,7 @@ import org.apache.hadoop.yarn.state.StateMachineFactory; import org.apache.hadoop.yarn.util.BuilderUtils; +@SuppressWarnings({"unchecked", "rawtypes"}) public class RMAppAttemptImpl implements RMAppAttempt { private static final Log LOG = LogFactory.getLog(RMAppAttemptImpl.class); @@ -95,7 +96,6 @@ public class RMAppAttemptImpl implements RMAppAttempt { RMAppAttemptEvent> stateMachine; private final RMContext rmContext; - @SuppressWarnings("rawtypes") private final EventHandler eventHandler; private final YarnScheduler scheduler; private final ApplicationMasterService masterService; @@ -539,7 +539,6 @@ public void transition(RMAppAttemptImpl appAttempt, } private static final class AttemptStartedTransition extends BaseTransition { - @SuppressWarnings("unchecked") @Override public void transition(RMAppAttemptImpl appAttempt, RMAppAttemptEvent event) { @@ -638,12 +637,13 @@ public BaseFinalTransition(RMAppAttemptState finalAttemptState) { public void transition(RMAppAttemptImpl appAttempt, RMAppAttemptEvent event) { + ApplicationAttemptId appAttemptId = appAttempt.getAppAttemptId(); + // Tell the AMS. 
Unregister from the ApplicationMasterService - appAttempt.masterService - .unregisterAttempt(appAttempt.applicationAttemptId); + appAttempt.masterService.unregisterAttempt(appAttemptId); // Tell the application and the scheduler - ApplicationId applicationId = appAttempt.getAppAttemptId().getApplicationId(); + ApplicationId applicationId = appAttemptId.getApplicationId(); RMAppEvent appEvent = null; switch (finalAttemptState) { case FINISHED: @@ -676,8 +676,12 @@ public void transition(RMAppAttemptImpl appAttempt, } appAttempt.eventHandler.handle(appEvent); - appAttempt.eventHandler.handle(new AppRemovedSchedulerEvent(appAttempt - .getAppAttemptId(), finalAttemptState)); + appAttempt.eventHandler.handle(new AppRemovedSchedulerEvent(appAttemptId, + finalAttemptState)); + + // Remove the AppAttempt from the ApplicationTokenSecretManager + appAttempt.rmContext.getApplicationTokenSecretManager() + .applicationMasterFinished(appAttemptId); } } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/ApplicationTokenSecretManager.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/ApplicationTokenSecretManager.java new file mode 100644 index 00000000000..8a65c094d2d --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/ApplicationTokenSecretManager.java @@ -0,0 +1,155 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ + +package org.apache.hadoop.yarn.server.resourcemanager.security; + +import java.util.HashMap; +import java.util.Map; +import java.util.Timer; +import java.util.TimerTask; + +import javax.crypto.SecretKey; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.security.token.SecretManager; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.security.ApplicationTokenIdentifier; + +/** + * Application-tokens are per ApplicationAttempt. If users redistribute their + * tokens, it is their headache, god save them. I mean you are not supposed to + * distribute keys to your vault, right? Anyways, ResourceManager saves each + * token locally in memory till application finishes and to a store for restart, + * so no need to remember master-keys even after rolling them. 
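+ *
+ * A rough lifecycle sketch (an assumed usage outline for readers of this
+ * patch, not code the patch adds; the actual wiring lives in ResourceManager,
+ * AMLauncher and RMAppAttemptImpl elsewhere in this change):
+ * <pre>{@code
+ * // illustration only: "conf", "tokenId" and "appAttemptId" are placeholder names
+ * ApplicationTokenSecretManager mgr = new ApplicationTokenSecretManager(conf);
+ * mgr.start();                                 // begin rolling the master-key on a timer
+ * byte[] pwd = mgr.createPassword(tokenId);    // per-attempt password for the AM token
+ * mgr.applicationMasterFinished(appAttemptId); // forget the password once the attempt ends
+ * mgr.stop();                                  // cancel the rolling timer
+ * }</pre>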
+ */ +public class ApplicationTokenSecretManager extends + SecretManager<ApplicationTokenIdentifier> { + + private static final Log LOG = LogFactory + .getLog(ApplicationTokenSecretManager.class); + + private SecretKey masterKey; + private final Timer timer; + private final long rollingInterval; + + private final Map<ApplicationAttemptId, byte[]> passwords = + new HashMap<ApplicationAttemptId, byte[]>(); + + /** + * Create an {@link ApplicationTokenSecretManager} + */ + public ApplicationTokenSecretManager(Configuration conf) { + rollMasterKey(); + this.timer = new Timer(); + this.rollingInterval = + conf + .getLong( + YarnConfiguration.RM_APP_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS, + YarnConfiguration.DEFAULT_RM_APP_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS) * 1000; + } + + public void start() { + this.timer.scheduleAtFixedRate(new MasterKeyRoller(), 0, rollingInterval); + } + + public void stop() { + this.timer.cancel(); + } + + public synchronized void applicationMasterFinished( + ApplicationAttemptId appAttemptId) { + if (LOG.isDebugEnabled()) { + LOG.debug("Application finished, removing password for " + appAttemptId); + } + this.passwords.remove(appAttemptId); + } + + private class MasterKeyRoller extends TimerTask { + @Override + public void run() { + rollMasterKey(); + } + } + + @Private + public synchronized void setMasterKey(SecretKey masterKey) { + this.masterKey = masterKey; + } + + @Private + public synchronized SecretKey getMasterKey() { + return this.masterKey; + } + + @Private + synchronized void rollMasterKey() { + LOG.info("Rolling master-key for application-tokens"); + this.masterKey = generateSecret(); + } + + /** + * Create a password for a given {@link ApplicationTokenIdentifier}. Used to + * send to the ApplicationAttempt which can give it back during authentication. + */ + @Override + public synchronized byte[] createPassword( + ApplicationTokenIdentifier identifier) { + ApplicationAttemptId applicationAttemptId = + identifier.getApplicationAttemptId(); + if (LOG.isDebugEnabled()) { + LOG.debug("Creating password for " + applicationAttemptId); + } + byte[] password = createPassword(identifier.getBytes(), masterKey); + this.passwords.put(applicationAttemptId, password); + return password; + } + + /** + * Retrieve the password for the given {@link ApplicationTokenIdentifier}. + * Used by the RPC layer to validate a remote {@link ApplicationTokenIdentifier}. + */ + @Override + public synchronized byte[] retrievePassword( + ApplicationTokenIdentifier identifier) throws InvalidToken { + ApplicationAttemptId applicationAttemptId = + identifier.getApplicationAttemptId(); + if (LOG.isDebugEnabled()) { + LOG.debug("Trying to retrieve password for " + applicationAttemptId); + } + byte[] password = this.passwords.get(applicationAttemptId); + if (password == null) { + throw new InvalidToken("Password not found for ApplicationAttempt " + + applicationAttemptId); + } + return password; + } + + /** + * Creates an empty TokenId to be used for de-serializing an + * {@link ApplicationTokenIdentifier} by the RPC layer.
+ */ + @Override + public ApplicationTokenIdentifier createIdentifier() { + return new ApplicationTokenIdentifier(); + } + +} diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java index 0cb69917ed6..75ae612165e 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java @@ -24,9 +24,9 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.ClientRMProtocol; -import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse; +import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest; import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest; import org.apache.hadoop.yarn.api.records.ApplicationAccessType; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; @@ -35,7 +35,6 @@ import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.Resource; -import org.apache.hadoop.yarn.event.Dispatcher; import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEvent; import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher; import org.apache.hadoop.yarn.server.resourcemanager.recovery.StoreFactory; @@ -55,6 +54,7 @@ import org.apache.log4j.LogManager; import org.apache.log4j.Logger; +@SuppressWarnings("unchecked") public class MockRM extends ResourceManager { public MockRM() { @@ -224,8 +224,7 @@ public void stop() { @Override protected ApplicationMasterService createApplicationMasterService() { - return new ApplicationMasterService(getRMContext(), - this.appTokenSecretManager, scheduler) { + return new ApplicationMasterService(getRMContext(), scheduler) { @Override public void start() { // override to not start rpc handler @@ -240,8 +239,8 @@ public void stop() { @Override protected ApplicationMasterLauncher createAMLauncher() { - return new ApplicationMasterLauncher(this.appTokenSecretManager, - this.clientToAMSecretManager, getRMContext()) { + return new ApplicationMasterLauncher(this.clientToAMSecretManager, + getRMContext()) { @Override public void start() { // override to not start rpc handler diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAMAuthorization.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAMAuthorization.java index 5ac5deef334..e62a64cb2fd 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAMAuthorization.java +++ 
b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAMAuthorization.java @@ -60,9 +60,9 @@ public class TestAMAuthorization { private static final Log LOG = LogFactory.getLog(TestAMAuthorization.class); - private static final class MyContainerManager implements ContainerManager { + public static final class MyContainerManager implements ContainerManager { - Map containerEnv; + public Map amContainerEnv; public MyContainerManager() { } @@ -71,7 +71,7 @@ public MyContainerManager() { public StartContainerResponse startContainer(StartContainerRequest request) throws YarnRemoteException { - containerEnv = request.getContainerLaunchContext().getEnvironment(); + amContainerEnv = request.getContainerLaunchContext().getEnvironment(); return null; } @@ -90,19 +90,15 @@ public GetContainerStatusResponse getContainerStatus( } } - private static class MockRMWithAMS extends MockRMWithCustomAMLauncher { + public static class MockRMWithAMS extends MockRMWithCustomAMLauncher { - private static final Configuration conf = new Configuration(); - static { + public MockRMWithAMS(Configuration conf, ContainerManager containerManager) { + super(conf, containerManager); conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos"); UserGroupInformation.setConfiguration(conf); } - public MockRMWithAMS(ContainerManager containerManager) { - super(conf, containerManager); - } - @Override protected void doSecureLogin() throws IOException { // Skip the login. @@ -111,15 +107,14 @@ protected void doSecureLogin() throws IOException { @Override protected ApplicationMasterService createApplicationMasterService() { - return new ApplicationMasterService(getRMContext(), - this.appTokenSecretManager, this.scheduler); + return new ApplicationMasterService(getRMContext(), this.scheduler); } } @Test public void testAuthorizedAccess() throws Exception { MyContainerManager containerManager = new MyContainerManager(); - MockRM rm = new MockRMWithAMS(containerManager); + final MockRM rm = new MockRMWithAMS(new Configuration(), containerManager); rm.start(); MockNM nm1 = rm.registerNode("localhost:1234", 5120); @@ -132,11 +127,11 @@ public void testAuthorizedAccess() throws Exception { nm1.nodeHeartbeat(true); int waitCount = 0; - while (containerManager.containerEnv == null && waitCount++ < 20) { + while (containerManager.amContainerEnv == null && waitCount++ < 20) { LOG.info("Waiting for AM Launch to happen.."); Thread.sleep(1000); } - Assert.assertNotNull(containerManager.containerEnv); + Assert.assertNotNull(containerManager.amContainerEnv); RMAppAttempt attempt = app.getCurrentAppAttempt(); ApplicationAttemptId applicationAttemptId = attempt.getAppAttemptId(); @@ -145,13 +140,10 @@ public void testAuthorizedAccess() throws Exception { // Create a client to the RM. 
final Configuration conf = rm.getConfig(); final YarnRPC rpc = YarnRPC.create(conf); - final String serviceAddr = conf.get( - YarnConfiguration.RM_SCHEDULER_ADDRESS, - YarnConfiguration.DEFAULT_RM_SCHEDULER_ADDRESS); UserGroupInformation currentUser = UserGroupInformation .createRemoteUser(applicationAttemptId.toString()); - String tokenURLEncodedStr = containerManager.containerEnv + String tokenURLEncodedStr = containerManager.amContainerEnv .get(ApplicationConstants.APPLICATION_MASTER_TOKEN_ENV_NAME); LOG.info("AppMasterToken is " + tokenURLEncodedStr); Token token = new Token(); @@ -162,8 +154,8 @@ public void testAuthorizedAccess() throws Exception { .doAs(new PrivilegedAction() { @Override public AMRMProtocol run() { - return (AMRMProtocol) rpc.getProxy(AMRMProtocol.class, NetUtils - .createSocketAddr(serviceAddr), conf); + return (AMRMProtocol) rpc.getProxy(AMRMProtocol.class, rm + .getApplicationMasterService().getBindAddress(), conf); } }); @@ -181,7 +173,7 @@ public AMRMProtocol run() { @Test public void testUnauthorizedAccess() throws Exception { MyContainerManager containerManager = new MyContainerManager(); - MockRM rm = new MockRMWithAMS(containerManager); + MockRM rm = new MockRMWithAMS(new Configuration(), containerManager); rm.start(); MockNM nm1 = rm.registerNode("localhost:1234", 5120); @@ -191,11 +183,11 @@ public void testUnauthorizedAccess() throws Exception { nm1.nodeHeartbeat(true); int waitCount = 0; - while (containerManager.containerEnv == null && waitCount++ < 20) { + while (containerManager.amContainerEnv == null && waitCount++ < 20) { LOG.info("Waiting for AM Launch to happen.."); Thread.sleep(1000); } - Assert.assertNotNull(containerManager.containerEnv); + Assert.assertNotNull(containerManager.amContainerEnv); RMAppAttempt attempt = app.getCurrentAppAttempt(); ApplicationAttemptId applicationAttemptId = attempt.getAppAttemptId(); @@ -210,7 +202,7 @@ public void testUnauthorizedAccess() throws Exception { UserGroupInformation currentUser = UserGroupInformation .createRemoteUser(applicationAttemptId.toString()); - String tokenURLEncodedStr = containerManager.containerEnv + String tokenURLEncodedStr = containerManager.amContainerEnv .get(ApplicationConstants.APPLICATION_MASTER_TOKEN_ENV_NAME); LOG.info("AppMasterToken is " + tokenURLEncodedStr); Token token = new Token(); diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java index 882115b6658..df199faaf8f 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java @@ -27,8 +27,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.MockApps; -import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationAccessType; +import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; import 
org.apache.hadoop.yarn.conf.YarnConfiguration; @@ -37,7 +37,6 @@ import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; -import org.apache.hadoop.yarn.security.ApplicationTokenSecretManager; import org.apache.hadoop.yarn.security.client.ClientToAMSecretManager; import org.apache.hadoop.yarn.server.resourcemanager.recovery.MemStore; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.MockRMApp; @@ -93,7 +92,7 @@ public static RMContext mockRMContext(int n, long time) { AMLivelinessMonitor amLivelinessMonitor = new AMLivelinessMonitor( rmDispatcher); return new RMContextImpl(new MemStore(), rmDispatcher, - containerAllocationExpirer, amLivelinessMonitor, null) { + containerAllocationExpirer, amLivelinessMonitor, null, null) { @Override public ConcurrentMap getRMApps() { return map; @@ -336,9 +335,9 @@ public void testRMAppSubmit() throws Exception { RMContext rmContext = mockRMContext(0, now - 10); ResourceScheduler scheduler = new CapacityScheduler(); - ApplicationMasterService masterService = new ApplicationMasterService(rmContext, - new ApplicationTokenSecretManager(), scheduler); Configuration conf = new Configuration(); + ApplicationMasterService masterService = + new ApplicationMasterService(rmContext, scheduler); TestRMAppManager appMonitor = new TestRMAppManager(rmContext, new ClientToAMSecretManager(), scheduler, masterService, new ApplicationACLsManager(conf), conf); @@ -384,9 +383,9 @@ public void testRMAppSubmitWithQueueAndName() throws Exception { RMContext rmContext = mockRMContext(1, now - 10); ResourceScheduler scheduler = new CapacityScheduler(); - ApplicationMasterService masterService = new ApplicationMasterService(rmContext, - new ApplicationTokenSecretManager(), scheduler); Configuration conf = new Configuration(); + ApplicationMasterService masterService = + new ApplicationMasterService(rmContext, scheduler); TestRMAppManager appMonitor = new TestRMAppManager(rmContext, new ClientToAMSecretManager(), scheduler, masterService, new ApplicationACLsManager(conf), conf); @@ -432,9 +431,9 @@ public void testRMAppSubmitError() throws Exception { // specify 1 here and use same appId below so it gets duplicate entry RMContext rmContext = mockRMContext(1, now - 10); ResourceScheduler scheduler = new CapacityScheduler(); - ApplicationMasterService masterService = new ApplicationMasterService(rmContext, - new ApplicationTokenSecretManager(), scheduler); Configuration conf = new Configuration(); + ApplicationMasterService masterService = + new ApplicationMasterService(rmContext, scheduler); TestRMAppManager appMonitor = new TestRMAppManager(rmContext, new ClientToAMSecretManager(), scheduler, masterService, new ApplicationACLsManager(conf), conf); diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterLauncher.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterLauncher.java index 1fae7a4b3ed..5948ffe57c8 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterLauncher.java +++ 
b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterLauncher.java @@ -18,7 +18,6 @@ package org.apache.hadoop.yarn.server.resourcemanager; -import java.io.IOException; import java.util.Map; import org.apache.commons.logging.Log; @@ -33,7 +32,6 @@ import org.apache.hadoop.yarn.api.protocolrecords.StopContainerRequest; import org.apache.hadoop.yarn.api.protocolrecords.StopContainerResponse; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; -import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.exceptions.YarnRemoteException; import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncher; @@ -123,14 +121,13 @@ public MockRMWithCustomAMLauncher(Configuration conf, @Override protected ApplicationMasterLauncher createAMLauncher() { - return new ApplicationMasterLauncher(super.appTokenSecretManager, - super.clientToAMSecretManager, getRMContext()) { + return new ApplicationMasterLauncher(super.clientToAMSecretManager, + getRMContext()) { @Override protected Runnable createRunnableLauncher(RMAppAttempt application, AMLauncherEventType event) { return new AMLauncher(context, application, event, - applicationTokenSecretManager, clientToAMSecretManager, - getConfig()) { + clientToAMSecretManager, getConfig()) { @Override protected ContainerManager getContainerMgrProxy( ContainerId containerId) { diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java index ccd8d57a0d1..96b25443cd4 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java @@ -78,7 +78,7 @@ public void setUp() throws Exception { rmContext = new RMContextImpl(new MemStore(), rmDispatcher, null, null, - mock(DelegationTokenRenewer.class)); + mock(DelegationTokenRenewer.class), null); scheduler = mock(YarnScheduler.class); doAnswer( new Answer() { diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMLaunchFailure.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMLaunchFailure.java index 2ee54311f9e..05ae726e3ad 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMLaunchFailure.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMLaunchFailure.java @@ -44,7 +44,6 @@ import org.apache.hadoop.yarn.event.EventHandler; import 
org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; -import org.apache.hadoop.yarn.security.ApplicationTokenSecretManager; import org.apache.hadoop.yarn.server.resourcemanager.ClientRMService; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl; @@ -55,6 +54,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Allocation; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.security.ApplicationTokenSecretManager; import org.junit.After; import org.junit.Before; import org.junit.Test; diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java index 08a9198e5be..ec323ea59c8 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java @@ -48,7 +48,6 @@ import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; -import org.apache.hadoop.yarn.security.ApplicationTokenSecretManager; import org.apache.hadoop.yarn.server.resourcemanager.ClientRMService; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl; @@ -62,6 +61,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Allocation; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.security.ApplicationTokenSecretManager; import org.apache.hadoop.yarn.server.security.ContainerTokenSecretManager; import org.junit.After; import org.junit.Before; diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMExpiry.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMExpiry.java index 363fe57d603..deb71a9ea8d 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMExpiry.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMExpiry.java @@ -73,7 +73,7 @@ public void setUp() { // Dispatcher that processes events inline Dispatcher dispatcher = new InlineDispatcher(); RMContext context = new RMContextImpl(new MemStore(), 
dispatcher, null, - null, null); + null, null, null); dispatcher.register(SchedulerEventType.class, new InlineDispatcher.EmptyEventHandler()); dispatcher.register(RMNodeEventType.class, diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestRMNMRPCResponseId.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestRMNMRPCResponseId.java index cd19209bce1..ef7d4b2f9fe 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestRMNMRPCResponseId.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestRMNMRPCResponseId.java @@ -67,7 +67,7 @@ public void handle(Event event) { } }); RMContext context = - new RMContextImpl(new MemStore(), dispatcher, null, null, null); + new RMContextImpl(new MemStore(), dispatcher, null, null, null, null); dispatcher.register(RMNodeEventType.class, new ResourceManager.NodeEventDispatcher(context)); NodesListManager nodesListManager = new NodesListManager(context); diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java index ceba12ad2e4..5782d9189a0 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java @@ -34,7 +34,6 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.event.AsyncDispatcher; import org.apache.hadoop.yarn.event.EventHandler; -import org.apache.hadoop.yarn.security.ApplicationTokenSecretManager; import org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl; @@ -48,7 +47,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAllocationExpirer; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler; - +import org.apache.hadoop.yarn.server.resourcemanager.security.ApplicationTokenSecretManager; import org.junit.Before; import org.junit.Test; @@ -118,8 +117,10 @@ public void setUp() throws Exception { ContainerAllocationExpirer containerAllocationExpirer = mock(ContainerAllocationExpirer.class); AMLivelinessMonitor amLivelinessMonitor = mock(AMLivelinessMonitor.class); - this.rmContext = new RMContextImpl(new MemStore(), rmDispatcher, - containerAllocationExpirer, amLivelinessMonitor, null); + this.rmContext = + new RMContextImpl(new MemStore(), rmDispatcher, + containerAllocationExpirer, amLivelinessMonitor, 
null, + new ApplicationTokenSecretManager(conf)); rmDispatcher.register(RMAppAttemptEventType.class, new TestApplicationAttemptEventDispatcher(this.rmContext)); @@ -142,9 +143,8 @@ protected RMApp createNewTestApp() { String clientTokenStr = "bogusstring"; ApplicationStore appStore = mock(ApplicationStore.class); YarnScheduler scheduler = mock(YarnScheduler.class); - ApplicationMasterService masterService = - new ApplicationMasterService(rmContext, - new ApplicationTokenSecretManager(), scheduler); + ApplicationMasterService masterService = + new ApplicationMasterService(rmContext, scheduler); RMApp application = new RMAppImpl(applicationId, rmContext, conf, name, user, diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java index d962df841e8..44f47e474f8 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java @@ -17,9 +17,15 @@ */ package org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt; -import static org.junit.Assert.*; -import static org.mockito.Matchers.*; -import static org.mockito.Mockito.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; import java.util.Collections; import java.util.List; @@ -61,6 +67,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAddedSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEventType; +import org.apache.hadoop.yarn.server.resourcemanager.security.ApplicationTokenSecretManager; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -136,8 +143,10 @@ public void setUp() throws Exception { ContainerAllocationExpirer containerAllocationExpirer = mock(ContainerAllocationExpirer.class); AMLivelinessMonitor amLivelinessMonitor = mock(AMLivelinessMonitor.class); - rmContext = new RMContextImpl(new MemStore(), rmDispatcher, - containerAllocationExpirer, amLivelinessMonitor, null); + rmContext = + new RMContextImpl(new MemStore(), rmDispatcher, + containerAllocationExpirer, amLivelinessMonitor, null, + new ApplicationTokenSecretManager(new Configuration())); scheduler = mock(YarnScheduler.class); masterService = mock(ApplicationMasterService.class); diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java 
b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java index 52a67cf0c51..5dc6dfbe6dc 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java @@ -18,10 +18,14 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; -import static org.mockito.Mockito.*; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.when; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.Container; @@ -42,6 +46,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApp; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode; +import org.apache.hadoop.yarn.server.resourcemanager.security.ApplicationTokenSecretManager; public class TestUtils { private static final Log LOG = LogFactory.getLog(TestUtils.class); @@ -74,8 +79,9 @@ public EventHandler getEventHandler() { ContainerAllocationExpirer cae = new ContainerAllocationExpirer(nullDispatcher); - RMContext rmContext = - new RMContextImpl(null, nullDispatcher, cae, null, null); + RMContext rmContext = + new RMContextImpl(null, nullDispatcher, cae, null, null, + new ApplicationTokenSecretManager(new Configuration())); return rmContext; } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java index 88e6e63e17b..123d235846e 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java @@ -85,7 +85,8 @@ public void testFifoSchedulerCapacityWhenNoNMs() { @Test public void testAppAttemptMetrics() throws Exception { AsyncDispatcher dispatcher = new InlineDispatcher(); - RMContext rmContext = new RMContextImpl(null, dispatcher, null, null, null); + RMContext rmContext = + new RMContextImpl(null, dispatcher, null, null, null, null); FifoScheduler schedular = new FifoScheduler(); schedular.reinitialize(new Configuration(), null, rmContext); diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestApplicationTokens.java 
b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestApplicationTokens.java new file mode 100644 index 00000000000..ae1ced715ab --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestApplicationTokens.java @@ -0,0 +1,234 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.resourcemanager.security; + +import java.security.PrivilegedAction; + +import javax.crypto.SecretKey; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.token.Token; +import org.apache.hadoop.security.token.TokenIdentifier; +import org.apache.hadoop.yarn.api.AMRMProtocol; +import org.apache.hadoop.yarn.api.ApplicationConstants; +import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest; +import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest; +import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; +import org.apache.hadoop.yarn.ipc.YarnRPC; +import org.apache.hadoop.yarn.server.resourcemanager.MockNM; +import org.apache.hadoop.yarn.server.resourcemanager.MockRM; +import org.apache.hadoop.yarn.server.resourcemanager.TestAMAuthorization.MockRMWithAMS; +import org.apache.hadoop.yarn.server.resourcemanager.TestAMAuthorization.MyContainerManager; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt; +import org.apache.hadoop.yarn.util.BuilderUtils; +import org.apache.hadoop.yarn.util.Records; +import org.junit.Assert; +import org.junit.Test; + +public class TestApplicationTokens { + + private static final Log LOG = LogFactory.getLog(TestApplicationTokens.class); + + /** + * Validate that application tokens are unusable after the + * application-finishes. 
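+ *
+ * An informal sketch of the sequence exercised below (same local names as the
+ * test body):
+ * <pre>{@code
+ * rmClient.registerApplicationMaster(request);       // token password is live in the RM
+ * rmClient.finishApplicationMaster(finishAMRequest); // RM drops the per-attempt password
+ * // a fresh proxy authenticated with the same token should now be rejected
+ * rmClient.allocate(allocateRequest);                // expected to fail
+ * }</pre>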
+ * + * @throws Exception + */ + @Test + public void testTokenExpiry() throws Exception { + + MyContainerManager containerManager = new MyContainerManager(); + final MockRM rm = new MockRMWithAMS(new Configuration(), containerManager); + rm.start(); + + try { + MockNM nm1 = rm.registerNode("localhost:1234", 5120); + + RMApp app = rm.submitApp(1024); + + nm1.nodeHeartbeat(true); + + int waitCount = 0; + while (containerManager.amContainerEnv == null && waitCount++ < 20) { + LOG.info("Waiting for AM Launch to happen.."); + Thread.sleep(1000); + } + Assert.assertNotNull(containerManager.amContainerEnv); + + RMAppAttempt attempt = app.getCurrentAppAttempt(); + ApplicationAttemptId applicationAttemptId = attempt.getAppAttemptId(); + + // Create a client to the RM. + final Configuration conf = rm.getConfig(); + final YarnRPC rpc = YarnRPC.create(conf); + + UserGroupInformation currentUser = + UserGroupInformation + .createRemoteUser(applicationAttemptId.toString()); + String tokenURLEncodedStr = + containerManager.amContainerEnv + .get(ApplicationConstants.APPLICATION_MASTER_TOKEN_ENV_NAME); + LOG.info("AppMasterToken is " + tokenURLEncodedStr); + Token token = new Token(); + token.decodeFromUrlString(tokenURLEncodedStr); + currentUser.addToken(token); + + AMRMProtocol rmClient = createRMClient(rm, conf, rpc, currentUser); + + RegisterApplicationMasterRequest request = + Records.newRecord(RegisterApplicationMasterRequest.class); + request.setApplicationAttemptId(applicationAttemptId); + rmClient.registerApplicationMaster(request); + + FinishApplicationMasterRequest finishAMRequest = + Records.newRecord(FinishApplicationMasterRequest.class); + finishAMRequest.setAppAttemptId(applicationAttemptId); + finishAMRequest + .setFinishApplicationStatus(FinalApplicationStatus.SUCCEEDED); + finishAMRequest.setDiagnostics("diagnostics"); + finishAMRequest.setTrackingUrl("url"); + rmClient.finishApplicationMaster(finishAMRequest); + + // Now simulate trying to allocate. RPC call itself should throw auth + // exception. + rpc.stopProxy(rmClient, conf); // To avoid using cached client + rmClient = createRMClient(rm, conf, rpc, currentUser); + request.setApplicationAttemptId(BuilderUtils.newApplicationAttemptId( + BuilderUtils.newApplicationId(12345, 78), 987)); + AllocateRequest allocateRequest = + Records.newRecord(AllocateRequest.class); + allocateRequest.setApplicationAttemptId(applicationAttemptId); + try { + rmClient.allocate(allocateRequest); + Assert.fail("You got to be kidding me! " + + "Using App tokens after app-finish should fail!"); + } catch (Throwable t) { + LOG.info("Exception found is ", t); + // The exception will still have the earlier appAttemptId as it picks it + // up from the token. + Assert.assertTrue(t.getCause().getMessage().contains( + "Password not found for ApplicationAttempt " + + applicationAttemptId.toString())); + } + + } finally { + rm.stop(); + } + } + + /** + * Validate master-key-roll-over and that tokens are usable even after + * master-key-roll-over. 
+ * + * @throws Exception + */ + @Test + public void testMasterKeyRollOver() throws Exception { + + Configuration config = new Configuration(); + MyContainerManager containerManager = new MyContainerManager(); + final MockRM rm = new MockRMWithAMS(config, containerManager); + rm.start(); + + try { + MockNM nm1 = rm.registerNode("localhost:1234", 5120); + + RMApp app = rm.submitApp(1024); + + nm1.nodeHeartbeat(true); + + int waitCount = 0; + while (containerManager.amContainerEnv == null && waitCount++ < 20) { + LOG.info("Waiting for AM Launch to happen.."); + Thread.sleep(1000); + } + Assert.assertNotNull(containerManager.amContainerEnv); + + RMAppAttempt attempt = app.getCurrentAppAttempt(); + ApplicationAttemptId applicationAttemptId = attempt.getAppAttemptId(); + + // Create a client to the RM. + final Configuration conf = rm.getConfig(); + final YarnRPC rpc = YarnRPC.create(conf); + + UserGroupInformation currentUser = + UserGroupInformation + .createRemoteUser(applicationAttemptId.toString()); + String tokenURLEncodedStr = + containerManager.amContainerEnv + .get(ApplicationConstants.APPLICATION_MASTER_TOKEN_ENV_NAME); + LOG.info("AppMasterToken is " + tokenURLEncodedStr); + Token token = new Token(); + token.decodeFromUrlString(tokenURLEncodedStr); + currentUser.addToken(token); + + AMRMProtocol rmClient = createRMClient(rm, conf, rpc, currentUser); + + RegisterApplicationMasterRequest request = + Records.newRecord(RegisterApplicationMasterRequest.class); + request.setApplicationAttemptId(applicationAttemptId); + rmClient.registerApplicationMaster(request); + + // One allocate call. + AllocateRequest allocateRequest = + Records.newRecord(AllocateRequest.class); + allocateRequest.setApplicationAttemptId(applicationAttemptId); + Assert.assertFalse(rmClient.allocate(allocateRequest).getAMResponse() + .getReboot()); + + // Simulate a master-key-roll-over + ApplicationTokenSecretManager appTokenSecretManager = + rm.getRMContext().getApplicationTokenSecretManager(); + SecretKey oldKey = appTokenSecretManager.getMasterKey(); + appTokenSecretManager.rollMasterKey(); + SecretKey newKey = appTokenSecretManager.getMasterKey(); + Assert.assertFalse("Master key should have changed!", + oldKey.equals(newKey)); + + // Another allocate call. Should continue to work. 
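The roll-over assertions in this test depend on application tokens issued before rollMasterKey() remaining usable afterwards. The following is a minimal standalone sketch of why that needs care, using plain JCE HMACs rather than the RM's actual ApplicationTokenSecretManager code; the class name and attempt id are made up for illustration. A password derived from the old master key cannot be re-verified against the new key alone, so the verifier has to retain the old key (or the stored per-attempt password) until running AMs finish.

    import java.util.Arrays;
    import javax.crypto.KeyGenerator;
    import javax.crypto.Mac;
    import javax.crypto.SecretKey;

    public class MasterKeyRollOverSketch {
      // Compute an HMAC "password" over a token identifier with the given master key.
      static byte[] password(SecretKey masterKey, byte[] identifier) throws Exception {
        Mac mac = Mac.getInstance("HmacSHA1");
        mac.init(masterKey);
        return mac.doFinal(identifier);
      }

      public static void main(String[] args) throws Exception {
        KeyGenerator keyGen = KeyGenerator.getInstance("HmacSHA1");
        byte[] attemptId = "appattempt_1234567890123_0001_000001".getBytes("UTF-8");

        SecretKey oldKey = keyGen.generateKey();
        byte[] issued = password(oldKey, attemptId);   // token handed to the AM

        SecretKey newKey = keyGen.generateKey();        // simulated roll-over
        // false: the new key cannot reproduce the previously issued password ...
        System.out.println(Arrays.equals(issued, password(newKey, attemptId)));
        // ... true: only the old key (or a stored copy of the password) can,
        // which is what the allocate-after-rollMasterKey() assertion exercises.
        System.out.println(Arrays.equals(issued, password(oldKey, attemptId)));
      }
    }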
+ rpc.stopProxy(rmClient, conf); // To avoid using cached client + rmClient = createRMClient(rm, conf, rpc, currentUser); + allocateRequest = Records.newRecord(AllocateRequest.class); + allocateRequest.setApplicationAttemptId(applicationAttemptId); + Assert.assertFalse(rmClient.allocate(allocateRequest).getAMResponse() + .getReboot()); + } finally { + rm.stop(); + } + } + + private AMRMProtocol createRMClient(final MockRM rm, + final Configuration conf, final YarnRPC rpc, + UserGroupInformation currentUser) { + return currentUser.doAs(new PrivilegedAction() { + @Override + public AMRMProtocol run() { + return (AMRMProtocol) rpc.getProxy(AMRMProtocol.class, rm + .getApplicationMasterService().getBindAddress(), conf); + } + }); + } +} diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java index 3a2d562cd54..6689fd57773 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java @@ -152,7 +152,7 @@ public static RMContext mockRMContext(int numApps, int racks, int numNodes, for (RMNode node : deactivatedNodes) { deactivatedNodesMap.put(node.getHostName(), node); } - return new RMContextImpl(new MemStore(), null, null, null, null) { + return new RMContextImpl(new MemStore(), null, null, null, null, null) { @Override public ConcurrentMap getRMApps() { return applicationsMaps; diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java index 226bccded55..a1dc277a838 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java @@ -78,12 +78,12 @@ import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.ipc.YarnRPC; import org.apache.hadoop.yarn.security.ApplicationTokenIdentifier; -import org.apache.hadoop.yarn.security.ApplicationTokenSecretManager; import org.apache.hadoop.yarn.security.ContainerTokenIdentifier; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState; +import org.apache.hadoop.yarn.server.resourcemanager.security.ApplicationTokenSecretManager; import org.apache.hadoop.yarn.util.BuilderUtils; import org.apache.hadoop.yarn.util.ConverterUtils; import org.apache.hadoop.yarn.util.Records; @@ -387,20 +387,19 @@ private AMRMProtocol 
submitAndRegisterApplication( appAttempt.getAppAttemptId().toString()); // Ask for a container from the RM - String schedulerAddressString = conf.get( - YarnConfiguration.RM_SCHEDULER_ADDRESS, - YarnConfiguration.DEFAULT_RM_SCHEDULER_ADDRESS); - final InetSocketAddress schedulerAddr = NetUtils - .createSocketAddr(schedulerAddressString); + final InetSocketAddress schedulerAddr = + resourceManager.getApplicationMasterService().getBindAddress(); ApplicationTokenIdentifier appTokenIdentifier = new ApplicationTokenIdentifier( appAttempt.getAppAttemptId()); - ApplicationTokenSecretManager appTokenSecretManager = new ApplicationTokenSecretManager(); - appTokenSecretManager.setMasterKey(ApplicationTokenSecretManager - .createSecretKey("Dummy".getBytes())); // TODO: FIX. Be in Sync with - // ResourceManager.java - Token appToken = new Token( - appTokenIdentifier, appTokenSecretManager); - appToken.setService(new Text(schedulerAddressString)); + ApplicationTokenSecretManager appTokenSecretManager = + new ApplicationTokenSecretManager(conf); + appTokenSecretManager.setMasterKey(resourceManager + .getApplicationTokenSecretManager().getMasterKey()); + Token appToken = + new Token(appTokenIdentifier, + appTokenSecretManager); + appToken.setService(new Text(schedulerAddr.getHostName() + ":" + + schedulerAddr.getPort())); currentUser.addToken(appToken); AMRMProtocol scheduler = currentUser From b89b6bd75f1c4d86778242c30d018aa13cb9af26 Mon Sep 17 00:00:00 2001 From: Thomas Graves Date: Tue, 17 Apr 2012 19:21:34 +0000 Subject: [PATCH 42/57] MAPREDUCE-4160. some mrv1 ant tests fail with timeout - due to 4156 (tgraves) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1327233 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 3 +++ .../src/java/org/apache/hadoop/mapred/JobInProgress.java | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 3b63624de8d..a879ce7305c 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -347,6 +347,9 @@ Release 0.23.3 - UNRELEASED MAPREDUCE-4156. ant build fails compiling JobInProgress (tgraves) + MAPREDUCE-4160. some mrv1 ant tests fail with timeout - due to 4156 + (tgraves) + Release 0.23.2 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-mapreduce-project/src/java/org/apache/hadoop/mapred/JobInProgress.java b/hadoop-mapreduce-project/src/java/org/apache/hadoop/mapred/JobInProgress.java index 59256b40d76..736a30cbb2e 100644 --- a/hadoop-mapreduce-project/src/java/org/apache/hadoop/mapred/JobInProgress.java +++ b/hadoop-mapreduce-project/src/java/org/apache/hadoop/mapred/JobInProgress.java @@ -2731,7 +2731,7 @@ public synchronized boolean completedTask(TaskInProgress tip, } TaskFinishedEvent tfe = new TaskFinishedEvent(tip.getTIPId(), - null, tip.getExecFinishTime(), taskType, + statusAttemptID, tip.getExecFinishTime(), taskType, TaskStatus.State.SUCCEEDED.toString(), new org.apache.hadoop.mapreduce.Counters(status.getCounters())); From ca9f62121e829aeb9fc67122a78ba9f673eba074 Mon Sep 17 00:00:00 2001 From: Thomas Graves Date: Tue, 17 Apr 2012 20:05:44 +0000 Subject: [PATCH 43/57] MAPREDUCE-4151. 
RM scheduler web page should filter apps to those that are relevant to scheduling (Jason Lowe via tgraves) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1327263 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 3 +++ .../resourcemanager/webapp/AppsBlock.java | 18 ++++++++++++++---- .../resourcemanager/webapp/AppsList.java | 7 ++++--- .../resourcemanager/webapp/RmController.java | 10 ++++++++++ .../resourcemanager/webapp/TestRMWebApp.java | 6 ++++++ 5 files changed, 37 insertions(+), 7 deletions(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index a879ce7305c..e278fe3f5f2 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -262,6 +262,9 @@ Release 0.23.3 - UNRELEASED ApplicationTokenSecretManager and roll it every so often. (Vinod Kumar Vavilapalli via sseth) + MAPREDUCE-4151. RM scheduler web page should filter apps to those that + are relevant to scheduling (Jason Lowe via tgraves) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsBlock.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsBlock.java index d517ce117a9..7e8fd746d01 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsBlock.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsBlock.java @@ -23,6 +23,9 @@ import static org.apache.hadoop.yarn.webapp.view.JQueryUI._PROGRESSBAR; import static org.apache.hadoop.yarn.webapp.view.JQueryUI._PROGRESSBAR_VALUE; +import java.util.Collection; +import java.util.HashSet; + import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState; import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo; @@ -60,10 +63,17 @@ class AppsBlock extends HtmlBlock { th(".ui", "Tracking UI")._()._(). tbody(); int i = 0; - String reqState = $(APP_STATE); - reqState = (reqState == null ? 
"" : reqState); + Collection reqAppStates = null; + String reqStateString = $(APP_STATE); + if (reqStateString != null && !reqStateString.isEmpty()) { + String[] appStateStrings = reqStateString.split(","); + reqAppStates = new HashSet(appStateStrings.length); + for(String stateString : appStateStrings) { + reqAppStates.add(RMAppState.valueOf(stateString)); + } + } for (RMApp app : list.apps.values()) { - if (!reqState.isEmpty() && app.getState() != RMAppState.valueOf(reqState)) { + if (reqAppStates != null && !reqAppStates.contains(app.getState())) { continue; } AppInfo appInfo = new AppInfo(app, true); @@ -100,7 +110,7 @@ class AppsBlock extends HtmlBlock { if (list.rendering == Render.JS_ARRAY) { echo("\n"); } } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsList.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsList.java index 2efcf4258fa..5d0060fe438 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsList.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsList.java @@ -26,6 +26,7 @@ import static org.apache.hadoop.yarn.webapp.view.Jsons.appendSortable; import java.io.PrintWriter; +import java.util.Collection; import java.util.concurrent.ConcurrentMap; import org.apache.hadoop.yarn.api.records.ApplicationId; @@ -53,12 +54,12 @@ class AppsList implements ToJSON { apps = rmContext.getRMApps(); } - void toDataTableArrays(String requiredAppState, PrintWriter out) { + void toDataTableArrays(Collection requiredAppStates, PrintWriter out) { out.append('['); boolean first = true; for (RMApp app : apps.values()) { - if (requiredAppState != null && !requiredAppState.isEmpty() - && app.getState() != RMAppState.valueOf(requiredAppState)) { + if (requiredAppStates != null && + !requiredAppStates.contains(app.getState())) { continue; } AppInfo appInfo = new AppInfo(app, true); diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmController.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmController.java index c8778b87be8..74c10264ac2 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmController.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmController.java @@ -31,14 +31,17 @@ import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler; import 
org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo; import org.apache.hadoop.yarn.server.security.ApplicationACLsManager; import org.apache.hadoop.yarn.util.Apps; +import org.apache.hadoop.yarn.util.StringHelper; import org.apache.hadoop.yarn.util.Times; import org.apache.hadoop.yarn.webapp.Controller; import org.apache.hadoop.yarn.webapp.ResponseInfo; +import org.apache.hadoop.yarn.webapp.YarnWebParams; import com.google.inject.Inject; @@ -123,6 +126,13 @@ public void nodes() { } public void scheduler() { + // limit applications to those in states relevant to scheduling + set(YarnWebParams.APP_STATE, StringHelper.cjoin( + RMAppState.NEW.toString(), + RMAppState.SUBMITTED.toString(), + RMAppState.ACCEPTED.toString(), + RMAppState.RUNNING.toString())); + ResourceManager rm = getInstance(ResourceManager.class); ResourceScheduler rs = rm.getResourceScheduler(); if (rs == null || rs instanceof CapacityScheduler) { diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java index 6689fd57773..588930f6efb 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java @@ -47,6 +47,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler; import org.apache.hadoop.yarn.server.security.ApplicationACLsManager; +import org.apache.hadoop.yarn.util.StringHelper; import org.apache.hadoop.yarn.webapp.WebApps; import org.apache.hadoop.yarn.webapp.YarnWebParams; import org.apache.hadoop.yarn.webapp.test.WebAppTests; @@ -93,6 +94,11 @@ public void configure(Binder binder) { rmViewInstance.set(YarnWebParams.APP_STATE, RMAppState.RUNNING.toString()); rmViewInstance.render(); WebAppTests.flushOutput(injector); + + rmViewInstance.set(YarnWebParams.APP_STATE, StringHelper.cjoin( + RMAppState.ACCEPTED.toString(), RMAppState.RUNNING.toString())); + rmViewInstance.render(); + WebAppTests.flushOutput(injector); } @Test public void testNodesPage() { From a15e69cb9fcd64153b7b3ef6f319b04f565f2ad0 Mon Sep 17 00:00:00 2001 From: Robert Joseph Evans Date: Tue, 17 Apr 2012 22:05:42 +0000 Subject: [PATCH 44/57] MAPREDUCE-4134. Remove references of mapred.child.ulimit etc. 
since they are not being used any more (Ravi Prakash via bobby) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1327304 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 3 + .../org/apache/hadoop/mapred/JobConf.java | 56 +++++++++---------- .../apache/hadoop/mapreduce/MRJobConfig.java | 4 -- .../hadoop/mapreduce/util/ConfigUtil.java | 4 -- .../src/main/resources/mapred-default.xml | 17 ------ .../src/java/mapred-default.xml | 17 ------ .../apache/hadoop/mapred/MapTaskRunner.java | 6 -- .../hadoop/mapred/ReduceTaskRunner.java | 6 -- .../org/apache/hadoop/mapred/TaskRunner.java | 38 +------------ 9 files changed, 31 insertions(+), 120 deletions(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index e278fe3f5f2..2c85b38b065 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -265,6 +265,9 @@ Release 0.23.3 - UNRELEASED MAPREDUCE-4151. RM scheduler web page should filter apps to those that are relevant to scheduling (Jason Lowe via tgraves) + MAPREDUCE-4134. Remove references of mapred.child.ulimit etc. since they + are not being used any more (Ravi Prakash via bobby) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java index 4d6787918f9..fde88dbe09c 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java @@ -187,9 +187,6 @@ public class JobConf extends Configuration { * /tmp and to set the heap maximum to be a gigabyte, pass a 'value' of: * -Xmx1024m -verbose:gc -Xloggc:/tmp/@taskid@.gc * - * The configuration variable {@link #MAPRED_TASK_ULIMIT} can be used to - * control the maximum virtual memory of the child processes. - * * The configuration variable {@link #MAPRED_TASK_ENV} can be used to pass * other environment variables to the child processes. * @@ -210,9 +207,6 @@ public class JobConf extends Configuration { * /tmp and to set the heap maximum to be a gigabyte, pass a 'value' of: * -Xmx1024m -verbose:gc -Xloggc:/tmp/@taskid@.gc * - * The configuration variable {@link #MAPRED_MAP_TASK_ULIMIT} can be used to - * control the maximum virtual memory of the map processes. - * * The configuration variable {@link #MAPRED_MAP_TASK_ENV} can be used to pass * other environment variables to the map processes. */ @@ -230,9 +224,6 @@ public class JobConf extends Configuration { * /tmp and to set the heap maximum to be a gigabyte, pass a 'value' of: * -Xmx1024m -verbose:gc -Xloggc:/tmp/@taskid@.gc * - * The configuration variable {@link #MAPRED_REDUCE_TASK_ULIMIT} can be used - * to control the maximum virtual memory of the reduce processes. - * * The configuration variable {@link #MAPRED_REDUCE_TASK_ENV} can be used to * pass process environment variables to the reduce processes. */ @@ -242,36 +233,32 @@ public class JobConf extends Configuration { public static final String DEFAULT_MAPRED_TASK_JAVA_OPTS = "-Xmx200m"; /** - * Configuration key to set the maximum virutal memory available to the child - * map and reduce tasks (in kilo-bytes). 
- * - * Note: This must be greater than or equal to the -Xmx passed to the JavaVM - * via {@link #MAPRED_TASK_JAVA_OPTS}, else the VM might not start. - * - * @deprecated Use {@link #MAPRED_MAP_TASK_ULIMIT} or - * {@link #MAPRED_REDUCE_TASK_ULIMIT} + * @deprecated + * Configuration key to set the maximum virtual memory available to the child + * map and reduce tasks (in kilo-bytes). This has been deprecated and will no + * longer have any effect. */ @Deprecated public static final String MAPRED_TASK_ULIMIT = "mapred.child.ulimit"; /** - * Configuration key to set the maximum virutal memory available to the - * map tasks (in kilo-bytes). - * - * Note: This must be greater than or equal to the -Xmx passed to the JavaVM - * via {@link #MAPRED_MAP_TASK_JAVA_OPTS}, else the VM might not start. + * @deprecated + * Configuration key to set the maximum virtual memory available to the + * map tasks (in kilo-bytes). This has been deprecated and will no + * longer have any effect. */ - public static final String MAPRED_MAP_TASK_ULIMIT = JobContext.MAP_ULIMIT; + @Deprecated + public static final String MAPRED_MAP_TASK_ULIMIT = "mapreduce.map.ulimit"; /** - * Configuration key to set the maximum virutal memory available to the - * reduce tasks (in kilo-bytes). - * - * Note: This must be greater than or equal to the -Xmx passed to the JavaVM - * via {@link #MAPRED_REDUCE_TASK_JAVA_OPTS}, else the VM might not start. + * @deprecated + * Configuration key to set the maximum virtual memory available to the + * reduce tasks (in kilo-bytes). This has been deprecated and will no + * longer have any effect. */ - public static final String MAPRED_REDUCE_TASK_ULIMIT = - JobContext.REDUCE_ULIMIT; + @Deprecated + public static final String MAPRED_REDUCE_TASK_ULIMIT = + "mapreduce.reduce.ulimit"; /** @@ -1966,6 +1953,15 @@ private void checkAndWarnDeprecation() { + " Instead use " + JobConf.MAPRED_JOB_MAP_MEMORY_MB_PROPERTY + " and " + JobConf.MAPRED_JOB_REDUCE_MEMORY_MB_PROPERTY); } + if(get(JobConf.MAPRED_TASK_ULIMIT) != null ) { + LOG.warn(JobConf.deprecatedString(JobConf.MAPRED_TASK_ULIMIT)); + } + if(get(JobConf.MAPRED_MAP_TASK_ULIMIT) != null ) { + LOG.warn(JobConf.deprecatedString(JobConf.MAPRED_MAP_TASK_ULIMIT)); + } + if(get(JobConf.MAPRED_REDUCE_TASK_ULIMIT) != null ) { + LOG.warn(JobConf.deprecatedString(JobConf.MAPRED_REDUCE_TASK_ULIMIT)); + } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java index 339deef3506..50b6eed7ce3 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java @@ -183,8 +183,6 @@ public interface MRJobConfig { public static final String MAP_JAVA_OPTS = "mapreduce.map.java.opts"; - public static final String MAP_ULIMIT = "mapreduce.map.ulimit"; - public static final String MAP_MAX_ATTEMPTS = "mapreduce.map.maxattempts"; public static final String MAP_DEBUG_SCRIPT = "mapreduce.map.debug.script"; @@ -243,8 +241,6 @@ public interface MRJobConfig { public static final String REDUCE_JAVA_OPTS = "mapreduce.reduce.java.opts"; - public static final String REDUCE_ULIMIT = "mapreduce.reduce.ulimit"; - public static final String 
MAPREDUCE_JOB_DIR = "mapreduce.job.dir"; public static final String REDUCE_MAX_ATTEMPTS = "mapreduce.reduce.maxattempts"; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ConfigUtil.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ConfigUtil.java index 438be6598fa..fde6aa4647f 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ConfigUtil.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ConfigUtil.java @@ -314,8 +314,6 @@ private static void addDeprecatedKeys() { new String[] {MRJobConfig.MAP_ENV}); Configuration.addDeprecation("mapred.map.child.java.opts", new String[] {MRJobConfig.MAP_JAVA_OPTS}); - Configuration.addDeprecation("mapred.map.child.ulimit", - new String[] {MRJobConfig.MAP_ULIMIT}); Configuration.addDeprecation("mapred.map.max.attempts", new String[] {MRJobConfig.MAP_MAX_ATTEMPTS}); Configuration.addDeprecation("mapred.map.task.debug.script", @@ -362,8 +360,6 @@ private static void addDeprecatedKeys() { new String[] {MRJobConfig.REDUCE_ENV}); Configuration.addDeprecation("mapred.reduce.child.java.opts", new String[] {MRJobConfig.REDUCE_JAVA_OPTS}); - Configuration.addDeprecation("mapred.reduce.child.ulimit", - new String[] {MRJobConfig.REDUCE_ULIMIT}); Configuration.addDeprecation("mapred.reduce.max.attempts", new String[] {MRJobConfig.REDUCE_MAX_ATTEMPTS}); Configuration.addDeprecation("mapred.reduce.parallel.copies", diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml index a2bfa8080f8..5c533e6f26e 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml @@ -411,9 +411,6 @@ /tmp and to set the heap maximum to be a gigabyte, pass a 'value' of: -Xmx1024m -verbose:gc -Xloggc:/tmp/@taskid@.gc - The configuration variable mapred.child.ulimit can be used to control the - maximum virtual memory of the child processes. - Usage of -Djava.library.path can cause programs to no longer function if hadoop native libraries are used. These values should instead be set as part of LD_LIBRARY_PATH in the map / reduce JVM env using the mapreduce.map.env and @@ -431,20 +428,6 @@ - - mapred.child.ulimit - - The maximum virtual memory, in KB, of a process launched by the - Map-Reduce framework. This can be used to control both the Mapper/Reducer - tasks and applications using Hadoop Pipes, Hadoop Streaming etc. - By default it is left unspecified to let cluster admins control it via - limits.conf and other such relevant mechanisms. - - Note: mapred.child.ulimit must be greater than or equal to the -Xmx passed to - JavaVM, else the VM might not start. 
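With the ulimit keys removed, the remaining per-task memory knobs in this area are the child JVM options (and, under YARN, the container sizes). A hedged sketch of the replacement, using the JobConf constants that the surrounding javadoc still references; the -Xmx values are arbitrary examples, not recommendations.

    import org.apache.hadoop.mapred.JobConf;

    public class UlimitReplacementSketch {
      public static void main(String[] args) {
        JobConf conf = new JobConf();
        // Bound task memory through the child JVM heap instead of a ulimit.
        conf.set(JobConf.MAPRED_MAP_TASK_JAVA_OPTS, "-Xmx512m");
        conf.set(JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS, "-Xmx1024m");
        // The removed key may still appear in old job configs; after this patch
        // it has no effect beyond the warning added to checkAndWarnDeprecation().
        conf.set("mapred.child.ulimit", "1048576");
      }
    }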
- - - mapreduce.admin.user.env LD_LIBRARY_PATH=$HADOOP_COMMON_HOME/lib/native diff --git a/hadoop-mapreduce-project/src/java/mapred-default.xml b/hadoop-mapreduce-project/src/java/mapred-default.xml index 0d0a91d7787..322e497ada4 100644 --- a/hadoop-mapreduce-project/src/java/mapred-default.xml +++ b/hadoop-mapreduce-project/src/java/mapred-default.xml @@ -403,9 +403,6 @@ For example, to enable verbose gc logging to a file named for the taskid in /tmp and to set the heap maximum to be a gigabyte, pass a 'value' of: -Xmx1024m -verbose:gc -Xloggc:/tmp/@taskid@.gc - - The configuration variable mapred.child.ulimit can be used to control the - maximum virtual memory of the child processes. @@ -419,20 +416,6 @@ - - mapred.child.ulimit - - The maximum virtual memory, in KB, of a process launched by the - Map-Reduce framework. This can be used to control both the Mapper/Reducer - tasks and applications using Hadoop Pipes, Hadoop Streaming etc. - By default it is left unspecified to let cluster admins control it via - limits.conf and other such relevant mechanisms. - - Note: mapred.child.ulimit must be greater than or equal to the -Xmx passed to - JavaVM, else the VM might not start. - - - mapreduce.map.log.level INFO diff --git a/hadoop-mapreduce-project/src/java/org/apache/hadoop/mapred/MapTaskRunner.java b/hadoop-mapreduce-project/src/java/org/apache/hadoop/mapred/MapTaskRunner.java index 286961ef7e0..20f1f9484ab 100644 --- a/hadoop-mapreduce-project/src/java/org/apache/hadoop/mapred/MapTaskRunner.java +++ b/hadoop-mapreduce-project/src/java/org/apache/hadoop/mapred/MapTaskRunner.java @@ -33,12 +33,6 @@ public String getChildJavaOpts(JobConf jobConf, String defaultValue) { super.getChildJavaOpts(jobConf, JobConf.DEFAULT_MAPRED_TASK_JAVA_OPTS)); } - - @Override - public int getChildUlimit(JobConf jobConf) { - return jobConf.getInt(JobConf.MAPRED_MAP_TASK_ULIMIT, - super.getChildUlimit(jobConf)); - } @Override public String getChildEnv(JobConf jobConf) { diff --git a/hadoop-mapreduce-project/src/java/org/apache/hadoop/mapred/ReduceTaskRunner.java b/hadoop-mapreduce-project/src/java/org/apache/hadoop/mapred/ReduceTaskRunner.java index 2a69714477d..9f6b21e0a97 100644 --- a/hadoop-mapreduce-project/src/java/org/apache/hadoop/mapred/ReduceTaskRunner.java +++ b/hadoop-mapreduce-project/src/java/org/apache/hadoop/mapred/ReduceTaskRunner.java @@ -41,12 +41,6 @@ public String getChildJavaOpts(JobConf jobConf, String defaultValue) { super.getChildJavaOpts(jobConf, JobConf.DEFAULT_MAPRED_TASK_JAVA_OPTS)); } - - @Override - public int getChildUlimit(JobConf jobConf) { - return jobConf.getInt(JobConf.MAPRED_REDUCE_TASK_ULIMIT, - super.getChildUlimit(jobConf)); - } @Override public String getChildEnv(JobConf jobConf) { diff --git a/hadoop-mapreduce-project/src/java/org/apache/hadoop/mapred/TaskRunner.java b/hadoop-mapreduce-project/src/java/org/apache/hadoop/mapred/TaskRunner.java index 2a300eedf89..deffe71ea3a 100644 --- a/hadoop-mapreduce-project/src/java/org/apache/hadoop/mapred/TaskRunner.java +++ b/hadoop-mapreduce-project/src/java/org/apache/hadoop/mapred/TaskRunner.java @@ -110,20 +110,7 @@ public void close() throws IOException {} public String getChildJavaOpts(JobConf jobConf, String defaultValue) { return jobConf.get(JobConf.MAPRED_TASK_JAVA_OPTS, defaultValue); } - - /** - * Get the maximum virtual memory of the child map/reduce tasks. 
- * @param jobConf job configuration - * @return the maximum virtual memory of the child task or -1 if - * none is specified - * @deprecated Use limits specific to the map or reduce tasks set via - * {@link JobConf#MAPRED_MAP_TASK_ULIMIT} or - * {@link JobConf#MAPRED_REDUCE_TASK_ULIMIT} - */ - @Deprecated - public int getChildUlimit(JobConf jobConf) { - return jobConf.getInt(JobConf.MAPRED_TASK_ULIMIT, -1); - } + /** * Get the environment variables for the child map/reduce tasks. @@ -188,8 +175,7 @@ public Void run() throws IOException { tracker.addToMemoryManager(t.getTaskID(), t.isMapTask(), conf); - // set memory limit using ulimit if feasible and necessary ... - List setup = getVMSetupCmd(); + List setup = new ArrayList(); // Set up the redirection of the task's stdout and stderr streams File[] logFiles = prepareLogFiles(taskid, t.isTaskCleanupTask()); @@ -310,26 +296,6 @@ void setupChildTaskConfiguration(LocalDirAllocator lDirAlloc) t.setJobFile(localTaskFile.toString()); } - /** - * @return - */ - private List getVMSetupCmd() { - - int ulimit = getChildUlimit(conf); - if (ulimit <= 0) { - return null; - } - List setup = null; - String[] ulimitCmd = Shell.getUlimitMemoryCommand(ulimit); - if (ulimitCmd != null) { - setup = new ArrayList(); - for (String arg : ulimitCmd) { - setup.add(arg); - } - } - return setup; - } - /** * Parse the given string and return an array of individual java opts. Split * on whitespace and replace the special string "@taskid@" with the task ID From c80dbe5e09ab1eb3c1b0277055f28717895d6dd9 Mon Sep 17 00:00:00 2001 From: Tsz-wo Sze Date: Tue, 17 Apr 2012 22:21:33 +0000 Subject: [PATCH 45/57] HDFS-2652. Add support for host-based delegation tokens. Contributed by Daryn Sharp git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1327309 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + .../java/org/apache/hadoop/hdfs/HAUtil.java | 32 ++++++---- .../apache/hadoop/hdfs/HftpFileSystem.java | 30 +++++---- .../delegation/DelegationTokenSelector.java | 37 ++++++----- .../ha/ConfiguredFailoverProxyProvider.java | 14 +++-- .../hadoop/hdfs/web/WebHdfsFileSystem.java | 38 +++++++++--- .../apache/hadoop/hdfs/MiniDFSCluster.java | 15 +++-- .../hadoop/hdfs/TestHftpDelegationToken.java | 56 +++++++++++++++++ ...TestClientProtocolWithDelegationToken.java | 7 +-- .../ha/TestDelegationTokensWithHA.java | 51 ++++++++++++--- .../hadoop/hdfs/web/TestWebHdfsUrl.java | 62 +++++++++++++++++++ 11 files changed, 266 insertions(+), 79 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 60ee1cc64d4..b13218a19af 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -860,6 +860,9 @@ Release 0.23.3 - UNRELEASED HDFS-3176. Use MD5MD5CRC32FileChecksum.readFields() in JsonUtil . (Kihwal Lee via szetszwo) + HDFS-2652. Add support for host-based delegation tokens. 
(Daryn Sharp via + szetszwo) + Release 0.23.2 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java index 943f47497c5..b56892537b0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java @@ -52,6 +52,9 @@ public class HAUtil { private static final Log LOG = LogFactory.getLog(HAUtil.class); + private static final DelegationTokenSelector tokenSelector = + new DelegationTokenSelector(); + private HAUtil() { /* Hidden constructor */ } /** @@ -241,25 +244,28 @@ public static boolean isTokenForLogicalUri( * one is found, clone it to also represent the underlying namenode address. * @param ugi the UGI to modify * @param haUri the logical URI for the cluster - * @param singleNNAddr one of the NNs in the cluster to which the token + * @param nnAddrs collection of NNs in the cluster to which the token * applies */ public static void cloneDelegationTokenForLogicalUri( UserGroupInformation ugi, URI haUri, - InetSocketAddress singleNNAddr) { - Text haService = buildTokenServiceForLogicalUri(haUri); + Collection nnAddrs) { + Text haService = HAUtil.buildTokenServiceForLogicalUri(haUri); Token haToken = - DelegationTokenSelector.selectHdfsDelegationToken(haService, ugi); - if (haToken == null) { - // no token - return; + tokenSelector.selectToken(haService, ugi.getTokens()); + if (haToken != null) { + for (InetSocketAddress singleNNAddr : nnAddrs) { + Token specificToken = + new Token(haToken); + SecurityUtil.setTokenService(specificToken, singleNNAddr); + ugi.addToken(specificToken); + LOG.debug("Mapped HA service delegation token for logical URI " + + haUri + " to namenode " + singleNNAddr); + } + } else { + LOG.debug("No HA service delegation token found for logical URI " + + haUri); } - Token specificToken = - new Token(haToken); - specificToken.setService(SecurityUtil.buildTokenService(singleNNAddr)); - ugi.addToken(specificToken); - LOG.debug("Mapped HA service delegation token for logical URI " + - haUri + " to namenode " + singleNNAddr); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java index 829190623a8..7151e9f9472 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java @@ -30,6 +30,7 @@ import java.text.ParseException; import java.text.SimpleDateFormat; import java.util.ArrayList; +import java.util.Collection; import java.util.TimeZone; import org.apache.hadoop.classification.InterfaceAudience; @@ -48,7 +49,6 @@ import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenRenewer; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector; import org.apache.hadoop.hdfs.server.common.JspHelper; -import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.tools.DelegationTokenFetcher; import org.apache.hadoop.hdfs.web.URLUtils; import org.apache.hadoop.io.Text; @@ -168,10 +168,7 @@ public void initialize(final URI name, final Configuration conf) protected void initDelegationToken() throws IOException { // look for hftp token, then try hdfs - Token token = 
selectHftpDelegationToken(); - if (token == null) { - token = selectHdfsDelegationToken(); - } + Token token = selectDelegationToken(); // if we don't already have a token, go get one over https boolean createdToken = false; @@ -192,14 +189,8 @@ protected void initDelegationToken() throws IOException { } } - protected Token selectHftpDelegationToken() { - Text serviceName = SecurityUtil.buildTokenService(nnSecureAddr); - return hftpTokenSelector.selectToken(serviceName, ugi.getTokens()); - } - - protected Token selectHdfsDelegationToken() { - return DelegationTokenSelector.selectHdfsDelegationToken( - nnAddr, ugi, getConf()); + protected Token selectDelegationToken() { + return hftpTokenSelector.selectToken(getUri(), ugi.getTokens(), getConf()); } @@ -699,9 +690,22 @@ public void cancel(Token token, private static class HftpDelegationTokenSelector extends AbstractDelegationTokenSelector { + private static final DelegationTokenSelector hdfsTokenSelector = + new DelegationTokenSelector(); public HftpDelegationTokenSelector() { super(TOKEN_KIND); } + + Token selectToken(URI nnUri, + Collection> tokens, Configuration conf) { + Token token = + selectToken(SecurityUtil.buildTokenService(nnUri), tokens); + if (token == null) { + // try to get a HDFS token + token = hdfsTokenSelector.selectToken(nnUri, tokens, conf); + } + return token; + } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSelector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSelector.java index 4f73b851645..293611e377f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSelector.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSelector.java @@ -17,7 +17,8 @@ */ package org.apache.hadoop.hdfs.security.token.delegation; -import java.net.InetSocketAddress; +import java.net.URI; +import java.util.Collection; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; @@ -25,7 +26,6 @@ import org.apache.hadoop.io.Text; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.SecurityUtil; -import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSelector; @@ -37,32 +37,35 @@ public class DelegationTokenSelector extends AbstractDelegationTokenSelector{ public static final String SERVICE_NAME_KEY = "hdfs.service.host_"; - private static final DelegationTokenSelector INSTANCE = new DelegationTokenSelector(); - - /** Select the delegation token for hdfs from the ugi. */ - public static Token selectHdfsDelegationToken( - final InetSocketAddress nnAddr, final UserGroupInformation ugi, + /** + * Select the delegation token for hdfs. The port will be rewritten to + * the port of hdfs.service.host_$nnAddr, or the default rpc namenode port. + * This method should only be called by non-hdfs filesystems that do not + * use the rpc port to acquire tokens. Ex. webhdfs, hftp + * @param nnUri of the remote namenode + * @param tokens as a collection + * @param conf hadoop configuration + * @return Token + */ + public Token selectToken( + final URI nnUri, Collection> tokens, final Configuration conf) { // this guesses the remote cluster's rpc service port. 
// the current token design assumes it's the same as the local cluster's // rpc port unless a config key is set. there should be a way to automatic // and correctly determine the value - final String key = SERVICE_NAME_KEY + SecurityUtil.buildTokenService(nnAddr); - final String nnServiceName = conf.get(key); + Text serviceName = SecurityUtil.buildTokenService(nnUri); + final String nnServiceName = conf.get(SERVICE_NAME_KEY + serviceName); int nnRpcPort = NameNode.DEFAULT_PORT; if (nnServiceName != null) { nnRpcPort = NetUtils.createSocketAddr(nnServiceName, nnRpcPort).getPort(); } + // use original hostname from the uri to avoid unintentional host resolving + serviceName = SecurityUtil.buildTokenService( + NetUtils.createSocketAddrForHost(nnUri.getHost(), nnRpcPort)); - final Text serviceName = SecurityUtil.buildTokenService( - new InetSocketAddress(nnAddr.getHostName(), nnRpcPort)); - return INSTANCE.selectToken(serviceName, ugi.getTokens()); - } - - public static Token selectHdfsDelegationToken( - Text serviceName, UserGroupInformation ugi) { - return INSTANCE.selectToken(serviceName, ugi.getTokens()); + return selectToken(serviceName, tokens); } public DelegationTokenSelector() { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java index a20880aad65..eab36481e1a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java @@ -22,6 +22,7 @@ import java.net.InetSocketAddress; import java.net.URI; import java.util.ArrayList; +import java.util.Collection; import java.util.List; import java.util.Map; @@ -93,14 +94,15 @@ public ConfiguredFailoverProxyProvider(Configuration conf, URI uri, "for URI " + uri); } - for (InetSocketAddress address : addressesInNN.values()) { + Collection addressesOfNns = addressesInNN.values(); + for (InetSocketAddress address : addressesOfNns) { proxies.add(new AddressRpcProxyPair(address)); - - // The client may have a delegation token set for the logical - // URI of the cluster. Clone this token to apply to each of the - // underlying IPC addresses so that the IPC code can find it. - HAUtil.cloneDelegationTokenForLogicalUri(ugi, uri, address); } + + // The client may have a delegation token set for the logical + // URI of the cluster. Clone this token to apply to each of the + // underlying IPC addresses so that the IPC code can find it. 
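The selection logic above works purely by comparing service strings, so ip-based and host-based services must line up between the token and the address being contacted. A small sketch of that matching, using only calls that already appear in this patch; the host and port are placeholders.

    import java.net.InetSocketAddress;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.net.NetUtils;
    import org.apache.hadoop.security.SecurityUtil;
    import org.apache.hadoop.security.token.Token;
    import org.apache.hadoop.security.token.TokenIdentifier;

    public class TokenServiceMatchSketch {
      public static void main(String[] args) {
        // Address the client is about to contact (placeholder values).
        InetSocketAddress nnAddr = NetUtils.createSocketAddrForHost("localhost", 8020);

        // Service recorded on the token when it was obtained.
        Token<TokenIdentifier> token = new Token<TokenIdentifier>();
        SecurityUtil.setTokenService(token, nnAddr);

        // Service a selector would build for the outgoing connection.
        Text lookup = SecurityUtil.buildTokenService(nnAddr);

        // Selection only succeeds on an exact match; flipping between ip-based
        // ("127.0.0.1:8020") and host-based ("localhost:8020") services makes
        // this false, which is the case the new selectDelegationToken() and
        // cloneDelegationTokenForLogicalUri() tests cover.
        System.out.println(lookup.equals(token.getService()));
      }
    }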
+ HAUtil.cloneDelegationTokenForLogicalUri(ugi, uri, addressesOfNns); } catch (IOException e) { throw new RuntimeException(e); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java index c64dfb14e8a..415bf6c12dd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java @@ -29,6 +29,7 @@ import java.net.URI; import java.net.URISyntaxException; import java.net.URL; +import java.util.Collection; import java.util.List; import java.util.Map; import java.util.StringTokenizer; @@ -117,8 +118,8 @@ public class WebHdfsFileSystem extends FileSystem /** Delegation token kind */ public static final Text TOKEN_KIND = new Text("WEBHDFS delegation"); /** Token selector */ - public static final AbstractDelegationTokenSelector DT_SELECTOR - = new AbstractDelegationTokenSelector(TOKEN_KIND) {}; + public static final WebHdfsDelegationTokenSelector DT_SELECTOR + = new WebHdfsDelegationTokenSelector(); private static DelegationTokenRenewer DT_RENEWER = null; @@ -164,7 +165,7 @@ public synchronized void initialize(URI uri, Configuration conf } catch (URISyntaxException e) { throw new IllegalArgumentException(e); } - this.nnAddr = NetUtils.createSocketAddr(uri.toString()); + this.nnAddr = NetUtils.createSocketAddrForHost(uri.getHost(), uri.getPort()); this.workingDir = getHomeDirectory(); if (UserGroupInformation.isSecurityEnabled()) { @@ -174,12 +175,7 @@ public synchronized void initialize(URI uri, Configuration conf protected void initDelegationToken() throws IOException { // look for webhdfs token, then try hdfs - final Text serviceName = SecurityUtil.buildTokenService(nnAddr); - Token token = DT_SELECTOR.selectToken(serviceName, ugi.getTokens()); - if (token == null) { - token = DelegationTokenSelector.selectHdfsDelegationToken( - nnAddr, ugi, getConf()); - } + Token token = selectDelegationToken(); //since we don't already have a token, go get one boolean createdToken = false; @@ -200,6 +196,10 @@ protected void initDelegationToken() throws IOException { } } + protected Token selectDelegationToken() { + return DT_SELECTOR.selectToken(getUri(), ugi.getTokens(), getConf()); + } + @Override protected int getDefaultPort() { return getConf().getInt(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY, @@ -845,4 +845,24 @@ public void cancel(final Token token, final Configuration conf } } } + + private static class WebHdfsDelegationTokenSelector + extends AbstractDelegationTokenSelector { + private static final DelegationTokenSelector hdfsTokenSelector = + new DelegationTokenSelector(); + + public WebHdfsDelegationTokenSelector() { + super(TOKEN_KIND); + } + + Token selectToken(URI nnUri, + Collection> tokens, Configuration conf) { + Token token = + selectToken(SecurityUtil.buildTokenService(nnUri), tokens); + if (token == null) { + token = hdfsTokenSelector.selectToken(nnUri, tokens, conf); + } + return token; + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java index 2f1d992005d..edab4710607 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java @@ -94,6 +94,7 @@ import org.apache.hadoop.net.DNSToSwitchMapping; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.net.StaticMapping; +import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.ProxyUsers; import org.apache.hadoop.util.StringUtils; @@ -1049,16 +1050,14 @@ public synchronized void startDataNodes(Configuration conf, int numDataNodes, if(dn == null) throw new IOException("Cannot start DataNode in " + dnConf.get(DFS_DATANODE_DATA_DIR_KEY)); - //NOTE: the following is true if and only if: - // hadoop.security.token.service.use_ip=true - //since the HDFS does things based on IP:port, we need to add the mapping - //for IP:port to rackId - String ipAddr = dn.getXferAddress().getAddress().getHostAddress(); + //since the HDFS does things based on host|ip:port, we need to add the + //mapping for the service to rackId + String service = + SecurityUtil.buildTokenService(dn.getXferAddress()).toString(); if (racks != null) { - int port = dn.getXferAddress().getPort(); - LOG.info("Adding node with IP:port : " + ipAddr + ":" + port + + LOG.info("Adding node with service : " + service + " to rack " + racks[i-curDatanodesNum]); - StaticMapping.addNodeToRack(ipAddr + ":" + port, + StaticMapping.addNodeToRack(service, racks[i-curDatanodesNum]); } dn.runDatanodeDaemon(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpDelegationToken.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpDelegationToken.java index f13d5194d75..e4071222410 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpDelegationToken.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpDelegationToken.java @@ -31,6 +31,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.io.Text; +import org.apache.hadoop.security.SecurityUtilTestHelper; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; @@ -66,4 +67,59 @@ public FileSystem run() throws Exception { renewToken.setAccessible(true); assertSame("wrong token", token, renewToken.get(fs)); } + + @Test + public void testSelectHdfsDelegationToken() throws Exception { + SecurityUtilTestHelper.setTokenServiceUseIp(true); + + Configuration conf = new Configuration(); + URI hftpUri = URI.create("hftp://localhost:0"); + UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); + Token token = null; + + // test fallback to hdfs token + Token hdfsToken = new Token( + new byte[0], new byte[0], + DelegationTokenIdentifier.HDFS_DELEGATION_KIND, + new Text("127.0.0.1:8020")); + ugi.addToken(hdfsToken); + + HftpFileSystem fs = (HftpFileSystem) FileSystem.get(hftpUri, conf); + token = fs.selectDelegationToken(); + assertNotNull(token); + assertEquals(hdfsToken, token); + + // test hftp is favored over hdfs + Token hftpToken = new Token( + new byte[0], new byte[0], + HftpFileSystem.TOKEN_KIND, new Text("127.0.0.1:0")); + ugi.addToken(hftpToken); + token = fs.selectDelegationToken(); + assertNotNull(token); + assertEquals(hftpToken, token); + + // switch to using host-based tokens, no token should match + 
SecurityUtilTestHelper.setTokenServiceUseIp(false); + token = fs.selectDelegationToken(); + assertNull(token); + + // test fallback to hdfs token + hdfsToken = new Token( + new byte[0], new byte[0], + DelegationTokenIdentifier.HDFS_DELEGATION_KIND, + new Text("localhost:8020")); + ugi.addToken(hdfsToken); + token = fs.selectDelegationToken(); + assertNotNull(token); + assertEquals(hdfsToken, token); + + // test hftp is favored over hdfs + hftpToken = new Token( + new byte[0], new byte[0], + HftpFileSystem.TOKEN_KIND, new Text("localhost:0")); + ugi.addToken(hftpToken); + token = fs.selectDelegationToken(); + assertNotNull(token); + assertEquals(hftpToken, token); + } } \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestClientProtocolWithDelegationToken.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestClientProtocolWithDelegationToken.java index 5f4696e144f..e54b8bccc28 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestClientProtocolWithDelegationToken.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestClientProtocolWithDelegationToken.java @@ -41,6 +41,7 @@ import org.apache.hadoop.security.SaslInputStream; import org.apache.hadoop.security.SaslRpcClient; import org.apache.hadoop.security.SaslRpcServer; +import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.log4j.Level; @@ -91,10 +92,8 @@ public void testDelegationTokenRpc() throws Exception { DelegationTokenIdentifier dtId = new DelegationTokenIdentifier(owner, owner, null); Token token = new Token( dtId, sm); - Text host = new Text(addr.getAddress().getHostAddress() + ":" - + addr.getPort()); - token.setService(host); - LOG.info("Service IP address for token is " + host); + SecurityUtil.setTokenService(token, addr); + LOG.info("Service for token is " + token.getService()); current.addToken(token); current.doAs(new PrivilegedExceptionAction() { @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java index 5c380915d04..a69a0ce267b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java @@ -26,6 +26,7 @@ import java.net.URI; import java.security.PrivilegedExceptionAction; import java.util.Collection; +import java.util.HashSet; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -44,11 +45,13 @@ import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; import org.apache.hadoop.io.Text; import org.apache.hadoop.security.SecurityUtil; +import org.apache.hadoop.security.SecurityUtilTestHelper; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.test.GenericTestUtils; import org.junit.AfterClass; +import org.junit.Before; import org.junit.BeforeClass; import org.junit.Test; @@ -100,6 +103,11 @@ public static void shutdownCluster() throws IOException { } + @Before 
+ public void prepTest() { + SecurityUtilTestHelper.setTokenServiceUseIp(true); + } + @Test public void testDelegationTokenDFSApi() throws Exception { Token token = dfs.getDelegationToken("JobTracker"); @@ -187,23 +195,48 @@ public void testHAUtilClonesDelegationTokens() throws Exception { URI haUri = new URI("hdfs://my-ha-uri/"); token.setService(HAUtil.buildTokenServiceForLogicalUri(haUri)); ugi.addToken(token); - HAUtil.cloneDelegationTokenForLogicalUri(ugi, haUri, nn0.getNameNodeAddress()); - HAUtil.cloneDelegationTokenForLogicalUri(ugi, haUri, nn1.getNameNodeAddress()); + + Collection nnAddrs = new HashSet(); + nnAddrs.add(nn0.getNameNodeAddress()); + nnAddrs.add(nn1.getNameNodeAddress()); + HAUtil.cloneDelegationTokenForLogicalUri(ugi, haUri, nnAddrs); Collection> tokens = ugi.getTokens(); assertEquals(3, tokens.size()); LOG.info("Tokens:\n" + Joiner.on("\n").join(tokens)); + DelegationTokenSelector dts = new DelegationTokenSelector(); // check that the token selected for one of the physical IPC addresses // matches the one we received - InetSocketAddress addr = nn0.getNameNodeAddress(); - Text ipcDtService = SecurityUtil.buildTokenService(addr); - Token token2 = - DelegationTokenSelector.selectHdfsDelegationToken(ipcDtService, ugi); - assertNotNull(token2); - assertArrayEquals(token.getIdentifier(), token2.getIdentifier()); - assertArrayEquals(token.getPassword(), token2.getPassword()); + for (InetSocketAddress addr : nnAddrs) { + Text ipcDtService = SecurityUtil.buildTokenService(addr); + Token token2 = + dts.selectToken(ipcDtService, ugi.getTokens()); + assertNotNull(token2); + assertArrayEquals(token.getIdentifier(), token2.getIdentifier()); + assertArrayEquals(token.getPassword(), token2.getPassword()); + } + + // switch to host-based tokens, shouldn't match existing tokens + SecurityUtilTestHelper.setTokenServiceUseIp(false); + for (InetSocketAddress addr : nnAddrs) { + Text ipcDtService = SecurityUtil.buildTokenService(addr); + Token token2 = + dts.selectToken(ipcDtService, ugi.getTokens()); + assertNull(token2); + } + + // reclone the tokens, and see if they match now + HAUtil.cloneDelegationTokenForLogicalUri(ugi, haUri, nnAddrs); + for (InetSocketAddress addr : nnAddrs) { + Text ipcDtService = SecurityUtil.buildTokenService(addr); + Token token2 = + dts.selectToken(ipcDtService, ugi.getTokens()); + assertNotNull(token2); + assertArrayEquals(token.getIdentifier(), token2.getIdentifier()); + assertArrayEquals(token.getPassword(), token2.getPassword()); + } } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java index ec90146d608..1dde0997dd1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java @@ -34,10 +34,16 @@ import org.apache.hadoop.hdfs.web.resources.HttpOpParam; import org.apache.hadoop.hdfs.web.resources.PutOpParam; import org.apache.hadoop.io.Text; +import org.apache.hadoop.security.SecurityUtilTestHelper; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; +import org.apache.hadoop.security.token.TokenIdentifier; import org.junit.Assert; import org.junit.Test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; import static 
org.mockito.Mockito.mock; public class TestWebHdfsUrl { @@ -90,4 +96,60 @@ public void testDelegationTokenInUrl() throws IOException { private String generateUrlQueryPrefix(HttpOpParam.Op op, String username) { return "op=" + op.toString() + "&user.name=" + username; } + + @Test + public void testSelectDelegationToken() throws Exception { + SecurityUtilTestHelper.setTokenServiceUseIp(true); + + Configuration conf = new Configuration(); + URI webHdfsUri = URI.create("webhdfs://localhost:0"); + UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); + Token token = null; + + // test fallback to hdfs token + Token hdfsToken = new Token( + new byte[0], new byte[0], + DelegationTokenIdentifier.HDFS_DELEGATION_KIND, + new Text("127.0.0.1:8020")); + ugi.addToken(hdfsToken); + + WebHdfsFileSystem fs = (WebHdfsFileSystem) FileSystem.get(webHdfsUri, conf); + token = fs.selectDelegationToken(); + assertNotNull(token); + assertEquals(hdfsToken, token); + + // test webhdfs is favored over hdfs + Token webHdfsToken = new Token( + new byte[0], new byte[0], + WebHdfsFileSystem.TOKEN_KIND, new Text("127.0.0.1:0")); + ugi.addToken(webHdfsToken); + token = fs.selectDelegationToken(); + assertNotNull(token); + assertEquals(webHdfsToken, token); + + // switch to using host-based tokens, no token should match + SecurityUtilTestHelper.setTokenServiceUseIp(false); + token = fs.selectDelegationToken(); + assertNull(token); + + // test fallback to hdfs token + hdfsToken = new Token( + new byte[0], new byte[0], + DelegationTokenIdentifier.HDFS_DELEGATION_KIND, + new Text("localhost:8020")); + ugi.addToken(hdfsToken); + token = fs.selectDelegationToken(); + assertNotNull(token); + assertEquals(hdfsToken, token); + + // test webhdfs is favored over hdfs + webHdfsToken = new Token( + new byte[0], new byte[0], + WebHdfsFileSystem.TOKEN_KIND, new Text("localhost:0")); + ugi.addToken(webHdfsToken); + token = fs.selectDelegationToken(); + assertNotNull(token); + assertEquals(webHdfsToken, token); + } + } From 7d04a96027ad75877b41b7cd8f67455dd13159d7 Mon Sep 17 00:00:00 2001 From: Siddharth Seth Date: Wed, 18 Apr 2012 01:59:16 +0000 Subject: [PATCH 46/57] MAPREDUCE-3972. Fix locking and exception issues in JobHistory server. 
(Contributed by Robert Joseph Evans) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1327354 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 3 + .../hadoop/mapreduce/v2/app/job/Job.java | 9 + .../mapreduce/v2/app/job/impl/JobImpl.java | 10 + .../v2/app/webapp/AMWebServices.java | 10 +- .../mapreduce/v2/app/webapp/ConfBlock.java | 7 +- .../mapreduce/v2/app/webapp/dao/ConfInfo.java | 10 +- .../hadoop/mapreduce/v2/app/MockJobs.java | 12 +- .../v2/app/TestRuntimeEstimators.java | 5 + .../mapreduce/v2/hs/CachedHistoryStorage.java | 39 +- .../hadoop/mapreduce/v2/hs/CompletedJob.java | 21 +- .../mapreduce/v2/hs/HistoryFileManager.java | 655 ++++++++++-------- .../mapreduce/v2/hs/HistoryStorage.java | 13 +- .../hadoop/mapreduce/v2/hs/JobHistory.java | 149 +--- .../hadoop/mapreduce/v2/hs/PartialJob.java | 6 + .../mapreduce/v2/hs/webapp/HsWebServices.java | 4 +- .../v2/hs/TestJobHistoryEntities.java | 15 +- .../v2/hs/TestJobHistoryParsing.java | 209 +++--- 17 files changed, 630 insertions(+), 547 deletions(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 2c85b38b065..91ffd6c1c29 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -268,6 +268,9 @@ Release 0.23.3 - UNRELEASED MAPREDUCE-4134. Remove references of mapred.child.ulimit etc. since they are not being used any more (Ravi Prakash via bobby) + MAPREDUCE-3972. Fix locking and exception issues in JobHistory server. + (Robert Joseph Evans via sseth) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/Job.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/Job.java index d30bb737ed9..5fd47158eb3 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/Job.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/Job.java @@ -18,9 +18,11 @@ package org.apache.hadoop.mapreduce.v2.app.job; +import java.io.IOException; import java.util.List; import java.util.Map; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.mapreduce.Counters; import org.apache.hadoop.mapreduce.JobACL; @@ -71,6 +73,13 @@ public interface Job { */ Path getConfFile(); + /** + * @return a parsed version of the config files pointed to by + * {@link #getConfFile()}. + * @throws IOException on any error trying to load the conf file. + */ + Configuration loadConfFile() throws IOException; + /** * @return the ACLs for this job for each type of JobACL given. 
*/ diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java index f2f7a6c848c..6bc78fbfb9b 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java @@ -37,6 +37,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.Text; @@ -1472,4 +1473,13 @@ public void transition(JobImpl job, JobEvent event) { job.finished(JobState.ERROR); } } + + @Override + public Configuration loadConfFile() throws IOException { + Path confPath = getConfFile(); + FileContext fc = FileContext.getFileContext(confPath.toUri(), conf); + Configuration jobConf = new Configuration(false); + jobConf.addResource(fc.open(confPath)); + return jobConf; + } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AMWebServices.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AMWebServices.java index f62cba0ccec..bacfc53bb95 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AMWebServices.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AMWebServices.java @@ -31,7 +31,6 @@ import javax.ws.rs.core.MediaType; import javax.ws.rs.core.Response.Status; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.mapreduce.JobACL; import org.apache.hadoop.mapreduce.v2.api.records.AMInfo; import org.apache.hadoop.mapreduce.v2.api.records.JobId; @@ -68,14 +67,11 @@ public class AMWebServices { private final AppContext appCtx; private final App app; - private final Configuration conf; - + @Inject - public AMWebServices(final App app, final AppContext context, - final Configuration conf) { + public AMWebServices(final App app, final AppContext context) { this.appCtx = context; this.app = app; - this.conf = conf; } Boolean hasAccess(Job job, HttpServletRequest request) { @@ -272,7 +268,7 @@ public ConfInfo getJobConf(@Context HttpServletRequest hsr, checkAccess(job, hsr); ConfInfo info; try { - info = new ConfInfo(job, this.conf); + info = new ConfInfo(job); } catch (IOException e) { throw new NotFoundException("unable to load configuration for job: " + jid); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/ConfBlock.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/ConfBlock.java index 303c8a3b00a..36f202e66bf 100644 --- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/ConfBlock.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/ConfBlock.java @@ -23,7 +23,6 @@ import java.io.IOException; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.mapreduce.v2.api.records.JobId; import org.apache.hadoop.mapreduce.v2.app.AppContext; @@ -44,11 +43,9 @@ */ public class ConfBlock extends HtmlBlock { final AppContext appContext; - final Configuration conf; - @Inject ConfBlock(AppContext appctx, Configuration conf) { + @Inject ConfBlock(AppContext appctx) { appContext = appctx; - this.conf = conf; } /* @@ -71,7 +68,7 @@ public class ConfBlock extends HtmlBlock { } Path confPath = job.getConfFile(); try { - ConfInfo info = new ConfInfo(job, this.conf); + ConfInfo info = new ConfInfo(job); html.div().h3(confPath.toString())._(); TBODY> tbody = html. diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/ConfInfo.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/ConfInfo.java index d7f88bde60d..fa971a749e4 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/ConfInfo.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/ConfInfo.java @@ -40,15 +40,11 @@ public class ConfInfo { public ConfInfo() { } - public ConfInfo(Job job, Configuration conf) throws IOException { + public ConfInfo(Job job) throws IOException { - Path confPath = job.getConfFile(); this.property = new ArrayList(); - // Read in the configuration file and put it in a key/value table. 
- FileContext fc = FileContext.getFileContext(confPath.toUri(), conf); - Configuration jobConf = new Configuration(false); - jobConf.addResource(fc.open(confPath)); - this.path = confPath.toString(); + Configuration jobConf = job.loadConfFile(); + this.path = job.getConfFile().toString(); for (Map.Entry entry : jobConf) { this.property.add(new ConfEntryInfo(entry.getKey(), entry.getValue())); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java index 0ffe9acdb91..dd574080142 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java @@ -18,6 +18,7 @@ package org.apache.hadoop.mapreduce.v2.app; +import java.io.IOException; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -27,6 +28,7 @@ import java.util.Map; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.Path; import org.apache.hadoop.mapred.JobACLsManager; import org.apache.hadoop.mapred.ShuffleHandler; @@ -442,7 +444,7 @@ public static Job newJob(ApplicationId appID, int i, int n, int m, Path confFile final Path configFile = confFile; Map tmpJobACLs = new HashMap(); - Configuration conf = new Configuration(); + final Configuration conf = new Configuration(); conf.set(JobACL.VIEW_JOB.getAclName(), "testuser"); conf.setBoolean(MRConfig.MR_ACLS_ENABLED, true); @@ -564,6 +566,14 @@ public List getAMInfos() { amInfoList.add(createAMInfo(2)); return amInfoList; } + + @Override + public Configuration loadConfFile() throws IOException { + FileContext fc = FileContext.getFileContext(configFile.toUri(), conf); + Configuration jobConf = new Configuration(false); + jobConf.addResource(fc.open(configFile)); + return jobConf; + } }; } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java index a629625a4b0..a5dae84062e 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java @@ -489,6 +489,11 @@ public Map getJobACLs() { public List getAMInfos() { throw new UnsupportedOperationException("Not supported yet."); } + + @Override + public Configuration loadConfFile() { + throw new UnsupportedOperationException(); + } } /* diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CachedHistoryStorage.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CachedHistoryStorage.java index 5a4da68e6fd..eb4e78499fe 100644 --- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CachedHistoryStorage.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CachedHistoryStorage.java @@ -34,7 +34,7 @@ import org.apache.hadoop.mapreduce.v2.api.records.JobState; import org.apache.hadoop.mapreduce.v2.app.job.Job; import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.JobsInfo; -import org.apache.hadoop.mapreduce.v2.hs.HistoryFileManager.MetaInfo; +import org.apache.hadoop.mapreduce.v2.hs.HistoryFileManager.HistoryFileInfo; import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.JobInfo; import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig; import org.apache.hadoop.yarn.YarnException; @@ -82,32 +82,41 @@ public CachedHistoryStorage() { super(CachedHistoryStorage.class.getName()); } - private Job loadJob(MetaInfo metaInfo) { + private Job loadJob(HistoryFileInfo fileInfo) { try { - Job job = hsManager.loadJob(metaInfo); + Job job = fileInfo.loadJob(); if (LOG.isDebugEnabled()) { LOG.debug("Adding " + job.getID() + " to loaded job cache"); } + // We can clobber results here, but that should be OK, because it only + // means that we may have two identical copies of the same job floating + // around for a while. loadedJobCache.put(job.getID(), job); return job; } catch (IOException e) { throw new YarnException( - "Could not find/load job: " + metaInfo.getJobId(), e); + "Could not find/load job: " + fileInfo.getJobId(), e); } } @Override - public synchronized Job getFullJob(JobId jobId) { + public Job getFullJob(JobId jobId) { if (LOG.isDebugEnabled()) { LOG.debug("Looking for Job " + jobId); } try { - Job result = loadedJobCache.get(jobId); - if (result == null) { - MetaInfo metaInfo = hsManager.getMetaInfo(jobId); - if (metaInfo != null) { - result = loadJob(metaInfo); + HistoryFileInfo fileInfo = hsManager.getFileInfo(jobId); + Job result = null; + if (fileInfo != null) { + result = loadedJobCache.get(jobId); + if (result == null) { + result = loadJob(fileInfo); + } else if(fileInfo.isDeleted()) { + loadedJobCache.remove(jobId); + result = null; } + } else { + loadedJobCache.remove(jobId); } return result; } catch (IOException e) { @@ -120,24 +129,19 @@ public Map getAllPartialJobs() { LOG.debug("Called getAllPartialJobs()"); SortedMap result = new TreeMap(); try { - for (MetaInfo mi : hsManager.getAllMetaInfo()) { + for (HistoryFileInfo mi : hsManager.getAllFileInfo()) { if (mi != null) { JobId id = mi.getJobId(); result.put(id, new PartialJob(mi.getJobIndexInfo(), id)); } } } catch (IOException e) { - LOG.warn("Error trying to scan for all MetaInfos", e); + LOG.warn("Error trying to scan for all FileInfos", e); throw new YarnException(e); } return result; } - @Override - public void jobRemovedFromHDFS(JobId jobId) { - loadedJobCache.remove(jobId); - } - @Override public JobsInfo getPartialJobs(Long offset, Long count, String user, String queue, Long sBegin, Long sEnd, Long fBegin, Long fEnd, @@ -173,6 +177,7 @@ public static JobsInfo getPartialJobs(Collection jobs, Long offset, if (end < 0) { // due to overflow end = Long.MAX_VALUE; } + for (Job job : jobs) { if (at > end) { break; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java index 11d19d5c52d..1801a1ed87a 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java @@ -53,6 +53,7 @@ import org.apache.hadoop.mapreduce.v2.api.records.TaskType; import org.apache.hadoop.mapreduce.v2.app.job.Task; import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt; +import org.apache.hadoop.mapreduce.v2.hs.HistoryFileManager.HistoryFileInfo; import org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils; import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils; import org.apache.hadoop.security.UserGroupInformation; @@ -71,7 +72,7 @@ public class CompletedJob implements org.apache.hadoop.mapreduce.v2.app.job.Job private final Configuration conf; private final JobId jobId; //Can be picked from JobInfo with a conversion. private final String user; //Can be picked up from JobInfo - private final Path confFile; + private final HistoryFileInfo info; private JobInfo jobInfo; private JobReport report; AtomicBoolean tasksLoaded = new AtomicBoolean(false); @@ -84,13 +85,14 @@ public class CompletedJob implements org.apache.hadoop.mapreduce.v2.app.job.Job public CompletedJob(Configuration conf, JobId jobId, Path historyFile, - boolean loadTasks, String userName, Path confFile, JobACLsManager aclsMgr) + boolean loadTasks, String userName, HistoryFileInfo info, + JobACLsManager aclsMgr) throws IOException { LOG.info("Loading job: " + jobId + " from file: " + historyFile); this.conf = conf; this.jobId = jobId; this.user = userName; - this.confFile = confFile; + this.info = info; this.aclsMgr = aclsMgr; loadFullHistoryData(loadTasks, historyFile); } @@ -134,7 +136,7 @@ private void constructJobReport() { report.setUser(jobInfo.getUsername()); report.setMapProgress((float) getCompletedMaps() / getTotalMaps()); report.setReduceProgress((float) getCompletedReduces() / getTotalReduces()); - report.setJobFile(confFile.toString()); + report.setJobFile(getConfFile().toString()); String historyUrl = "N/A"; try { historyUrl = JobHistoryUtils.getHistoryUrl(conf, jobId.getAppId()); @@ -392,7 +394,16 @@ public String getUserName() { */ @Override public Path getConfFile() { - return confFile; + return info.getConfFile(); + } + + /* + * (non-Javadoc) + * @see org.apache.hadoop.mapreduce.v2.app.job.Job#loadConfFile() + */ + @Override + public Configuration loadConfFile() throws IOException { + return info.loadConfFile(); } @Override diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java index 07b078f50aa..6447109916b 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java @@ -25,12 +25,17 @@ import java.util.Collections; import java.util.HashMap; import java.util.HashSet; +import java.util.Iterator; import 
java.util.List; import java.util.Map; import java.util.Set; import java.util.SortedMap; import java.util.TreeMap; import java.util.concurrent.ConcurrentSkipListMap; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -57,6 +62,8 @@ import org.apache.hadoop.yarn.YarnException; import org.apache.hadoop.yarn.service.AbstractService; +import com.google.common.util.concurrent.ThreadFactoryBuilder; + /** * This class provides a way to interact with history files in a thread safe * manner. */ @@ -67,33 +74,251 @@ public class HistoryFileManager extends AbstractService { private static final Log LOG = LogFactory.getLog(HistoryFileManager.class); private static final Log SUMMARY_LOG = LogFactory.getLog(JobSummary.class); + private static enum HistoryInfoState { + IN_INTERMEDIATE, IN_DONE, DELETED, MOVE_FAILED + }; + private static String DONE_BEFORE_SERIAL_TAIL = JobHistoryUtils .doneSubdirsBeforeSerialTail(); - public static class MetaInfo { + /** + * Maps between a serial number (generated based on jobId) and the timestamp + * component(s) to which it belongs. Facilitates jobId based searches. If a + * jobId is not found in this list - it will not be found. + */ + private static class SerialNumberIndex { + private SortedMap> cache; + private int maxSize; + + public SerialNumberIndex(int maxSize) { + this.cache = new TreeMap>(); + this.maxSize = maxSize; + } + + public synchronized void add(String serialPart, String timestampPart) { + if (!cache.containsKey(serialPart)) { + cache.put(serialPart, new HashSet()); + if (cache.size() > maxSize) { + String key = cache.firstKey(); + LOG.error("Dropping " + key + + " from the SerialNumberIndex. We will no " + + "longer be able to see jobs that are in that serial index for " + + cache.get(key)); + cache.remove(key); + } + } + Set datePartSet = cache.get(serialPart); + datePartSet.add(timestampPart); + } + + public synchronized void remove(String serialPart, String timeStampPart) { + if (cache.containsKey(serialPart)) { + Set set = cache.get(serialPart); + set.remove(timeStampPart); + if (set.isEmpty()) { + cache.remove(serialPart); + } + } + } + + public synchronized Set get(String serialPart) { + Set found = cache.get(serialPart); + if (found != null) { + return new HashSet(found); + } + return null; + } + } + + private static class JobListCache { + private ConcurrentSkipListMap cache; + private int maxSize; + private long maxAge; + + public JobListCache(int maxSize, long maxAge) { + this.maxSize = maxSize; + this.maxAge = maxAge; + this.cache = new ConcurrentSkipListMap(); + } + + public HistoryFileInfo addIfAbsent(HistoryFileInfo fileInfo) { + JobId jobId = fileInfo.getJobIndexInfo().getJobId(); + if (LOG.isDebugEnabled()) { + LOG.debug("Adding " + jobId + " to job list cache with " + + fileInfo.getJobIndexInfo()); + } + HistoryFileInfo old = cache.putIfAbsent(jobId, fileInfo); + if (cache.size() > maxSize) { + //There is a race here, where more than one thread could be trying to + // remove entries. This could result in too many entries being removed + // from the cache. This is considered OK as the size of the cache + // should be rather large, and we would rather have performance over + // keeping the cache size exactly at the maximum.
+ Iterator keys = cache.navigableKeySet().iterator(); + long cutoff = System.currentTimeMillis() - maxAge; + while(cache.size() > maxSize && keys.hasNext()) { + JobId key = keys.next(); + HistoryFileInfo firstValue = cache.get(key); + if(firstValue != null) { + synchronized(firstValue) { + if (firstValue.isMovePending()) { + if(firstValue.didMoveFail() && + firstValue.jobIndexInfo.getFinishTime() <= cutoff) { + cache.remove(key); + //Now lets try to delete it + try { + firstValue.delete(); + } catch (IOException e) { + LOG.error("Error while trying to delete history files" + + " that could not be moved to done.", e); + } + } else { + LOG.warn("Waiting to remove " + key + + " from JobListCache because it is not in done yet."); + } + } else { + cache.remove(key); + } + } + } + } + } + return old; + } + + public void delete(HistoryFileInfo fileInfo) { + cache.remove(fileInfo.getJobId()); + } + + public Collection values() { + return new ArrayList(cache.values()); + } + + public HistoryFileInfo get(JobId jobId) { + return cache.get(jobId); + } + } + + public class HistoryFileInfo { private Path historyFile; private Path confFile; private Path summaryFile; private JobIndexInfo jobIndexInfo; + private HistoryInfoState state; - public MetaInfo(Path historyFile, Path confFile, Path summaryFile, - JobIndexInfo jobIndexInfo) { + private HistoryFileInfo(Path historyFile, Path confFile, Path summaryFile, + JobIndexInfo jobIndexInfo, boolean isInDone) { this.historyFile = historyFile; this.confFile = confFile; this.summaryFile = summaryFile; this.jobIndexInfo = jobIndexInfo; + state = isInDone ? HistoryInfoState.IN_DONE + : HistoryInfoState.IN_INTERMEDIATE; } - private Path getHistoryFile() { + private synchronized boolean isMovePending() { + return state == HistoryInfoState.IN_INTERMEDIATE + || state == HistoryInfoState.MOVE_FAILED; + } + + private synchronized boolean didMoveFail() { + return state == HistoryInfoState.MOVE_FAILED; + } + + /** + * @return true if the files backed by this were deleted. + */ + public synchronized boolean isDeleted() { + return state == HistoryInfoState.DELETED; + } + + private synchronized void moveToDone() throws IOException { + if (!isMovePending()) { + // It was either deleted or is already in done. 
Either way do nothing + return; + } + try { + long completeTime = jobIndexInfo.getFinishTime(); + if (completeTime == 0) { + completeTime = System.currentTimeMillis(); + } + JobId jobId = jobIndexInfo.getJobId(); + + List paths = new ArrayList(2); + if (historyFile == null) { + LOG.info("No file for job-history with " + jobId + " found in cache!"); + } else { + paths.add(historyFile); + } + + if (confFile == null) { + LOG.info("No file for jobConf with " + jobId + " found in cache!"); + } else { + paths.add(confFile); + } + + if (summaryFile == null) { + LOG.info("No summary file for job: " + jobId); + } else { + String jobSummaryString = getJobSummary(intermediateDoneDirFc, + summaryFile); + SUMMARY_LOG.info(jobSummaryString); + LOG.info("Deleting JobSummary file: [" + summaryFile + "]"); + intermediateDoneDirFc.delete(summaryFile, false); + summaryFile = null; + } + + Path targetDir = canonicalHistoryLogPath(jobId, completeTime); + addDirectoryToSerialNumberIndex(targetDir); + makeDoneSubdir(targetDir); + if (historyFile != null) { + Path toPath = doneDirFc.makeQualified(new Path(targetDir, historyFile + .getName())); + if (!toPath.equals(historyFile)) { + moveToDoneNow(historyFile, toPath); + historyFile = toPath; + } + } + if (confFile != null) { + Path toPath = doneDirFc.makeQualified(new Path(targetDir, confFile + .getName())); + if (!toPath.equals(confFile)) { + moveToDoneNow(confFile, toPath); + confFile = toPath; + } + } + state = HistoryInfoState.IN_DONE; + } catch (Throwable t) { + LOG.error("Error while trying to move a job to done", t); + this.state = HistoryInfoState.MOVE_FAILED; + } + } + + /** + * Parse a job from the JobHistoryFile, if the underlying file is not going + * to be deleted. + * + * @return the Job or null if the underlying file was deleted. + * @throws IOException + * if there is an error trying to read the file. + */ + public synchronized Job loadJob() throws IOException { + return new CompletedJob(conf, jobIndexInfo.getJobId(), historyFile, + false, jobIndexInfo.getUser(), this, aclsMgr); + } + + /** + * Return the history file. This should only be used for testing. + * @return the history file. + */ + synchronized Path getHistoryFile() { return historyFile; } - - private Path getConfFile() { - return confFile; - } - - private Path getSummaryFile() { - return summaryFile; + + private synchronized void delete() throws IOException { + state = HistoryInfoState.DELETED; + doneDirFc.delete(doneDirFc.makeQualified(historyFile), false); + doneDirFc.delete(doneDirFc.makeQualified(confFile), false); } public JobIndexInfo getJobIndexInfo() { @@ -104,57 +329,35 @@ public JobId getJobId() { return jobIndexInfo.getJobId(); } - private void setHistoryFile(Path historyFile) { - this.historyFile = historyFile; + public synchronized Path getConfFile() { + return confFile; } - - private void setConfFile(Path confFile) { - this.confFile = confFile; - } - - private void setSummaryFile(Path summaryFile) { - this.summaryFile = summaryFile; + + public synchronized Configuration loadConfFile() throws IOException { + FileContext fc = FileContext.getFileContext(confFile.toUri(), conf); + Configuration jobConf = new Configuration(false); + jobConf.addResource(fc.open(confFile)); + return jobConf; } } - /** - * Maps between a serial number (generated based on jobId) and the timestamp - * component(s) to which it belongs. Facilitates jobId based searches. If a - * jobId is not found in this list - it will not be found. 
- */ - private final SortedMap> idToDateString = - new TreeMap>(); - // The number of entries in idToDateString - private int dateStringCacheSize; - - // Maintains minimal details for recent jobs (parsed from history file name). - // Sorted on Job Completion Time. - private final SortedMap jobListCache = - new ConcurrentSkipListMap(); - // The number of jobs to maintain in the job list cache. - private int jobListCacheSize; - - // Re-use existing MetaInfo objects if they exist for the specific JobId. - // (synchronization on MetaInfo) - // Check for existence of the object when using iterators. - private final SortedMap intermediateListCache = - new ConcurrentSkipListMap(); + private SerialNumberIndex serialNumberIndex = null; + private JobListCache jobListCache = null; // Maintains a list of known done subdirectories. - private final Set existingDoneSubdirs = new HashSet(); + private final Set existingDoneSubdirs = Collections + .synchronizedSet(new HashSet()); /** * Maintains a mapping between intermediate user directories and the last * known modification time. */ - private Map userDirModificationTimeMap = - new HashMap(); + private Map userDirModificationTimeMap = new HashMap(); private JobACLsManager aclsMgr; private Configuration conf; - // TODO Remove me!!!! private boolean debugMode; private String serialNumberFormat; @@ -165,6 +368,9 @@ private void setSummaryFile(Path summaryFile) { private FileContext intermediateDoneDirFc; // Intermediate Done Dir // FileContext + private ThreadPoolExecutor moveToDoneExecutor = null; + private long maxHistoryAge = 0; + public HistoryFileManager() { super(HistoryFileManager.class.getName()); } @@ -211,12 +417,25 @@ public void init(Configuration conf) { this.aclsMgr = new JobACLsManager(conf); - jobListCacheSize = conf.getInt(JHAdminConfig.MR_HISTORY_JOBLIST_CACHE_SIZE, - JHAdminConfig.DEFAULT_MR_HISTORY_JOBLIST_CACHE_SIZE); + maxHistoryAge = conf.getLong(JHAdminConfig.MR_HISTORY_MAX_AGE_MS, + JHAdminConfig.DEFAULT_MR_HISTORY_MAX_AGE); + + jobListCache = new JobListCache(conf.getInt( + JHAdminConfig.MR_HISTORY_JOBLIST_CACHE_SIZE, + JHAdminConfig.DEFAULT_MR_HISTORY_JOBLIST_CACHE_SIZE), + maxHistoryAge); - dateStringCacheSize = conf.getInt( + serialNumberIndex = new SerialNumberIndex(conf.getInt( JHAdminConfig.MR_HISTORY_DATESTRING_CACHE_SIZE, - JHAdminConfig.DEFAULT_MR_HISTORY_DATESTRING_CACHE_SIZE); + JHAdminConfig.DEFAULT_MR_HISTORY_DATESTRING_CACHE_SIZE)); + + int numMoveThreads = conf.getInt( + JHAdminConfig.MR_HISTORY_MOVE_THREAD_COUNT, + JHAdminConfig.DEFAULT_MR_HISTORY_MOVE_THREAD_COUNT); + ThreadFactory tf = new ThreadFactoryBuilder().setNameFormat( + "MoveIntermediateToDone Thread #%d").build(); + moveToDoneExecutor = new ThreadPoolExecutor(numMoveThreads, numMoveThreads, + 1, TimeUnit.HOURS, new LinkedBlockingQueue(), tf); super.init(conf); } @@ -249,6 +468,7 @@ private void mkdir(FileContext fc, Path path, FsPermission fsp) void initExisting() throws IOException { LOG.info("Initializing Existing Jobs..."); List timestampedDirList = findTimestampedDirectories(); + // Sort first just so insertion is in a consistent order Collections.sort(timestampedDirList); for (FileStatus fs : timestampedDirList) { // TODO Could verify the correct format for these directories. @@ -271,16 +491,7 @@ private void removeDirectoryFromSerialNumberIndex(Path serialDirPath) { + serialDirPath.toString() + ". 
Continuing with next"); return; } - synchronized (idToDateString) { - // TODO make this thread safe without the synchronize - if (idToDateString.containsKey(serialPart)) { - Set set = idToDateString.get(serialPart); - set.remove(timeStampPart); - if (set.isEmpty()) { - idToDateString.remove(serialPart); - } - } - } + serialNumberIndex.remove(serialPart, timeStampPart); } private void addDirectoryToSerialNumberIndex(Path serialDirPath) { @@ -299,21 +510,7 @@ private void addDirectoryToSerialNumberIndex(Path serialDirPath) { LOG.warn("Could not find serial portion from path: " + serialDirPath.toString() + ". Continuing with next"); } - addToSerialNumberIndex(serialPart, timestampPart); - } - - private void addToSerialNumberIndex(String serialPart, String timestampPart) { - synchronized (idToDateString) { - // TODO make this thread safe without the synchronize - if (!idToDateString.containsKey(serialPart)) { - idToDateString.put(serialPart, new HashSet()); - if (idToDateString.size() > dateStringCacheSize) { - idToDateString.remove(idToDateString.firstKey()); - } - Set datePartSet = idToDateString.get(serialPart); - datePartSet.add(timestampPart); - } - } + serialNumberIndex.add(serialPart, timestampPart); } private void addDirectoryToJobListCache(Path path) throws IOException { @@ -332,10 +529,10 @@ private void addDirectoryToJobListCache(Path path) throws IOException { .getIntermediateConfFileName(jobIndexInfo.getJobId()); String summaryFileName = JobHistoryUtils .getIntermediateSummaryFileName(jobIndexInfo.getJobId()); - MetaInfo metaInfo = new MetaInfo(fs.getPath(), new Path(fs.getPath() - .getParent(), confFileName), new Path(fs.getPath().getParent(), - summaryFileName), jobIndexInfo); - addToJobListCache(metaInfo); + HistoryFileInfo fileInfo = new HistoryFileInfo(fs.getPath(), new Path(fs + .getPath().getParent(), confFileName), new Path(fs.getPath() + .getParent(), summaryFileName), jobIndexInfo, true); + jobListCache.addIfAbsent(fileInfo); } } @@ -371,25 +568,18 @@ private List findTimestampedDirectories() throws IOException { return fsList; } - private void addToJobListCache(MetaInfo metaInfo) { - JobId jobId = metaInfo.getJobIndexInfo().getJobId(); - if (LOG.isDebugEnabled()) { - LOG.debug("Adding " + jobId + " to job list cache with " - + metaInfo.getJobIndexInfo()); - } - jobListCache.put(jobId, metaInfo); - if (jobListCache.size() > jobListCacheSize) { - jobListCache.remove(jobListCache.firstKey()); - } - } - /** * Scans the intermediate directory to find user directories. Scans these for - * history files if the modification time for the directory has changed. + * history files if the modification time for the directory has changed. Once + * it finds history files it starts the process of moving them to the done + * directory. * * @throws IOException + * if there was an error while scanning */ - private void scanIntermediateDirectory() throws IOException { + void scanIntermediateDirectory() throws IOException { + // TODO it would be great to limit how often this happens, except in the + // case where we are looking for a particular job.
List userDirList = JobHistoryUtils.localGlobber( intermediateDoneDirFc, intermediateDoneDirPath, ""); @@ -405,7 +595,12 @@ private void scanIntermediateDirectory() throws IOException { } } if (shouldScan) { - scanIntermediateDirectory(userDir.getPath()); + try { + scanIntermediateDirectory(userDir.getPath()); + } catch (IOException e) { + LOG.error("Error while trying to scan the directory " + + userDir.getPath(), e); + } } } } @@ -426,11 +621,33 @@ private void scanIntermediateDirectory(final Path absPath) throws IOException { .getIntermediateConfFileName(jobIndexInfo.getJobId()); String summaryFileName = JobHistoryUtils .getIntermediateSummaryFileName(jobIndexInfo.getJobId()); - MetaInfo metaInfo = new MetaInfo(fs.getPath(), new Path(fs.getPath() - .getParent(), confFileName), new Path(fs.getPath().getParent(), - summaryFileName), jobIndexInfo); - if (!intermediateListCache.containsKey(jobIndexInfo.getJobId())) { - intermediateListCache.put(jobIndexInfo.getJobId(), metaInfo); + HistoryFileInfo fileInfo = new HistoryFileInfo(fs.getPath(), new Path(fs + .getPath().getParent(), confFileName), new Path(fs.getPath() + .getParent(), summaryFileName), jobIndexInfo, false); + + final HistoryFileInfo old = jobListCache.addIfAbsent(fileInfo); + if (old == null || old.didMoveFail()) { + final HistoryFileInfo found = (old == null) ? fileInfo : old; + long cutoff = System.currentTimeMillis() - maxHistoryAge; + if(found.getJobIndexInfo().getFinishTime() <= cutoff) { + try { + found.delete(); + } catch (IOException e) { + LOG.warn("Error cleaning up a HistoryFile that is out of date.", e); + } + } else { + moveToDoneExecutor.execute(new Runnable() { + @Override + public void run() { + try { + found.moveToDone(); + } catch (IOException e) { + LOG.info("Failed to process fileInfo for job: " + + found.getJobId(), e); + } + } + }); + } } } } @@ -442,11 +659,11 @@ private void scanIntermediateDirectory(final Path absPath) throws IOException { * fileStatus list of Job History Files. * @param jobId * The JobId to find. - * @return A MetaInfo object for the jobId, null if not found. + * @return A FileInfo object for the jobId, null if not found. 
* @throws IOException */ - private MetaInfo getJobMetaInfo(List fileStatusList, JobId jobId) - throws IOException { + private HistoryFileInfo getJobFileInfo(List fileStatusList, + JobId jobId) throws IOException { for (FileStatus fs : fileStatusList) { JobIndexInfo jobIndexInfo = FileNameIndexUtils.getIndexInfo(fs.getPath() .getName()); @@ -455,10 +672,10 @@ private MetaInfo getJobMetaInfo(List fileStatusList, JobId jobId) .getIntermediateConfFileName(jobIndexInfo.getJobId()); String summaryFileName = JobHistoryUtils .getIntermediateSummaryFileName(jobIndexInfo.getJobId()); - MetaInfo metaInfo = new MetaInfo(fs.getPath(), new Path(fs.getPath() - .getParent(), confFileName), new Path(fs.getPath().getParent(), - summaryFileName), jobIndexInfo); - return metaInfo; + HistoryFileInfo fileInfo = new HistoryFileInfo(fs.getPath(), new Path( + fs.getPath().getParent(), confFileName), new Path(fs.getPath() + .getParent(), summaryFileName), jobIndexInfo, true); + return fileInfo; } } return null; @@ -474,175 +691,51 @@ private MetaInfo getJobMetaInfo(List fileStatusList, JobId jobId) * @return * @throws IOException */ - private MetaInfo scanOldDirsForJob(JobId jobId) throws IOException { + private HistoryFileInfo scanOldDirsForJob(JobId jobId) throws IOException { int jobSerialNumber = JobHistoryUtils.jobSerialNumber(jobId); String boxedSerialNumber = String.valueOf(jobSerialNumber); - Set dateStringSet; - synchronized (idToDateString) { - Set found = idToDateString.get(boxedSerialNumber); - if (found == null) { - return null; - } else { - dateStringSet = new HashSet(found); - } + Set dateStringSet = serialNumberIndex.get(boxedSerialNumber); + if (dateStringSet == null) { + return null; } for (String timestampPart : dateStringSet) { Path logDir = canonicalHistoryLogPath(jobId, timestampPart); List fileStatusList = scanDirectoryForHistoryFiles(logDir, doneDirFc); - MetaInfo metaInfo = getJobMetaInfo(fileStatusList, jobId); - if (metaInfo != null) { - return metaInfo; + HistoryFileInfo fileInfo = getJobFileInfo(fileStatusList, jobId); + if (fileInfo != null) { + return fileInfo; } } return null; } - /** - * Checks for the existence of the job history file in the intermediate - * directory. - * - * @param jobId - * @return - * @throws IOException - */ - private MetaInfo scanIntermediateForJob(JobId jobId) throws IOException { + public Collection getAllFileInfo() throws IOException { scanIntermediateDirectory(); - return intermediateListCache.get(jobId); + return jobListCache.values(); } - /** - * Parse a job from the JobHistoryFile, if the underlying file is not going to - * be deleted. - * - * @param metaInfo - * the where the JobHistory is stored. - * @return the Job or null if the underlying file was deleted. - * @throws IOException - * if there is an error trying to read the file. - */ - public Job loadJob(MetaInfo metaInfo) throws IOException { - return new CompletedJob(conf, metaInfo.getJobIndexInfo().getJobId(), - metaInfo.getHistoryFile(), false, metaInfo.getJobIndexInfo().getUser(), - metaInfo.getConfFile(), aclsMgr); - } - - public Collection getAllMetaInfo() throws IOException { - scanIntermediateDirectory(); - ArrayList result = new ArrayList(); - result.addAll(intermediateListCache.values()); - result.addAll(jobListCache.values()); - return result; - } - - Collection getIntermediateMetaInfos() throws IOException { - scanIntermediateDirectory(); - return intermediateListCache.values(); - } - - public MetaInfo getMetaInfo(JobId jobId) throws IOException { - // MetaInfo available in cache. 
- MetaInfo metaInfo = null; - if (jobListCache.containsKey(jobId)) { - metaInfo = jobListCache.get(jobId); + public HistoryFileInfo getFileInfo(JobId jobId) throws IOException { + // FileInfo available in cache. + HistoryFileInfo fileInfo = jobListCache.get(jobId); + if (fileInfo != null) { + return fileInfo; } - - if (metaInfo != null) { - return metaInfo; - } - - // MetaInfo not available. Check intermediate directory for meta info. - metaInfo = scanIntermediateForJob(jobId); - if (metaInfo != null) { - return metaInfo; + // OK so scan the intermediate to be sure we did not lose it that way + scanIntermediateDirectory(); + fileInfo = jobListCache.get(jobId); + if (fileInfo != null) { + return fileInfo; } // Intermediate directory does not contain job. Search through older ones. - metaInfo = scanOldDirsForJob(jobId); - if (metaInfo != null) { - return metaInfo; + fileInfo = scanOldDirsForJob(jobId); + if (fileInfo != null) { + return fileInfo; } return null; } - void moveToDone(MetaInfo metaInfo) throws IOException { - long completeTime = metaInfo.getJobIndexInfo().getFinishTime(); - if (completeTime == 0) - completeTime = System.currentTimeMillis(); - JobId jobId = metaInfo.getJobIndexInfo().getJobId(); - - List paths = new ArrayList(); - Path historyFile = metaInfo.getHistoryFile(); - if (historyFile == null) { - LOG.info("No file for job-history with " + jobId + " found in cache!"); - } else { - paths.add(historyFile); - } - - Path confFile = metaInfo.getConfFile(); - if (confFile == null) { - LOG.info("No file for jobConf with " + jobId + " found in cache!"); - } else { - paths.add(confFile); - } - - // TODO Check all mi getters and setters for the conf path - Path summaryFile = metaInfo.getSummaryFile(); - if (summaryFile == null) { - LOG.info("No summary file for job: " + jobId); - } else { - try { - String jobSummaryString = getJobSummary(intermediateDoneDirFc, - summaryFile); - SUMMARY_LOG.info(jobSummaryString); - LOG.info("Deleting JobSummary file: [" + summaryFile + "]"); - intermediateDoneDirFc.delete(summaryFile, false); - metaInfo.setSummaryFile(null); - } catch (IOException e) { - LOG.warn("Failed to process summary file: [" + summaryFile + "]"); - throw e; - } - } - - Path targetDir = canonicalHistoryLogPath(jobId, completeTime); - addDirectoryToSerialNumberIndex(targetDir); - try { - makeDoneSubdir(targetDir); - } catch (IOException e) { - LOG.warn("Failed creating subdirectory: " + targetDir - + " while attempting to move files for jobId: " + jobId); - throw e; - } - synchronized (metaInfo) { - if (historyFile != null) { - Path toPath = doneDirFc.makeQualified(new Path(targetDir, historyFile - .getName())); - try { - moveToDoneNow(historyFile, toPath); - } catch (IOException e) { - LOG.warn("Failed to move file: " + historyFile + " for jobId: " - + jobId); - throw e; - } - metaInfo.setHistoryFile(toPath); - } - if (confFile != null) { - Path toPath = doneDirFc.makeQualified(new Path(targetDir, confFile - .getName())); - try { - moveToDoneNow(confFile, toPath); - } catch (IOException e) { - LOG.warn("Failed to move file: " + historyFile + " for jobId: " - + jobId); - throw e; - } - metaInfo.setConfFile(toPath); - } - } - addToJobListCache(metaInfo); - intermediateListCache.remove(jobId); - } - private void moveToDoneNow(final Path src, final Path target) throws IOException { LOG.info("Moving " + src.toString() + " to " + target.toString()); @@ -658,20 +751,9 @@ private String getJobSummary(FileContext fc, Path path) throws IOException { } private void makeDoneSubdir(Path 
path) throws IOException { - boolean existsInExistingCache = false; - synchronized (existingDoneSubdirs) { - if (existingDoneSubdirs.contains(path)) - existsInExistingCache = true; - } try { doneDirFc.getFileStatus(path); - if (!existsInExistingCache) { - existingDoneSubdirs.add(path); - if (LOG.isDebugEnabled()) { - LOG.debug("JobHistory.maybeMakeSubdirectory -- We believed " + path - + " already existed, but it didn't."); - } - } + existingDoneSubdirs.add(path); } catch (FileNotFoundException fnfE) { try { FsPermission fsp = new FsPermission( @@ -685,11 +767,8 @@ private void makeDoneSubdir(Path path) throws IOException { + ", " + fsp); doneDirFc.setPermission(path, fsp); } - synchronized (existingDoneSubdirs) { - existingDoneSubdirs.add(path); - } - } catch (FileAlreadyExistsException faeE) { - // Nothing to do. + existingDoneSubdirs.add(path); + } catch (FileAlreadyExistsException faeE) { // Nothing to do. } } } @@ -713,16 +792,22 @@ private long getEffectiveTimestamp(long finishTime, FileStatus fileStatus) { return finishTime; } - private void deleteJobFromDone(MetaInfo metaInfo) throws IOException { - jobListCache.remove(metaInfo.getJobId()); - doneDirFc.delete(doneDirFc.makeQualified(metaInfo.getHistoryFile()), false); - doneDirFc.delete(doneDirFc.makeQualified(metaInfo.getConfFile()), false); + private void deleteJobFromDone(HistoryFileInfo fileInfo) throws IOException { + jobListCache.delete(fileInfo); + fileInfo.delete(); } + /** + * Clean up older history files. + * + * @throws IOException + * on any error trying to remove the entries. + */ @SuppressWarnings("unchecked") - void clean(long cutoff, HistoryStorage storage) throws IOException { + void clean() throws IOException { // TODO this should be replaced by something that knows about the directory // structure and will put less of a load on HDFS. + long cutoff = System.currentTimeMillis() - maxHistoryAge; boolean halted = false; // TODO Delete YYYY/MM/DD directories. List serialDirList = findTimestampedDirectories(); @@ -737,13 +822,17 @@ void clean(long cutoff, HistoryStorage storage) throws IOException { long effectiveTimestamp = getEffectiveTimestamp( jobIndexInfo.getFinishTime(), historyFile); if (effectiveTimestamp <= cutoff) { - String confFileName = JobHistoryUtils - .getIntermediateConfFileName(jobIndexInfo.getJobId()); - MetaInfo metaInfo = new MetaInfo(historyFile.getPath(), new Path( - historyFile.getPath().getParent(), confFileName), null, - jobIndexInfo); - storage.jobRemovedFromHDFS(metaInfo.getJobId()); - deleteJobFromDone(metaInfo); + HistoryFileInfo fileInfo = this.jobListCache.get(jobIndexInfo + .getJobId()); + if (fileInfo == null) { + String confFileName = JobHistoryUtils + .getIntermediateConfFileName(jobIndexInfo.getJobId()); + + fileInfo = new HistoryFileInfo(historyFile.getPath(), new Path( + historyFile.getPath().getParent(), confFileName), null, + jobIndexInfo, true); + } + deleteJobFromDone(fileInfo); } else { halted = true; break; @@ -752,9 +841,7 @@ void clean(long cutoff, HistoryStorage storage) throws IOException { if (!halted) { doneDirFc.delete(doneDirFc.makeQualified(serialDir.getPath()), true); removeDirectoryFromSerialNumberIndex(serialDir.getPath()); - synchronized (existingDoneSubdirs) { - existingDoneSubdirs.remove(serialDir.getPath()); - } + existingDoneSubdirs.remove(serialDir.getPath()); } else { break; // Don't scan any more directories. 
} diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryStorage.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryStorage.java index bbdf9feabc6..df528df3e05 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryStorage.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryStorage.java @@ -28,7 +28,12 @@ import org.apache.hadoop.classification.InterfaceStability; /** - * Provides an API to query jobs that have finished. + * Provides an API to query jobs that have finished. + * + * For those implementing this API be aware that there is no feedback when + * files are removed from HDFS. You may rely on HistoryFileManager to help + * you know when that has happened if you have not made a complete backup of + * the data stored on HDFS. */ @InterfaceAudience.Public @InterfaceStability.Unstable @@ -71,10 +76,4 @@ JobsInfo getPartialJobs(Long offset, Long count, String user, * @return the job, or null if it is not found. */ Job getFullJob(JobId jobId); - - /** - * Informs the Storage that a job has been removed from HDFS - * @param jobId the ID of the job that was removed. - */ - void jobRemovedFromHDFS(JobId jobId); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistory.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistory.java index 54ffec6924d..2a8affb924c 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistory.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistory.java @@ -21,10 +21,7 @@ import java.io.IOException; import java.util.HashMap; import java.util.Map; -import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.ScheduledThreadPoolExecutor; -import java.util.concurrent.ThreadFactory; -import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.regex.Pattern; @@ -37,7 +34,7 @@ import org.apache.hadoop.mapreduce.v2.api.records.JobId; import org.apache.hadoop.mapreduce.v2.api.records.JobState; import org.apache.hadoop.mapreduce.v2.app.job.Job; -import org.apache.hadoop.mapreduce.v2.hs.HistoryFileManager.MetaInfo; +import org.apache.hadoop.mapreduce.v2.hs.HistoryFileManager.HistoryFileInfo; import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.JobsInfo; import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig; import org.apache.hadoop.util.ReflectionUtils; @@ -66,15 +63,9 @@ public class JobHistory extends AbstractService implements HistoryContext { // Time interval for the move thread. private long moveThreadInterval; - // Number of move threads. 
- private int numMoveThreads; - private Configuration conf; - private Thread moveIntermediateToDoneThread = null; - private MoveIntermediateToDoneRunnable moveIntermediateToDoneRunnable = null; - - private ScheduledThreadPoolExecutor cleanerScheduledExecutor = null; + private ScheduledThreadPoolExecutor scheduledExecutor = null; private HistoryStorage storage = null; private HistoryFileManager hsManager = null; @@ -91,8 +82,6 @@ public void init(Configuration conf) throws YarnException { moveThreadInterval = conf.getLong( JHAdminConfig.MR_HISTORY_MOVE_INTERVAL_MS, JHAdminConfig.DEFAULT_MR_HISTORY_MOVE_INTERVAL_MS); - numMoveThreads = conf.getInt(JHAdminConfig.MR_HISTORY_MOVE_THREAD_COUNT, - JHAdminConfig.DEFAULT_MR_HISTORY_MOVE_THREAD_COUNT); hsManager = new HistoryFileManager(); hsManager.init(conf); @@ -120,27 +109,22 @@ public void start() { ((Service) storage).start(); } - // Start moveIntermediatToDoneThread - moveIntermediateToDoneRunnable = new MoveIntermediateToDoneRunnable( - moveThreadInterval, numMoveThreads); - moveIntermediateToDoneThread = new Thread(moveIntermediateToDoneRunnable); - moveIntermediateToDoneThread.setName("MoveIntermediateToDoneScanner"); - moveIntermediateToDoneThread.start(); + scheduledExecutor = new ScheduledThreadPoolExecutor(2, + new ThreadFactoryBuilder().setNameFormat("Log Scanner/Cleaner #%d") + .build()); + + scheduledExecutor.scheduleAtFixedRate(new MoveIntermediateToDoneRunnable(), + moveThreadInterval, moveThreadInterval, TimeUnit.MILLISECONDS); // Start historyCleaner boolean startCleanerService = conf.getBoolean( JHAdminConfig.MR_HISTORY_CLEANER_ENABLE, true); if (startCleanerService) { - long maxAgeOfHistoryFiles = conf.getLong( - JHAdminConfig.MR_HISTORY_MAX_AGE_MS, - JHAdminConfig.DEFAULT_MR_HISTORY_MAX_AGE); - cleanerScheduledExecutor = new ScheduledThreadPoolExecutor(1, - new ThreadFactoryBuilder().setNameFormat("LogCleaner").build()); long runInterval = conf.getLong( JHAdminConfig.MR_HISTORY_CLEANER_INTERVAL_MS, JHAdminConfig.DEFAULT_MR_HISTORY_CLEANER_INTERVAL_MS); - cleanerScheduledExecutor - .scheduleAtFixedRate(new HistoryCleaner(maxAgeOfHistoryFiles), + scheduledExecutor + .scheduleAtFixedRate(new HistoryCleaner(), 30 * 1000l, runInterval, TimeUnit.MILLISECONDS); } super.start(); @@ -149,24 +133,12 @@ public void start() { @Override public void stop() { LOG.info("Stopping JobHistory"); - if (moveIntermediateToDoneThread != null) { - LOG.info("Stopping move thread"); - moveIntermediateToDoneRunnable.stop(); - moveIntermediateToDoneThread.interrupt(); - try { - LOG.info("Joining on move thread"); - moveIntermediateToDoneThread.join(); - } catch (InterruptedException e) { - LOG.info("Interrupted while stopping move thread"); - } - } - - if (cleanerScheduledExecutor != null) { - LOG.info("Stopping History Cleaner"); - cleanerScheduledExecutor.shutdown(); + if (scheduledExecutor != null) { + LOG.info("Stopping History Cleaner/Move To Done"); + scheduledExecutor.shutdown(); boolean interrupted = false; long currentTime = System.currentTimeMillis(); - while (!cleanerScheduledExecutor.isShutdown() + while (!scheduledExecutor.isShutdown() && System.currentTimeMillis() > currentTime + 1000l && !interrupted) { try { Thread.sleep(20); @@ -174,8 +146,10 @@ public void stop() { interrupted = true; } } - if (!cleanerScheduledExecutor.isShutdown()) { - LOG.warn("HistoryCleanerService shutdown may not have succeeded"); + if (!scheduledExecutor.isShutdown()) { + LOG.warn("HistoryCleanerService/move to done shutdown may not have " + + "succeeded, 
Forcing a shutdown"); + scheduledExecutor.shutdownNow(); } } if (storage instanceof Service) { @@ -195,68 +169,34 @@ public String getApplicationName() { } private class MoveIntermediateToDoneRunnable implements Runnable { - - private long sleepTime; - private ThreadPoolExecutor moveToDoneExecutor = null; - private boolean running = false; - - public synchronized void stop() { - running = false; - notify(); - } - - MoveIntermediateToDoneRunnable(long sleepTime, int numMoveThreads) { - this.sleepTime = sleepTime; - ThreadFactory tf = new ThreadFactoryBuilder().setNameFormat( - "MoveIntermediateToDone Thread #%d").build(); - moveToDoneExecutor = new ThreadPoolExecutor(1, numMoveThreads, 1, - TimeUnit.HOURS, new LinkedBlockingQueue(), tf); - running = true; - } - @Override public void run() { - Thread.currentThread().setName("IntermediateHistoryScanner"); try { - while (true) { - LOG.info("Starting scan to move intermediate done files"); - for (final MetaInfo metaInfo : hsManager.getIntermediateMetaInfos()) { - moveToDoneExecutor.execute(new Runnable() { - @Override - public void run() { - try { - hsManager.moveToDone(metaInfo); - } catch (IOException e) { - LOG.info( - "Failed to process metaInfo for job: " - + metaInfo.getJobId(), e); - } - } - }); - } - synchronized (this) { - try { - this.wait(sleepTime); - } catch (InterruptedException e) { - LOG.info("IntermediateHistoryScannerThread interrupted"); - } - if (!running) { - break; - } - } - } + LOG.info("Starting scan to move intermediate done files"); + hsManager.scanIntermediateDirectory(); } catch (IOException e) { - LOG.warn("Unable to get a list of intermediate files to be moved"); - // TODO Shut down the entire process!!!! + LOG.error("Error while scanning intermediate done dir ", e); } } } + + private class HistoryCleaner implements Runnable { + public void run() { + LOG.info("History Cleaner started"); + try { + hsManager.clean(); + } catch (IOException e) { + LOG.warn("Error trying to clean up ", e); + } + LOG.info("History Cleaner complete"); + } + } /** * Helper method for test cases. 
*/ - MetaInfo getJobMetaInfo(JobId jobId) throws IOException { - return hsManager.getMetaInfo(jobId); + HistoryFileInfo getJobFileInfo(JobId jobId) throws IOException { + return hsManager.getFileInfo(jobId); } @Override @@ -313,25 +253,6 @@ public JobsInfo getPartialJobs(Long offset, Long count, String user, fBegin, fEnd, jobState); } - public class HistoryCleaner implements Runnable { - long maxAgeMillis; - - public HistoryCleaner(long maxAge) { - this.maxAgeMillis = maxAge; - } - - public void run() { - LOG.info("History Cleaner started"); - long cutoff = System.currentTimeMillis() - maxAgeMillis; - try { - hsManager.clean(cutoff, storage); - } catch (IOException e) { - LOG.warn("Error trying to clean up ", e); - } - LOG.info("History Cleaner complete"); - } - } - // TODO AppContext - Not Required private ApplicationAttemptId appAttemptID; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/PartialJob.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/PartialJob.java index 199f77062dc..dd5bb01a401 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/PartialJob.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/PartialJob.java @@ -23,6 +23,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.mapreduce.Counters; import org.apache.hadoop.mapreduce.JobACL; @@ -166,6 +167,11 @@ public String getUserName() { public Path getConfFile() { throw new IllegalStateException("Not implemented yet"); } + + @Override + public Configuration loadConfFile() { + throw new IllegalStateException("Not implemented yet"); + } @Override public Map getJobACLs() { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsWebServices.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsWebServices.java index 71ad89f028e..842d8f57b83 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsWebServices.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsWebServices.java @@ -65,7 +65,6 @@ public class HsWebServices { private final HistoryContext ctx; private WebApp webapp; - private final Configuration conf; @Context UriInfo uriInfo; @@ -74,7 +73,6 @@ public class HsWebServices { public HsWebServices(final HistoryContext ctx, final Configuration conf, final WebApp webapp) { this.ctx = ctx; - this.conf = conf; this.webapp = webapp; } @@ -222,7 +220,7 @@ public ConfInfo getJobConf(@PathParam("jobid") String jid) { Job job = AMWebServices.getJobFromJobIdString(jid, ctx); ConfInfo info; try { - info = new ConfInfo(job, this.conf); + info = new ConfInfo(job); } catch (IOException e) { throw new NotFoundException("unable to load configuration for job: " + jid); diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryEntities.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryEntities.java index 34462ece0e5..1080ebe5325 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryEntities.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryEntities.java @@ -22,12 +22,15 @@ import org.apache.hadoop.mapreduce.v2.api.records.TaskType; import org.apache.hadoop.mapreduce.v2.app.job.Task; import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt; +import org.apache.hadoop.mapreduce.v2.hs.HistoryFileManager.HistoryFileInfo; import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils; import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; import org.junit.runners.Parameterized.Parameters; +import static org.mockito.Mockito.*; + @RunWith(value = Parameterized.class) public class TestJobHistoryEntities { @@ -61,10 +64,12 @@ public static Collection data() { /* Verify some expected values based on the history file */ @Test public void testCompletedJob() throws Exception { + HistoryFileInfo info = mock(HistoryFileInfo.class); + when(info.getConfFile()).thenReturn(fullConfPath); //Re-initialize to verify the delayed load. completedJob = new CompletedJob(conf, jobId, fulleHistoryPath, loadTasks, "user", - fullConfPath, jobAclsManager); + info, jobAclsManager); //Verify tasks loaded based on loadTask parameter. assertEquals(loadTasks, completedJob.tasksLoaded.get()); assertEquals(1, completedJob.getAMInfos().size()); @@ -84,9 +89,11 @@ public void testCompletedJob() throws Exception { @Test public void testCompletedTask() throws Exception { + HistoryFileInfo info = mock(HistoryFileInfo.class); + when(info.getConfFile()).thenReturn(fullConfPath); completedJob = new CompletedJob(conf, jobId, fulleHistoryPath, loadTasks, "user", - fullConfPath, jobAclsManager); + info, jobAclsManager); TaskId mt1Id = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP); TaskId rt1Id = MRBuilderUtils.newTaskId(jobId, 0, TaskType.REDUCE); @@ -111,9 +118,11 @@ public void testCompletedTask() throws Exception { @Test public void testCompletedTaskAttempt() throws Exception { + HistoryFileInfo info = mock(HistoryFileInfo.class); + when(info.getConfFile()).thenReturn(fullConfPath); completedJob = new CompletedJob(conf, jobId, fulleHistoryPath, loadTasks, "user", - fullConfPath, jobAclsManager); + info, jobAclsManager); TaskId mt1Id = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP); TaskId rt1Id = MRBuilderUtils.newTaskId(jobId, 0, TaskType.REDUCE); TaskAttemptId mta1Id = MRBuilderUtils.newTaskAttemptId(mt1Id, 0); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryParsing.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryParsing.java index 7aab4de0dc2..f2eaeebe97d 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryParsing.java +++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryParsing.java @@ -56,6 +56,7 @@ import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType; +import org.apache.hadoop.mapreduce.v2.hs.HistoryFileManager.HistoryFileInfo; import org.apache.hadoop.mapreduce.v2.hs.TestJobHistoryEvents.MRAppWithHistory; import org.apache.hadoop.mapreduce.v2.jobhistory.FileNameIndexUtils; import org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils; @@ -84,12 +85,22 @@ public List resolve(List names) { @Test public void testHistoryParsing() throws Exception { - checkHistoryParsing(2, 1, 2); + LOG.info("STARTING testHistoryParsing()"); + try { + checkHistoryParsing(2, 1, 2); + } finally { + LOG.info("FINISHED testHistoryParsing()"); + } } @Test public void testHistoryParsingWithParseErrors() throws Exception { - checkHistoryParsing(3, 0, 2); + LOG.info("STARTING testHistoryParsingWithParseErrors()"); + try { + checkHistoryParsing(3, 0, 2); + } finally { + LOG.info("FINISHED testHistoryParsingWithParseErrors()"); + } } private static String getJobSummary(FileContext fc, Path path) throws IOException { @@ -124,61 +135,112 @@ private void checkHistoryParsing(final int numMaps, final int numReduces, String jobhistoryDir = JobHistoryUtils .getHistoryIntermediateDoneDirForUser(conf); - JobHistory jobHistory = new JobHistory(); - jobHistory.init(conf); - - JobIndexInfo jobIndexInfo = jobHistory.getJobMetaInfo(jobId) - .getJobIndexInfo(); - String jobhistoryFileName = FileNameIndexUtils - .getDoneFileName(jobIndexInfo); - - Path historyFilePath = new Path(jobhistoryDir, jobhistoryFileName); - FSDataInputStream in = null; - LOG.info("JobHistoryFile is: " + historyFilePath); + FileContext fc = null; try { fc = FileContext.getFileContext(conf); - in = fc.open(fc.makeQualified(historyFilePath)); } catch (IOException ioe) { - LOG.info("Can not open history file: " + historyFilePath, ioe); - throw (new Exception("Can not open History File")); + LOG.info("Can not get FileContext", ioe); + throw (new Exception("Can not get File Context")); + } + + if (numMaps == numSuccessfulMaps) { + String summaryFileName = JobHistoryUtils + .getIntermediateSummaryFileName(jobId); + Path summaryFile = new Path(jobhistoryDir, summaryFileName); + String jobSummaryString = getJobSummary(fc, summaryFile); + Assert.assertNotNull(jobSummaryString); + Assert.assertTrue(jobSummaryString.contains("resourcesPerMap=100")); + Assert.assertTrue(jobSummaryString.contains("resourcesPerReduce=100")); + + Map jobSummaryElements = new HashMap(); + StringTokenizer strToken = new StringTokenizer(jobSummaryString, ","); + while (strToken.hasMoreTokens()) { + String keypair = strToken.nextToken(); + jobSummaryElements.put(keypair.split("=")[0], keypair.split("=")[1]); + } + + Assert.assertEquals("JobId does not match", jobId.toString(), + jobSummaryElements.get("jobId")); + Assert.assertEquals("JobName does not match", "test", + jobSummaryElements.get("jobName")); + Assert.assertTrue("submitTime should not be 0", + Long.parseLong(jobSummaryElements.get("submitTime")) != 0); + Assert.assertTrue("launchTime should not be 0", + Long.parseLong(jobSummaryElements.get("launchTime")) != 0); + Assert.assertTrue("firstMapTaskLaunchTime should not be 0", + Long.parseLong(jobSummaryElements.get("firstMapTaskLaunchTime")) != 0); + Assert + 
.assertTrue( + "firstReduceTaskLaunchTime should not be 0", + Long.parseLong(jobSummaryElements.get("firstReduceTaskLaunchTime")) != 0); + Assert.assertTrue("finishTime should not be 0", + Long.parseLong(jobSummaryElements.get("finishTime")) != 0); + Assert.assertEquals("Mismatch in num map slots", numSuccessfulMaps, + Integer.parseInt(jobSummaryElements.get("numMaps"))); + Assert.assertEquals("Mismatch in num reduce slots", numReduces, + Integer.parseInt(jobSummaryElements.get("numReduces"))); + Assert.assertEquals("User does not match", System.getProperty("user.name"), + jobSummaryElements.get("user")); + Assert.assertEquals("Queue does not match", "default", + jobSummaryElements.get("queue")); + Assert.assertEquals("Status does not match", "SUCCEEDED", + jobSummaryElements.get("status")); } - JobHistoryParser parser = new JobHistoryParser(in); - final EventReader realReader = new EventReader(in); - EventReader reader = Mockito.mock(EventReader.class); - if (numMaps == numSuccessfulMaps) { - reader = realReader; - } else { - final AtomicInteger numFinishedEvents = new AtomicInteger(0); // Hack! - Mockito.when(reader.getNextEvent()).thenAnswer( - new Answer() { - public HistoryEvent answer(InvocationOnMock invocation) - throws IOException { - HistoryEvent event = realReader.getNextEvent(); - if (event instanceof TaskFinishedEvent) { - numFinishedEvents.incrementAndGet(); - } - - if (numFinishedEvents.get() <= numSuccessfulMaps) { - return event; - } else { - throw new IOException("test"); + JobHistory jobHistory = new JobHistory(); + jobHistory.init(conf); + HistoryFileInfo fileInfo = jobHistory.getJobFileInfo(jobId); + JobInfo jobInfo; + long numFinishedMaps; + + synchronized(fileInfo) { + Path historyFilePath = fileInfo.getHistoryFile(); + FSDataInputStream in = null; + LOG.info("JobHistoryFile is: " + historyFilePath); + try { + in = fc.open(fc.makeQualified(historyFilePath)); + } catch (IOException ioe) { + LOG.info("Can not open history file: " + historyFilePath, ioe); + throw (new Exception("Can not open History File")); + } + + JobHistoryParser parser = new JobHistoryParser(in); + final EventReader realReader = new EventReader(in); + EventReader reader = Mockito.mock(EventReader.class); + if (numMaps == numSuccessfulMaps) { + reader = realReader; + } else { + final AtomicInteger numFinishedEvents = new AtomicInteger(0); // Hack! 
+ Mockito.when(reader.getNextEvent()).thenAnswer( + new Answer() { + public HistoryEvent answer(InvocationOnMock invocation) + throws IOException { + HistoryEvent event = realReader.getNextEvent(); + if (event instanceof TaskFinishedEvent) { + numFinishedEvents.incrementAndGet(); + } + + if (numFinishedEvents.get() <= numSuccessfulMaps) { + return event; + } else { + throw new IOException("test"); + } } } - } ); - } - - JobInfo jobInfo = parser.parse(reader); - - long numFinishedMaps = + } + + jobInfo = parser.parse(reader); + + numFinishedMaps = computeFinishedMaps(jobInfo, numMaps, numSuccessfulMaps); - - if (numFinishedMaps != numMaps) { - Exception parseException = parser.getParseException(); - Assert.assertNotNull("Didn't get expected parse exception", - parseException); + + if (numFinishedMaps != numMaps) { + Exception parseException = parser.getParseException(); + Assert.assertNotNull("Didn't get expected parse exception", + parseException); + } } Assert.assertEquals("Incorrect username ", System.getProperty("user.name"), @@ -246,52 +308,6 @@ public HistoryEvent answer(InvocationOnMock invocation) } } } - - if (numMaps == numSuccessfulMaps) { - - String summaryFileName = JobHistoryUtils - .getIntermediateSummaryFileName(jobId); - Path summaryFile = new Path(jobhistoryDir, summaryFileName); - String jobSummaryString = getJobSummary(fc, summaryFile); - Assert.assertTrue(jobSummaryString.contains("resourcesPerMap=100")); - Assert.assertTrue(jobSummaryString.contains("resourcesPerReduce=100")); - Assert.assertNotNull(jobSummaryString); - - Map jobSummaryElements = new HashMap(); - StringTokenizer strToken = new StringTokenizer(jobSummaryString, ","); - while (strToken.hasMoreTokens()) { - String keypair = strToken.nextToken(); - jobSummaryElements.put(keypair.split("=")[0], keypair.split("=")[1]); - - } - - Assert.assertEquals("JobId does not match", jobId.toString(), - jobSummaryElements.get("jobId")); - Assert.assertEquals("JobName does not match", "test", - jobSummaryElements.get("jobName")); - Assert.assertTrue("submitTime should not be 0", - Long.parseLong(jobSummaryElements.get("submitTime")) != 0); - Assert.assertTrue("launchTime should not be 0", - Long.parseLong(jobSummaryElements.get("launchTime")) != 0); - Assert.assertTrue("firstMapTaskLaunchTime should not be 0", - Long.parseLong(jobSummaryElements.get("firstMapTaskLaunchTime")) != 0); - Assert - .assertTrue( - "firstReduceTaskLaunchTime should not be 0", - Long.parseLong(jobSummaryElements.get("firstReduceTaskLaunchTime")) != 0); - Assert.assertTrue("finishTime should not be 0", - Long.parseLong(jobSummaryElements.get("finishTime")) != 0); - Assert.assertEquals("Mismatch in num map slots", numSuccessfulMaps, - Integer.parseInt(jobSummaryElements.get("numMaps"))); - Assert.assertEquals("Mismatch in num reduce slots", numReduces, - Integer.parseInt(jobSummaryElements.get("numReduces"))); - Assert.assertEquals("User does not match", System.getProperty("user.name"), - jobSummaryElements.get("user")); - Assert.assertEquals("Queue does not match", "default", - jobSummaryElements.get("queue")); - Assert.assertEquals("Status does not match", "SUCCEEDED", - jobSummaryElements.get("status")); - } } // Computes finished maps similar to RecoveryService... 
@@ -314,6 +330,8 @@ private long computeFinishedMaps(JobInfo jobInfo, @Test public void testHistoryParsingForFailedAttempts() throws Exception { + LOG.info("STARTING testHistoryParsingForFailedAttempts"); + try { Configuration conf = new Configuration(); conf .setClass( @@ -335,7 +353,7 @@ public void testHistoryParsingForFailedAttempts() throws Exception { JobHistory jobHistory = new JobHistory(); jobHistory.init(conf); - JobIndexInfo jobIndexInfo = jobHistory.getJobMetaInfo(jobId) + JobIndexInfo jobIndexInfo = jobHistory.getJobFileInfo(jobId) .getJobIndexInfo(); String jobhistoryFileName = FileNameIndexUtils .getDoneFileName(jobIndexInfo); @@ -372,6 +390,9 @@ public void testHistoryParsingForFailedAttempts() throws Exception { } } Assert.assertEquals("No of Failed tasks doesn't match.", 2, noOffailedAttempts); + } finally { + LOG.info("FINISHED testHistoryParsingForFailedAttempts"); + } } static class MRAppWithHistoryWithFailedAttempt extends MRAppWithHistory { From eeec4dc72abf4c540146a81c5419828520b80fa4 Mon Sep 17 00:00:00 2001 From: Tsz-wo Sze Date: Wed, 18 Apr 2012 03:25:08 +0000 Subject: [PATCH 47/57] HDFS-3294. Fix code indentation in NamenodeWebHdfsMethods and DatanodeWebHdfsMethods. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1327365 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + .../web/resources/DatanodeWebHdfsMethods.java | 56 ++++++-- .../web/resources/NamenodeWebHdfsMethods.java | 133 +++++++++++++----- 3 files changed, 146 insertions(+), 46 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index b13218a19af..955c7b97b10 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -382,6 +382,9 @@ Release 2.0.0 - UNRELEASED HDFS-3279. Move the FSEditLog constructor with @VisibleForTesting to TestEditLog. (Arpit Gupta via szetszwo) + HDFS-3294. Fix code indentation in NamenodeWebHdfsMethods and + DatanodeWebHdfsMethods. (szetszwo) + OPTIMIZATIONS HDFS-3024. 
Improve performance of stringification in addStoredBlock (todd) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java index 44ef5273386..05f92383465 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java @@ -178,8 +178,25 @@ public Response put( return ugi.doAs(new PrivilegedExceptionAction() { @Override public Response run() throws IOException, URISyntaxException { + return put(in, ugi, delegation, nnRpcAddr, path.getAbsolutePath(), op, + permission, overwrite, bufferSize, replication, blockSize); + } + }); + } - final String fullpath = path.getAbsolutePath(); + private Response put( + final InputStream in, + final UserGroupInformation ugi, + final DelegationParam delegation, + final InetSocketAddress nnRpcAddr, + final String fullpath, + final PutOpParam op, + final PermissionParam permission, + final OverwriteParam overwrite, + final BufferSizeParam bufferSize, + final ReplicationParam replication, + final BlockSizeParam blockSize + ) throws IOException, URISyntaxException { final DataNode datanode = (DataNode)context.getAttribute("datanode"); switch(op.getValue()) { @@ -214,8 +231,6 @@ public Response run() throws IOException, URISyntaxException { default: throw new UnsupportedOperationException(op + " is not supported"); } - } - }); } /** Handle HTTP POST request for the root for the root. */ @@ -265,8 +280,21 @@ public Response post( return ugi.doAs(new PrivilegedExceptionAction() { @Override public Response run() throws IOException { + return post(in, ugi, delegation, nnRpcAddr, path.getAbsolutePath(), op, + bufferSize); + } + }); + } - final String fullpath = path.getAbsolutePath(); + private Response post( + final InputStream in, + final UserGroupInformation ugi, + final DelegationParam delegation, + final InetSocketAddress nnRpcAddr, + final String fullpath, + final PostOpParam op, + final BufferSizeParam bufferSize + ) throws IOException { final DataNode datanode = (DataNode)context.getAttribute("datanode"); switch(op.getValue()) { @@ -292,8 +320,6 @@ public Response run() throws IOException { default: throw new UnsupportedOperationException(op + " is not supported"); } - } - }); } /** Handle HTTP GET request for the root. 
*/ @@ -348,8 +374,22 @@ public Response get( return ugi.doAs(new PrivilegedExceptionAction() { @Override public Response run() throws IOException { + return get(ugi, delegation, nnRpcAddr, path.getAbsolutePath(), op, + offset, length, bufferSize); + } + }); + } - final String fullpath = path.getAbsolutePath(); + private Response get( + final UserGroupInformation ugi, + final DelegationParam delegation, + final InetSocketAddress nnRpcAddr, + final String fullpath, + final GetOpParam op, + final OffsetParam offset, + final LengthParam length, + final BufferSizeParam bufferSize + ) throws IOException { final DataNode datanode = (DataNode)context.getAttribute("datanode"); final Configuration conf = new Configuration(datanode.getConf()); @@ -412,7 +452,5 @@ public void write(final OutputStream out) throws IOException { default: throw new UnsupportedOperationException(op + " is not supported"); } - } - }); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java index 0115ee27579..0127e959be8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java @@ -314,8 +314,40 @@ public Response put( public Response run() throws IOException, URISyntaxException { REMOTE_ADDRESS.set(request.getRemoteAddr()); try { + return put(ugi, delegation, username, doAsUser, + path.getAbsolutePath(), op, destination, owner, group, + permission, overwrite, bufferSize, replication, blockSize, + modificationTime, accessTime, renameOptions, createParent, + delegationTokenArgument); + } finally { + REMOTE_ADDRESS.set(null); + } + } + }); + } + + private Response put( + final UserGroupInformation ugi, + final DelegationParam delegation, + final UserParam username, + final DoAsParam doAsUser, + final String fullpath, + final PutOpParam op, + final DestinationParam destination, + final OwnerParam owner, + final GroupParam group, + final PermissionParam permission, + final OverwriteParam overwrite, + final BufferSizeParam bufferSize, + final ReplicationParam replication, + final BlockSizeParam blockSize, + final ModificationTimeParam modificationTime, + final AccessTimeParam accessTime, + final RenameOptionSetParam renameOptions, + final CreateParentParam createParent, + final TokenArgumentParam delegationTokenArgument + ) throws IOException, URISyntaxException { - final String fullpath = path.getAbsolutePath(); final Configuration conf = (Configuration)context.getAttribute(JspHelper.CURRENT_CONF); final NameNode namenode = (NameNode)context.getAttribute("name.node"); final NamenodeProtocols np = namenode.getRpcServer(); @@ -396,12 +428,6 @@ public Response run() throws IOException, URISyntaxException { default: throw new UnsupportedOperationException(op + " is not supported"); } - - } finally { - REMOTE_ADDRESS.set(null); - } - } - }); } /** Handle HTTP POST request for the root. 
*/ @@ -452,8 +478,24 @@ public Response post( public Response run() throws IOException, URISyntaxException { REMOTE_ADDRESS.set(request.getRemoteAddr()); try { + return post(ugi, delegation, username, doAsUser, + path.getAbsolutePath(), op, bufferSize); + } finally { + REMOTE_ADDRESS.set(null); + } + } + }); + } - final String fullpath = path.getAbsolutePath(); + private Response post( + final UserGroupInformation ugi, + final DelegationParam delegation, + final UserParam username, + final DoAsParam doAsUser, + final String fullpath, + final PostOpParam op, + final BufferSizeParam bufferSize + ) throws IOException, URISyntaxException { final NameNode namenode = (NameNode)context.getAttribute("name.node"); switch(op.getValue()) { @@ -466,12 +508,6 @@ public Response run() throws IOException, URISyntaxException { default: throw new UnsupportedOperationException(op + " is not supported"); } - - } finally { - REMOTE_ADDRESS.set(null); - } - } - }); } /** Handle HTTP GET request for the root. */ @@ -534,9 +570,28 @@ public Response get( public Response run() throws IOException, URISyntaxException { REMOTE_ADDRESS.set(request.getRemoteAddr()); try { + return get(ugi, delegation, username, doAsUser, + path.getAbsolutePath(), op, offset, length, renewer, bufferSize); + } finally { + REMOTE_ADDRESS.set(null); + } + } + }); + } + private Response get( + final UserGroupInformation ugi, + final DelegationParam delegation, + final UserParam username, + final DoAsParam doAsUser, + final String fullpath, + final GetOpParam op, + final OffsetParam offset, + final LengthParam length, + final RenewerParam renewer, + final BufferSizeParam bufferSize + ) throws IOException, URISyntaxException { final NameNode namenode = (NameNode)context.getAttribute("name.node"); - final String fullpath = path.getAbsolutePath(); final NamenodeProtocols np = namenode.getRpcServer(); switch(op.getValue()) { @@ -613,13 +668,7 @@ public Response run() throws IOException, URISyntaxException { } default: throw new UnsupportedOperationException(op + " is not supported"); - } - - } finally { - REMOTE_ADDRESS.set(null); - } - } - }); + } } private static DirectoryListing getDirectoryListing(final NamenodeProtocols np, @@ -712,25 +761,35 @@ public Response delete( public Response run() throws IOException { REMOTE_ADDRESS.set(request.getRemoteAddr()); try { - - final NameNode namenode = (NameNode)context.getAttribute("name.node"); - final String fullpath = path.getAbsolutePath(); - - switch(op.getValue()) { - case DELETE: - { - final boolean b = namenode.getRpcServer().delete(fullpath, recursive.getValue()); - final String js = JsonUtil.toJsonString("boolean", b); - return Response.ok(js).type(MediaType.APPLICATION_JSON).build(); - } - default: - throw new UnsupportedOperationException(op + " is not supported"); - } - + return delete(ugi, delegation, username, doAsUser, + path.getAbsolutePath(), op, recursive); } finally { REMOTE_ADDRESS.set(null); } } }); } + + private Response delete( + final UserGroupInformation ugi, + final DelegationParam delegation, + final UserParam username, + final DoAsParam doAsUser, + final String fullpath, + final DeleteOpParam op, + final RecursiveParam recursive + ) throws IOException { + final NameNode namenode = (NameNode)context.getAttribute("name.node"); + + switch(op.getValue()) { + case DELETE: + { + final boolean b = namenode.getRpcServer().delete(fullpath, recursive.getValue()); + final String js = JsonUtil.toJsonString("boolean", b); + return 
Response.ok(js).type(MediaType.APPLICATION_JSON).build(); + } + default: + throw new UnsupportedOperationException(op + " is not supported"); + } + } } From 2ada53f10750a611755d730764629d3368d35126 Mon Sep 17 00:00:00 2001 From: Ravi Gummadi Date: Wed, 18 Apr 2012 11:59:38 +0000 Subject: [PATCH 48/57] MAPREDUCE-4149. [Rumen] Rumen fails to parse certain counter strings. (ravigummadi) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1327461 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 3 +++ .../org/apache/hadoop/tools/rumen/HistoryEventEmitter.java | 2 ++ 2 files changed, 5 insertions(+) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 91ffd6c1c29..1f74eb54004 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -52,6 +52,9 @@ Trunk (unreleased changes) BUG FIXES + MAPREDUCE-4149. [Rumen] Rumen fails to parse certain counter + strings. (ravigummadi) + MAPREDUCE-4083. [Gridmix] NPE in cpu emulation. (amarrk) MAPREDUCE-4087. [Gridmix] GenerateDistCacheData job of Gridmix can diff --git a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/HistoryEventEmitter.java b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/HistoryEventEmitter.java index a8a7fcbed01..2103709f54e 100644 --- a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/HistoryEventEmitter.java +++ b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/HistoryEventEmitter.java @@ -80,6 +80,8 @@ protected static Counters parseCounters(String counters) } counters = counters.replace("\\.", "\\\\."); + counters = counters.replace("\\\\{", "\\{"); + counters = counters.replace("\\\\}", "\\}"); counters = counters.replace("\\\\(", "\\("); counters = counters.replace("\\\\)", "\\)"); counters = counters.replace("\\\\[", "\\["); From 14c61e6504d318ab84eaa6c638e25c2447158e38 Mon Sep 17 00:00:00 2001 From: Harsh J Date: Wed, 18 Apr 2012 18:29:11 +0000 Subject: [PATCH 49/57] HADOOP-8290. Remove remaining references to hadoop.native.lib (harsh) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1327609 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 2 ++ .../java/org/apache/hadoop/io/compress/TestCodec.java | 3 ++- .../hadoop/io/file/tfile/TestTFileSeqFileComparison.java | 4 +++- .../src/main/resources/mapred-default.xml | 9 +++++++++ 4 files changed, 16 insertions(+), 2 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 51db2e5c329..468f6e473ea 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -63,6 +63,8 @@ Trunk (unreleased changes) HADOOP-8117. Upgrade test build to Surefire 2.12 (todd) + HADOOP-8290. Remove remaining references to hadoop.native.lib (harsh) + BUG FIXES HADOOP-8177. MBeans shouldn't try to register when it fails to create MBeanName. 
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java index 119fb3c14e4..67254fe7582 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java @@ -756,7 +756,8 @@ public void testCodecPoolAndGzipDecompressor() { // Don't use native libs for this test. Configuration conf = new Configuration(); - conf.setBoolean("hadoop.native.lib", false); + conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, + false); assertFalse("ZlibFactory is using native libs against request", ZlibFactory.isNativeZlibLoaded(conf)); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileSeqFileComparison.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileSeqFileComparison.java index a9df13752d1..6aa685da5fb 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileSeqFileComparison.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileSeqFileComparison.java @@ -33,6 +33,7 @@ import org.apache.commons.cli.OptionBuilder; import org.apache.commons.cli.Options; import org.apache.commons.cli.ParseException; +import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; @@ -237,7 +238,8 @@ static class SeqFileAppendable implements KVAppendable { public SeqFileAppendable(FileSystem fs, Path path, int osBufferSize, String compress, int minBlkSize) throws IOException { Configuration conf = new Configuration(); - conf.setBoolean("hadoop.native.lib", true); + conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, + true); CompressionCodec codec = null; if ("lzo".equals(compress)) { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml index 5c533e6f26e..d4a627ac7e5 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml @@ -1126,6 +1126,15 @@ + + mapreduce.shuffle.port + 8080 + Default port that the ShuffleHandler will run on. ShuffleHandler + is a service run at the NodeManager to facilitate transfers of intermediate + Map outputs to requesting Reducers. + + + From 3ac28085a700fbe98059175d4071a2c9c8760b64 Mon Sep 17 00:00:00 2001 From: Harsh J Date: Wed, 18 Apr 2012 18:33:57 +0000 Subject: [PATCH 50/57] HADOOP-8290. 
Remove remaining references to hadoop.native.lib (harsh) (fixed commit) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1327611 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 2 -- .../test/java/org/apache/hadoop/io/compress/TestCodec.java | 3 +-- .../hadoop/io/file/tfile/TestTFileSeqFileComparison.java | 4 +--- 3 files changed, 2 insertions(+), 7 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 468f6e473ea..51db2e5c329 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -63,8 +63,6 @@ Trunk (unreleased changes) HADOOP-8117. Upgrade test build to Surefire 2.12 (todd) - HADOOP-8290. Remove remaining references to hadoop.native.lib (harsh) - BUG FIXES HADOOP-8177. MBeans shouldn't try to register when it fails to create MBeanName. diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java index 67254fe7582..119fb3c14e4 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java @@ -756,8 +756,7 @@ public void testCodecPoolAndGzipDecompressor() { // Don't use native libs for this test. Configuration conf = new Configuration(); - conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, - false); + conf.setBoolean("hadoop.native.lib", false); assertFalse("ZlibFactory is using native libs against request", ZlibFactory.isNativeZlibLoaded(conf)); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileSeqFileComparison.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileSeqFileComparison.java index 6aa685da5fb..a9df13752d1 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileSeqFileComparison.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileSeqFileComparison.java @@ -33,7 +33,6 @@ import org.apache.commons.cli.OptionBuilder; import org.apache.commons.cli.Options; import org.apache.commons.cli.ParseException; -import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; @@ -238,8 +237,7 @@ static class SeqFileAppendable implements KVAppendable { public SeqFileAppendable(FileSystem fs, Path path, int osBufferSize, String compress, int minBlkSize) throws IOException { Configuration conf = new Configuration(); - conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, - true); + conf.setBoolean("hadoop.native.lib", true); CompressionCodec codec = null; if ("lzo".equals(compress)) { From d34fee4aa3d04dbba5f0c7d361946c9e4405a932 Mon Sep 17 00:00:00 2001 From: Harsh J Date: Wed, 18 Apr 2012 18:36:35 +0000 Subject: [PATCH 51/57] HADOOP-8290. 
Remove remaining references to hadoop.native.lib (harsh) (fixed commit after revert) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1327615 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 2 ++ .../test/java/org/apache/hadoop/io/compress/TestCodec.java | 3 ++- .../hadoop/io/file/tfile/TestTFileSeqFileComparison.java | 4 +++- 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 51db2e5c329..468f6e473ea 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -63,6 +63,8 @@ Trunk (unreleased changes) HADOOP-8117. Upgrade test build to Surefire 2.12 (todd) + HADOOP-8290. Remove remaining references to hadoop.native.lib (harsh) + BUG FIXES HADOOP-8177. MBeans shouldn't try to register when it fails to create MBeanName. diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java index 119fb3c14e4..67254fe7582 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java @@ -756,7 +756,8 @@ public void testCodecPoolAndGzipDecompressor() { // Don't use native libs for this test. Configuration conf = new Configuration(); - conf.setBoolean("hadoop.native.lib", false); + conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, + false); assertFalse("ZlibFactory is using native libs against request", ZlibFactory.isNativeZlibLoaded(conf)); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileSeqFileComparison.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileSeqFileComparison.java index a9df13752d1..6aa685da5fb 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileSeqFileComparison.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileSeqFileComparison.java @@ -33,6 +33,7 @@ import org.apache.commons.cli.OptionBuilder; import org.apache.commons.cli.Options; import org.apache.commons.cli.ParseException; +import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; @@ -237,7 +238,8 @@ static class SeqFileAppendable implements KVAppendable { public SeqFileAppendable(FileSystem fs, Path path, int osBufferSize, String compress, int minBlkSize) throws IOException { Configuration conf = new Configuration(); - conf.setBoolean("hadoop.native.lib", true); + conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, + true); CompressionCodec codec = null; if ("lzo".equals(compress)) { From 6b69ba6ba14adb9b612b92b00bd5289eb48a8e1d Mon Sep 17 00:00:00 2001 From: Harsh J Date: Wed, 18 Apr 2012 18:42:09 +0000 Subject: [PATCH 52/57] Reverting all commit booboos made on HADOOP-8290. Resetting to state before that. Sorry for so much noise. 
(harsh) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1327616 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 2 -- .../java/org/apache/hadoop/io/compress/TestCodec.java | 3 +-- .../hadoop/io/file/tfile/TestTFileSeqFileComparison.java | 4 +--- .../src/main/resources/mapred-default.xml | 9 --------- 4 files changed, 2 insertions(+), 16 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 468f6e473ea..51db2e5c329 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -63,8 +63,6 @@ Trunk (unreleased changes) HADOOP-8117. Upgrade test build to Surefire 2.12 (todd) - HADOOP-8290. Remove remaining references to hadoop.native.lib (harsh) - BUG FIXES HADOOP-8177. MBeans shouldn't try to register when it fails to create MBeanName. diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java index 67254fe7582..119fb3c14e4 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java @@ -756,8 +756,7 @@ public void testCodecPoolAndGzipDecompressor() { // Don't use native libs for this test. Configuration conf = new Configuration(); - conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, - false); + conf.setBoolean("hadoop.native.lib", false); assertFalse("ZlibFactory is using native libs against request", ZlibFactory.isNativeZlibLoaded(conf)); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileSeqFileComparison.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileSeqFileComparison.java index 6aa685da5fb..a9df13752d1 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileSeqFileComparison.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileSeqFileComparison.java @@ -33,7 +33,6 @@ import org.apache.commons.cli.OptionBuilder; import org.apache.commons.cli.Options; import org.apache.commons.cli.ParseException; -import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; @@ -238,8 +237,7 @@ static class SeqFileAppendable implements KVAppendable { public SeqFileAppendable(FileSystem fs, Path path, int osBufferSize, String compress, int minBlkSize) throws IOException { Configuration conf = new Configuration(); - conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, - true); + conf.setBoolean("hadoop.native.lib", true); CompressionCodec codec = null; if ("lzo".equals(compress)) { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml index d4a627ac7e5..5c533e6f26e 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml +++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml @@ -1126,15 +1126,6 @@ - - mapreduce.shuffle.port - 8080 - Default port that the ShuffleHandler will run on. ShuffleHandler - is a service run at the NodeManager to facilitate transfers of intermediate - Map outputs to requesting Reducers. - - - From 4e4ba4cd90c7dfcbc327e13154e9d7d8f6f24517 Mon Sep 17 00:00:00 2001 From: Harsh J Date: Wed, 18 Apr 2012 18:44:18 +0000 Subject: [PATCH 53/57] HADOOP-8290. Remove remaining references to hadoop.native.lib (harsh) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1327618 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 2 ++ .../test/java/org/apache/hadoop/io/compress/TestCodec.java | 3 ++- .../hadoop/io/file/tfile/TestTFileSeqFileComparison.java | 4 +++- 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 51db2e5c329..468f6e473ea 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -63,6 +63,8 @@ Trunk (unreleased changes) HADOOP-8117. Upgrade test build to Surefire 2.12 (todd) + HADOOP-8290. Remove remaining references to hadoop.native.lib (harsh) + BUG FIXES HADOOP-8177. MBeans shouldn't try to register when it fails to create MBeanName. diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java index 119fb3c14e4..67254fe7582 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java @@ -756,7 +756,8 @@ public void testCodecPoolAndGzipDecompressor() { // Don't use native libs for this test. 
Configuration conf = new Configuration(); - conf.setBoolean("hadoop.native.lib", false); + conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, + false); assertFalse("ZlibFactory is using native libs against request", ZlibFactory.isNativeZlibLoaded(conf)); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileSeqFileComparison.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileSeqFileComparison.java index a9df13752d1..6aa685da5fb 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileSeqFileComparison.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileSeqFileComparison.java @@ -33,6 +33,7 @@ import org.apache.commons.cli.OptionBuilder; import org.apache.commons.cli.Options; import org.apache.commons.cli.ParseException; +import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; @@ -237,7 +238,8 @@ static class SeqFileAppendable implements KVAppendable { public SeqFileAppendable(FileSystem fs, Path path, int osBufferSize, String compress, int minBlkSize) throws IOException { Configuration conf = new Configuration(); - conf.setBoolean("hadoop.native.lib", true); + conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, + true); CompressionCodec codec = null; if ("lzo".equals(compress)) { From 32d511065a2612d37591f09e53eed8ec1488587d Mon Sep 17 00:00:00 2001 From: Robert Joseph Evans Date: Wed, 18 Apr 2012 18:46:34 +0000 Subject: [PATCH 54/57] MAPREDUCE-4161. create sockets consistently (Daryn Sharp via bobby) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1327621 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 2 ++ .../mapreduce/v2/app/rm/RMCommunicator.java | 11 +++++---- .../java/org/apache/hadoop/mapred/Master.java | 8 +++---- .../hadoop/mapred/ResourceMgrDelegate.java | 9 +++---- .../distributedshell/ApplicationMaster.java | 5 ++-- .../applications/distributedshell/Client.java | 7 +++--- .../hadoop/yarn/conf/YarnConfiguration.java | 19 +++++++-------- .../nodemanager/NodeStatusUpdaterImpl.java | 24 +++++++------------ .../ContainerManagerImpl.java | 9 ++++--- .../ResourceLocalizationService.java | 15 ++++++------ .../server/resourcemanager/AdminService.java | 11 ++++----- .../ApplicationMasterService.java | 11 ++++----- .../resourcemanager/ClientRMService.java | 13 ++++------ .../ResourceTrackerService.java | 12 ++++------ .../amlauncher/AMLauncher.java | 9 ++++--- .../server/resourcemanager/tools/RMAdmin.java | 12 ++++------ .../resourcemanager/TestAMAuthorization.java | 11 +++++---- .../resourcemanager/TestApplicationACLs.java | 8 +++---- .../server/webproxy/AppReportFetcher.java | 7 +++--- 19 files changed, 88 insertions(+), 115 deletions(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 1f74eb54004..2f88d9dc345 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -274,6 +274,8 @@ Release 0.23.3 - UNRELEASED MAPREDUCE-3972. Fix locking and exception issues in JobHistory server. (Robert Joseph Evans via sseth) + MAPREDUCE-4161. 
create sockets consistently (Daryn Sharp via bobby) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java index 45d72bc7a3f..49df2176ef9 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java @@ -19,6 +19,7 @@ package org.apache.hadoop.mapreduce.v2.app.rm; import java.io.IOException; +import java.net.InetSocketAddress; import java.security.PrivilegedAction; import java.util.Map; import java.util.concurrent.atomic.AtomicBoolean; @@ -35,7 +36,6 @@ import org.apache.hadoop.mapreduce.v2.app.client.ClientService; import org.apache.hadoop.mapreduce.v2.app.job.Job; import org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils; -import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; @@ -245,11 +245,12 @@ public void run() { } protected AMRMProtocol createSchedulerProxy() { - final YarnRPC rpc = YarnRPC.create(getConfig()); final Configuration conf = getConfig(); - final String serviceAddr = conf.get( + final YarnRPC rpc = YarnRPC.create(conf); + final InetSocketAddress serviceAddr = conf.getSocketAddr( YarnConfiguration.RM_SCHEDULER_ADDRESS, - YarnConfiguration.DEFAULT_RM_SCHEDULER_ADDRESS); + YarnConfiguration.DEFAULT_RM_SCHEDULER_ADDRESS, + YarnConfiguration.DEFAULT_RM_SCHEDULER_PORT); UserGroupInformation currentUser; try { @@ -279,7 +280,7 @@ protected AMRMProtocol createSchedulerProxy() { @Override public AMRMProtocol run() { return (AMRMProtocol) rpc.getProxy(AMRMProtocol.class, - NetUtils.createSocketAddr(serviceAddr), conf); + serviceAddr, conf); } }); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Master.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Master.java index d6f7346c63b..3bacc5ecc08 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Master.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Master.java @@ -55,10 +55,10 @@ public static InetSocketAddress getMasterAddress(Configuration conf) { return NetUtils.createSocketAddr(masterAddress, 8012, MRConfig.MASTER_ADDRESS); } else { - masterAddress = conf.get(YarnConfiguration.RM_ADDRESS, - YarnConfiguration.DEFAULT_RM_ADDRESS); - return NetUtils.createSocketAddr(masterAddress, YarnConfiguration.DEFAULT_RM_PORT, - YarnConfiguration.RM_ADDRESS); + return conf.getSocketAddr( + YarnConfiguration.RM_ADDRESS, + YarnConfiguration.DEFAULT_RM_ADDRESS, + YarnConfiguration.DEFAULT_RM_PORT); } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java index ec1b90427ae..79a1d27c2db 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java @@ -40,7 +40,6 @@ import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenRequest; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenResponse; import org.apache.hadoop.mapreduce.v2.util.MRApps; -import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.yarn.api.ClientRMProtocol; @@ -88,12 +87,10 @@ public class ResourceMgrDelegate { public ResourceMgrDelegate(YarnConfiguration conf) { this.conf = conf; YarnRPC rpc = YarnRPC.create(this.conf); - InetSocketAddress rmAddress = - NetUtils.createSocketAddr(this.conf.get( + InetSocketAddress rmAddress = conf.getSocketAddr( YarnConfiguration.RM_ADDRESS, - YarnConfiguration.DEFAULT_RM_ADDRESS), - YarnConfiguration.DEFAULT_RM_PORT, - YarnConfiguration.RM_ADDRESS); + YarnConfiguration.DEFAULT_RM_ADDRESS, + YarnConfiguration.DEFAULT_RM_PORT); this.rmAddress = rmAddress.toString(); LOG.debug("Connecting to ResourceManager at " + rmAddress); applicationsManager = diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java index 611bdf88678..b790e3c52a7 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java @@ -742,9 +742,10 @@ public void run() { */ private AMRMProtocol connectToRM() { YarnConfiguration yarnConf = new YarnConfiguration(conf); - InetSocketAddress rmAddress = NetUtils.createSocketAddr(yarnConf.get( + InetSocketAddress rmAddress = yarnConf.getSocketAddr( YarnConfiguration.RM_SCHEDULER_ADDRESS, - YarnConfiguration.DEFAULT_RM_SCHEDULER_ADDRESS)); + YarnConfiguration.DEFAULT_RM_SCHEDULER_ADDRESS, + YarnConfiguration.DEFAULT_RM_SCHEDULER_PORT); LOG.info("Connecting to ResourceManager at " + rmAddress); return ((AMRMProtocol) rpc.getProxy(AMRMProtocol.class, rmAddress, conf)); } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java index 3b962a3048f..8d67929b541 100644 --- 
a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java @@ -42,8 +42,6 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.security.SecurityInfo; import org.apache.hadoop.yarn.api.ApplicationConstants; import org.apache.hadoop.yarn.api.ClientRMProtocol; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest; @@ -723,9 +721,10 @@ public ClientRMProtocol run() { }); */ YarnConfiguration yarnConf = new YarnConfiguration(conf); - InetSocketAddress rmAddress = NetUtils.createSocketAddr(yarnConf.get( + InetSocketAddress rmAddress = yarnConf.getSocketAddr( YarnConfiguration.RM_ADDRESS, - YarnConfiguration.DEFAULT_RM_ADDRESS)); + YarnConfiguration.DEFAULT_RM_ADDRESS, + YarnConfiguration.DEFAULT_RM_PORT); LOG.info("Connecting to ResourceManager at " + rmAddress); applicationsManager = ((ClientRMProtocol) rpc.getProxy( ClientRMProtocol.class, rmAddress, conf)); diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java index c7747139f30..407c3a690d6 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -558,17 +558,16 @@ public static String getProxyHostAndPort(Configuration conf) { } public static String getRMWebAppHostAndPort(Configuration conf) { - String addr = conf.get(YarnConfiguration.RM_WEBAPP_ADDRESS, - YarnConfiguration.DEFAULT_RM_WEBAPP_ADDRESS); - Iterator it = ADDR_SPLITTER.split(addr).iterator(); - it.next(); // ignore the bind host - String port = it.next(); + int port = conf.getSocketAddr( + YarnConfiguration.RM_WEBAPP_ADDRESS, + YarnConfiguration.DEFAULT_RM_WEBAPP_ADDRESS, + YarnConfiguration.DEFAULT_RM_WEBAPP_PORT).getPort(); // Use apps manager address to figure out the host for webapp - addr = conf.get(YarnConfiguration.RM_ADDRESS, YarnConfiguration.DEFAULT_RM_ADDRESS); - String host = ADDR_SPLITTER.split(addr).iterator().next(); - String rmAddress = JOINER.join(host, ":", port); - InetSocketAddress address = NetUtils.createSocketAddr( - rmAddress, DEFAULT_RM_WEBAPP_PORT, RM_WEBAPP_ADDRESS); + String host = conf.getSocketAddr( + YarnConfiguration.RM_ADDRESS, + YarnConfiguration.DEFAULT_RM_ADDRESS, + YarnConfiguration.DEFAULT_RM_PORT).getHostName(); + InetSocketAddress address = NetUtils.createSocketAddrForHost(host, port); StringBuffer sb = new StringBuffer(); InetAddress resolved = address.getAddress(); if (resolved == null || resolved.isAnyLocalAddress() || diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java index 
6651c299aa5..ba3e53ebe95 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java @@ -32,7 +32,6 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.yarn.YarnException; import org.apache.hadoop.yarn.api.records.ApplicationId; @@ -75,7 +74,7 @@ public class NodeStatusUpdaterImpl extends AbstractService implements private ContainerTokenSecretManager containerTokenSecretManager; private long heartBeatInterval; private ResourceTracker resourceTracker; - private String rmAddress; + private InetSocketAddress rmAddress; private Resource totalResource; private int httpPort; private byte[] secretKeyBytes = new byte[0]; @@ -106,9 +105,10 @@ public NodeStatusUpdaterImpl(Context context, Dispatcher dispatcher, @Override public synchronized void init(Configuration conf) { - this.rmAddress = - conf.get(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, - YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS); + this.rmAddress = conf.getSocketAddr( + YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, + YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS, + YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT); this.heartBeatInterval = conf.getLong(YarnConfiguration.NM_TO_RM_HEARTBEAT_INTERVAL_MS, YarnConfiguration.DEFAULT_NM_TO_RM_HEARTBEAT_INTERVAL_MS); @@ -132,13 +132,10 @@ public void start() { // NodeManager is the last service to start, so NodeId is available. 
this.nodeId = this.context.getNodeId(); - String httpBindAddressStr = - getConfig().get(YarnConfiguration.NM_WEBAPP_ADDRESS, - YarnConfiguration.DEFAULT_NM_WEBAPP_ADDRESS); - InetSocketAddress httpBindAddress = - NetUtils.createSocketAddr(httpBindAddressStr, - YarnConfiguration.DEFAULT_NM_WEBAPP_PORT, - YarnConfiguration.NM_WEBAPP_ADDRESS); + InetSocketAddress httpBindAddress = getConfig().getSocketAddr( + YarnConfiguration.NM_WEBAPP_ADDRESS, + YarnConfiguration.DEFAULT_NM_WEBAPP_ADDRESS, + YarnConfiguration.DEFAULT_NM_WEBAPP_PORT); try { // this.hostName = InetAddress.getLocalHost().getCanonicalHostName(); this.httpPort = httpBindAddress.getPort(); @@ -178,9 +175,6 @@ protected boolean isSecurityEnabled() { protected ResourceTracker getRMClient() { Configuration conf = getConfig(); YarnRPC rpc = YarnRPC.create(conf); - InetSocketAddress rmAddress = NetUtils.createSocketAddr(this.rmAddress, - YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT, - YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS); return (ResourceTracker) rpc.getProxy(ResourceTracker.class, rmAddress, conf); } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java index 040b8174c26..bdbe8131a8a 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java @@ -33,7 +33,6 @@ import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.io.DataInputByteBuffer; import org.apache.hadoop.ipc.Server; -import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.PolicyProvider; @@ -226,10 +225,10 @@ public void start() { Configuration conf = getConfig(); YarnRPC rpc = YarnRPC.create(conf); - InetSocketAddress initialAddress = NetUtils.createSocketAddr(conf.get( - YarnConfiguration.NM_ADDRESS, YarnConfiguration.DEFAULT_NM_ADDRESS), - YarnConfiguration.DEFAULT_NM_PORT, - YarnConfiguration.NM_ADDRESS); + InetSocketAddress initialAddress = conf.getSocketAddr( + YarnConfiguration.NM_ADDRESS, + YarnConfiguration.DEFAULT_NM_ADDRESS, + YarnConfiguration.DEFAULT_NM_PORT); server = rpc.getServer(ContainerManager.class, this, initialAddress, conf, diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java index 744c2b19900..c674e18f31d 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java +++ 
b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java @@ -207,10 +207,10 @@ public void init(Configuration conf) { conf.getLong(YarnConfiguration.NM_LOCALIZER_CACHE_TARGET_SIZE_MB, YarnConfiguration.DEFAULT_NM_LOCALIZER_CACHE_TARGET_SIZE_MB) << 20; cacheCleanupPeriod = conf.getLong(YarnConfiguration.NM_LOCALIZER_CACHE_CLEANUP_INTERVAL_MS, YarnConfiguration.DEFAULT_NM_LOCALIZER_CACHE_CLEANUP_INTERVAL_MS); - localizationServerAddress = NetUtils.createSocketAddr( - conf.get(YarnConfiguration.NM_LOCALIZER_ADDRESS, YarnConfiguration.DEFAULT_NM_LOCALIZER_ADDRESS), - YarnConfiguration.DEFAULT_NM_LOCALIZER_PORT, - YarnConfiguration.NM_LOCALIZER_ADDRESS); + localizationServerAddress = conf.getSocketAddr( + YarnConfiguration.NM_LOCALIZER_ADDRESS, + YarnConfiguration.DEFAULT_NM_LOCALIZER_ADDRESS, + YarnConfiguration.DEFAULT_NM_LOCALIZER_PORT); localizerTracker = createLocalizerTracker(conf); addService(localizerTracker); dispatcher.register(LocalizerEventType.class, localizerTracker); @@ -232,9 +232,10 @@ public void start() { .split(":")[0]; getConfig().set(YarnConfiguration.NM_LOCALIZER_ADDRESS, host + ":" + server.getPort()); - localizationServerAddress = NetUtils.createSocketAddr( - getConfig().get(YarnConfiguration.NM_LOCALIZER_ADDRESS, - YarnConfiguration.DEFAULT_NM_LOCALIZER_ADDRESS)); + localizationServerAddress = getConfig().getSocketAddr( + YarnConfiguration.NM_LOCALIZER_ADDRESS, + YarnConfiguration.DEFAULT_NM_LOCALIZER_ADDRESS, + YarnConfiguration.DEFAULT_NM_LOCALIZER_PORT); LOG.info("Localizer started on port " + server.getPort()); super.start(); } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java index c2a90185696..583304481bf 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java @@ -26,7 +26,6 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; -import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.Groups; import org.apache.hadoop.security.UserGroupInformation; @@ -95,12 +94,10 @@ public AdminService(Configuration conf, ResourceScheduler scheduler, @Override public void init(Configuration conf) { super.init(conf); - String bindAddress = - conf.get(YarnConfiguration.RM_ADMIN_ADDRESS, - YarnConfiguration.DEFAULT_RM_ADMIN_ADDRESS); - masterServiceAddress = NetUtils.createSocketAddr(bindAddress, - YarnConfiguration.DEFAULT_RM_ADMIN_PORT, - YarnConfiguration.RM_ADMIN_ADDRESS); + masterServiceAddress = conf.getSocketAddr( + YarnConfiguration.RM_ADMIN_ADDRESS, + YarnConfiguration.DEFAULT_RM_ADMIN_ADDRESS, + YarnConfiguration.DEFAULT_RM_ADMIN_PORT); adminAcl = new AccessControlList(conf.get( YarnConfiguration.YARN_ADMIN_ACL, YarnConfiguration.DEFAULT_YARN_ADMIN_ACL)); diff 
--git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java index 80de3660cf5..d9b4f8ae48e 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java @@ -100,13 +100,10 @@ public void start() { Configuration conf = getConfig(); YarnRPC rpc = YarnRPC.create(conf); - String bindAddressStr = - conf.get(YarnConfiguration.RM_SCHEDULER_ADDRESS, - YarnConfiguration.DEFAULT_RM_SCHEDULER_ADDRESS); - InetSocketAddress masterServiceAddress = - NetUtils.createSocketAddr(bindAddressStr, - YarnConfiguration.DEFAULT_RM_SCHEDULER_PORT, - YarnConfiguration.RM_SCHEDULER_ADDRESS); + InetSocketAddress masterServiceAddress = conf.getSocketAddr( + YarnConfiguration.RM_SCHEDULER_ADDRESS, + YarnConfiguration.DEFAULT_RM_SCHEDULER_ADDRESS, + YarnConfiguration.DEFAULT_RM_SCHEDULER_PORT); this.server = rpc.getServer(AMRMProtocol.class, this, masterServiceAddress, diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java index e4864995fd9..fe3f6a25c4e 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java @@ -33,7 +33,6 @@ import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.io.Text; import org.apache.hadoop.ipc.Server; -import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; import org.apache.hadoop.security.authorize.PolicyProvider; @@ -104,7 +103,6 @@ public class ClientRMService extends AbstractService implements final private RMContext rmContext; private final RMAppManager rmAppManager; - private String clientServiceBindAddress; private Server server; private RMDelegationTokenSecretManager rmDTSecretManager; @@ -126,13 +124,10 @@ public ClientRMService(RMContext rmContext, YarnScheduler scheduler, @Override public void init(Configuration conf) { - clientServiceBindAddress = - conf.get(YarnConfiguration.RM_ADDRESS, - YarnConfiguration.DEFAULT_RM_ADDRESS); - clientBindAddress = - NetUtils.createSocketAddr(clientServiceBindAddress, - YarnConfiguration.DEFAULT_RM_PORT, - YarnConfiguration.RM_ADDRESS); + clientBindAddress = conf.getSocketAddr( + YarnConfiguration.RM_ADDRESS, + YarnConfiguration.DEFAULT_RM_ADDRESS, + YarnConfiguration.DEFAULT_RM_PORT); super.init(conf); } diff --git 
a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java index d762766efcb..6ffc9b23155 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java @@ -27,7 +27,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.ipc.Server; -import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.net.Node; import org.apache.hadoop.security.authorize.PolicyProvider; import org.apache.hadoop.yarn.api.records.NodeId; @@ -104,13 +103,10 @@ public ResourceTrackerService(RMContext rmContext, @Override public synchronized void init(Configuration conf) { - String resourceTrackerBindAddress = - conf.get(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, - YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS); - resourceTrackerAddress = NetUtils.createSocketAddr( - resourceTrackerBindAddress, - YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT, - YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS); + resourceTrackerAddress = conf.getSocketAddr( + YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, + YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS, + YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT); RackResolver.init(conf); super.init(conf); diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java index 45641d4d7b2..114dc977b59 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java @@ -218,11 +218,10 @@ private void setupTokensAndEnv( Token token = new Token(id, this.rmContext.getApplicationTokenSecretManager()); - String schedulerAddressStr = - this.conf.get(YarnConfiguration.RM_SCHEDULER_ADDRESS, - YarnConfiguration.DEFAULT_RM_SCHEDULER_ADDRESS); - InetSocketAddress unresolvedAddr = - NetUtils.createSocketAddr(schedulerAddressStr); + InetSocketAddress unresolvedAddr = conf.getSocketAddr( + YarnConfiguration.RM_SCHEDULER_ADDRESS, + YarnConfiguration.DEFAULT_RM_SCHEDULER_ADDRESS, + YarnConfiguration.DEFAULT_RM_SCHEDULER_PORT); String resolvedAddr = unresolvedAddr.getAddress().getHostAddress() + ":" + unresolvedAddr.getPort(); diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/tools/RMAdmin.java 
b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/tools/RMAdmin.java index 4d429306dd2..4f3e65176e4 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/tools/RMAdmin.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/tools/RMAdmin.java @@ -25,7 +25,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.ipc.RemoteException; -import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; @@ -158,13 +157,10 @@ private RMAdminProtocol createAdminProtocol() throws IOException { final YarnConfiguration conf = new YarnConfiguration(getConf()); // Create the client - final String adminAddress = - conf.get(YarnConfiguration.RM_ADMIN_ADDRESS, - YarnConfiguration.DEFAULT_RM_ADMIN_ADDRESS); - final InetSocketAddress addr = - NetUtils.createSocketAddr(adminAddress, - YarnConfiguration.DEFAULT_RM_ADMIN_PORT, - YarnConfiguration.RM_ADMIN_ADDRESS); + final InetSocketAddress addr = conf.getSocketAddr( + YarnConfiguration.RM_ADMIN_ADDRESS, + YarnConfiguration.DEFAULT_RM_ADMIN_ADDRESS, + YarnConfiguration.DEFAULT_RM_ADMIN_PORT); final YarnRPC rpc = YarnRPC.create(conf); RMAdminProtocol adminProtocol = diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAMAuthorization.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAMAuthorization.java index e62a64cb2fd..9d03e9159b1 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAMAuthorization.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAMAuthorization.java @@ -19,6 +19,7 @@ package org.apache.hadoop.yarn.server.resourcemanager; import java.io.IOException; +import java.net.InetSocketAddress; import java.security.PrivilegedAction; import java.util.HashMap; import java.util.Map; @@ -27,7 +28,6 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; -import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; @@ -196,9 +196,10 @@ public void testUnauthorizedAccess() throws Exception { // Create a client to the RM. 
final Configuration conf = rm.getConfig(); final YarnRPC rpc = YarnRPC.create(conf); - final String serviceAddr = conf.get( + final InetSocketAddress serviceAddr = conf.getSocketAddr( YarnConfiguration.RM_SCHEDULER_ADDRESS, - YarnConfiguration.DEFAULT_RM_SCHEDULER_ADDRESS); + YarnConfiguration.DEFAULT_RM_SCHEDULER_ADDRESS, + YarnConfiguration.DEFAULT_RM_SCHEDULER_PORT); UserGroupInformation currentUser = UserGroupInformation .createRemoteUser(applicationAttemptId.toString()); @@ -213,8 +214,8 @@ public void testUnauthorizedAccess() throws Exception { .doAs(new PrivilegedAction() { @Override public AMRMProtocol run() { - return (AMRMProtocol) rpc.getProxy(AMRMProtocol.class, NetUtils - .createSocketAddr(serviceAddr), conf); + return (AMRMProtocol) rpc.getProxy(AMRMProtocol.class, + serviceAddr, conf); } }); diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationACLs.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationACLs.java index ea27be32dad..cc251e15829 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationACLs.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationACLs.java @@ -30,7 +30,6 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.AccessControlList; import org.apache.hadoop.yarn.api.ClientRMProtocol; @@ -75,9 +74,10 @@ public class TestApplicationACLs { static MockRM resourceManager; static Configuration conf = new YarnConfiguration(); final static YarnRPC rpc = YarnRPC.create(conf); - final static InetSocketAddress rmAddress = NetUtils - .createSocketAddr(conf.get(YarnConfiguration.RM_ADDRESS, - YarnConfiguration.DEFAULT_RM_ADDRESS)); + final static InetSocketAddress rmAddress = conf.getSocketAddr( + YarnConfiguration.RM_ADDRESS, + YarnConfiguration.DEFAULT_RM_ADDRESS, + YarnConfiguration.DEFAULT_RM_PORT); private static ClientRMProtocol rmClient; private static RecordFactory recordFactory = RecordFactoryProvider diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/AppReportFetcher.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/AppReportFetcher.java index bc711eacf5a..938a893a826 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/AppReportFetcher.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/AppReportFetcher.java @@ -23,7 +23,6 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.yarn.api.ClientRMProtocol; import 
org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse; @@ -51,10 +50,10 @@ public class AppReportFetcher { public AppReportFetcher(Configuration conf) { this.conf = conf; YarnRPC rpc = YarnRPC.create(this.conf); - InetSocketAddress rmAddress = - NetUtils.createSocketAddr(this.conf.get( + InetSocketAddress rmAddress = conf.getSocketAddr( YarnConfiguration.RM_ADDRESS, - YarnConfiguration.DEFAULT_RM_ADDRESS)); + YarnConfiguration.DEFAULT_RM_ADDRESS, + YarnConfiguration.DEFAULT_RM_PORT); LOG.info("Connecting to ResourceManager at " + rmAddress); applicationsManager = (ClientRMProtocol) rpc.getProxy(ClientRMProtocol.class, From 8bda086d046b12e8efed834f39a775e710ca0962 Mon Sep 17 00:00:00 2001 From: Alejandro Abdelnur Date: Wed, 18 Apr 2012 18:51:22 +0000 Subject: [PATCH 55/57] HDFS-3263. HttpFS should read HDFS config from Hadoop site.xml files (tucu) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1327627 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop/fs/http/server/HttpFSServer.java | 4 +- .../fs/http/server/HttpFSServerWebApp.java | 25 ++++-- .../hadoop/lib/service/FileSystemAccess.java | 2 +- .../service/FileSystemAccessException.java | 8 +- .../hadoop/FileSystemAccessService.java | 87 +++++++++++-------- .../src/main/resources/httpfs-default.xml | 31 ------- .../src/site/apt/ServerSetup.apt.vm | 16 ++-- .../fs/http/client/TestHttpFSFileSystem.java | 25 ++++-- .../fs/http/server/TestHttpFSServer.java | 81 ++++++++++++++--- .../hadoop/TestFileSystemAccessService.java | 77 ++++++++++++++-- .../test/HadoopUsersConfTestHelper.java | 7 +- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 + 12 files changed, 250 insertions(+), 115 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java index b52f2d3d08d..43d1f72242e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java @@ -170,7 +170,7 @@ private T fsExecute(Principal user, String doAs, FileSystemAccess.FileSystem throws IOException, FileSystemAccessException { String hadoopUser = getEffectiveUser(user, doAs); FileSystemAccess fsAccess = HttpFSServerWebApp.get().get(FileSystemAccess.class); - Configuration conf = HttpFSServerWebApp.get().get(FileSystemAccess.class).getDefaultConfiguration(); + Configuration conf = HttpFSServerWebApp.get().get(FileSystemAccess.class).getFileSystemConfiguration(); return fsAccess.execute(hadoopUser, conf, executor); } @@ -194,7 +194,7 @@ private T fsExecute(Principal user, String doAs, FileSystemAccess.FileSystem private FileSystem createFileSystem(Principal user, String doAs) throws IOException, FileSystemAccessException { String hadoopUser = getEffectiveUser(user, doAs); FileSystemAccess fsAccess = HttpFSServerWebApp.get().get(FileSystemAccess.class); - Configuration conf = HttpFSServerWebApp.get().get(FileSystemAccess.class).getDefaultConfiguration(); + Configuration conf = HttpFSServerWebApp.get().get(FileSystemAccess.class).getFileSystemConfiguration(); FileSystem fs = fsAccess.createFileSystem(hadoopUser, conf); FileSystemReleaseFilter.setFileSystem(fs); return fs; diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServerWebApp.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServerWebApp.java index 9e1609ed6b6..fec8aa0805b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServerWebApp.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServerWebApp.java @@ -19,6 +19,7 @@ package org.apache.hadoop.fs.http.server; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.lib.server.ServerException; import org.apache.hadoop.lib.service.FileSystemAccess; import org.apache.hadoop.lib.servlet.ServerWebApp; @@ -29,8 +30,9 @@ /** * Bootstrap class that manages the initialization and destruction of the - * HttpFSServer server, it is a javax.servlet.ServletContextListener - * implementation that is wired in HttpFSServer's WAR WEB-INF/web.xml. + * HttpFSServer server, it is a javax.servlet.ServletContextListener + * implementation that is wired in HttpFSServer's WAR + * WEB-INF/web.xml. *

* It provides access to the server context via the singleton {@link #get}. *

@@ -38,7 +40,8 @@ * with httpfs.. */ public class HttpFSServerWebApp extends ServerWebApp { - private static final Logger LOG = LoggerFactory.getLogger(HttpFSServerWebApp.class); + private static final Logger LOG = + LoggerFactory.getLogger(HttpFSServerWebApp.class); /** * Server name and prefix for all configuration properties. @@ -67,8 +70,8 @@ public HttpFSServerWebApp() throws IOException { /** * Constructor used for testing purposes. */ - protected HttpFSServerWebApp(String homeDir, String configDir, String logDir, String tempDir, - Configuration config) { + protected HttpFSServerWebApp(String homeDir, String configDir, String logDir, + String tempDir, Configuration config) { super(NAME, homeDir, configDir, logDir, tempDir, config); } @@ -80,9 +83,11 @@ public HttpFSServerWebApp(String homeDir, Configuration config) { } /** - * Initializes the HttpFSServer server, loads configuration and required services. + * Initializes the HttpFSServer server, loads configuration and required + * services. * - * @throws ServerException thrown if HttpFSServer server could not be initialized. + * @throws ServerException thrown if HttpFSServer server could not be + * initialized. */ @Override public void init() throws ServerException { @@ -93,7 +98,8 @@ public void init() throws ServerException { SERVER = this; adminGroup = getConfig().get(getPrefixedName(CONF_ADMIN_GROUP), "admin"); LOG.info("Connects to Namenode [{}]", - get().get(FileSystemAccess.class).getDefaultConfiguration().get("fs.default.name")); + get().get(FileSystemAccess.class).getFileSystemConfiguration(). + get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY)); } /** @@ -106,7 +112,8 @@ public void destroy() { } /** - * Returns HttpFSServer server singleton, configuration and services are accessible through it. + * Returns HttpFSServer server singleton, configuration and services are + * accessible through it. * * @return the HttpFSServer server singleton. 
*/ diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/FileSystemAccess.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/FileSystemAccess.java index 7984761d547..5d8ce9e6e2f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/FileSystemAccess.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/FileSystemAccess.java @@ -37,6 +37,6 @@ public T execute(String user, Configuration conf, FileSystemExecutor exec public void releaseFileSystem(FileSystem fs) throws IOException; - public Configuration getDefaultConfiguration(); + public Configuration getFileSystemConfiguration(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/FileSystemAccessException.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/FileSystemAccessException.java index 8a0ba3caa0a..42fc8ff1bdc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/FileSystemAccessException.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/FileSystemAccessException.java @@ -26,12 +26,14 @@ public enum ERROR implements XException.ERROR { H01("Service property [{0}] not defined"), H02("Kerberos initialization failed, {0}"), H03("FileSystemExecutor error, {0}"), - H04("JobClientExecutor error, {0}"), + H04("Invalid configuration, it has not be created by the FileSystemAccessService"), H05("[{0}] validation failed, {1}"), H06("Property [{0}] not defined in configuration object"), H07("[{0}] not healthy, {1}"), - H08(""), - H09("Invalid FileSystemAccess security mode [{0}]"); + H08("{0}"), + H09("Invalid FileSystemAccess security mode [{0}]"), + H10("Hadoop config directory not found [{0}]"), + H11("Could not load Hadoop config files, {0}"); private String template; diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/hadoop/FileSystemAccessService.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/hadoop/FileSystemAccessService.java index f1a9ac055dd..eb31b060843 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/hadoop/FileSystemAccessService.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/hadoop/FileSystemAccessService.java @@ -19,7 +19,9 @@ package org.apache.hadoop.lib.service.hadoop; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.lib.server.BaseService; import org.apache.hadoop.lib.server.ServiceException; import org.apache.hadoop.lib.service.FileSystemAccess; @@ -32,6 +34,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.io.File; import java.io.IOException; import java.net.URI; import java.security.PrivilegedExceptionAction; @@ -54,9 +57,11 @@ public class FileSystemAccessService extends BaseService implements FileSystemAc public static final String NAME_NODE_WHITELIST = "name.node.whitelist"; - private static final String HADOOP_CONF_PREFIX = "conf:"; + public static final String HADOOP_CONF_DIR = "config.dir"; - private static final String NAME_NODE_PROPERTY = "fs.default.name"; + private static final String[] HADOOP_CONF_FILES = {"core-site.xml", 
"hdfs-site.xml"}; + + private static final String FILE_SYSTEM_SERVICE_CREATED = "FileSystemAccessService.created"; public FileSystemAccessService() { super(PREFIX); @@ -102,26 +107,40 @@ protected void init() throws ServiceException { throw new ServiceException(FileSystemAccessException.ERROR.H09, security); } - serviceHadoopConf = new Configuration(false); - for (Map.Entry entry : getServiceConfig()) { - String name = (String) entry.getKey(); - if (name.startsWith(HADOOP_CONF_PREFIX)) { - name = name.substring(HADOOP_CONF_PREFIX.length()); - String value = (String) entry.getValue(); - serviceHadoopConf.set(name, value); - - } + String hadoopConfDirProp = getServiceConfig().get(HADOOP_CONF_DIR, getServer().getConfigDir()); + File hadoopConfDir = new File(hadoopConfDirProp).getAbsoluteFile(); + if (hadoopConfDir == null) { + hadoopConfDir = new File(getServer().getConfigDir()).getAbsoluteFile(); + } + if (!hadoopConfDir.exists()) { + throw new ServiceException(FileSystemAccessException.ERROR.H10, hadoopConfDir); + } + try { + serviceHadoopConf = loadHadoopConf(hadoopConfDir); + } catch (IOException ex) { + throw new ServiceException(FileSystemAccessException.ERROR.H11, ex.toString(), ex); } - setRequiredServiceHadoopConf(serviceHadoopConf); - LOG.debug("FileSystemAccess default configuration:"); + LOG.debug("FileSystemAccess FileSystem configuration:"); for (Map.Entry entry : serviceHadoopConf) { LOG.debug(" {} = {}", entry.getKey(), entry.getValue()); } + setRequiredServiceHadoopConf(serviceHadoopConf); nameNodeWhitelist = toLowerCase(getServiceConfig().getTrimmedStringCollection(NAME_NODE_WHITELIST)); } + private Configuration loadHadoopConf(File dir) throws IOException { + Configuration hadoopConf = new Configuration(false); + for (String file : HADOOP_CONF_FILES) { + File f = new File(dir, file); + if (f.exists()) { + hadoopConf.addResource(new Path(f.getAbsolutePath())); + } + } + return hadoopConf; + } + @Override public void postInit() throws ServiceException { super.postInit(); @@ -166,17 +185,6 @@ protected void setRequiredServiceHadoopConf(Configuration conf) { conf.set("fs.hdfs.impl.disable.cache", "true"); } - protected Configuration createHadoopConf(Configuration conf) { - Configuration hadoopConf = new Configuration(); - ConfigurationUtils.copy(serviceHadoopConf, hadoopConf); - ConfigurationUtils.copy(conf, hadoopConf); - return hadoopConf; - } - - protected Configuration createNameNodeConf(Configuration conf) { - return createHadoopConf(conf); - } - protected FileSystem createFileSystem(Configuration namenodeConf) throws IOException { return FileSystem.get(namenodeConf); } @@ -202,16 +210,22 @@ public T execute(String user, final Configuration conf, final FileSystemExec Check.notEmpty(user, "user"); Check.notNull(conf, "conf"); Check.notNull(executor, "executor"); - if (conf.get(NAME_NODE_PROPERTY) == null || conf.getTrimmed(NAME_NODE_PROPERTY).length() == 0) { - throw new FileSystemAccessException(FileSystemAccessException.ERROR.H06, NAME_NODE_PROPERTY); + if (!conf.getBoolean(FILE_SYSTEM_SERVICE_CREATED, false)) { + throw new FileSystemAccessException(FileSystemAccessException.ERROR.H04); + } + if (conf.get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY) == null || + conf.getTrimmed(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY).length() == 0) { + throw new FileSystemAccessException(FileSystemAccessException.ERROR.H06, + CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY); } try { - validateNamenode(new URI(conf.get(NAME_NODE_PROPERTY)).getAuthority()); + 
validateNamenode( + new URI(conf.get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY)). + getAuthority()); UserGroupInformation ugi = getUGI(user); return ugi.doAs(new PrivilegedExceptionAction() { public T run() throws Exception { - Configuration namenodeConf = createNameNodeConf(conf); - FileSystem fs = createFileSystem(namenodeConf); + FileSystem fs = createFileSystem(conf); Instrumentation instrumentation = getServer().get(Instrumentation.class); Instrumentation.Cron cron = instrumentation.createCron(); try { @@ -236,13 +250,16 @@ public FileSystem createFileSystemInternal(String user, final Configuration conf throws IOException, FileSystemAccessException { Check.notEmpty(user, "user"); Check.notNull(conf, "conf"); + if (!conf.getBoolean(FILE_SYSTEM_SERVICE_CREATED, false)) { + throw new FileSystemAccessException(FileSystemAccessException.ERROR.H04); + } try { - validateNamenode(new URI(conf.get(NAME_NODE_PROPERTY)).getAuthority()); + validateNamenode( + new URI(conf.get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY)).getAuthority()); UserGroupInformation ugi = getUGI(user); return ugi.doAs(new PrivilegedExceptionAction() { public FileSystem run() throws Exception { - Configuration namenodeConf = createNameNodeConf(conf); - return createFileSystem(namenodeConf); + return createFileSystem(conf); } }); } catch (IOException ex) { @@ -267,11 +284,11 @@ public void releaseFileSystem(FileSystem fs) throws IOException { closeFileSystem(fs); } - @Override - public Configuration getDefaultConfiguration() { - Configuration conf = new Configuration(false); + public Configuration getFileSystemConfiguration() { + Configuration conf = new Configuration(true); ConfigurationUtils.copy(serviceHadoopConf, conf); + conf.setBoolean(FILE_SYSTEM_SERVICE_CREATED, true); return conf; } diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/resources/httpfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/resources/httpfs-default.xml index c58c925663e..e96042ef133 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/resources/httpfs-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/resources/httpfs-default.xml @@ -153,29 +153,6 @@ - - - - namenode.hostname - localhost - - The HDFS Namenode host the httpfs server connects to perform file - system operations. - - This property is only used to resolve other properties within this - configuration file. - - - - - httpfs.hadoop.conf:fs.default.name - hdfs://${namenode.hostname}:8020 - - The HDFS Namenode URI the httpfs server connects to perform file - system operations. - - - @@ -206,12 +183,4 @@ - - httpfs.hadoop.conf:dfs.namenode.kerberos.principal - hdfs/${namenode.hostname}@${kerberos.realm} - - The HDFS Namenode Kerberos principal. - - - diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/apt/ServerSetup.apt.vm b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/apt/ServerSetup.apt.vm index 26891721b8d..fe5ad30608e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/apt/ServerSetup.apt.vm +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/apt/ServerSetup.apt.vm @@ -37,13 +37,13 @@ Hadoop HDFS over HTTP ${project.version} - Server Setup * Configure HttpFS - Edit the <<>> file and - set the <<>> property to the HDFS - Namenode URI. For example: + By default, HttpFS assumes that Hadoop configuration files + (<<>>) are in the HttpFS + configuration directory. 
-+---+ -httpfs.fsAccess.conf:fs.default.name=hdfs://localhost:8021 -+---+ + If this is not the case, add to the <<>> file the + <<>> property set to the location + of the Hadoop configuration directory. * Configure Hadoop @@ -53,11 +53,11 @@ httpfs.fsAccess.conf:fs.default.name=hdfs://localhost:8021 +---+ ... - fsAccess.proxyuser.#HTTPFSUSER#.hosts + hadoop.proxyuser.#HTTPFSUSER#.hosts httpfs-host.foo.com - fsAccess.proxyuser.#HTTPFSUSER#.groups + hadoop.proxyuser.#HTTPFSUSER#.groups * ... diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSFileSystem.java index 579498713f5..4837352fc3b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSFileSystem.java @@ -19,6 +19,7 @@ package org.apache.hadoop.fs.http.client; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.ContentSummary; import org.apache.hadoop.fs.FileChecksum; import org.apache.hadoop.fs.FileStatus; @@ -70,16 +71,24 @@ private void createHttpFSServer() throws Exception { w.write("secret"); w.close(); - String fsDefaultName = TestHdfsHelper.getHdfsConf().get("fs.default.name"); + //HDFS configuration + String fsDefaultName = TestHdfsHelper.getHdfsConf().get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY); Configuration conf = new Configuration(false); - conf.set("httpfs.hadoop.conf:fs.default.name", fsDefaultName); - conf.set("httpfs.proxyuser." + HadoopUsersConfTestHelper.getHadoopProxyUser() + ".groups", HadoopUsersConfTestHelper - .getHadoopProxyUserGroups()); - conf.set("httpfs.proxyuser." + HadoopUsersConfTestHelper.getHadoopProxyUser() + ".hosts", HadoopUsersConfTestHelper - .getHadoopProxyUserHosts()); + conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, fsDefaultName); + File hdfsSite = new File(new File(homeDir, "conf"), "hdfs-site.xml"); + OutputStream os = new FileOutputStream(hdfsSite); + conf.writeXml(os); + os.close(); + + //HTTPFS configuration + conf = new Configuration(false); + conf.set("httpfs.proxyuser." + HadoopUsersConfTestHelper.getHadoopProxyUser() + ".groups", + HadoopUsersConfTestHelper.getHadoopProxyUserGroups()); + conf.set("httpfs.proxyuser." 
+ HadoopUsersConfTestHelper.getHadoopProxyUser() + ".hosts", + HadoopUsersConfTestHelper.getHadoopProxyUserHosts()); conf.set("httpfs.authentication.signature.secret.file", secretFile.getAbsolutePath()); - File hoopSite = new File(new File(homeDir, "conf"), "httpfs-site.xml"); - OutputStream os = new FileOutputStream(hoopSite); + File httpfsSite = new File(new File(homeDir, "conf"), "httpfs-site.xml"); + os = new FileOutputStream(httpfsSite); conf.writeXml(os); os.close(); diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java index d397fa35a51..ff525e643a7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java @@ -20,10 +20,12 @@ import junit.framework.Assert; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.lib.service.security.DummyGroupMapping; +import org.apache.hadoop.lib.server.Service; +import org.apache.hadoop.lib.server.ServiceException; +import org.apache.hadoop.lib.service.Groups; import org.apache.hadoop.test.HFSTestCase; import org.apache.hadoop.test.HadoopUsersConfTestHelper; import org.apache.hadoop.test.TestDir; @@ -40,12 +42,15 @@ import java.io.File; import java.io.FileOutputStream; import java.io.FileWriter; +import java.io.IOException; import java.io.InputStreamReader; import java.io.OutputStream; import java.io.Writer; import java.net.HttpURLConnection; import java.net.URL; import java.text.MessageFormat; +import java.util.Arrays; +import java.util.List; public class TestHttpFSServer extends HFSTestCase { @@ -54,12 +59,48 @@ public class TestHttpFSServer extends HFSTestCase { @TestJetty public void server() throws Exception { String dir = TestDirHelper.getTestDir().getAbsolutePath(); - Configuration hoopConf = new Configuration(false); - HttpFSServerWebApp server = new HttpFSServerWebApp(dir, dir, dir, dir, hoopConf); + + Configuration httpfsConf = new Configuration(false); + HttpFSServerWebApp server = new HttpFSServerWebApp(dir, dir, dir, dir, httpfsConf); server.init(); server.destroy(); } + public static class MockGroups implements Service,Groups { + + @Override + public void init(org.apache.hadoop.lib.server.Server server) throws ServiceException { + } + + @Override + public void postInit() throws ServiceException { + } + + @Override + public void destroy() { + } + + @Override + public Class[] getServiceDependencies() { + return new Class[0]; + } + + @Override + public Class getInterface() { + return Groups.class; + } + + @Override + public void serverStatusChange(org.apache.hadoop.lib.server.Server.Status oldStatus, + org.apache.hadoop.lib.server.Server.Status newStatus) throws ServiceException { + } + + @Override + public List getGroups(String user) throws IOException { + return Arrays.asList(HadoopUsersConfTestHelper.getHadoopUserGroups(user)); + } + + } private void createHttpFSServer() throws Exception { File homeDir = TestDirHelper.getTestDir(); Assert.assertTrue(new File(homeDir, "conf").mkdir()); @@ -72,13 +113,29 @@ private void createHttpFSServer() throws Exception { w.write("secret"); w.close(); - 
String fsDefaultName = TestHdfsHelper.getHdfsConf().get("fs.default.name"); + //HDFS configuration + File hadoopConfDir = new File(new File(homeDir, "conf"), "hadoop-conf"); + hadoopConfDir.mkdirs(); + String fsDefaultName = TestHdfsHelper.getHdfsConf().get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY); Configuration conf = new Configuration(false); - conf.set("httpfs.hadoop.conf:fs.default.name", fsDefaultName); - conf.set("httpfs.groups." + CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING, DummyGroupMapping.class.getName()); + conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, fsDefaultName); + File hdfsSite = new File(hadoopConfDir, "hdfs-site.xml"); + OutputStream os = new FileOutputStream(hdfsSite); + conf.writeXml(os); + os.close(); + + //HTTPFS configuration + conf = new Configuration(false); + conf.set("httpfs.services.ext", MockGroups.class.getName()); + conf.set("httpfs.admin.group", HadoopUsersConfTestHelper. + getHadoopUserGroups(HadoopUsersConfTestHelper.getHadoopUsers()[0])[0]); + conf.set("httpfs.proxyuser." + HadoopUsersConfTestHelper.getHadoopProxyUser() + ".groups", + HadoopUsersConfTestHelper.getHadoopProxyUserGroups()); + conf.set("httpfs.proxyuser." + HadoopUsersConfTestHelper.getHadoopProxyUser() + ".hosts", + HadoopUsersConfTestHelper.getHadoopProxyUserHosts()); conf.set("httpfs.authentication.signature.secret.file", secretFile.getAbsolutePath()); - File hoopSite = new File(new File(homeDir, "conf"), "httpfs-site.xml"); - OutputStream os = new FileOutputStream(hoopSite); + File httpfsSite = new File(new File(homeDir, "conf"), "httpfs-site.xml"); + os = new FileOutputStream(httpfsSite); conf.writeXml(os); os.close(); @@ -103,7 +160,8 @@ public void instrumentation() throws Exception { Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_UNAUTHORIZED); url = new URL(TestJettyHelper.getJettyURL(), - MessageFormat.format("/webhdfs/v1?user.name={0}&op=instrumentation", "root")); + MessageFormat.format("/webhdfs/v1?user.name={0}&op=instrumentation", + HadoopUsersConfTestHelper.getHadoopUsers()[0])); conn = (HttpURLConnection) url.openConnection(); Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK); BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream())); @@ -112,7 +170,8 @@ public void instrumentation() throws Exception { Assert.assertTrue(line.contains("\"counters\":{")); url = new URL(TestJettyHelper.getJettyURL(), - MessageFormat.format("/webhdfs/v1/foo?user.name={0}&op=instrumentation", "root")); + MessageFormat.format("/webhdfs/v1/foo?user.name={0}&op=instrumentation", + HadoopUsersConfTestHelper.getHadoopUsers()[0])); conn = (HttpURLConnection) url.openConnection(); Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_BAD_REQUEST); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/hadoop/TestFileSystemAccessService.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/hadoop/TestFileSystemAccessService.java index 84ff45a1658..b8689c9d6e4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/hadoop/TestFileSystemAccessService.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/hadoop/TestFileSystemAccessService.java @@ -20,6 +20,7 @@ import junit.framework.Assert; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import 
org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.lib.server.Server; @@ -34,13 +35,32 @@ import org.apache.hadoop.test.TestHdfs; import org.apache.hadoop.test.TestHdfsHelper; import org.apache.hadoop.util.StringUtils; +import org.junit.Before; import org.junit.Test; +import java.io.File; +import java.io.FileOutputStream; import java.io.IOException; +import java.io.OutputStream; import java.util.Arrays; public class TestFileSystemAccessService extends HFSTestCase { + private void createHadoopConf(Configuration hadoopConf) throws Exception { + String dir = TestDirHelper.getTestDir().getAbsolutePath(); + File hdfsSite = new File(dir, "hdfs-site.xml"); + OutputStream os = new FileOutputStream(hdfsSite); + hadoopConf.writeXml(os); + os.close(); + } + + @Before + public void createHadoopConf() throws Exception { + Configuration hadoopConf = new Configuration(false); + hadoopConf.set("foo", "FOO"); + createHadoopConf(hadoopConf); + } + @Test @TestDir public void simpleSecurity() throws Exception { @@ -124,7 +144,7 @@ public void serviceHadoopConf() throws Exception { FileSystemAccessService.class.getName())); Configuration conf = new Configuration(false); conf.set("server.services", services); - conf.set("server.hadoop.conf:foo", "FOO"); + Server server = new Server("server", dir, dir, dir, dir, conf); server.init(); FileSystemAccessService fsAccess = (FileSystemAccessService) server.get(FileSystemAccess.class); @@ -132,6 +152,32 @@ public void serviceHadoopConf() throws Exception { server.destroy(); } + @Test + @TestDir + public void serviceHadoopConfCustomDir() throws Exception { + String dir = TestDirHelper.getTestDir().getAbsolutePath(); + String hadoopConfDir = new File(dir, "confx").getAbsolutePath(); + new File(hadoopConfDir).mkdirs(); + String services = StringUtils.join(",", Arrays.asList(InstrumentationService.class.getName(), + FileSystemAccessService.class.getName())); + Configuration conf = new Configuration(false); + conf.set("server.services", services); + conf.set("server.hadoop.config.dir", hadoopConfDir); + + File hdfsSite = new File(hadoopConfDir, "hdfs-site.xml"); + OutputStream os = new FileOutputStream(hdfsSite); + Configuration hadoopConf = new Configuration(false); + hadoopConf.set("foo", "BAR"); + hadoopConf.writeXml(os); + os.close(); + + Server server = new Server("server", dir, dir, dir, dir, conf); + server.init(); + FileSystemAccessService fsAccess = (FileSystemAccessService) server.get(FileSystemAccess.class); + Assert.assertEquals(fsAccess.serviceHadoopConf.get("foo"), "BAR"); + server.destroy(); + } + @Test @TestDir public void inWhitelists() throws Exception { @@ -188,12 +234,17 @@ public void createFileSystem() throws Exception { String dir = TestDirHelper.getTestDir().getAbsolutePath(); String services = StringUtils.join(",", Arrays.asList(InstrumentationService.class.getName(), FileSystemAccessService.class.getName())); + + Configuration hadoopConf = new Configuration(false); + hadoopConf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, TestHdfsHelper.getHdfsConf().get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY)); + createHadoopConf(hadoopConf); + Configuration conf = new Configuration(false); conf.set("server.services", services); Server server = new Server("server", dir, dir, dir, dir, conf); server.init(); FileSystemAccess hadoop = server.get(FileSystemAccess.class); - FileSystem fs = hadoop.createFileSystem("u", TestHdfsHelper.getHdfsConf()); + FileSystem fs = hadoop.createFileSystem("u", 
+      hadoop.getFileSystemConfiguration());
     Assert.assertNotNull(fs);
     fs.mkdirs(new Path("/tmp/foo"));
     hadoop.releaseFileSystem(fs);
@@ -214,6 +265,11 @@ public void fileSystemExecutor() throws Exception {
     String dir = TestDirHelper.getTestDir().getAbsolutePath();
     String services = StringUtils.join(",", Arrays.asList(InstrumentationService.class.getName(),
       FileSystemAccessService.class.getName()));
+
+    Configuration hadoopConf = new Configuration(false);
+    hadoopConf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, TestHdfsHelper.getHdfsConf().get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY));
+    createHadoopConf(hadoopConf);
+
     Configuration conf = new Configuration(false);
     conf.set("server.services", services);
     Server server = new Server("server", dir, dir, dir, dir, conf);
@@ -222,7 +278,7 @@ public void fileSystemExecutor() throws Exception {
     final FileSystem fsa[] = new FileSystem[1];
-    hadoop.execute("u", TestHdfsHelper.getHdfsConf(), new FileSystemAccess.FileSystemExecutor() {
+    hadoop.execute("u", hadoop.getFileSystemConfiguration(), new FileSystemAccess.FileSystemExecutor() {
       @Override
       public Void execute(FileSystem fs) throws IOException {
         fs.mkdirs(new Path("/tmp/foo"));
@@ -248,14 +304,18 @@ public void fileSystemExecutorNoNameNode() throws Exception {
     String dir = TestDirHelper.getTestDir().getAbsolutePath();
     String services = StringUtils.join(",", Arrays.asList(InstrumentationService.class.getName(),
       FileSystemAccessService.class.getName()));
+    Configuration hadoopConf = new Configuration(false);
+    hadoopConf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, TestHdfsHelper.getHdfsConf().get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY));
+    createHadoopConf(hadoopConf);
+
     Configuration conf = new Configuration(false);
     conf.set("server.services", services);
     Server server = new Server("server", dir, dir, dir, dir, conf);
     server.init();
     FileSystemAccess fsAccess = server.get(FileSystemAccess.class);
-    Configuration hdfsConf = TestHdfsHelper.getHdfsConf();
-    hdfsConf.set("fs.default.name", "");
+    Configuration hdfsConf = fsAccess.getFileSystemConfiguration();
+    hdfsConf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, "");
     fsAccess.execute("u", hdfsConf, new FileSystemAccess.FileSystemExecutor() {
       @Override
       public Void execute(FileSystem fs) throws IOException {
@@ -271,6 +331,11 @@ public void fileSystemExecutorException() throws Exception {
     String dir = TestDirHelper.getTestDir().getAbsolutePath();
     String services = StringUtils.join(",", Arrays.asList(InstrumentationService.class.getName(),
       FileSystemAccessService.class.getName()));
+
+    Configuration hadoopConf = new Configuration(false);
+    hadoopConf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, TestHdfsHelper.getHdfsConf().get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY));
+    createHadoopConf(hadoopConf);
+
     Configuration conf = new Configuration(false);
     conf.set("server.services", services);
     Server server = new Server("server", dir, dir, dir, dir, conf);
@@ -279,7 +344,7 @@ public void fileSystemExecutorException() throws Exception {
     final FileSystem fsa[] = new FileSystem[1];
     try {
-      hadoop.execute("u", TestHdfsHelper.getHdfsConf(), new FileSystemAccess.FileSystemExecutor() {
+      hadoop.execute("u", hadoop.getFileSystemConfiguration(), new FileSystemAccess.FileSystemExecutor() {
         @Override
         public Void execute(FileSystem fs) throws IOException {
           fsa[0] = fs;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/HadoopUsersConfTestHelper.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/HadoopUsersConfTestHelper.java
index 398a8853dd9..f27d0efaae9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/HadoopUsersConfTestHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/HadoopUsersConfTestHelper.java
@@ -145,7 +145,12 @@ public static String[] getHadoopUsers() {
    */
   public static String[] getHadoopUserGroups(String user) {
     if (getHadoopUsers() == DEFAULT_USERS) {
-      return DEFAULT_USERS_GROUP;
+      for (String defaultUser : DEFAULT_USERS) {
+        if (defaultUser.equals(user)) {
+          return DEFAULT_USERS_GROUP;
+        }
+      }
+      return new String[0];
     } else {
       String groups = System.getProperty(HADOOP_USER_PREFIX + user);
       return (groups != null) ? groups.split(",") : new String[0];
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 955c7b97b10..79fd026c899 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -385,6 +385,8 @@ Release 2.0.0 - UNRELEASED
     HDFS-3294. Fix code indentation in NamenodeWebHdfsMethods and
     DatanodeWebHdfsMethods. (szetszwo)

+    HDFS-3263. HttpFS should read HDFS config from Hadoop site.xml files (tucu)
+
   OPTIMIZATIONS

     HDFS-3024. Improve performance of stringification in addStoredBlock (todd)

From 2bf8be3c72c548e71a6af7b4bf4cf3b58fd62994 Mon Sep 17 00:00:00 2001
From: Tsz-wo Sze
Date: Wed, 18 Apr 2012 20:21:08 +0000
Subject: [PATCH 56/57] HDFS-3292. Remove the deprecated DiskStatus, getDiskStatus(), getRawCapacity() and getRawUsed() from DistributedFileSystem. Contributed by Arpit Gupta

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1327664 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../hadoop/hdfs/DistributedFileSystem.java  | 44 -------------------
 2 files changed, 3 insertions(+), 44 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 79fd026c899..14a95499f5a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -65,6 +65,9 @@ Trunk (unreleased changes)
     HDFS-3273. Refactor BackupImage and FSEditLog, and rename
     JournalListener.rollLogs(..) to startLogSegment(..). (szetszwo)

+    HDFS-3292. Remove the deprecated DiskStatus, getDiskStatus(), getRawCapacity() and
+    getRawUsed() from DistributedFileSystem. (Arpit Gupta via szetszwo)
+
   OPTIMIZATIONS

     HDFS-2834. Add a ByteBuffer-based read API to DFSInputStream.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 988a6e7ee3f..b6521c09e5e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -499,56 +499,12 @@ public DFSClient getClient() {
     return dfs;
   }

-  /** @deprecated Use {@link org.apache.hadoop.fs.FsStatus} instead */
-  @InterfaceAudience.Private
-  @Deprecated
-  public static class DiskStatus extends FsStatus {
-    public DiskStatus(FsStatus stats) {
-      super(stats.getCapacity(), stats.getUsed(), stats.getRemaining());
-    }
-
-    public DiskStatus(long capacity, long dfsUsed, long remaining) {
-      super(capacity, dfsUsed, remaining);
-    }
-
-    public long getDfsUsed() {
-      return super.getUsed();
-    }
-  }
-
   @Override
   public FsStatus getStatus(Path p) throws IOException {
     statistics.incrementReadOps(1);
     return dfs.getDiskStatus();
   }

-  /** Return the disk usage of the filesystem, including total capacity,
-   * used space, and remaining space
-   * @deprecated Use {@link org.apache.hadoop.fs.FileSystem#getStatus()}
-   * instead */
-  @Deprecated
-  public DiskStatus getDiskStatus() throws IOException {
-    return new DiskStatus(dfs.getDiskStatus());
-  }
-
-  /** Return the total raw capacity of the filesystem, disregarding
-   * replication.
-   * @deprecated Use {@link org.apache.hadoop.fs.FileSystem#getStatus()}
-   * instead */
-  @Deprecated
-  public long getRawCapacity() throws IOException{
-    return dfs.getDiskStatus().getCapacity();
-  }
-
-  /** Return the total raw used space in the filesystem, disregarding
-   * replication.
-   * @deprecated Use {@link org.apache.hadoop.fs.FileSystem#getStatus()}
-   * instead */
-  @Deprecated
-  public long getRawUsed() throws IOException{
-    return dfs.getDiskStatus().getUsed();
-  }
-
   /**
    * Returns count of blocks with no good replicas left. Normally should be
    * zero.

From cc052d2a4a70f237e812f377785f4a7e67670f7b Mon Sep 17 00:00:00 2001
From: Todd Lipcon
Date: Wed, 18 Apr 2012 22:22:49 +0000
Subject: [PATCH 57/57] Move HADOOP-8117 from 3.0 to 2.0 in CHANGES.txt after commit to branch-2

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1327707 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-common-project/hadoop-common/CHANGES.txt | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 468f6e473ea..27f13283828 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -61,8 +61,6 @@ Trunk (unreleased changes)
     HADOOP-8147. test-patch should run tests with -fn to avoid masking test
     failures (Robert Evans via tgraves)

-    HADOOP-8117. Upgrade test build to Surefire 2.12 (todd)
-
     HADOOP-8290. Remove remaining references to hadoop.native.lib (harsh)

   BUG FIXES
@@ -265,6 +263,8 @@ Release 2.0.0 - UNRELEASED
     HADOOP-8280. Move VersionUtil/TestVersionUtil and GenericTestUtils from
     HDFS into Common. (Ahmed Radwan via atm)

+    HADOOP-8117. Upgrade test build to Surefire 2.12 (todd)
+
   OPTIMIZATIONS

   BUG FIXES