From 65bda6d168e47106f5542e15bb7b21522478626e Mon Sep 17 00:00:00 2001
From: Sanford Ryza <sandy@apache.org>
Date: Mon, 21 Oct 2013 18:31:57 +0000
Subject: [PATCH 01/11] YARN-1258: Move to 2.2.1 in CHANGES.txt

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1534307 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-yarn-project/CHANGES.txt                    |  4 +-
 .../resourcemanager/scheduler/Queue.java           |  8 ---
 .../scheduler/capacity/LeafQueue.java              |  5 --
 .../scheduler/capacity/ParentQueue.java            |  5 --
 .../scheduler/fair/FSLeafQueue.java                |  5 +-
 .../scheduler/fair/FSQueue.java                    | 11 +--
 .../scheduler/fair/QueueManager.java               | 37 ++++------
 .../scheduler/fifo/FifoScheduler.java              |  1 -
 .../scheduler/fair/TestFairScheduler.java          | 69 +++++++++++--------
 .../src/site/apt/FairScheduler.apt.vm              | 31 +++++++--
 10 files changed, 83 insertions(+), 93 deletions(-)

diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 430734b3570..f4d7e72529c 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -61,8 +61,6 @@ Release 2.3.0 - UNRELEASED
 
     YARN-976. Document the meaning of a virtual core. (Sandy Ryza)
 
-    YARN-1258. Allow configuring the Fair Scheduler root queue (Sandy Ryza)
-
     YARN-1182. MiniYARNCluster creates and inits the RM/NM only on start()
     (Karthik Kambatla via Sandy Ryza)
 
@@ -95,6 +93,8 @@ Release 2.2.1 - UNRELEASED
     YARN-305. Fair scheduler logs too many "Node offered to app" messages.
     (Lohit Vijayarenu via Sandy Ryza)
 
+    YARN-1258. Allow configuring the Fair Scheduler root queue (Sandy Ryza)
+
   OPTIMIZATIONS
 
   BUG FIXES

diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/Queue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/Queue.java
index 2c9e7ad3680..0380d323824 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/Queue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/Queue.java
@@ -19,12 +19,10 @@
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
 
 import java.util.List;
-import java.util.Map;
 
 import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.yarn.api.records.QueueACL;
 import org.apache.hadoop.yarn.api.records.QueueInfo;
 import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
@@ -44,12 +42,6 @@ public interface Queue {
    */
   QueueMetrics getMetrics();
 
-  /**
-   * Get ACLs for the queue.
-   * @return ACLs for the queue
-   */
-  public Map<QueueACL, AccessControlList> getQueueAcls();
-
   /**
    * Get queue information
    * @param includeChildQueues include child queues?

diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
index a09ea616c2c..d82e6737797 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
@@ -526,11 +526,6 @@ public synchronized float getUserLimitFactor() {
     return userLimitFactor;
   }
 
-  @Override
-  public synchronized Map<QueueACL, AccessControlList> getQueueAcls() {
-    return new HashMap<QueueACL, AccessControlList>(acls);
-  }
-
   @Override
   public synchronized QueueInfo getQueueInfo(
       boolean includeChildQueues, boolean recursive) {

diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
index 9a450069208..5ca953dc9ab 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
@@ -299,11 +299,6 @@ public synchronized QueueState getState() {
     return state;
   }
 
-  @Override
-  public synchronized Map<QueueACL, AccessControlList> getQueueAcls() {
-    return new HashMap<QueueACL, AccessControlList>(acls);
-  }
-
   @Override
   public synchronized QueueInfo getQueueInfo(
       boolean includeChildQueues, boolean recursive) {

diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
index e0caed77f86..2da754c03c6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
@@ -24,14 +24,12 @@
 import java.util.Comparator;
 import java.util.Iterator;
 import java.util.List;
-import java.util.Map;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.yarn.api.records.QueueACL;
 import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
 import org.apache.hadoop.yarn.api.records.Resource;
@@ -177,8 +175,7 @@ public List<QueueUserACLInfo> getQueueUserAclInfo(UserGroupInformation user) {
         recordFactory.newRecordInstance(QueueUserACLInfo.class);
     List<QueueACL> operations = new ArrayList<QueueACL>();
     for (QueueACL operation : QueueACL.values()) {
-      Map<QueueACL, AccessControlList> acls = queueMgr.getQueueAcls(getName());
-      if (acls.get(operation).isUserAllowed(user)) {
+      if (hasAccess(operation, user)) {
         operations.add(operation);
       }
     }

diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
index 9cb0463a56e..9f3c4c97c5d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
@@ -20,13 +20,10 @@
 
 import java.util.ArrayList;
 import java.util.Collection;
-import java.util.HashMap;
-import java.util.Map;
 
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.QueueACL;
 import org.apache.hadoop.yarn.api.records.QueueInfo;
@@ -135,12 +132,6 @@ public QueueInfo getQueueInfo(boolean includeChildQueues, boolean recursive) {
     return queueInfo;
   }
 
-  @Override
-  public Map<QueueACL, AccessControlList> getQueueAcls() {
-    Map<QueueACL, AccessControlList> acls = queueMgr.getQueueAcls(getName());
-    return new HashMap<QueueACL, AccessControlList>(acls);
-  }
-
   @Override
   public FSQueueMetrics getMetrics() {
     return metrics;
@@ -154,7 +145,7 @@ public void setFairShare(Resource fairShare) {
 
   public boolean hasAccess(QueueACL acl, UserGroupInformation user) {
     // Check if the leaf-queue allows access
-    if (queueMgr.getQueueAcls(getName()).get(acl).isUserAllowed(user)) {
+    if (queueMgr.getQueueAcl(getName(), acl).isUserAllowed(user)) {
       return true;
     }
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueueManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueueManager.java
index 8f2fc1e9a1e..ca5a9d5b848 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueueManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueueManager.java
@@ -72,6 +72,9 @@ public class QueueManager {
    * (this is done to prevent loading a file that hasn't been fully written).
    */
   public static final long ALLOC_RELOAD_WAIT = 5 * 1000;
+
+  private static final AccessControlList EVERYBODY_ACL = new AccessControlList("*");
+  private static final AccessControlList NOBODY_ACL = new AccessControlList(" ");
 
   private final FairScheduler scheduler;
 
@@ -381,15 +384,6 @@ public void reloadAllocs() throws IOException, ParserConfigurationException,
         queueMetrics.setMinShare(queue.getMinShare());
         queueMetrics.setMaxShare(queue.getMaxShare());
       }
-
-      // Root queue should have empty ACLs. As a queue's ACL is the union of
-      // its ACL and all its parents' ACLs, setting the roots' to empty will
-      // neither allow nor prohibit more access to its children.
-      Map<QueueACL, AccessControlList> rootAcls =
-          new HashMap<QueueACL, AccessControlList>();
-      rootAcls.put(QueueACL.SUBMIT_APPLICATIONS, new AccessControlList(" "));
-      rootAcls.put(QueueACL.ADMINISTER_QUEUE, new AccessControlList(" "));
-      queueAcls.put(ROOT_QUEUE, rootAcls);
 
       // Create all queus
       for (String name: queueNamesInAllocFile) {
@@ -454,10 +448,10 @@ private void loadQueue(String parentName, Element element, Map<String,
         policy.initialize(scheduler.getClusterCapacity());
         queuePolicies.put(queueName, policy);
       } else if ("aclSubmitApps".equals(field.getTagName())) {
-        String text = ((Text)field.getFirstChild()).getData().trim();
+        String text = ((Text)field.getFirstChild()).getData();
         acls.put(QueueACL.SUBMIT_APPLICATIONS, new AccessControlList(text));
       } else if ("aclAdministerApps".equals(field.getTagName())) {
-        String text = ((Text)field.getFirstChild()).getData().trim();
+        String text = ((Text)field.getFirstChild()).getData();
         acls.put(QueueACL.ADMINISTER_QUEUE, new AccessControlList(text));
       } else if ("queue".endsWith(field.getTagName()) ||
           "pool".equals(field.getTagName())) {
@@ -577,21 +571,16 @@ public long getFairSharePreemptionTimeout() {
 
   /**
    * Get the ACLs associated with this queue. If a given ACL is not explicitly
-   * configured, include the default value for that ACL.
+   * configured, include the default value for that ACL. The default for the
+   * root queue is everybody ("*") and the default for all other queues is
+   * nobody ("")
    */
-  public Map<QueueACL, AccessControlList> getQueueAcls(String queue) {
-    HashMap<QueueACL, AccessControlList> out = new HashMap<QueueACL, AccessControlList>();
-    Map<QueueACL, AccessControlList> queueAcl = info.queueAcls.get(queue);
-    if (queueAcl != null) {
-      out.putAll(queueAcl);
+  public AccessControlList getQueueAcl(String queue, QueueACL operation) {
+    Map<QueueACL, AccessControlList> queueAcls = info.queueAcls.get(queue);
+    if (queueAcls == null || !queueAcls.containsKey(operation)) {
+      return (queue.equals(ROOT_QUEUE)) ? EVERYBODY_ACL : NOBODY_ACL;
     }
-    if (!out.containsKey(QueueACL.ADMINISTER_QUEUE)) {
-      out.put(QueueACL.ADMINISTER_QUEUE, new AccessControlList("*"));
-    }
-    if (!out.containsKey(QueueACL.SUBMIT_APPLICATIONS)) {
-      out.put(QueueACL.SUBMIT_APPLICATIONS, new AccessControlList("*"));
-    }
-    return out;
+    return queueAcls.get(operation);
   }
 
   static class QueueManagerInfo {

diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
index ac7c68a5135..293811e63b0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
@@ -156,7 +156,6 @@ public QueueInfo getQueueInfo(
     return queueInfo;
   }
 
-  @Override
   public Map<QueueACL, AccessControlList> getQueueAcls() {
     Map<QueueACL, AccessControlList> acls = new HashMap<QueueACL, AccessControlList>();

diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
index e0b81dc7747..c69b431a4dd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
@@ -865,22 +865,25 @@ public void testAllocationFileParsing() throws Exception {
     assertEquals(10, queueManager.getUserMaxApps("user1"));
     assertEquals(5, queueManager.getUserMaxApps("user2"));
 
+    // Root should get * ACL
+    assertEquals("*",queueManager.getQueueAcl("root",
+        QueueACL.ADMINISTER_QUEUE).getAclString());
+    assertEquals("*", queueManager.getQueueAcl("root",
+        QueueACL.SUBMIT_APPLICATIONS).getAclString());
+
     // Unspecified queues should get default ACL
-    Map<QueueACL, AccessControlList> aclsA = queueManager.getQueueAcls("root.queueA");
-    assertTrue(aclsA.containsKey(QueueACL.ADMINISTER_QUEUE));
-    assertEquals("*", aclsA.get(QueueACL.ADMINISTER_QUEUE).getAclString());
-    assertTrue(aclsA.containsKey(QueueACL.SUBMIT_APPLICATIONS));
-    assertEquals("*", aclsA.get(QueueACL.SUBMIT_APPLICATIONS).getAclString());
+    assertEquals(" ",queueManager.getQueueAcl("root.queueA",
+        QueueACL.ADMINISTER_QUEUE).getAclString());
+    assertEquals(" ", queueManager.getQueueAcl("root.queueA",
+        QueueACL.SUBMIT_APPLICATIONS).getAclString());
 
     // Queue B ACL
-    Map<QueueACL, AccessControlList> aclsB = queueManager.getQueueAcls("root.queueB");
-    assertTrue(aclsB.containsKey(QueueACL.ADMINISTER_QUEUE));
-    assertEquals("alice,bob admins", aclsB.get(QueueACL.ADMINISTER_QUEUE).getAclString());
+    assertEquals("alice,bob admins",queueManager.getQueueAcl("root.queueB",
+        QueueACL.ADMINISTER_QUEUE).getAclString());
 
-    // Queue c ACL
-    Map<QueueACL, AccessControlList> aclsC = queueManager.getQueueAcls("root.queueC");
-    assertTrue(aclsC.containsKey(QueueACL.SUBMIT_APPLICATIONS));
-    assertEquals("alice,bob admins", aclsC.get(QueueACL.SUBMIT_APPLICATIONS).getAclString());
+    // Queue C ACL
+    assertEquals("alice,bob admins",queueManager.getQueueAcl("root.queueC",
+        QueueACL.SUBMIT_APPLICATIONS).getAclString());
 
     assertEquals(120000, queueManager.getMinSharePreemptionTimeout("root." +
         YarnConfiguration.DEFAULT_QUEUE_NAME));
@@ -1063,21 +1066,19 @@ public void testBackwardsCompatibleAllocationFileParsing() throws Exception {
     assertEquals(5, queueManager.getUserMaxApps("user2"));
 
     // Unspecified queues should get default ACL
-    Map<QueueACL, AccessControlList> aclsA = queueManager.getQueueAcls("queueA");
-    assertTrue(aclsA.containsKey(QueueACL.ADMINISTER_QUEUE));
-    assertEquals("*", aclsA.get(QueueACL.ADMINISTER_QUEUE).getAclString());
-    assertTrue(aclsA.containsKey(QueueACL.SUBMIT_APPLICATIONS));
-    assertEquals("*", aclsA.get(QueueACL.SUBMIT_APPLICATIONS).getAclString());
+    assertEquals(" ", queueManager.getQueueAcl("root.queueA",
+        QueueACL.ADMINISTER_QUEUE).getAclString());
+    assertEquals(" ", queueManager.getQueueAcl("root.queueA",
+        QueueACL.SUBMIT_APPLICATIONS).getAclString());
 
     // Queue B ACL
-    Map<QueueACL, AccessControlList> aclsB = queueManager.getQueueAcls("root.queueB");
-    assertTrue(aclsB.containsKey(QueueACL.ADMINISTER_QUEUE));
-    assertEquals("alice,bob admins", aclsB.get(QueueACL.ADMINISTER_QUEUE).getAclString());
+    assertEquals("alice,bob admins", queueManager.getQueueAcl("root.queueB",
+        QueueACL.ADMINISTER_QUEUE).getAclString());
+
+    // Queue C ACL
+    assertEquals("alice,bob admins", queueManager.getQueueAcl("root.queueC",
+        QueueACL.SUBMIT_APPLICATIONS).getAclString());
 
-    // Queue c ACL
-    Map<QueueACL, AccessControlList> aclsC = queueManager.getQueueAcls("root.queueC");
-    assertTrue(aclsC.containsKey(QueueACL.SUBMIT_APPLICATIONS));
-    assertEquals("alice,bob admins", aclsC.get(QueueACL.SUBMIT_APPLICATIONS).getAclString());
     assertEquals(120000, queueManager.getMinSharePreemptionTimeout("root." +
         YarnConfiguration.DEFAULT_QUEUE_NAME));
@@ -1664,9 +1665,13 @@ public void testAclSubmitApplication() throws Exception {
     PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE));
     out.println("<?xml version=\"1.0\"?>");
     out.println("<allocations>");
-    out.println("<queue name=\"queue1\">");
-    out.println("<aclSubmitApps>norealuserhasthisname</aclSubmitApps>");
-    out.println("<aclAdministerApps>norealuserhasthisname</aclAdministerApps>");
+    out.println("<queue name=\"root\">");
+    out.println("  <aclSubmitApps> </aclSubmitApps>");
+    out.println("  <aclAdministerApps> </aclAdministerApps>");
+    out.println("  <queue name=\"queue1\">");
+    out.println("    <aclSubmitApps>norealuserhasthisname</aclSubmitApps>");
+    out.println("    <aclAdministerApps>norealuserhasthisname</aclAdministerApps>");
+    out.println("  </queue>");
     out.println("</queue>");
     out.println("</allocations>");
     out.close();
@@ -1893,9 +1898,13 @@ public void testNotAllowSubmitApplication() throws Exception {
     PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE));
     out.println("<?xml version=\"1.0\"?>");
     out.println("<allocations>");
-    out.println("<queue name=\"queue1\">");
-    out.println("<aclSubmitApps>userallow</aclSubmitApps>");
-    out.println("<aclAdministerApps>userallow</aclAdministerApps>");
+    out.println("<queue name=\"root\">");
+    out.println("  <aclSubmitApps> </aclSubmitApps>");
+    out.println("  <aclAdministerApps> </aclAdministerApps>");
+    out.println("  <queue name=\"queue1\">");
+    out.println("    <aclSubmitApps>userallow</aclSubmitApps>");
+    out.println("    <aclAdministerApps>userallow</aclAdministerApps>");
+    out.println("  </queue>");
    out.println("</queue>");
     out.println("</allocations>");
     out.close();

diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/FairScheduler.apt.vm b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/FairScheduler.apt.vm
index 7008c207685..01f39cbd5fb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/FairScheduler.apt.vm
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/FairScheduler.apt.vm
@@ -221,10 +221,14 @@ Allocation file format
      for containers, but apps submitted later may run concurrently if there is
      leftover space on the cluster after satisfying the earlier app's requests.
 
-   * aclSubmitApps: a list of users that can submit apps to the queue. A (default)
-     value of "*" means that any users can submit apps. A queue inherits the ACL of
-     its parent, so if a queue2 descends from queue1, and user1 is in queue1's ACL,
-     and user2 is in queue2's ACL, then both users may submit to queue2.
+   * aclSubmitApps: a list of users and/or groups that can submit apps to the
+     queue. Refer to the ACLs section below for more info on the format of this
+     list and how queue ACLs work.
+
+   * aclAdministerApps: a list of users and/or groups that can administer a
+     queue. Currently the only administrative action is killing an application.
+     Refer to the ACLs section below for more info on the format of this list
+     and how queue ACLs work.
 
    * minSharePreemptionTimeout: number of seconds the queue is under its minimum share
      before it will try to preempt containers to take resources from other queues.
@@ -246,6 +250,24 @@ Allocation file format
 
   An example allocation file is given here:
 
+Queue Access Control Lists (ACLs)
+
+  Queue Access Control Lists (ACLs) allow administrators to control who may
+  take actions on particular queues. They are configured with the aclSubmitApps
+  and aclAdministerApps properties, which can be set per queue. Currently the
+  only supported administrative action is killing an application. Anybody who
+  may administer a queue may also submit applications to it. These properties
+  take values in a format like "user1,user2 group1,group2" or " group1,group2".
+  An action on a queue will be permitted if its user or group is in the ACL of
+  that queue or in the ACL of any of that queue's ancestors. So if queue2
+  is inside queue1, and user1 is in queue1's ACL, and user2 is in queue2's
+  ACL, then both users may submit to queue2.
+
+  The root queue's ACLs are "*" by default which, because ACLs are passed down,
+  means that everybody may submit to and kill applications from every queue.
+  To start restricting access, change the root queue's ACLs to something other
+  than "*".
+
 ---
 <?xml version="1.0"?>
 <allocations>
@@ -256,6 +278,7 @@ Allocation file format
     <weight>2.0</weight>
     <schedulingPolicy>fair</schedulingPolicy>
+    <aclSubmitApps>charlie</aclSubmitApps>
     <queue name="sample_sub_queue">
       <minResources>5000 mb,0vcores</minResources>
     </queue>
   </queue>
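The core behavioral change in the patch above is that FSQueue.hasAccess() now resolves an ACL by consulting QueueManager.getQueueAcl() for the queue itself and, failing that, deferring to the queue's parent, so an entry anywhere on the path up to the root grants access. A minimal sketch of that walk, assuming FSQueue exposes its parent (the helper name resolveAccess and the getParent() accessor below are illustrative, not necessarily the patch's exact code):

    // Walks from a queue up toward the root, permitting the action if any
    // queue on the path has an ACL entry for the user.
    boolean resolveAccess(FSQueue queue, QueueACL acl, UserGroupInformation user) {
      if (queueMgr.getQueueAcl(queue.getName(), acl).isUserAllowed(user)) {
        return true;  // this queue's own ACL allows the user
      }
      FSQueue parent = queue.getParent();  // assumed accessor
      return parent != null && resolveAccess(parent, acl, user);
    }

With the root queue defaulting to "*", this walk admits everybody until the root ACL is tightened, which is exactly the documented behavior.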
From 881d1d5c7e2810ed001fe4ca5dfdc375599d37cf Mon Sep 17 00:00:00 2001
From: Sanford Ryza <sandy@apache.org>
Date: Mon, 21 Oct 2013 18:37:32 +0000
Subject: [PATCH 02/11] Reverting "YARN-1258: Move to 2.2.1 in CHANGES.txt"
 because it contained unintended changes

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1534308 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-yarn-project/CHANGES.txt                    |  4 +-
 .../resourcemanager/scheduler/Queue.java           |  8 +++
 .../scheduler/capacity/LeafQueue.java              |  5 ++
 .../scheduler/capacity/ParentQueue.java            |  5 ++
 .../scheduler/fair/FSLeafQueue.java                |  5 +-
 .../scheduler/fair/FSQueue.java                    | 11 ++-
 .../scheduler/fair/QueueManager.java               | 37 ++++++----
 .../scheduler/fifo/FifoScheduler.java              |  1 +
 .../scheduler/fair/TestFairScheduler.java          | 69 ++++++-----------
 .../src/site/apt/FairScheduler.apt.vm              | 31 ++-------
 10 files changed, 93 insertions(+), 83 deletions(-)

diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index f4d7e72529c..430734b3570 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -61,6 +61,8 @@ Release 2.3.0 - UNRELEASED
 
     YARN-976. Document the meaning of a virtual core. (Sandy Ryza)
 
+    YARN-1258. Allow configuring the Fair Scheduler root queue (Sandy Ryza)
+
     YARN-1182. MiniYARNCluster creates and inits the RM/NM only on start()
     (Karthik Kambatla via Sandy Ryza)
 
@@ -93,8 +95,6 @@ Release 2.2.1 - UNRELEASED
     YARN-305. Fair scheduler logs too many "Node offered to app" messages.
     (Lohit Vijayarenu via Sandy Ryza)
 
-    YARN-1258. Allow configuring the Fair Scheduler root queue (Sandy Ryza)
-
   OPTIMIZATIONS
 
   BUG FIXES

diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/Queue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/Queue.java
index 0380d323824..2c9e7ad3680 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/Queue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/Queue.java
@@ -19,10 +19,12 @@
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
 
 import java.util.List;
+import java.util.Map;
 
 import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.yarn.api.records.QueueACL;
 import org.apache.hadoop.yarn.api.records.QueueInfo;
 import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
@@ -42,6 +44,12 @@ public interface Queue {
    */
   QueueMetrics getMetrics();
 
+  /**
+   * Get ACLs for the queue.
+   * @return ACLs for the queue
+   */
+  public Map<QueueACL, AccessControlList> getQueueAcls();
+
   /**
    * Get queue information
    * @param includeChildQueues include child queues?

diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
index d82e6737797..a09ea616c2c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
@@ -526,6 +526,11 @@ public synchronized float getUserLimitFactor() {
     return userLimitFactor;
   }
 
+  @Override
+  public synchronized Map<QueueACL, AccessControlList> getQueueAcls() {
+    return new HashMap<QueueACL, AccessControlList>(acls);
+  }
+
   @Override
   public synchronized QueueInfo getQueueInfo(
       boolean includeChildQueues, boolean recursive) {

diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
index 5ca953dc9ab..9a450069208 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
@@ -299,6 +299,11 @@ public synchronized QueueState getState() {
     return state;
   }
 
+  @Override
+  public synchronized Map<QueueACL, AccessControlList> getQueueAcls() {
+    return new HashMap<QueueACL, AccessControlList>(acls);
+  }
+
   @Override
   public synchronized QueueInfo getQueueInfo(
       boolean includeChildQueues, boolean recursive) {

diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
index 2da754c03c6..e0caed77f86 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
@@ -24,12 +24,14 @@
 import java.util.Comparator;
 import java.util.Iterator;
 import java.util.List;
+import java.util.Map;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.yarn.api.records.QueueACL;
 import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
 import org.apache.hadoop.yarn.api.records.Resource;
@@ -175,7 +177,8 @@ public List<QueueUserACLInfo> getQueueUserAclInfo(UserGroupInformation user) {
         recordFactory.newRecordInstance(QueueUserACLInfo.class);
     List<QueueACL> operations = new ArrayList<QueueACL>();
     for (QueueACL operation : QueueACL.values()) {
-      if (hasAccess(operation, user)) {
+      Map<QueueACL, AccessControlList> acls = queueMgr.getQueueAcls(getName());
+      if (acls.get(operation).isUserAllowed(user)) {
         operations.add(operation);
       }
     }

diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
index 9f3c4c97c5d..9cb0463a56e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
@@ -20,10 +20,13 @@
 
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
 
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.QueueACL;
 import org.apache.hadoop.yarn.api.records.QueueInfo;
@@ -132,6 +135,12 @@ public QueueInfo getQueueInfo(boolean includeChildQueues, boolean recursive) {
     return queueInfo;
   }
 
+  @Override
+  public Map<QueueACL, AccessControlList> getQueueAcls() {
+    Map<QueueACL, AccessControlList> acls = queueMgr.getQueueAcls(getName());
+    return new HashMap<QueueACL, AccessControlList>(acls);
+  }
+
   @Override
   public FSQueueMetrics getMetrics() {
     return metrics;
@@ -145,7 +154,7 @@ public void setFairShare(Resource fairShare) {
 
   public boolean hasAccess(QueueACL acl, UserGroupInformation user) {
     // Check if the leaf-queue allows access
-    if (queueMgr.getQueueAcl(getName(), acl).isUserAllowed(user)) {
+    if (queueMgr.getQueueAcls(getName()).get(acl).isUserAllowed(user)) {
       return true;
     }
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueueManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueueManager.java
index ca5a9d5b848..8f2fc1e9a1e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueueManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueueManager.java
@@ -72,9 +72,6 @@ public class QueueManager {
    * (this is done to prevent loading a file that hasn't been fully written).
    */
   public static final long ALLOC_RELOAD_WAIT = 5 * 1000;
-
-  private static final AccessControlList EVERYBODY_ACL = new AccessControlList("*");
-  private static final AccessControlList NOBODY_ACL = new AccessControlList(" ");
 
   private final FairScheduler scheduler;
 
@@ -384,6 +381,15 @@ public void reloadAllocs() throws IOException, ParserConfigurationException,
         queueMetrics.setMinShare(queue.getMinShare());
         queueMetrics.setMaxShare(queue.getMaxShare());
       }
+
+      // Root queue should have empty ACLs. As a queue's ACL is the union of
+      // its ACL and all its parents' ACLs, setting the roots' to empty will
+      // neither allow nor prohibit more access to its children.
+      Map<QueueACL, AccessControlList> rootAcls =
+          new HashMap<QueueACL, AccessControlList>();
+      rootAcls.put(QueueACL.SUBMIT_APPLICATIONS, new AccessControlList(" "));
+      rootAcls.put(QueueACL.ADMINISTER_QUEUE, new AccessControlList(" "));
+      queueAcls.put(ROOT_QUEUE, rootAcls);
 
       // Create all queus
       for (String name: queueNamesInAllocFile) {
@@ -448,10 +454,10 @@ private void loadQueue(String parentName, Element element, Map<String,
         policy.initialize(scheduler.getClusterCapacity());
         queuePolicies.put(queueName, policy);
       } else if ("aclSubmitApps".equals(field.getTagName())) {
-        String text = ((Text)field.getFirstChild()).getData();
+        String text = ((Text)field.getFirstChild()).getData().trim();
         acls.put(QueueACL.SUBMIT_APPLICATIONS, new AccessControlList(text));
       } else if ("aclAdministerApps".equals(field.getTagName())) {
-        String text = ((Text)field.getFirstChild()).getData();
+        String text = ((Text)field.getFirstChild()).getData().trim();
         acls.put(QueueACL.ADMINISTER_QUEUE, new AccessControlList(text));
       } else if ("queue".endsWith(field.getTagName()) ||
           "pool".equals(field.getTagName())) {
@@ -571,16 +577,21 @@ public long getFairSharePreemptionTimeout() {
 
   /**
    * Get the ACLs associated with this queue. If a given ACL is not explicitly
-   * configured, include the default value for that ACL. The default for the
-   * root queue is everybody ("*") and the default for all other queues is
-   * nobody ("")
+   * configured, include the default value for that ACL.
    */
-  public AccessControlList getQueueAcl(String queue, QueueACL operation) {
-    Map<QueueACL, AccessControlList> queueAcls = info.queueAcls.get(queue);
-    if (queueAcls == null || !queueAcls.containsKey(operation)) {
-      return (queue.equals(ROOT_QUEUE)) ? EVERYBODY_ACL : NOBODY_ACL;
+  public Map<QueueACL, AccessControlList> getQueueAcls(String queue) {
+    HashMap<QueueACL, AccessControlList> out = new HashMap<QueueACL, AccessControlList>();
+    Map<QueueACL, AccessControlList> queueAcl = info.queueAcls.get(queue);
+    if (queueAcl != null) {
+      out.putAll(queueAcl);
     }
-    return queueAcls.get(operation);
+    if (!out.containsKey(QueueACL.ADMINISTER_QUEUE)) {
+      out.put(QueueACL.ADMINISTER_QUEUE, new AccessControlList("*"));
+    }
+    if (!out.containsKey(QueueACL.SUBMIT_APPLICATIONS)) {
+      out.put(QueueACL.SUBMIT_APPLICATIONS, new AccessControlList("*"));
+    }
+    return out;
   }
 
   static class QueueManagerInfo {

diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
index 293811e63b0..ac7c68a5135 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
@@ -156,6 +156,7 @@ public QueueInfo getQueueInfo(
     return queueInfo;
   }
 
+  @Override
   public Map<QueueACL, AccessControlList> getQueueAcls() {
     Map<QueueACL, AccessControlList> acls = new HashMap<QueueACL, AccessControlList>();

diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
index c69b431a4dd..e0b81dc7747 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
@@ -865,25 +865,22 @@ public void testAllocationFileParsing() throws Exception {
     assertEquals(10, queueManager.getUserMaxApps("user1"));
     assertEquals(5, queueManager.getUserMaxApps("user2"));
 
-    // Root should get * ACL
-    assertEquals("*",queueManager.getQueueAcl("root",
-        QueueACL.ADMINISTER_QUEUE).getAclString());
-    assertEquals("*", queueManager.getQueueAcl("root",
-        QueueACL.SUBMIT_APPLICATIONS).getAclString());
-
     // Unspecified queues should get default ACL
-    assertEquals(" ",queueManager.getQueueAcl("root.queueA",
-        QueueACL.ADMINISTER_QUEUE).getAclString());
-    assertEquals(" ", queueManager.getQueueAcl("root.queueA",
-        QueueACL.SUBMIT_APPLICATIONS).getAclString());
+    Map<QueueACL, AccessControlList> aclsA = queueManager.getQueueAcls("root.queueA");
+    assertTrue(aclsA.containsKey(QueueACL.ADMINISTER_QUEUE));
+    assertEquals("*", aclsA.get(QueueACL.ADMINISTER_QUEUE).getAclString());
+    assertTrue(aclsA.containsKey(QueueACL.SUBMIT_APPLICATIONS));
+    assertEquals("*", aclsA.get(QueueACL.SUBMIT_APPLICATIONS).getAclString());
 
     // Queue B ACL
-    assertEquals("alice,bob admins",queueManager.getQueueAcl("root.queueB",
-        QueueACL.ADMINISTER_QUEUE).getAclString());
+    Map<QueueACL, AccessControlList> aclsB = queueManager.getQueueAcls("root.queueB");
+    assertTrue(aclsB.containsKey(QueueACL.ADMINISTER_QUEUE));
+    assertEquals("alice,bob admins", aclsB.get(QueueACL.ADMINISTER_QUEUE).getAclString());
 
-    // Queue C ACL
-    assertEquals("alice,bob admins",queueManager.getQueueAcl("root.queueC",
-        QueueACL.SUBMIT_APPLICATIONS).getAclString());
+    // Queue c ACL
+    Map<QueueACL, AccessControlList> aclsC = queueManager.getQueueAcls("root.queueC");
+    assertTrue(aclsC.containsKey(QueueACL.SUBMIT_APPLICATIONS));
+    assertEquals("alice,bob admins", aclsC.get(QueueACL.SUBMIT_APPLICATIONS).getAclString());
 
     assertEquals(120000, queueManager.getMinSharePreemptionTimeout("root." +
         YarnConfiguration.DEFAULT_QUEUE_NAME));
@@ -1066,19 +1063,21 @@ public void testBackwardsCompatibleAllocationFileParsing() throws Exception {
     assertEquals(5, queueManager.getUserMaxApps("user2"));
 
     // Unspecified queues should get default ACL
-    assertEquals(" ", queueManager.getQueueAcl("root.queueA",
-        QueueACL.ADMINISTER_QUEUE).getAclString());
-    assertEquals(" ", queueManager.getQueueAcl("root.queueA",
-        QueueACL.SUBMIT_APPLICATIONS).getAclString());
+    Map<QueueACL, AccessControlList> aclsA = queueManager.getQueueAcls("queueA");
+    assertTrue(aclsA.containsKey(QueueACL.ADMINISTER_QUEUE));
+    assertEquals("*", aclsA.get(QueueACL.ADMINISTER_QUEUE).getAclString());
+    assertTrue(aclsA.containsKey(QueueACL.SUBMIT_APPLICATIONS));
+    assertEquals("*", aclsA.get(QueueACL.SUBMIT_APPLICATIONS).getAclString());
 
     // Queue B ACL
-    assertEquals("alice,bob admins", queueManager.getQueueAcl("root.queueB",
-        QueueACL.ADMINISTER_QUEUE).getAclString());
-
-    // Queue C ACL
-    assertEquals("alice,bob admins", queueManager.getQueueAcl("root.queueC",
-        QueueACL.SUBMIT_APPLICATIONS).getAclString());
+    Map<QueueACL, AccessControlList> aclsB = queueManager.getQueueAcls("root.queueB");
+    assertTrue(aclsB.containsKey(QueueACL.ADMINISTER_QUEUE));
+    assertEquals("alice,bob admins", aclsB.get(QueueACL.ADMINISTER_QUEUE).getAclString());
 
+    // Queue c ACL
+    Map<QueueACL, AccessControlList> aclsC = queueManager.getQueueAcls("root.queueC");
+    assertTrue(aclsC.containsKey(QueueACL.SUBMIT_APPLICATIONS));
+    assertEquals("alice,bob admins", aclsC.get(QueueACL.SUBMIT_APPLICATIONS).getAclString());
     assertEquals(120000, queueManager.getMinSharePreemptionTimeout("root." +
         YarnConfiguration.DEFAULT_QUEUE_NAME));
@@ -1665,13 +1664,9 @@ public void testAclSubmitApplication() throws Exception {
     PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE));
     out.println("<?xml version=\"1.0\"?>");
     out.println("<allocations>");
-    out.println("<queue name=\"root\">");
-    out.println("  <aclSubmitApps> </aclSubmitApps>");
-    out.println("  <aclAdministerApps> </aclAdministerApps>");
-    out.println("  <queue name=\"queue1\">");
-    out.println("    <aclSubmitApps>norealuserhasthisname</aclSubmitApps>");
-    out.println("    <aclAdministerApps>norealuserhasthisname</aclAdministerApps>");
-    out.println("  </queue>");
+    out.println("<queue name=\"queue1\">");
+    out.println("<aclSubmitApps>norealuserhasthisname</aclSubmitApps>");
+    out.println("<aclAdministerApps>norealuserhasthisname</aclAdministerApps>");
     out.println("</queue>");
     out.println("</allocations>");
     out.close();
@@ -1898,13 +1893,9 @@ public void testNotAllowSubmitApplication() throws Exception {
     PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE));
     out.println("<?xml version=\"1.0\"?>");
     out.println("<allocations>");
-    out.println("<queue name=\"root\">");
-    out.println("  <aclSubmitApps> </aclSubmitApps>");
-    out.println("  <aclAdministerApps> </aclAdministerApps>");
-    out.println("  <queue name=\"queue1\">");
-    out.println("    <aclSubmitApps>userallow</aclSubmitApps>");
-    out.println("    <aclAdministerApps>userallow</aclAdministerApps>");
-    out.println("  </queue>");
+    out.println("<queue name=\"queue1\">");
+    out.println("<aclSubmitApps>userallow</aclSubmitApps>");
+    out.println("<aclAdministerApps>userallow</aclAdministerApps>");
     out.println("</queue>");
     out.println("</allocations>");
     out.close();

diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/FairScheduler.apt.vm b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/FairScheduler.apt.vm
index 01f39cbd5fb..7008c207685 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/FairScheduler.apt.vm
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/FairScheduler.apt.vm
@@ -221,14 +221,10 @@ Allocation file format
      for containers, but apps submitted later may run concurrently if there is
      leftover space on the cluster after satisfying the earlier app's requests.
 
-   * aclSubmitApps: a list of users and/or groups that can submit apps to the
-     queue. Refer to the ACLs section below for more info on the format of this
-     list and how queue ACLs work.
-
-   * aclAdministerApps: a list of users and/or groups that can administer a
-     queue. Currently the only administrative action is killing an application.
-     Refer to the ACLs section below for more info on the format of this list
-     and how queue ACLs work.
+   * aclSubmitApps: a list of users that can submit apps to the queue. A (default)
+     value of "*" means that any users can submit apps. A queue inherits the ACL of
+     its parent, so if a queue2 descends from queue1, and user1 is in queue1's ACL,
+     and user2 is in queue2's ACL, then both users may submit to queue2.
 
    * minSharePreemptionTimeout: number of seconds the queue is under its minimum share
      before it will try to preempt containers to take resources from other queues.
@@ -250,24 +246,6 @@ Allocation file format
 
   An example allocation file is given here:
 
-Queue Access Control Lists (ACLs)
-
-  Queue Access Control Lists (ACLs) allow administrators to control who may
-  take actions on particular queues. They are configured with the aclSubmitApps
-  and aclAdministerApps properties, which can be set per queue. Currently the
-  only supported administrative action is killing an application. Anybody who
-  may administer a queue may also submit applications to it. These properties
-  take values in a format like "user1,user2 group1,group2" or " group1,group2".
-  An action on a queue will be permitted if its user or group is in the ACL of
-  that queue or in the ACL of any of that queue's ancestors. So if queue2
-  is inside queue1, and user1 is in queue1's ACL, and user2 is in queue2's
-  ACL, then both users may submit to queue2.
-
-  The root queue's ACLs are "*" by default which, because ACLs are passed down,
-  means that everybody may submit to and kill applications from every queue.
-  To start restricting access, change the root queue's ACLs to something other
-  than "*".
-
 ---
 <?xml version="1.0"?>
 <allocations>
@@ -278,7 +256,6 @@ Queue Access Control Lists (ACLs)
     <weight>2.0</weight>
     <schedulingPolicy>fair</schedulingPolicy>
-    <aclSubmitApps>charlie</aclSubmitApps>
     <queue name="sample_sub_queue">
       <minResources>5000 mb,0vcores</minResources>
     </queue>
   </queue>

From 65a9e3aa00711fab01628a0b95a4c539c93e5e0d Mon Sep 17 00:00:00 2001
From: Sanford Ryza <sandy@apache.org>
Date: Mon, 21 Oct 2013 18:39:02 +0000
Subject: [PATCH 03/11] YARN-1258: Move to 2.2.1 in CHANGES.txt

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1534310 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-yarn-project/CHANGES.txt | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 430734b3570..f4d7e72529c 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -61,8 +61,6 @@ Release 2.3.0 - UNRELEASED
 
     YARN-976. Document the meaning of a virtual core. (Sandy Ryza)
 
-    YARN-1258. Allow configuring the Fair Scheduler root queue (Sandy Ryza)
-
     YARN-1182. MiniYARNCluster creates and inits the RM/NM only on start()
     (Karthik Kambatla via Sandy Ryza)
 
@@ -95,6 +93,8 @@ Release 2.2.1 - UNRELEASED
     YARN-305. Fair scheduler logs too many "Node offered to app" messages.
     (Lohit Vijayarenu via Sandy Ryza)
 
+    YARN-1258. Allow configuring the Fair Scheduler root queue (Sandy Ryza)
+
   OPTIMIZATIONS
 
   BUG FIXES
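Patch 04 below re-applies the YARN-1288 change that the revert above backed out. Its key piece is QueueManager.getQueueAcl(), which replaces the map-returning getQueueAcls(): an ACL that is not explicitly configured now falls back to everybody ("*") on the root queue and nobody (" ") on every other queue, instead of "*" everywhere. Restated as a standalone sketch mirroring the patch's own logic (the surrounding class and the info/queueAcls fields come from QueueManager):

    // Resolve one ACL for one queue, falling back to the defaults:
    // root -> EVERYBODY_ACL ("*"), any other queue -> NOBODY_ACL (" ").
    public AccessControlList getQueueAcl(String queue, QueueACL operation) {
      Map<QueueACL, AccessControlList> queueAcls = info.queueAcls.get(queue);
      if (queueAcls == null || !queueAcls.containsKey(operation)) {
        return queue.equals(ROOT_QUEUE) ? EVERYBODY_ACL : NOBODY_ACL;
      }
      return queueAcls.get(operation);
    }

Combined with the ancestor walk in FSQueue.hasAccess(), the effective out-of-the-box behavior is unchanged (everybody may still submit anywhere via the root's "*"), but an administrator can now restrict access cluster-wide by configuring the root queue alone.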
Make Fair Scheduler ACLs more user friendly (Sandy Ryza) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/Queue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/Queue.java index 2c9e7ad3680..0380d323824 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/Queue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/Queue.java @@ -19,12 +19,10 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler; import java.util.List; -import java.util.Map; import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate; import org.apache.hadoop.classification.InterfaceStability.Evolving; import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.security.authorize.AccessControlList; import org.apache.hadoop.yarn.api.records.QueueACL; import org.apache.hadoop.yarn.api.records.QueueInfo; import org.apache.hadoop.yarn.api.records.QueueUserACLInfo; @@ -44,12 +42,6 @@ public interface Queue { */ QueueMetrics getMetrics(); - /** - * Get ACLs for the queue. - * @return ACLs for the queue - */ - public Map getQueueAcls(); - /** * Get queue information * @param includeChildQueues include child queues? diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java index a09ea616c2c..d82e6737797 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java @@ -526,11 +526,6 @@ public synchronized float getUserLimitFactor() { return userLimitFactor; } - @Override - public synchronized Map getQueueAcls() { - return new HashMap(acls); - } - @Override public synchronized QueueInfo getQueueInfo( boolean includeChildQueues, boolean recursive) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java index 9a450069208..5ca953dc9ab 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java @@ -299,11 +299,6 @@ public synchronized QueueState 
getState() { return state; } - @Override - public synchronized Map getQueueAcls() { - return new HashMap(acls); - } - @Override public synchronized QueueInfo getQueueInfo( boolean includeChildQueues, boolean recursive) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java index e0caed77f86..2da754c03c6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java @@ -24,14 +24,12 @@ import java.util.Comparator; import java.util.Iterator; import java.util.List; -import java.util.Map; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.security.authorize.AccessControlList; import org.apache.hadoop.yarn.api.records.QueueACL; import org.apache.hadoop.yarn.api.records.QueueUserACLInfo; import org.apache.hadoop.yarn.api.records.Resource; @@ -177,8 +175,7 @@ public List getQueueUserAclInfo(UserGroupInformation user) { recordFactory.newRecordInstance(QueueUserACLInfo.class); List operations = new ArrayList(); for (QueueACL operation : QueueACL.values()) { - Map acls = queueMgr.getQueueAcls(getName()); - if (acls.get(operation).isUserAllowed(user)) { + if (hasAccess(operation, user)) { operations.add(operation); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java index 9cb0463a56e..9f3c4c97c5d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java @@ -20,13 +20,10 @@ import java.util.ArrayList; import java.util.Collection; -import java.util.HashMap; -import java.util.Map; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.security.authorize.AccessControlList; import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.QueueACL; import org.apache.hadoop.yarn.api.records.QueueInfo; @@ -135,12 +132,6 @@ public QueueInfo getQueueInfo(boolean includeChildQueues, boolean recursive) { return queueInfo; } - @Override - public Map getQueueAcls() { - Map acls = queueMgr.getQueueAcls(getName()); - return new HashMap(acls); - } - 
@Override public FSQueueMetrics getMetrics() { return metrics; @@ -154,7 +145,7 @@ public void setFairShare(Resource fairShare) { public boolean hasAccess(QueueACL acl, UserGroupInformation user) { // Check if the leaf-queue allows access - if (queueMgr.getQueueAcls(getName()).get(acl).isUserAllowed(user)) { + if (queueMgr.getQueueAcl(getName(), acl).isUserAllowed(user)) { return true; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueueManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueueManager.java index 8f2fc1e9a1e..ca5a9d5b848 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueueManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueueManager.java @@ -72,6 +72,9 @@ public class QueueManager { * (this is done to prevent loading a file that hasn't been fully written). */ public static final long ALLOC_RELOAD_WAIT = 5 * 1000; + + private static final AccessControlList EVERYBODY_ACL = new AccessControlList("*"); + private static final AccessControlList NOBODY_ACL = new AccessControlList(" "); private final FairScheduler scheduler; @@ -381,15 +384,6 @@ public void reloadAllocs() throws IOException, ParserConfigurationException, queueMetrics.setMinShare(queue.getMinShare()); queueMetrics.setMaxShare(queue.getMaxShare()); } - - // Root queue should have empty ACLs. As a queue's ACL is the union of - // its ACL and all its parents' ACLs, setting the roots' to empty will - // neither allow nor prohibit more access to its children. - Map rootAcls = - new HashMap(); - rootAcls.put(QueueACL.SUBMIT_APPLICATIONS, new AccessControlList(" ")); - rootAcls.put(QueueACL.ADMINISTER_QUEUE, new AccessControlList(" ")); - queueAcls.put(ROOT_QUEUE, rootAcls); // Create all queus for (String name: queueNamesInAllocFile) { @@ -454,10 +448,10 @@ private void loadQueue(String parentName, Element element, Map policy.initialize(scheduler.getClusterCapacity()); queuePolicies.put(queueName, policy); } else if ("aclSubmitApps".equals(field.getTagName())) { - String text = ((Text)field.getFirstChild()).getData().trim(); + String text = ((Text)field.getFirstChild()).getData(); acls.put(QueueACL.SUBMIT_APPLICATIONS, new AccessControlList(text)); } else if ("aclAdministerApps".equals(field.getTagName())) { - String text = ((Text)field.getFirstChild()).getData().trim(); + String text = ((Text)field.getFirstChild()).getData(); acls.put(QueueACL.ADMINISTER_QUEUE, new AccessControlList(text)); } else if ("queue".endsWith(field.getTagName()) || "pool".equals(field.getTagName())) { @@ -577,21 +571,16 @@ public long getFairSharePreemptionTimeout() { /** * Get the ACLs associated with this queue. If a given ACL is not explicitly - * configured, include the default value for that ACL. + * configured, include the default value for that ACL. 
The default for the + * root queue is everybody ("*") and the default for all other queues is + * nobody ("") */ - public Map getQueueAcls(String queue) { - HashMap out = new HashMap(); - Map queueAcl = info.queueAcls.get(queue); - if (queueAcl != null) { - out.putAll(queueAcl); + public AccessControlList getQueueAcl(String queue, QueueACL operation) { + Map queueAcls = info.queueAcls.get(queue); + if (queueAcls == null || !queueAcls.containsKey(operation)) { + return (queue.equals(ROOT_QUEUE)) ? EVERYBODY_ACL : NOBODY_ACL; } - if (!out.containsKey(QueueACL.ADMINISTER_QUEUE)) { - out.put(QueueACL.ADMINISTER_QUEUE, new AccessControlList("*")); - } - if (!out.containsKey(QueueACL.SUBMIT_APPLICATIONS)) { - out.put(QueueACL.SUBMIT_APPLICATIONS, new AccessControlList("*")); - } - return out; + return queueAcls.get(operation); } static class QueueManagerInfo { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java index ac7c68a5135..293811e63b0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java @@ -156,7 +156,6 @@ public QueueInfo getQueueInfo( return queueInfo; } - @Override public Map getQueueAcls() { Map acls = new HashMap(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java index e0b81dc7747..c69b431a4dd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java @@ -865,22 +865,25 @@ public void testAllocationFileParsing() throws Exception { assertEquals(10, queueManager.getUserMaxApps("user1")); assertEquals(5, queueManager.getUserMaxApps("user2")); + // Root should get * ACL + assertEquals("*",queueManager.getQueueAcl("root", + QueueACL.ADMINISTER_QUEUE).getAclString()); + assertEquals("*", queueManager.getQueueAcl("root", + QueueACL.SUBMIT_APPLICATIONS).getAclString()); + // Unspecified queues should get default ACL - Map aclsA = queueManager.getQueueAcls("root.queueA"); - assertTrue(aclsA.containsKey(QueueACL.ADMINISTER_QUEUE)); - assertEquals("*", aclsA.get(QueueACL.ADMINISTER_QUEUE).getAclString()); - assertTrue(aclsA.containsKey(QueueACL.SUBMIT_APPLICATIONS)); - assertEquals("*", aclsA.get(QueueACL.SUBMIT_APPLICATIONS).getAclString()); + assertEquals(" ",queueManager.getQueueAcl("root.queueA", + QueueACL.ADMINISTER_QUEUE).getAclString()); + assertEquals(" ", queueManager.getQueueAcl("root.queueA", + 
QueueACL.SUBMIT_APPLICATIONS).getAclString()); // Queue B ACL - Map aclsB = queueManager.getQueueAcls("root.queueB"); - assertTrue(aclsB.containsKey(QueueACL.ADMINISTER_QUEUE)); - assertEquals("alice,bob admins", aclsB.get(QueueACL.ADMINISTER_QUEUE).getAclString()); + assertEquals("alice,bob admins",queueManager.getQueueAcl("root.queueB", + QueueACL.ADMINISTER_QUEUE).getAclString()); - // Queue c ACL - Map aclsC = queueManager.getQueueAcls("root.queueC"); - assertTrue(aclsC.containsKey(QueueACL.SUBMIT_APPLICATIONS)); - assertEquals("alice,bob admins", aclsC.get(QueueACL.SUBMIT_APPLICATIONS).getAclString()); + // Queue C ACL + assertEquals("alice,bob admins",queueManager.getQueueAcl("root.queueC", + QueueACL.SUBMIT_APPLICATIONS).getAclString()); assertEquals(120000, queueManager.getMinSharePreemptionTimeout("root." + YarnConfiguration.DEFAULT_QUEUE_NAME)); @@ -1063,21 +1066,19 @@ public void testBackwardsCompatibleAllocationFileParsing() throws Exception { assertEquals(5, queueManager.getUserMaxApps("user2")); // Unspecified queues should get default ACL - Map aclsA = queueManager.getQueueAcls("queueA"); - assertTrue(aclsA.containsKey(QueueACL.ADMINISTER_QUEUE)); - assertEquals("*", aclsA.get(QueueACL.ADMINISTER_QUEUE).getAclString()); - assertTrue(aclsA.containsKey(QueueACL.SUBMIT_APPLICATIONS)); - assertEquals("*", aclsA.get(QueueACL.SUBMIT_APPLICATIONS).getAclString()); + assertEquals(" ", queueManager.getQueueAcl("root.queueA", + QueueACL.ADMINISTER_QUEUE).getAclString()); + assertEquals(" ", queueManager.getQueueAcl("root.queueA", + QueueACL.SUBMIT_APPLICATIONS).getAclString()); // Queue B ACL - Map aclsB = queueManager.getQueueAcls("root.queueB"); - assertTrue(aclsB.containsKey(QueueACL.ADMINISTER_QUEUE)); - assertEquals("alice,bob admins", aclsB.get(QueueACL.ADMINISTER_QUEUE).getAclString()); + assertEquals("alice,bob admins", queueManager.getQueueAcl("root.queueB", + QueueACL.ADMINISTER_QUEUE).getAclString()); + + // Queue C ACL + assertEquals("alice,bob admins", queueManager.getQueueAcl("root.queueC", + QueueACL.SUBMIT_APPLICATIONS).getAclString()); - // Queue c ACL - Map aclsC = queueManager.getQueueAcls("root.queueC"); - assertTrue(aclsC.containsKey(QueueACL.SUBMIT_APPLICATIONS)); - assertEquals("alice,bob admins", aclsC.get(QueueACL.SUBMIT_APPLICATIONS).getAclString()); assertEquals(120000, queueManager.getMinSharePreemptionTimeout("root." 
+ YarnConfiguration.DEFAULT_QUEUE_NAME)); @@ -1664,9 +1665,13 @@ public void testAclSubmitApplication() throws Exception { PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE)); out.println(""); out.println(""); - out.println(""); - out.println("norealuserhasthisname"); - out.println("norealuserhasthisname"); + out.println(""); + out.println(" "); + out.println(" "); + out.println(" "); + out.println(" norealuserhasthisname"); + out.println(" norealuserhasthisname"); + out.println(" "); out.println(""); out.println(""); out.close(); @@ -1893,9 +1898,13 @@ public void testNotAllowSubmitApplication() throws Exception { PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE)); out.println(""); out.println(""); - out.println(""); - out.println("userallow"); - out.println("userallow"); + out.println(""); + out.println(" "); + out.println(" "); + out.println(" "); + out.println(" userallow"); + out.println(" userallow"); + out.println(" "); out.println(""); out.println(""); out.close(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/FairScheduler.apt.vm b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/FairScheduler.apt.vm index 7008c207685..01f39cbd5fb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/FairScheduler.apt.vm +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/FairScheduler.apt.vm @@ -221,10 +221,14 @@ Allocation file format for containers, but apps submitted later may run concurrently if there is leftover space on the cluster after satisfying the earlier app's requests. - * aclSubmitApps: a list of users that can submit apps to the queue. A (default) - value of "*" means that any users can submit apps. A queue inherits the ACL of - its parent, so if a queue2 descends from queue1, and user1 is in queue1's ACL, - and user2 is in queue2's ACL, then both users may submit to queue2. + * aclSubmitApps: a list of users and/or groups that can submit apps to the + queue. Refer to the ACLs section below for more info on the format of this + list and how queue ACLs work. + + * aclAdministerApps: a list of users and/or groups that can administer a + queue. Currently the only administrative action is killing an application. + Refer to the ACLs section below for more info on the format of this list + and how queue ACLs work. * minSharePreemptionTimeout: number of seconds the queue is under its minimum share before it will try to preempt containers to take resources from other queues. @@ -246,6 +250,24 @@ Allocation file format An example allocation file is given here: +Queue Access Control Lists (ACLs) + + Queue Access Control Lists (ACLs) allow administrators to control who may + take actions on particular queues. They are configured with the aclSubmitApps + and aclAdministerApps properties, which can be set per queue. Currently the + only supported administrative action is killing an application. Anybody who + may administer a queue may also submit applications to it. These properties + take values in a format like "user1,user2 group1,group2" or " group1,group2". + An action on a queue will be permitted if its user or group is in the ACL of + that queue or in the ACL of any of that queue's ancestors. So if queue2 + is inside queue1, and user1 is in queue1's ACL, and user2 is in queue2's + ACL, then both users may submit to queue2. + + The root queue's ACLs are "*" by default which, because ACLs are passed down, + means that everybody may submit to and kill applications from every queue. 
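For example, the following allocation file sketch (queue, user, and group names are hypothetical) locks the root queue down and then grants access selectively. Note that in the "user1,user2 group1,group2" format a leading space means "no users, only these groups", and a single space means nobody:

----
<?xml version="1.0"?>
<allocations>
  <queue name="root">
    <aclSubmitApps> </aclSubmitApps>
    <aclAdministerApps> admins</aclAdministerApps>
    <queue name="engineering">
      <aclSubmitApps>user1,user2 group1</aclSubmitApps>
    </queue>
  </queue>
</allocations>
----

With this file, user1, user2, and members of group1 may submit to root.engineering, while members of the admins group may administer, and therefore also submit to, every queue under root.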
+ To start restricting access, change the root queue's ACLs to something other + than "*". + --- @@ -256,6 +278,7 @@ Allocation file format 2.0 fair + charlie 5000 mb,0vcores From fc99f3e8a3950704acd1a067ba20f96053e44e82 Mon Sep 17 00:00:00 2001 From: Jing Zhao Date: Mon, 21 Oct 2013 20:42:13 +0000 Subject: [PATCH 05/11] HDFS-5382. Implement the UI of browsing filesystems in HTML 5 page. Contributed by Haohui Mai. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1534368 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + hadoop-hdfs-project/hadoop-hdfs/pom.xml | 2 + .../web/resources/DatanodeWebHdfsMethods.java | 11 +- .../src/main/webapps/hdfs/dfshealth.dust.html | 6 +- .../src/main/webapps/hdfs/dfshealth.js | 32 +-- .../hdfs/explorer-block-info.dust.html | 13 ++ .../src/main/webapps/hdfs/explorer.dust.html | 26 +++ .../src/main/webapps/hdfs/explorer.html | 86 +++++++++ .../src/main/webapps/hdfs/explorer.js | 182 ++++++++++++++++++ 9 files changed, 325 insertions(+), 36 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer-block-info.dust.html create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.dust.html create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index e6e096ab122..8944c37584e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -261,6 +261,9 @@ Release 2.3.0 - UNRELEASED HDFS-5379. Update links to datanode information in dfshealth.html. (Haohui Mai via jing9) + HDFS-5382. Implement the UI of browsing filesystems in HTML 5 page. (Haohui + Mai via jing9) + IMPROVEMENTS HDFS-5267. Remove volatile from LightWeightHashSet. (Junping Du via llu) diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml index 86a3c5ae6a9..4fa7213746b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml @@ -550,6 +550,8 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> src/main/webapps/static/dust-full-2.0.0.min.js src/main/webapps/static/dust-helpers-1.1.1.min.js src/main/webapps/hdfs/dfshealth.dust.html + src/main/webapps/hdfs/explorer-block-info.dust.html + src/main/webapps/hdfs/explorer.dust.html diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java index 973d0916b90..6e8d605d767 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java @@ -413,8 +413,15 @@ private Response get( final long n = length.getValue() != null ? Math.min(length.getValue(), in.getVisibleLength() - offset.getValue()) : in.getVisibleLength() - offset.getValue(); - return Response.ok(new OpenEntity(in, n, dfsclient)).type( - MediaType.APPLICATION_OCTET_STREAM).build(); + + /** + * Allow the Web UI to perform an AJAX request to get the data. 
+ */ + return Response.ok(new OpenEntity(in, n, dfsclient)) + .type(MediaType.APPLICATION_OCTET_STREAM) + .header("Access-Control-Allow-Methods", "GET") + .header("Access-Control-Allow-Origin", "*") + .build(); } case GETFILECHECKSUM: { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.dust.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.dust.html index 9924825ea55..e7bb5a2b123 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.dust.html +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.dust.html @@ -47,7 +47,7 @@

NameNode '{HostAndPort}' ({State})

-Browse the filesystem NameNode Logs +Browse the filesystem NameNode Logs
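The Access-Control-Allow-* headers added in the DatanodeWebHdfsMethods hunk above are what let these HTML5 pages read file data from DataNodes with cross-origin AJAX requests. A sketch of what a client would observe (host, port, and path are placeholders):

----
$ curl -i "http://<datanode>:<http-port>/webhdfs/v1/<path>?op=OPEN"
HTTP/1.1 200 OK
Access-Control-Allow-Methods: GET
Access-Control-Allow-Origin: *
Content-Type: application/octet-stream
----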
@@ -56,7 +56,7 @@

NameNode '{HostAndPort}' ({State})

- Security is {#nnstat}{#SecurityModeEnabled}on{:else}off{/SecurityModeEnabled}{/nnstat}.

+ Security is {#nnstat}{#SecurityEnabled}on{:else}off{/SecurityEnabled}{/nnstat}.

{#nn}{#Safemode}{.}{:else}Safemode is off.{/Safemode}{/nn}

@@ -207,7 +207,7 @@

NameNode '{HostAndPort}' ({State})

{#nn.LiveNodes} - {name} ({xferaddr}) + {name} ({xferaddr}) {lastContact} {adminState} {capacity|fmt_bytes} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js index 852b8618449..a1fea90e3ea 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js @@ -19,19 +19,6 @@ "use strict"; var data = {}; - function generate_browse_dn_link(info_http_addr, info_https_addr) { - var is_https = window.location.protocol === 'https:'; - var authority = is_https ? info_https_addr : info_http_addr; - - var nn_info_port = window.location.port; - if (nn_info_port === "") { - nn_info_port = is_https ? 443 : 80; - } - - var l = '//' + authority + '/browseDirectory.jsp?dir=%2F&namenodeInfoPort=' + - nn_info_port + '&nnaddr=' + data.nnstat.HostAndPort; - return l; - } function render() { var helpers = { @@ -56,24 +43,7 @@ load_templates(dust, TEMPLATES, function() { dust.render('dfshealth', base.push(data), function(err, out) { - - $('#panel').append(out); - - $('#browse-dir-first').click(function () { - var len = data.nn.LiveNodes.length; - if (len < 1) { - show_err_msg('Cannot browse the DFS since there are no live nodes available.'); - return false; - } - - var dn = data.nn.LiveNodes[Math.floor(Math.random() * len)]; - window.location.href = generate_browse_dn_link(dn.infoAddr, dn.infoSecureAddr); - }); - - $('.browse-dir-links').click(function () { - var http_addr = $(this).attr('info-http-addr'), https_addr = $(this).attr('info-https-addr'); - window.location.href = generate_browse_dn_link(http_addr, https_addr); - }); + $('#panel').html(out); }); }, function () { show_err_msg('Failed to load the page.'); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer-block-info.dust.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer-block-info.dust.html new file mode 100644 index 00000000000..5e42b6881d9 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer-block-info.dust.html @@ -0,0 +1,13 @@ +{#block} +

Block ID: {blockId}

+

Block Pool ID: {blockPoolId}

+

Generation Stamp: {generationStamp}

+

Size: {numBytes}

+{/block} +

Availability: +

    +{#locations} +
+ {hostName}
+{/locations} +
+
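This template renders one entry of the locatedBlocks array that explorer.js (below) fetches with the WebHDFS GET_BLOCK_LOCATIONS operation. A sketch of the JSON shape the template fields map onto, with made-up values:

----
{ "LocatedBlocks": { "fileLength": 1048576,
    "locatedBlocks": [ {
      "block": { "blockId": 1073741825, "blockPoolId": "BP-1-127.0.0.1-1382400000000",
                 "generationStamp": 1001, "numBytes": 1048576 },
      "locations": [ { "hostName": "datanode1.example.com" } ]
    } ] } }
----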

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.dust.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.dust.html new file mode 100644 index 00000000000..7e45860a403 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.dust.html @@ -0,0 +1,26 @@ + + + + + + + + + + + + + +{#FileStatus} + + + + + + + + + +{/FileStatus} + +
Permission | Owner | Group | Size | Replication | Block Size | Name
{#helper_to_permission/} | {owner} | {group} | {length|fmt_bytes} | {replication} | {blockSize|fmt_bytes} | {pathSuffix}
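Each row of the table above is populated from a FileStatus object in the JSON that WebHDFS returns for a directory listing. A sketch of the request and an abbreviated response (host, port, path, and values are illustrative):

----
$ curl "http://<namenode>:<http-port>/webhdfs/v1/tmp?op=LISTSTATUS"
{"FileStatuses":{"FileStatus":[
  {"pathSuffix":"logs","type":"DIRECTORY","owner":"hdfs","group":"supergroup",
   "permission":"755","replication":0,"length":0,"blockSize":0,
   "accessTime":0,"modificationTime":1382400000000}
]}}
----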
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html new file mode 100644 index 00000000000..ffa0935a777 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html @@ -0,0 +1,86 @@ + + + + + + + Browsing HDFS + + + +
+ + +
+
+
+
+
+
+
+
+ +
+

Hadoop, 2013.

+ + diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js new file mode 100644 index 00000000000..72d3c8d0495 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js @@ -0,0 +1,182 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +(function() { + "use strict"; + + // The chunk size of tailing the files, i.e., how many bytes will be shown + // in the preview. + var TAIL_CHUNK_SIZE = 32768; + var helpers = { + 'helper_to_permission': function(chunk, ctx, bodies, params) { + var p = ctx.current().permission; + var dir = ctx.current().type == 'DIRECTORY' ? 'd' : '-'; + var symbols = [ '---', '--x', '-w-', '-wx', 'r--', 'r-x', 'rw-', 'rwx' ]; + var sticky = p > 1000; + + var res = ""; + for (var i = 0; i < 3; ++i) { + res = symbols[(p % 10)] + res; + p = Math.floor(p / 10); + } + + if (sticky) { + var exec = ((parms.perm % 10) & 1) == 1; + res[res.length - 1] = exec ? 't' : 'T'; + } + + chunk.write(dir + res); + return chunk; + } + }; + + var base = dust.makeBase(helpers); + var current_directory = ""; + + function show_err_msg(msg) { + $('#alert-panel-body').html(msg); + $('#alert-panel').show(); + } + + function network_error_handler(url) { + return function (jqxhr, text, err) { + var msg = '

Failed to retrieve data from ' + url + ', cause: ' + err + '

'; + if (url.indexOf('/webhdfs/v1') === 0) { + msg += '

WebHDFS might be disabled. WebHDFS is required to browse the filesystem.

'; + } + show_err_msg(msg); + }; + } + + function append_path(prefix, s) { + var l = prefix.length; + var p = l > 0 && prefix[l - 1] == '/' ? prefix.substring(0, l - 1) : prefix; + return p + '/' + s; + } + + function get_response(data, type) { + return data[type] !== undefined ? data[type] : null; + } + + function get_response_err_msg(data) { + var msg = data.RemoteException !== undefined ? data.RemoteException.message : ""; + return msg; + } + + function view_file_details(path, abs_path) { + function show_block_info(blocks) { + var menus = $('#file-info-blockinfo-list'); + menus.empty(); + + menus.data("blocks", blocks); + menus.change(function() { + var d = $(this).data('blocks')[$(this).val()]; + if (d === undefined) { + return; + } + + dust.render('block-info', d, function(err, out) { + $('#file-info-blockinfo-body').html(out); + }); + + }); + for (var i = 0; i < blocks.length; ++i) { + var item = $(''); + menus.append(item); + } + menus.change(); + } + + var url = '/webhdfs/v1' + abs_path + '?op=GET_BLOCK_LOCATIONS'; + $.ajax({"url": url, "crossDomain": true}).done(function(data) { + var d = get_response(data, "LocatedBlocks"); + if (d === null) { + show_err_msg(get_response_err_msg(data)); + return; + } + + $('#file-info-tail').hide(); + $('#file-info-title').text("File information - " + path); + + var download_url = '/webhdfs/v1' + abs_path + '/?op=OPEN'; + + $('#file-info-download').attr('href', download_url); + $('#file-info-preview').click(function() { + var offset = d.fileLength - TAIL_CHUNK_SIZE; + var url = offset > 0 ? download_url + '&offset=' + offset : download_url; + $.get(url, function(t) { + $('#file-info-preview-body').val(t); + $('#file-info-tail').show(); + }, "text").error(network_error_handler(url)); + }); + + if (d.fileLength > 0) { + show_block_info(d.locatedBlocks); + $('#file-info-blockinfo-panel').show(); + } else { + $('#file-info-blockinfo-panel').hide(); + } + $('#file-info').modal(); + }).error(network_error_handler(url)); + } + + function browse_directory(dir) { + var url = '/webhdfs/v1' + dir + '?op=LISTSTATUS'; + $.get(url, function(data) { + var d = get_response(data, "FileStatuses"); + if (d === null) { + show_err_msg(get_response_err_msg(data)); + return; + } + + current_directory = dir; + $('#directory').val(dir); + dust.render('explorer', base.push(d), function(err, out) { + $('#panel').html(out); + + $('.explorer-browse-links').click(function() { + var type = $(this).attr('inode-type'); + var path = $(this).attr('inode-path'); + var abs_path = append_path(current_directory, path); + if (type == 'DIRECTORY') { + browse_directory(abs_path); + } else { + view_file_details(path, abs_path); + } + }); + }); + }).error(network_error_handler(url)); + } + + + function init() { + var templates = [ + { 'name': 'explorer', 'url': 'explorer.dust.html'}, + { 'name': 'block-info', 'url': 'explorer-block-info.dust.html'} + ]; + + load_templates(dust, templates, function () { + var b = function() { browse_directory($('#directory').val()); }; + $('#btn-nav-directory').click(b); + browse_directory('/'); + }, function (url, jqxhr, text, err) { + network_error_handler(url)(jqxhr, text, err); + }); + } + + init(); +})(); From 4380e480ce2aec3112458b0f1b838460796c67dc Mon Sep 17 00:00:00 2001 From: Brandon Li Date: Mon, 21 Oct 2013 21:15:20 +0000 Subject: [PATCH 06/11] HDFS-5347. Add HDFS NFS user guide. 
Contributed by Brandon Li

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1534377 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |   2 +
 .../src/site/apt/HdfsNfsGateway.apt.vm        | 258 ++++++++++++++++++
 hadoop-project/src/site/site.xml              |   1 +
 3 files changed, 261 insertions(+)
 create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 8944c37584e..2b121d2b6ea 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -422,6 +422,8 @@ Release 2.2.1 - UNRELEASED
     and range in error message. (Kousuke Saruta via suresh)

     HDFS-5365. Fix libhdfs compile error on FreeBSD9. (Radim Kolar via cnauroth)
+
+    HDFS-5347. Add HDFS NFS user guide. (brandonli)

 Release 2.2.0 - 2013-10-13

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm b/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm
new file mode 100644
index 00000000000..c8de842510d
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm
@@ -0,0 +1,258 @@
+
+~~ Licensed under the Apache License, Version 2.0 (the "License");
+~~ you may not use this file except in compliance with the License.
+~~ You may obtain a copy of the License at
+~~
+~~   http://www.apache.org/licenses/LICENSE-2.0
+~~
+~~ Unless required by applicable law or agreed to in writing, software
+~~ distributed under the License is distributed on an "AS IS" BASIS,
+~~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+~~ See the License for the specific language governing permissions and
+~~ limitations under the License. See accompanying LICENSE file.
+
+  ---
+  Hadoop Distributed File System-${project.version} - HDFS NFS Gateway
+  ---
+  ---
+  ${maven.build.timestamp}
+
+HDFS NFS Gateway
+
+  \[ {{{./index.html}Go Back}} \]
+
+%{toc|section=1|fromDepth=0}
+
+* {Overview}
+
+  The NFS Gateway supports NFSv3 and allows HDFS to be mounted as part of the
+  client's local file system. Currently the NFS Gateway supports and enables
+  the following usage patterns:
+
+  * Users can browse the HDFS file system through their local file system
+    on NFSv3 client compatible operating systems.
+
+  * Users can download files from the HDFS file system on to their
+    local file system.
+
+  * Users can upload files from their local file system directly to the
+    HDFS file system.
+
+  * Users can stream data directly to HDFS through the mount point. File
+    append is supported but random write is not supported.
+
+  The NFS gateway machine needs everything that is needed to run an HDFS
+  client, such as the Hadoop JAR files and a HADOOP_CONF directory.
+  The NFS gateway can be on the same host as a DataNode, the NameNode, or any
+  HDFS client.
+
+* {Configuration}
+
+  The NFS gateway can work with its default settings in most cases. However, it is
+  strongly recommended that users update a few configuration properties based on their use
+  cases. All the related configuration properties can be added or updated in hdfs-site.xml.
+
+  * If the client mounts the export with access time update allowed, make sure the following
+    property is not disabled in the configuration file. Only the NameNode needs to restart after
+    this property is changed. On some Unix systems, the user can disable access time update
+    by mounting the export with "noatime".
+
+----
+<property>
+  <name>dfs.access.time.precision</name>
+  <value>3600000</value>
+  <description>The access time for an HDFS file is precise up to this value.
+    The default value is 1 hour. Setting a value of 0 disables
+    access times for HDFS.
+  </description>
+</property>
+----
+
+  * Users are expected to update the file dump directory. The NFS client often
+    reorders writes, so sequential writes can arrive at the NFS gateway in random
+    order. This directory is used to temporarily save out-of-order writes
+    before writing to HDFS. For each file, the out-of-order writes are dumped after
+    they accumulate to exceed a certain threshold (e.g., 1MB) in memory.
+    One needs to make sure the directory has enough
+    space. For example, if the application uploads 10 files of
+    100MB each, it is recommended that this directory have roughly 1GB of space in case a
+    worst-case write reorder happens to every file. Only the NFS gateway needs to restart after
+    this property is updated.
+
+----
+<property>
+  <name>dfs.nfs3.dump.dir</name>
+  <value>/tmp/.hdfs-nfs</value>
+</property>
+----
+
+  * By default, the export can be mounted by any client. To better control access,
+    users can update the following property. The value string contains a machine name and
+    an access privilege, separated by whitespace characters. The machine name format can be
+    a single host, a wildcard, or an IPv4 network. The access privilege uses rw or ro to
+    specify read-write or read-only access to the export. If the access
+    privilege is not provided, the default is read-only. Entries are separated by ";".
+    For example: "192.168.0.0/22 rw ; host*.example.com ; host1.test.org ro;".
+    Only the NFS gateway needs to restart after this property is updated.
+
+----
+<property>
+  <name>dfs.nfs.exports.allowed.hosts</name>
+  <value>* rw</value>
+</property>
+----
+
+  * Customize log settings. To get an NFS debug trace, users can edit the log4j.properties file
+    to add the following. Note that a debug trace, especially for ONCRPC, can be very verbose.
+
+    To change the logging level:
+
+-----------------------------------------------
+    log4j.logger.org.apache.hadoop.hdfs.nfs=DEBUG
+-----------------------------------------------
+
+    To get more details of ONCRPC requests:
+
+-----------------------------------------------
+    log4j.logger.org.apache.hadoop.oncrpc=DEBUG
+-----------------------------------------------
+
+* {Start and stop NFS gateway service}
+
+  Three daemons are required to provide NFS service: rpcbind (or portmap), mountd and nfsd.
+  The NFS gateway process has both nfsd and mountd. It shares the HDFS root "/" as the
+  only export. It is recommended to use the portmap included in the NFS gateway package. Even
+  though the NFS gateway works with the portmap/rpcbind provided by most Linux distributions, the
+  package-included portmap is needed on some Linux systems such as RHEL 6.2 due to an
+  {{{https://bugzilla.redhat.com/show_bug.cgi?id=731542}rpcbind bug}}. More detailed discussion can
+  be found in {{{https://issues.apache.org/jira/browse/HDFS-4763}HDFS-4763}}.
+
+  [[1]] Stop the nfs/rpcbind/portmap services provided by the platform (commands can differ across Unix platforms):
+
+-------------------------
+     service nfs stop
+
+     service rpcbind stop
+-------------------------
+
+  [[2]] Start the package-included portmap (needs root privileges):
+
+-------------------------
+     hadoop portmap
+
+     OR
+
+     hadoop-daemon.sh start portmap
+-------------------------
+
+  [[3]] Start mountd and nfsd.
+
+    No root privileges are required for this command. However, ensure that the user starting
+    the Hadoop cluster and the user starting the NFS gateway are the same.
+
+-------------------------
+     hadoop nfs3
+
+     OR
+
+     hadoop-daemon.sh start nfs3
+-------------------------
+
+    Note that if the hadoop-daemon.sh script starts the NFS gateway, its log can be found in the hadoop log folder.
+
+  [[4]] Stop the NFS gateway services.
+
+-------------------------
+     hadoop-daemon.sh stop nfs3
+
+     hadoop-daemon.sh stop portmap
+-------------------------
+
+* {Verify validity of NFS related services}
+
+  [[1]] Execute the following command to verify that all the services are up and running:
+
+-------------------------
+       rpcinfo -p $nfs_server_ip
+-------------------------
+
+    You should see output similar to the following:
+
+-------------------------
+       program vers proto   port
+
+       100005    1   tcp   4242  mountd
+
+       100005    2   udp   4242  mountd
+
+       100005    2   tcp   4242  mountd
+
+       100000    2   tcp    111  portmapper
+
+       100000    2   udp    111  portmapper
+
+       100005    3   udp   4242  mountd
+
+       100005    1   udp   4242  mountd
+
+       100003    3   tcp   2049  nfs
+
+       100005    3   tcp   4242  mountd
+-------------------------
+
+  [[2]] Verify that the HDFS namespace is exported and can be mounted.
+
+-------------------------
+        showmount -e $nfs_server_ip
+-------------------------
+
+    You should see output similar to the following:
+
+-------------------------
+        Exports list on $nfs_server_ip :
+
+        / (everyone)
+-------------------------
+
+* {Mount the export “/”}
+
+  Currently NFS v3 only uses TCP as the transport protocol.
+  NLM is not supported, so the mount option "nolock" is needed. It is recommended to use a
+  hard mount. This is because, even after the client sends all data to
+  the NFS gateway, it may take the NFS gateway some extra time to transfer the data to HDFS
+  when writes are reordered by the NFS client kernel.
+
+  If a soft mount has to be used, the user should give it a relatively
+  long timeout (at least no less than the default timeout on the host).
+
+  Users can mount the HDFS namespace as shown below:
+
+-------------------------------------------------------------------
+       mount -t nfs -o vers=3,proto=tcp,nolock $server:/  $mount_point
+-------------------------------------------------------------------
+
+  Then users can access HDFS as part of the local file system, except that
+  hard links and random writes are not supported yet.
+
+* {User authentication and mapping}
+
+  The NFS gateway in this release uses AUTH_UNIX style authentication. When a user on the NFS client
+  accesses the mount point, the NFS client passes the UID to the NFS gateway.
+  The NFS gateway does a lookup to find the user name from the UID, and then passes the
+  username to HDFS along with the HDFS requests.
+  For example, if the NFS client's current user is "admin", when the user accesses
+  the mounted directory, the NFS gateway will access HDFS as user "admin". To access HDFS
+  as the user "hdfs", one needs to switch the current user to "hdfs" on the client system
+  when accessing the mounted directory.
+
+  The system administrator must ensure that the user on the NFS client host has the same
+  name and UID as that on the NFS gateway host. This is usually not a problem if
+  the same user management system (e.g., LDAP/NIS) is used to create and deploy users on
+  the HDFS nodes and the NFS client node. In case a user account is created manually on
+  different hosts, one might need to modify the UID (e.g., by doing "usermod -u 123 myusername")
+  on either the NFS client or the NFS gateway host in order to make it the same on both sides.
+  More technical details of RPC AUTH_UNIX can be found in the
+  {{{http://tools.ietf.org/html/rfc1057}RPC specification}}.
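As an editorial supplement to the mount command above, the same options can be made persistent in /etc/fstab; this sketch is not part of the guide itself, and the gateway host and mount point are placeholders:

----
<nfs_gateway_host>:/   /mnt/hdfs   nfs   vers=3,proto=tcp,nolock   0   0
----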
+ diff --git a/hadoop-project/src/site/site.xml b/hadoop-project/src/site/site.xml index f6496b85f5e..1399e393f0d 100644 --- a/hadoop-project/src/site/site.xml +++ b/hadoop-project/src/site/site.xml @@ -80,6 +80,7 @@ + From df87ed34f2b18b42cfbc8253bd28431063654e19 Mon Sep 17 00:00:00 2001 From: Tsz-wo Sze Date: Tue, 22 Oct 2013 00:20:39 +0000 Subject: [PATCH 07/11] HDFS-4885. Improve the verifyBlockPlacement() API in BlockPlacementPolicy. Contributed by Junping Du git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1534426 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + .../blockmanagement/BlockPlacementPolicy.java | 15 ++-- .../BlockPlacementPolicyDefault.java | 12 +-- .../blockmanagement/BlockPlacementStatus.java | 42 +++++++++++ .../BlockPlacementStatusDefault.java | 44 +++++++++++ .../hdfs/server/namenode/NamenodeFsck.java | 12 +-- .../hadoop/hdfs/server/namenode/TestFsck.java | 75 ++++++++++++++++++- 7 files changed, 182 insertions(+), 21 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementStatus.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementStatusDefault.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 2b121d2b6ea..1eb89a5baee 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -332,6 +332,9 @@ Release 2.3.0 - UNRELEASED HDFS-4511. Cover package org.apache.hadoop.hdfs.tools with unit test (Andrey Klochkov via jeagles) + HDFS-4885. Improve the verifyBlockPlacement() API in BlockPlacementPolicy. + (Junping Du via szetszwo) + OPTIMIZATIONS HDFS-5239. Allow FSNamesystem lock fairness to be configurable (daryn) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java index 48f77298dbb..ecf69c89547 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java @@ -95,18 +95,17 @@ DatanodeDescriptor[] chooseTarget(String src, } /** - * Verify that the block is replicated on at least minRacks different racks - * if there is more than minRacks rack in the system. + * Verify if the block's placement meets requirement of placement policy, + * i.e. replicas are placed on no less than minRacks racks in the system. * * @param srcPath the full pathname of the file to be verified * @param lBlk block with locations - * @param minRacks number of racks the block should be replicated to - * @return the difference between the required and the actual number of racks - * the block is replicated to. + * @param numOfReplicas replica number of file to be verified + * @return the result of verification */ - abstract public int verifyBlockPlacement(String srcPath, - LocatedBlock lBlk, - int minRacks); + abstract public BlockPlacementStatus verifyBlockPlacement(String srcPath, + LocatedBlock lBlk, + int numOfReplicas); /** * Decide whether deleting the specified replica of the block still makes * the block conform to the configured block placement policy. 
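The new return type is consumed as shown in the NamenodeFsck hunk further down. A minimal sketch of the call pattern, assuming conf, networktopology, path, lBlk, targetFileReplication, and LOG are in scope as they are in that class:

----
// Sketch: verify one block's placement and report a violation.
BlockPlacementStatus status =
    BlockPlacementPolicy.getInstance(conf, null, networktopology)
        .verifyBlockPlacement(path, lBlk, targetFileReplication);
if (!status.isPlacementPolicySatisfied()) {
  // e.g. "Block should be additionally replicated on 1 more rack(s)."
  LOG.warn(status.getErrorDescription());
}
----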
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java index 7fb4cf01f7d..bbe3f3a5ecf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java @@ -655,22 +655,22 @@ private DatanodeDescriptor[] getPipeline(Node writer, } @Override - public int verifyBlockPlacement(String srcPath, - LocatedBlock lBlk, - int minRacks) { + public BlockPlacementStatus verifyBlockPlacement(String srcPath, + LocatedBlock lBlk, int numberOfReplicas) { DatanodeInfo[] locs = lBlk.getLocations(); if (locs == null) locs = DatanodeDescriptor.EMPTY_ARRAY; int numRacks = clusterMap.getNumOfRacks(); if(numRacks <= 1) // only one rack - return 0; - minRacks = Math.min(minRacks, numRacks); + return new BlockPlacementStatusDefault( + Math.min(numRacks, numberOfReplicas), numRacks); + int minRacks = Math.min(2, numberOfReplicas); // 1. Check that all locations are different. // 2. Count locations on different racks. Set racks = new TreeSet(); for (DatanodeInfo dn : locs) racks.add(dn.getNetworkLocation()); - return minRacks - racks.size(); + return new BlockPlacementStatusDefault(racks.size(), minRacks); } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementStatus.java new file mode 100644 index 00000000000..e2ac54a3537 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementStatus.java @@ -0,0 +1,42 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hdfs.server.blockmanagement; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +@InterfaceAudience.Private +@InterfaceStability.Evolving +public interface BlockPlacementStatus { + + /** + * Boolean value to identify if replicas of this block satisfy requirement of + * placement policy + * @return if replicas satisfy placement policy's requirement + */ + public boolean isPlacementPolicySatisfied(); + + /** + * Get description info for log or printed in case replicas are failed to meet + * requirement of placement policy + * @return description in case replicas are failed to meet requirement of + * placement policy + */ + public String getErrorDescription(); + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementStatusDefault.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementStatusDefault.java new file mode 100644 index 00000000000..0b8b9659601 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementStatusDefault.java @@ -0,0 +1,44 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hdfs.server.blockmanagement; + +public class BlockPlacementStatusDefault implements BlockPlacementStatus { + + private int requiredRacks = 0; + private int currentRacks = 0; + + public BlockPlacementStatusDefault(int currentRacks, int requiredRacks){ + this.requiredRacks = requiredRacks; + this.currentRacks = currentRacks; + } + + @Override + public boolean isPlacementPolicySatisfied() { + return requiredRacks <= currentRacks; + } + + @Override + public String getErrorDescription() { + if (isPlacementPolicySatisfied()) { + return null; + } + return "Block should be additionally replicated on " + + (requiredRacks - currentRacks) + " more rack(s)."; + } + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java index 14582dccd09..b933387a31b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java @@ -50,6 +50,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementStatus; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.datanode.CachingStrategy; import org.apache.hadoop.net.NetUtils; @@ -374,9 +375,10 @@ void check(String parent, HdfsFileStatus file, Result res) throws IOException { locs.length + " replica(s)."); } // verify block placement policy - int missingRacks = BlockPlacementPolicy.getInstance(conf, null, networktopology). - verifyBlockPlacement(path, lBlk, Math.min(2,targetFileReplication)); - if (missingRacks > 0) { + BlockPlacementStatus blockPlacementStatus = + BlockPlacementPolicy.getInstance(conf, null, networktopology). + verifyBlockPlacement(path, lBlk, targetFileReplication); + if (!blockPlacementStatus.isPlacementPolicySatisfied()) { res.numMisReplicatedBlocks++; misReplicatedPerFile++; if (!showFiles) { @@ -385,9 +387,7 @@ void check(String parent, HdfsFileStatus file, Result res) throws IOException { out.print(path + ": "); } out.println(" Replica placement policy is violated for " + - block + - ". Block should be additionally replicated on " + - missingRacks + " more rack(s)."); + block + ". " + blockPlacementStatus.getErrorDescription()); } report.append(i + ". 
" + blkName + " len=" + block.getNumBytes()); if (locs.length == 0) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java index 9aaeb74a1c3..bcebce4e201 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java @@ -83,7 +83,6 @@ import org.junit.Test; import com.google.common.collect.Sets; -import org.mockito.Mockito; import static org.mockito.Mockito.*; /** @@ -892,6 +891,80 @@ public void testFsckMissingReplicas() throws IOException { } } } + + /** + * Tests that the # of misreplaced replicas is correct + * @throws IOException + */ + @Test + public void testFsckMisPlacedReplicas() throws IOException { + // Desired replication factor + final short REPL_FACTOR = 2; + // Number of replicas to actually start + short NUM_DN = 2; + // Number of blocks to write + final short NUM_BLOCKS = 3; + // Set a small-ish blocksize + final long blockSize = 512; + + String [] racks = {"/rack1", "/rack1"}; + String [] hosts = {"host1", "host2"}; + + Configuration conf = new Configuration(); + conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize); + + MiniDFSCluster cluster = null; + DistributedFileSystem dfs = null; + + try { + // Startup a minicluster + cluster = + new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DN).hosts(hosts) + .racks(racks).build(); + assertNotNull("Failed Cluster Creation", cluster); + cluster.waitClusterUp(); + dfs = (DistributedFileSystem) cluster.getFileSystem(); + assertNotNull("Failed to get FileSystem", dfs); + + // Create a file that will be intentionally under-replicated + final String pathString = new String("/testfile"); + final Path path = new Path(pathString); + long fileLen = blockSize * NUM_BLOCKS; + DFSTestUtil.createFile(dfs, path, fileLen, REPL_FACTOR, 1); + + // Create an under-replicated file + NameNode namenode = cluster.getNameNode(); + NetworkTopology nettop = cluster.getNamesystem().getBlockManager() + .getDatanodeManager().getNetworkTopology(); + // Add a new node on different rack, so previous blocks' replicas + // are considered to be misplaced + nettop.add(DFSTestUtil.getDatanodeDescriptor("/rack2", "/host3")); + NUM_DN++; + + Map pmap = new HashMap(); + Writer result = new StringWriter(); + PrintWriter out = new PrintWriter(result, true); + InetAddress remoteAddress = InetAddress.getLocalHost(); + NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out, + NUM_DN, (short)REPL_FACTOR, remoteAddress); + + // Run the fsck and check the Result + final HdfsFileStatus file = + namenode.getRpcServer().getFileInfo(pathString); + assertNotNull(file); + Result res = new Result(conf); + fsck.check(pathString, file, res); + // check misReplicatedBlock number. + assertEquals(res.numMisReplicatedBlocks, NUM_BLOCKS); + } finally { + if(dfs != null) { + dfs.close(); + } + if(cluster != null) { + cluster.shutdown(); + } + } + } /** Test fsck with FileNotFound */ @Test From 8ad510bb93790fdb7ea8ff16aa6dcff44e4f3bea Mon Sep 17 00:00:00 2001 From: Jonathan Turner Eagles Date: Tue, 22 Oct 2013 03:41:49 +0000 Subject: [PATCH 08/11] HADOOP-9291. enhance unit-test coverage of package o.a.h.metrics2 (Ivan A. 
Veselovsky via jeagles) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1534474 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 3 + .../filter/AbstractPatternFilter.java | 2 +- .../apache/hadoop/metrics2/package-info.java | 2 +- .../metrics2/filter/TestPatternFilter.java | 63 ++++++-- .../hadoop/metrics2/sink/TestFileSink.java | 138 ++++++++++++++++++ 5 files changed, 196 insertions(+), 12 deletions(-) create mode 100644 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/sink/TestFileSink.java diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 19e0c441964..204715631af 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -366,6 +366,9 @@ Release 2.3.0 - UNRELEASED HDFS-5276. FileSystem.Statistics should use thread-local counters to avoid multi-threaded performance issues on read/write. (Colin Patrick McCabe) + HADOOP-9291. enhance unit-test coverage of package o.a.h.metrics2 (Ivan A. + Veselovsky via jeagles) + OPTIMIZATIONS HADOOP-9748. Reduce blocking on UGI.ensureInitialized (daryn) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/filter/AbstractPatternFilter.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/filter/AbstractPatternFilter.java index 1f779735a59..07b50ab9774 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/filter/AbstractPatternFilter.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/filter/AbstractPatternFilter.java @@ -112,7 +112,7 @@ public boolean accepts(MetricsTag tag) { return false; } // Reject if no match in whitelist only mode - if (ipat != null && epat == null) { + if (!includeTagPatterns.isEmpty() && excludeTagPatterns.isEmpty()) { return false; } return true; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/package-info.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/package-info.java index 2f787d04492..be2149977cd 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/package-info.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/package-info.java @@ -234,7 +234,7 @@ identify a particular sink instance. The asterisk (*) can be patterns.

Similarly, you can specify the record.filter and - metrics.filter options, which operate at record and metric + metric.filter options, which operate at record and metric level, respectively. Filters can be combined to optimize the filtering efficiency.
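A hedged hadoop-metrics2.properties sketch of these options; the "test" prefix, instance names, and patterns are made up for illustration:

----
# Glob-style filtering; org.apache.hadoop.metrics2.filter.RegexFilter is
# the regular-expression alternative.
*.source.filter.class=org.apache.hadoop.metrics2.filter.GlobFilter
*.record.filter.class=org.apache.hadoop.metrics2.filter.GlobFilter
*.metric.filter.class=org.apache.hadoop.metrics2.filter.GlobFilter
test.*.source.filter.include=foo*
# record.filter and metric.filter follow the same key convention:
test.*.record.filter.exclude=bar*
test.*.metric.filter.include=qux
----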

diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/filter/TestPatternFilter.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/filter/TestPatternFilter.java index 2bdfdb978a9..a8f38d6136b 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/filter/TestPatternFilter.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/filter/TestPatternFilter.java @@ -23,9 +23,11 @@ import org.apache.commons.configuration.SubsetConfiguration; import org.junit.Test; + import static org.junit.Assert.*; import static org.mockito.Mockito.*; +import org.apache.hadoop.metrics2.MetricsFilter; import org.apache.hadoop.metrics2.MetricsRecord; import org.apache.hadoop.metrics2.MetricsTag; import org.apache.hadoop.metrics2.impl.ConfigBuilder; @@ -53,7 +55,7 @@ public class TestPatternFilter { .add("p.include.tags", "foo:f").subset("p"); shouldAccept(wl, "foo"); shouldAccept(wl, Arrays.asList(tag("bar", "", ""), - tag("foo", "", "f"))); + tag("foo", "", "f")), new boolean[] {false, true}); shouldAccept(wl, mockMetricsRecord("foo", Arrays.asList( tag("bar", "", ""), tag("foo", "", "f")))); shouldReject(wl, "bar"); @@ -78,7 +80,7 @@ public class TestPatternFilter { tag("bar", "", "")))); shouldReject(bl, "foo"); shouldReject(bl, Arrays.asList(tag("bar", "", ""), - tag("foo", "", "f"))); + tag("foo", "", "f")), new boolean[] {true, false}); shouldReject(bl, mockMetricsRecord("foo", Arrays.asList( tag("bar", "", "")))); shouldReject(bl, mockMetricsRecord("bar", Arrays.asList( @@ -125,15 +127,61 @@ public class TestPatternFilter { shouldAccept(c, mockMetricsRecord("foo", Arrays.asList( tag("foo", "", "f")))); } - + static void shouldAccept(SubsetConfiguration conf, String s) { assertTrue("accepts "+ s, newGlobFilter(conf).accepts(s)); assertTrue("accepts "+ s, newRegexFilter(conf).accepts(s)); } + // Version for one tag: static void shouldAccept(SubsetConfiguration conf, List tags) { - assertTrue("accepts "+ tags, newGlobFilter(conf).accepts(tags)); - assertTrue("accepts "+ tags, newRegexFilter(conf).accepts(tags)); + shouldAcceptImpl(true, conf, tags, new boolean[] {true}); + } + // Version for multiple tags: + static void shouldAccept(SubsetConfiguration conf, List tags, + boolean[] expectedAcceptedSpec) { + shouldAcceptImpl(true, conf, tags, expectedAcceptedSpec); + } + + // Version for one tag: + static void shouldReject(SubsetConfiguration conf, List tags) { + shouldAcceptImpl(false, conf, tags, new boolean[] {false}); + } + // Version for multiple tags: + static void shouldReject(SubsetConfiguration conf, List tags, + boolean[] expectedAcceptedSpec) { + shouldAcceptImpl(false, conf, tags, expectedAcceptedSpec); + } + + private static void shouldAcceptImpl(final boolean expectAcceptList, + SubsetConfiguration conf, List tags, boolean[] expectedAcceptedSpec) { + final MetricsFilter globFilter = newGlobFilter(conf); + final MetricsFilter regexFilter = newRegexFilter(conf); + + // Test acceptance of the tag list: + assertEquals("accepts "+ tags, expectAcceptList, globFilter.accepts(tags)); + assertEquals("accepts "+ tags, expectAcceptList, regexFilter.accepts(tags)); + + // Test results on each of the individual tags: + int acceptedCount = 0; + for (int i=0; i 0); + } else { + // At least one individual tag should be rejected: + assertTrue("No tag of the following rejected: " + tags, acceptedCount < tags.size()); + } } /** @@ -152,11 +200,6 @@ static void 
shouldReject(SubsetConfiguration conf, String s) { assertTrue("rejects "+ s, !newRegexFilter(conf).accepts(s)); } - static void shouldReject(SubsetConfiguration conf, List tags) { - assertTrue("rejects "+ tags, !newGlobFilter(conf).accepts(tags)); - assertTrue("rejects "+ tags, !newRegexFilter(conf).accepts(tags)); - } - /** * Asserts that filters with the given configuration reject the given record. * diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/sink/TestFileSink.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/sink/TestFileSink.java new file mode 100644 index 00000000000..8c918b8431b --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/sink/TestFileSink.java @@ -0,0 +1,138 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.metrics2.sink; + +import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.util.regex.Pattern; + +import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.metrics2.MetricsSystem; +import org.apache.hadoop.metrics2.annotation.Metric; +import org.apache.hadoop.metrics2.annotation.Metrics; +import org.apache.hadoop.metrics2.annotation.Metric.Type; +import org.apache.hadoop.metrics2.impl.ConfigBuilder; +import org.apache.hadoop.metrics2.impl.MetricsSystemImpl; +import org.apache.hadoop.metrics2.impl.TestMetricsConfig; +import org.apache.hadoop.metrics2.lib.MutableGaugeInt; +import org.junit.After; +import org.junit.Test; +import static org.junit.Assert.*; + +public class TestFileSink { + + private File outFile; + + // The 2 sample metric classes: + @Metrics(name="testRecord1", context="test1") + static class MyMetrics1 { + @Metric(value={"testTag1", ""}, type=Type.TAG) + String testTag1() { return "testTagValue1"; } + + @Metric(value={"testTag2", ""}, type=Type.TAG) + String gettestTag2() { return "testTagValue2"; } + + @Metric(value={"testMetric1", "An integer gauge"},always=true) + MutableGaugeInt testMetric1; + + @Metric(value={"testMetric2", "An integer gauge"},always=true) + MutableGaugeInt testMetric2; + + public MyMetrics1 registerWith(MetricsSystem ms) { + return ms.register("m1", null, this); + } + } + + @Metrics(name="testRecord2", context="test1") + static class MyMetrics2 { + @Metric(value={"testTag22", ""}, type=Type.TAG) + String testTag1() { return "testTagValue22"; } + + public MyMetrics2 registerWith(MetricsSystem ms) { + return ms.register("m2", null, this); + } + } + + private File getTestTempFile(String prefix, String suffix) throws IOException { + String tmpPath = System.getProperty("java.io.tmpdir", "/tmp"); + String user = 
System.getProperty("user.name", "unknown-user"); + File dir = new File(tmpPath + "/" + user); + dir.mkdirs(); + return File.createTempFile(prefix, suffix, dir); + } + + @Test(timeout=6000) + public void testFileSink() throws IOException { + outFile = getTestTempFile("test-file-sink-", ".out"); + final String outPath = outFile.getAbsolutePath(); + + // NB: specify large period to avoid multiple metrics snapshotting: + new ConfigBuilder().add("*.period", 10000) + .add("test.sink.mysink0.class", FileSink.class.getName()) + .add("test.sink.mysink0.filename", outPath) + // NB: we filter by context to exclude "metricssystem" context metrics: + .add("test.sink.mysink0.context", "test1") + .save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test")); + MetricsSystemImpl ms = new MetricsSystemImpl("test"); + ms.start(); + + final MyMetrics1 mm1 + = new MyMetrics1().registerWith(ms); + new MyMetrics2().registerWith(ms); + + mm1.testMetric1.incr(); + mm1.testMetric2.incr(2); + + ms.publishMetricsNow(); // publish the metrics + ms.stop(); + ms.shutdown(); + + InputStream is = new FileInputStream(outFile); + ByteArrayOutputStream baos = new ByteArrayOutputStream((int)outFile.length()); + IOUtils.copyBytes(is, baos, 1024, true); + String outFileContent = new String(baos.toByteArray(), "UTF-8"); + + // Check the out file content. Should be something like the following: + //1360244820087 test1.testRecord1: Context=test1, testTag1=testTagValue1, testTag2=testTagValue2, Hostname=myhost, testMetric1=1, testMetric2=2 + //1360244820089 test1.testRecord2: Context=test1, testTag22=testTagValue22, Hostname=myhost + + // Note that in the below expression we allow tags and metrics to go in arbitrary order. + Pattern expectedContentPattern = Pattern.compile( + // line #1: + "^\\d+\\s+test1.testRecord1:\\s+Context=test1,\\s+" + + "(testTag1=testTagValue1,\\s+testTag2=testTagValue2|testTag2=testTagValue2,\\s+testTag1=testTagValue1)," + + "\\s+Hostname=.*,\\s+(testMetric1=1,\\s+testMetric2=2|testMetric2=2,\\s+testMetric1=1)" + + // line #2: + "$[\\n\\r]*^\\d+\\s+test1.testRecord2:\\s+Context=test1," + + "\\s+testTag22=testTagValue22,\\s+Hostname=.*$[\\n\\r]*", + Pattern.MULTILINE); + assertTrue(expectedContentPattern.matcher(outFileContent).matches()); + } + + @After + public void after() { + if (outFile != null) { + outFile.delete(); + assertTrue(!outFile.exists()); + } + } +} From 725e0133596919cffed7263c475ff0c29d1591cd Mon Sep 17 00:00:00 2001 From: Chris Nauroth Date: Tue, 22 Oct 2013 05:38:42 +0000 Subject: [PATCH 09/11] YARN-1331. yarn.cmd exits with NoClassDefFoundError trying to run rmadmin or logs. Contributed by Chris Nauroth. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1534499 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 3 +++ hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd | 5 ++--- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 58ff240e4a2..f69ee6bf57b 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -125,6 +125,9 @@ Release 2.2.1 - UNRELEASED YARN-1185. Fixed FileSystemRMStateStore to not leave partial files that prevent subsequent ResourceManager recovery. (Omkar Vinit Joshi via vinodkv) + YARN-1331. yarn.cmd exits with NoClassDefFoundError trying to run rmadmin or + logs. 
(cnauroth) + + Release 2.2.0 - 2013-10-13 INCOMPATIBLE CHANGES diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd b/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd index 955df46245b..bf52c63b6c8 100644 --- a/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd +++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd @@ -149,7 +149,7 @@ goto :eof goto :eof :rmadmin - set CLASS=org.apache.hadoop.yarn.server.resourcemanager.tools.RMAdmin + set CLASS=org.apache.hadoop.yarn.client.cli.RMAdminCLI set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS% goto :eof @@ -200,7 +200,7 @@ goto :eof goto :eof :logs - set CLASS=org.apache.hadoop.yarn.logaggregation.LogDumper + set CLASS=org.apache.hadoop.yarn.client.cli.LogsCLI set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS% goto :eof @@ -237,7 +237,6 @@ goto :eof @echo where COMMAND is one of: @echo resourcemanager run the ResourceManager @echo nodemanager run a nodemanager on each slave - @echo historyserver run job history servers as a standalone daemon @echo rmadmin admin tools @echo version print the version @echo jar ^<jar^> run a jar file From 755def8fe9bc3174154a03d60ef6303ee209a5e2 Mon Sep 17 00:00:00 2001 From: Sanford Ryza Date: Tue, 22 Oct 2013 06:22:46 +0000 Subject: [PATCH 10/11] YARN-1315. TestQueueACLs should also test FairScheduler (Sandy Ryza) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1534508 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 2 + .../fair/FairSchedulerConfiguration.java | 6 +- ...tQueueACLs.java => QueueACLsTestBase.java} | 90 ++++++------------- .../TestCapacitySchedulerQueueACLs.java | 73 +++++++++++++++ .../fair/TestFairSchedulerQueueACLs.java | 62 +++++++++++++ 5 files changed, 168 insertions(+), 65 deletions(-) rename hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/{TestQueueACLs.java => QueueACLsTestBase.java} (74%) create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerQueueACLs.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerQueueACLs.java diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index f69ee6bf57b..e1b7dca708e 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -97,6 +97,8 @@ Release 2.2.1 - UNRELEASED YARN-1288. Make Fair Scheduler ACLs more user friendly (Sandy Ryza) + YARN-1315.
TestQueueACLs should also test FairScheduler (Sandy Ryza) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java index b76d8eb8d75..955b102fee4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java @@ -50,7 +50,7 @@ public class FairSchedulerConfiguration extends Configuration { private static final String CONF_PREFIX = "yarn.scheduler.fair."; - protected static final String ALLOCATION_FILE = CONF_PREFIX + "allocation.file"; + public static final String ALLOCATION_FILE = CONF_PREFIX + "allocation.file"; protected static final String DEFAULT_ALLOCATION_FILE = "fair-scheduler.xml"; protected static final String EVENT_LOG_DIR = "eventlog.dir"; @@ -113,6 +113,10 @@ public class FairSchedulerConfiguration extends Configuration { protected static final String MAX_ASSIGN = CONF_PREFIX + "max.assign"; protected static final int DEFAULT_MAX_ASSIGN = -1; + public FairSchedulerConfiguration() { + super(); + } + public FairSchedulerConfiguration(Configuration conf) { super(conf); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestQueueACLs.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/QueueACLsTestBase.java similarity index 74% rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestQueueACLs.java rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/QueueACLsTestBase.java index 9714b4c7ce3..4760dba4455 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestQueueACLs.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/QueueACLsTestBase.java @@ -42,41 +42,43 @@ import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; -import org.apache.hadoop.yarn.api.records.QueueACL; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.ipc.YarnRPC; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration; import org.apache.hadoop.yarn.server.utils.BuilderUtils; -import org.junit.AfterClass; 
-import org.junit.BeforeClass; +import org.junit.After; +import org.junit.Before; import org.junit.Test; -public class TestQueueACLs { +public abstract class QueueACLsTestBase { - private static final String COMMON_USER = "common_user"; - private static final String QUEUE_A_USER = "queueA_user"; - private static final String QUEUE_B_USER = "queueB_user"; - private static final String ROOT_ADMIN = "root_admin"; - private static final String QUEUE_A_ADMIN = "queueA_admin"; - private static final String QUEUE_B_ADMIN = "queueB_admin"; + protected static final String COMMON_USER = "common_user"; + protected static final String QUEUE_A_USER = "queueA_user"; + protected static final String QUEUE_B_USER = "queueB_user"; + protected static final String ROOT_ADMIN = "root_admin"; + protected static final String QUEUE_A_ADMIN = "queueA_admin"; + protected static final String QUEUE_B_ADMIN = "queueB_admin"; - private static final String QUEUEA = "queueA"; - private static final String QUEUEB = "queueB"; + protected static final String QUEUEA = "queueA"; + protected static final String QUEUEB = "queueB"; private static final Log LOG = LogFactory.getLog(TestApplicationACLs.class); - static MockRM resourceManager; - static Configuration conf = createConfiguration(); - final static YarnRPC rpc = YarnRPC.create(conf); - final static InetSocketAddress rmAddress = conf.getSocketAddr( - YarnConfiguration.RM_ADDRESS, YarnConfiguration.DEFAULT_RM_ADDRESS, - YarnConfiguration.DEFAULT_RM_PORT); + MockRM resourceManager; + Configuration conf; + YarnRPC rpc; + InetSocketAddress rmAddress; - @BeforeClass - public static void setup() throws InterruptedException, IOException { + @Before + public void setup() throws InterruptedException, IOException { + conf = createConfiguration(); + rpc = YarnRPC.create(conf); + rmAddress = conf.getSocketAddr( + YarnConfiguration.RM_ADDRESS, YarnConfiguration.DEFAULT_RM_ADDRESS, + YarnConfiguration.DEFAULT_RM_PORT); + AccessControlList adminACL = new AccessControlList(""); conf.set(YarnConfiguration.YARN_ADMIN_ACL, adminACL.getAclString()); @@ -109,8 +111,8 @@ public void run() { } } - @AfterClass - public static void tearDown() { + @After + public void tearDown() { if (resourceManager != null) { resourceManager.stop(); } @@ -262,45 +264,5 @@ public ApplicationClientProtocol run() throws Exception { return userClient; } - private static YarnConfiguration createConfiguration() { - CapacitySchedulerConfiguration csConf = - new CapacitySchedulerConfiguration(); - csConf.setQueues(CapacitySchedulerConfiguration.ROOT, new String[] { - QUEUEA, QUEUEB }); - - csConf.setCapacity(CapacitySchedulerConfiguration.ROOT + "." + QUEUEA, 50f); - csConf.setCapacity(CapacitySchedulerConfiguration.ROOT + "." + QUEUEB, 50f); - - Map<QueueACL, AccessControlList> aclsOnQueueA = - new HashMap<QueueACL, AccessControlList>(); - AccessControlList submitACLonQueueA = new AccessControlList(QUEUE_A_USER); - submitACLonQueueA.addUser(COMMON_USER); - AccessControlList adminACLonQueueA = new AccessControlList(QUEUE_A_ADMIN); - aclsOnQueueA.put(QueueACL.SUBMIT_APPLICATIONS, submitACLonQueueA); - aclsOnQueueA.put(QueueACL.ADMINISTER_QUEUE, adminACLonQueueA); - csConf.setAcls(CapacitySchedulerConfiguration.ROOT + "."
+ QUEUEA, - aclsOnQueueA); - - Map<QueueACL, AccessControlList> aclsOnQueueB = - new HashMap<QueueACL, AccessControlList>(); - AccessControlList submitACLonQueueB = new AccessControlList(QUEUE_B_USER); - submitACLonQueueB.addUser(COMMON_USER); - AccessControlList adminACLonQueueB = new AccessControlList(QUEUE_B_ADMIN); - aclsOnQueueB.put(QueueACL.SUBMIT_APPLICATIONS, submitACLonQueueB); - aclsOnQueueB.put(QueueACL.ADMINISTER_QUEUE, adminACLonQueueB); - csConf.setAcls(CapacitySchedulerConfiguration.ROOT + "." + QUEUEB, - aclsOnQueueB); - - Map<QueueACL, AccessControlList> aclsOnRootQueue = - new HashMap<QueueACL, AccessControlList>(); - AccessControlList submitACLonRoot = new AccessControlList(""); - AccessControlList adminACLonRoot = new AccessControlList(ROOT_ADMIN); - aclsOnRootQueue.put(QueueACL.SUBMIT_APPLICATIONS, submitACLonRoot); - aclsOnRootQueue.put(QueueACL.ADMINISTER_QUEUE, adminACLonRoot); - csConf.setAcls(CapacitySchedulerConfiguration.ROOT, aclsOnRootQueue); - - YarnConfiguration conf = new YarnConfiguration(csConf); - conf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true); - return conf; - } + protected abstract Configuration createConfiguration() throws IOException; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerQueueACLs.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerQueueACLs.java new file mode 100644 index 00000000000..14ea21b89f8 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerQueueACLs.java @@ -0,0 +1,73 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ +package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; + +import java.util.HashMap; +import java.util.Map; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.security.authorize.AccessControlList; +import org.apache.hadoop.yarn.api.records.QueueACL; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.server.resourcemanager.QueueACLsTestBase; + +public class TestCapacitySchedulerQueueACLs extends QueueACLsTestBase { + @Override + protected Configuration createConfiguration() { + CapacitySchedulerConfiguration csConf = + new CapacitySchedulerConfiguration(); + csConf.setQueues(CapacitySchedulerConfiguration.ROOT, new String[] { + QUEUEA, QUEUEB }); + + csConf.setCapacity(CapacitySchedulerConfiguration.ROOT + "." + QUEUEA, 50f); + csConf.setCapacity(CapacitySchedulerConfiguration.ROOT + "."
+ QUEUEB, 50f); + + Map<QueueACL, AccessControlList> aclsOnQueueA = + new HashMap<QueueACL, AccessControlList>(); + AccessControlList submitACLonQueueA = new AccessControlList(QUEUE_A_USER); + submitACLonQueueA.addUser(COMMON_USER); + AccessControlList adminACLonQueueA = new AccessControlList(QUEUE_A_ADMIN); + aclsOnQueueA.put(QueueACL.SUBMIT_APPLICATIONS, submitACLonQueueA); + aclsOnQueueA.put(QueueACL.ADMINISTER_QUEUE, adminACLonQueueA); + csConf.setAcls(CapacitySchedulerConfiguration.ROOT + "." + QUEUEA, + aclsOnQueueA); + + Map<QueueACL, AccessControlList> aclsOnQueueB = + new HashMap<QueueACL, AccessControlList>(); + AccessControlList submitACLonQueueB = new AccessControlList(QUEUE_B_USER); + submitACLonQueueB.addUser(COMMON_USER); + AccessControlList adminACLonQueueB = new AccessControlList(QUEUE_B_ADMIN); + aclsOnQueueB.put(QueueACL.SUBMIT_APPLICATIONS, submitACLonQueueB); + aclsOnQueueB.put(QueueACL.ADMINISTER_QUEUE, adminACLonQueueB); + csConf.setAcls(CapacitySchedulerConfiguration.ROOT + "." + QUEUEB, + aclsOnQueueB); + + Map<QueueACL, AccessControlList> aclsOnRootQueue = + new HashMap<QueueACL, AccessControlList>(); + AccessControlList submitACLonRoot = new AccessControlList(""); + AccessControlList adminACLonRoot = new AccessControlList(ROOT_ADMIN); + aclsOnRootQueue.put(QueueACL.SUBMIT_APPLICATIONS, submitACLonRoot); + aclsOnRootQueue.put(QueueACL.ADMINISTER_QUEUE, adminACLonRoot); + csConf.setAcls(CapacitySchedulerConfiguration.ROOT, aclsOnRootQueue); + + csConf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true); + csConf.set("yarn.resourcemanager.scheduler.class", CapacityScheduler.class.getName()); + + return csConf; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerQueueACLs.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerQueueACLs.java new file mode 100644 index 00000000000..a09cfe8c9c6 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerQueueACLs.java @@ -0,0 +1,62 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair; + +import java.io.File; +import java.io.FileWriter; +import java.io.IOException; +import java.io.PrintWriter; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.server.resourcemanager.QueueACLsTestBase; + +public class TestFairSchedulerQueueACLs extends QueueACLsTestBase { + @Override + protected Configuration createConfiguration() throws IOException { + FairSchedulerConfiguration fsConf = new FairSchedulerConfiguration(); + + final String TEST_DIR = new File(System.getProperty("test.build.data", + "/tmp")).getAbsolutePath(); + final String ALLOC_FILE = new File(TEST_DIR, "test-queues.xml") + .getAbsolutePath(); + PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE)); + out.println("<?xml version=\"1.0\"?>"); + out.println("<allocations>"); + out.println("<queue name=\"root\">"); + out.println("  <aclSubmitApps> </aclSubmitApps>"); + out.println("  <aclAdministerApps>root_admin </aclAdministerApps>"); + out.println("  <queue name=\"queueA\">"); + out.println("    <aclSubmitApps>queueA_user,common_user </aclSubmitApps>"); + out.println("    <aclAdministerApps>queueA_admin </aclAdministerApps>"); + out.println("  </queue>"); + out.println("  <queue name=\"queueB\">"); + out.println("    <aclSubmitApps>queueB_user,common_user </aclSubmitApps>"); + out.println("    <aclAdministerApps>queueB_admin </aclAdministerApps>"); + out.println("  </queue>"); + out.println("</queue>"); + out.println("</allocations>"); + out.close(); + fsConf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE); + + fsConf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true); + fsConf.set("yarn.resourcemanager.scheduler.class", FairScheduler.class.getName()); + + return fsConf; + } +} From d45ca49e3d0e267730392bd6ebc7a0d6a159425a Mon Sep 17 00:00:00 2001 From: Colin McCabe Date: Tue, 22 Oct 2013 15:10:17 +0000 Subject: [PATCH 11/11] HDFS-5400. DFS_CLIENT_MMAP_CACHE_THREAD_RUNS_PER_TIMEOUT constant is set to the wrong value (Colin Patrick McCabe) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1534655 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ .../src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 1eb89a5baee..cfffa11abd0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -375,6 +375,9 @@ Release 2.3.0 - UNRELEASED HDFS-5336. DataNode should not output 'StartupProgress' metrics. (Akira Ajisaka via cnauroth) + HDFS-5400. DFS_CLIENT_MMAP_CACHE_THREAD_RUNS_PER_TIMEOUT constant is set + to the wrong value.
(Colin Patrick McCabe) + Release 2.2.1 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index 8d41ae2b2c2..0f9c8244d11 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -383,7 +383,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final int DFS_CLIENT_MMAP_CACHE_SIZE_DEFAULT = 1024; public static final String DFS_CLIENT_MMAP_CACHE_TIMEOUT_MS = "dfs.client.mmap.cache.timeout.ms"; public static final long DFS_CLIENT_MMAP_CACHE_TIMEOUT_MS_DEFAULT = 15 * 60 * 1000; - public static final String DFS_CLIENT_MMAP_CACHE_THREAD_RUNS_PER_TIMEOUT = "dfs.client.mmap.cache.timeout.ms"; + public static final String DFS_CLIENT_MMAP_CACHE_THREAD_RUNS_PER_TIMEOUT = "dfs.client.mmap.cache.thread.runs.per.timeout"; public static final int DFS_CLIENT_MMAP_CACHE_THREAD_RUNS_PER_TIMEOUT_DEFAULT = 4; // property for fsimage compression