From 38f94dc16d82e4a63e409d03639fb1ee2e05e06f Mon Sep 17 00:00:00 2001 From: Sanford Ryza Date: Mon, 25 Nov 2013 06:04:26 +0000 Subject: [PATCH 01/27] YARN-1423. Support queue placement by secondary group in the Fair Scheduler (Ted Malaska via Sandy Ryza) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1545157 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 3 ++ .../scheduler/fair/QueuePlacementPolicy.java | 2 + .../scheduler/fair/QueuePlacementRule.java | 43 ++++++++++++++++--- .../scheduler/fair/SimpleGroupsMapping.java | 2 +- .../scheduler/fair/TestFairScheduler.java | 8 +++- .../src/site/apt/FairScheduler.apt.vm | 4 ++ 6 files changed, 53 insertions(+), 9 deletions(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index f20b9e8a588..20c71fc4c16 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -117,6 +117,9 @@ Release 2.3.0 - UNRELEASED YARN-1303. Fixed DistributedShell to not fail with multiple commands separated by a semi-colon as shell-command. (Xuan Gong via vinodkv) + YARN-1423. Support queue placement by secondary group in the Fair Scheduler + (Ted Malaska via Sandy Ryza) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueuePlacementPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueuePlacementPolicy.java index 402b47a0e44..4bf6b613166 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueuePlacementPolicy.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueuePlacementPolicy.java @@ -39,6 +39,8 @@ public class QueuePlacementPolicy { new HashMap>(); map.put("user", QueuePlacementRule.User.class); map.put("primaryGroup", QueuePlacementRule.PrimaryGroup.class); + map.put("secondaryGroupExistingQueue", + QueuePlacementRule.SecondaryGroupExistingQueue.class); map.put("specified", QueuePlacementRule.Specified.class); map.put("default", QueuePlacementRule.Default.class); map.put("reject", QueuePlacementRule.Reject.class); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueuePlacementRule.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueuePlacementRule.java index 95acdcae04f..ac0df509546 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueuePlacementRule.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueuePlacementRule.java @@ -20,6 +20,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair; import java.io.IOException; import java.util.Collection; import java.util.HashMap; +import java.util.List; import java.util.Map; import 
org.apache.hadoop.security.Groups; @@ -58,7 +59,7 @@ public abstract class QueuePlacementRule { */ public String assignAppToQueue(String requestedQueue, String user, Groups groups, Collection configuredQueues) throws IOException { - String queue = getQueueForApp(requestedQueue, user, groups); + String queue = getQueueForApp(requestedQueue, user, groups, configuredQueues); if (create || configuredQueues.contains(queue)) { return queue; } else { @@ -103,7 +104,7 @@ public abstract class QueuePlacementRule { * continue to the next rule. */ protected abstract String getQueueForApp(String requestedQueue, String user, - Groups groups) throws IOException; + Groups groups, Collection configuredQueues) throws IOException; /** * Places apps in queues by username of the submitter @@ -111,7 +112,7 @@ public abstract class QueuePlacementRule { public static class User extends QueuePlacementRule { @Override protected String getQueueForApp(String requestedQueue, - String user, Groups groups) { + String user, Groups groups, Collection configuredQueues) { return "root." + user; } @@ -127,7 +128,8 @@ public abstract class QueuePlacementRule { public static class PrimaryGroup extends QueuePlacementRule { @Override protected String getQueueForApp(String requestedQueue, - String user, Groups groups) throws IOException { + String user, Groups groups, + Collection configuredQueues) throws IOException { return "root." + groups.getGroups(user).get(0); } @@ -136,6 +138,33 @@ public abstract class QueuePlacementRule { return create; } } + + /** + * Places apps in queues by secondary group of the submitter + * + * Match will be made on first secondary group that exist in + * queues + */ + public static class SecondaryGroupExistingQueue extends QueuePlacementRule { + @Override + protected String getQueueForApp(String requestedQueue, + String user, Groups groups, + Collection configuredQueues) throws IOException { + List groupNames = groups.getGroups(user); + for (int i = 1; i < groupNames.size(); i++) { + if (configuredQueues.contains("root." + groupNames.get(i))) { + return "root." + groupNames.get(i); + } + } + + return ""; + } + + @Override + public boolean isTerminal() { + return create; + } + } /** * Places apps in queues by requested queue of the submitter @@ -143,7 +172,7 @@ public abstract class QueuePlacementRule { public static class Specified extends QueuePlacementRule { @Override protected String getQueueForApp(String requestedQueue, - String user, Groups groups) { + String user, Groups groups, Collection configuredQueues) { if (requestedQueue.equals(YarnConfiguration.DEFAULT_QUEUE_NAME)) { return ""; } else { @@ -166,7 +195,7 @@ public abstract class QueuePlacementRule { public static class Default extends QueuePlacementRule { @Override protected String getQueueForApp(String requestedQueue, String user, - Groups groups) { + Groups groups, Collection configuredQueues) { return "root." 
+ YarnConfiguration.DEFAULT_QUEUE_NAME; } @@ -188,7 +217,7 @@ public abstract class QueuePlacementRule { @Override protected String getQueueForApp(String requestedQueue, String user, - Groups groups) { + Groups groups, Collection configuredQueues) { throw new UnsupportedOperationException(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/SimpleGroupsMapping.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/SimpleGroupsMapping.java index 24dd65d7f5c..47a33d8a19e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/SimpleGroupsMapping.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/SimpleGroupsMapping.java @@ -28,7 +28,7 @@ public class SimpleGroupsMapping implements GroupMappingServiceProvider { @Override public List getGroups(String user) { - return Arrays.asList(user + "group"); + return Arrays.asList(user + "group", user + "subgroup1", user + "subgroup2"); } @Override diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java index cc2e1ccbdcd..14daf33aaf6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java @@ -682,8 +682,10 @@ public class TestFairScheduler { rules.add(new QueuePlacementRule.Specified().initialize(true, null)); rules.add(new QueuePlacementRule.User().initialize(false, null)); rules.add(new QueuePlacementRule.PrimaryGroup().initialize(false, null)); + rules.add(new QueuePlacementRule.SecondaryGroupExistingQueue().initialize(false, null)); rules.add(new QueuePlacementRule.Default().initialize(true, null)); - Set queues = Sets.newHashSet("root.user1", "root.user3group"); + Set queues = Sets.newHashSet("root.user1", "root.user3group", + "root.user4subgroup1", "root.user4subgroup2" , "root.user5subgroup2"); scheduler.getQueueManager().placementPolicy = new QueuePlacementPolicy( rules, queues, conf); appId = createSchedulingRequest(1024, "somequeue", "user1"); @@ -692,6 +694,10 @@ public class TestFairScheduler { assertEquals("root.user1", apps.get(appId).getQueueName()); appId = createSchedulingRequest(1024, "default", "user3"); assertEquals("root.user3group", apps.get(appId).getQueueName()); + appId = createSchedulingRequest(1024, "default", "user4"); + assertEquals("root.user4subgroup1", apps.get(appId).getQueueName()); + appId = createSchedulingRequest(1024, "default", "user5"); + assertEquals("root.user5subgroup2", apps.get(appId).getQueueName()); appId = createSchedulingRequest(1024, "default", "otheruser"); assertEquals("root.default", 
apps.get(appId).getQueueName()); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/FairScheduler.apt.vm b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/FairScheduler.apt.vm index d0cd30e5afd..29b9fd31d09 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/FairScheduler.apt.vm +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/FairScheduler.apt.vm @@ -287,6 +287,10 @@ Allocation file format * primaryGroup: the app is placed into a queue with the name of the primary group of the user who submitted it. + * secondaryGroupExistingQueue: the app is placed into a queue with a name + that matches a secondary group of the user who submitted it. The first + secondary group that matches a configured queue will be selected. + * default: the app is placed into the queue named "default". * reject: the app is rejected. From 65ee88b0de5218a07c0f9dbb7416db551584f0a6 Mon Sep 17 00:00:00 2001 From: Kihwal Lee Date: Mon, 25 Nov 2013 15:33:52 +0000 Subject: [PATCH 02/27] HDFS-5526. Datanode cannot roll back to previous layout version. Contributed by Kihwal Lee. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1545322 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 + .../hdfs/server/datanode/DataStorage.java | 81 ++++++++++++++----- .../apache/hadoop/hdfs/TestDFSRollback.java | 22 ++--- 3 files changed, 76 insertions(+), 29 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 58c2b1fa8ec..ca7fc3500e6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -4018,6 +4018,8 @@ Release 0.23.10 - UNRELEASED HDFS-4329. DFSShell issues with directories with spaces in name (Cristina L. Abad via jeagles) + HDFS-5526. Datanode cannot roll back to previous layout version (kihwal) + Release 0.23.9 - 2013-07-08 INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java index 9d31ffa673e..f5ee53da5e7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java @@ -299,7 +299,16 @@ public class DataStorage extends Storage { @Override protected void setFieldsFromProperties(Properties props, StorageDirectory sd) throws IOException { - setLayoutVersion(props, sd); + setFieldsFromProperties(props, sd, false, 0); + } + + private void setFieldsFromProperties(Properties props, StorageDirectory sd, + boolean overrideLayoutVersion, int toLayoutVersion) throws IOException { + if (overrideLayoutVersion) { + this.layoutVersion = toLayoutVersion; + } else { + setLayoutVersion(props, sd); + } setcTime(props, sd); setStorageType(props, sd); setClusterId(props, layoutVersion, sd); @@ -347,13 +356,20 @@ public class DataStorage extends Storage { return true; } + /** Read VERSION file for rollback */ + void readProperties(StorageDirectory sd, int rollbackLayoutVersion) + throws IOException { + Properties props = readPropertiesFile(sd.getVersionFile()); + setFieldsFromProperties(props, sd, true, rollbackLayoutVersion); + } + /** * Analize which and whether a transition of the fs state is required * and perform it if necessary. 
* - * Rollback if previousLV >= LAYOUT_VERSION && prevCTime <= namenode.cTime - * Upgrade if this.LV > LAYOUT_VERSION || this.cTime < namenode.cTime - * Regular startup if this.LV = LAYOUT_VERSION && this.cTime = namenode.cTime + * Rollback if the rollback startup option was specified. + * Upgrade if this.LV > LAYOUT_VERSION + * Regular startup if this.LV = LAYOUT_VERSION * * @param datanode Datanode to which this storage belongs to * @param sd storage directory @@ -393,25 +409,28 @@ public class DataStorage extends Storage { + nsInfo.getClusterID() + "; datanode clusterID = " + getClusterID()); } - // regular start up - if (this.layoutVersion == HdfsConstants.LAYOUT_VERSION - && this.cTime == nsInfo.getCTime()) + // After addition of the federation feature, ctime check is only + // meaningful at BlockPoolSliceStorage level. + + // regular start up. + if (this.layoutVersion == HdfsConstants.LAYOUT_VERSION) return; // regular startup // do upgrade - if (this.layoutVersion > HdfsConstants.LAYOUT_VERSION - || this.cTime < nsInfo.getCTime()) { + if (this.layoutVersion > HdfsConstants.LAYOUT_VERSION) { doUpgrade(sd, nsInfo); // upgrade return; } - // layoutVersion == LAYOUT_VERSION && this.cTime > nsInfo.cTime - // must shutdown - throw new IOException("Datanode state: LV = " + this.getLayoutVersion() - + " CTime = " + this.getCTime() - + " is newer than the namespace state: LV = " - + nsInfo.getLayoutVersion() - + " CTime = " + nsInfo.getCTime()); + // layoutVersion < LAYOUT_VERSION. I.e. stored layout version is newer + // than the version supported by datanode. This should have been caught + // in readProperties(), even if rollback was not carried out or somehow + // failed. + throw new IOException("BUG: The stored LV = " + this.getLayoutVersion() + + " is newer than the supported LV = " + + HdfsConstants.LAYOUT_VERSION + + " or name node LV = " + + nsInfo.getLayoutVersion()); } /** @@ -437,8 +456,13 @@ public class DataStorage extends Storage { * @throws IOException on error */ void doUpgrade(StorageDirectory sd, NamespaceInfo nsInfo) throws IOException { + // If the existing on-disk layout version supportes federation, simply + // update its layout version. if (LayoutVersion.supports(Feature.FEDERATION, layoutVersion)) { - clusterID = nsInfo.getClusterID(); + // The VERSION file is already read in. Override the layoutVersion + // field and overwrite the file. + LOG.info("Updating layout version from " + layoutVersion + " to " + + nsInfo.getLayoutVersion() + " for storage " + sd.getRoot()); layoutVersion = nsInfo.getLayoutVersion(); writeProperties(sd); return; @@ -523,15 +547,32 @@ public class DataStorage extends Storage { *
   * <li> Remove removed.tmp
  • * * - * Do nothing, if previous directory does not exist. + * If previous directory does not exist and the current version supports + * federation, perform a simple rollback of layout version. This does not + * involve saving/restoration of actual data. */ void doRollback( StorageDirectory sd, NamespaceInfo nsInfo ) throws IOException { File prevDir = sd.getPreviousDir(); - // regular startup if previous dir does not exist - if (!prevDir.exists()) + // This is a regular startup or a post-federation rollback + if (!prevDir.exists()) { + // The current datanode version supports federation and the layout + // version from namenode matches what the datanode supports. An invalid + // rollback may happen if namenode didn't rollback and datanode is + // running a wrong version. But this will be detected in block pool + // level and the invalid VERSION content will be overwritten when + // the error is corrected and rollback is retried. + if (LayoutVersion.supports(Feature.FEDERATION, + HdfsConstants.LAYOUT_VERSION) && + HdfsConstants.LAYOUT_VERSION == nsInfo.getLayoutVersion()) { + readProperties(sd, nsInfo.getLayoutVersion()); + writeProperties(sd); + LOG.info("Layout version rolled back to " + + nsInfo.getLayoutVersion() + " for storage " + sd.getRoot()); + } return; + } DataStorage prevInfo = new DataStorage(); prevInfo.readPreviousVersionProperties(sd); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java index fbcce3946ba..771504186c6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java @@ -191,21 +191,25 @@ public class TestDFSRollback { // Create a previous snapshot for the blockpool UpgradeUtilities.createBlockPoolStorageDirs(dataNodeDirs, "previous", UpgradeUtilities.getCurrentBlockPoolID(cluster)); - // Older LayoutVersion to make it rollback + // Put newer layout version in current. storageInfo = new StorageInfo( - UpgradeUtilities.getCurrentLayoutVersion()+1, + UpgradeUtilities.getCurrentLayoutVersion()-1, UpgradeUtilities.getCurrentNamespaceID(cluster), UpgradeUtilities.getCurrentClusterID(cluster), UpgradeUtilities.getCurrentFsscTime(cluster)); - // Create old VERSION file for each data dir + + // Overwrite VERSION file in the current directory of + // volume directories and block pool slice directories + // with a layout version from future. + File[] dataCurrentDirs = new File[dataNodeDirs.length]; for (int i=0; i Date: Mon, 25 Nov 2013 18:05:36 +0000 Subject: [PATCH 03/27] HDFS-5533. Symlink delete/create should be treated as DELETE/CREATE in snapshot diff report. Contributed by Binglin Chang. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1545357 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 ++ .../snapshot/INodeDirectoryWithSnapshot.java | 12 +++----- .../snapshot/TestSnapshotDiffReport.java | 29 +++++++++++++++++-- 3 files changed, 33 insertions(+), 11 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index ca7fc3500e6..335c67ae597 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -612,6 +612,9 @@ Release 2.3.0 - UNRELEASED HDFS-5552. Fix wrong information of "Cluster summay" in dfshealth.html. 
(Haohui Mai via jing9) + HDFS-5533. Symlink delete/create should be treated as DELETE/CREATE in snapshot diff + report. (Binglin Chang via jing9) + Release 2.2.1 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java index f062048439c..4680d08eaf9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java @@ -185,14 +185,10 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota { INode dnode = deleted.get(d); if (cnode.compareTo(dnode.getLocalNameBytes()) == 0) { fullPath[fullPath.length - 1] = cnode.getLocalNameBytes(); - if (cnode.isSymlink() && dnode.isSymlink()) { - dList.add(new DiffReportEntry(DiffType.MODIFY, fullPath)); - } else { - // must be the case: delete first and then create an inode with the - // same name - cList.add(new DiffReportEntry(DiffType.CREATE, fullPath)); - dList.add(new DiffReportEntry(DiffType.DELETE, fullPath)); - } + // must be the case: delete first and then create an inode with the + // same name + cList.add(new DiffReportEntry(DiffType.CREATE, fullPath)); + dList.add(new DiffReportEntry(DiffType.DELETE, fullPath)); c++; d++; } else if (cnode.compareTo(dnode.getLocalNameBytes()) < 0) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDiffReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDiffReport.java index 711fcd9ca98..6dfd0effa5a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDiffReport.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDiffReport.java @@ -92,12 +92,15 @@ public class TestSnapshotDiffReport { Path file11 = new Path(modifyDir, "file11"); Path file12 = new Path(modifyDir, "file12"); Path file13 = new Path(modifyDir, "file13"); + Path link13 = new Path(modifyDir, "link13"); Path file14 = new Path(modifyDir, "file14"); Path file15 = new Path(modifyDir, "file15"); DFSTestUtil.createFile(hdfs, file10, BLOCKSIZE, REPLICATION_1, seed); DFSTestUtil.createFile(hdfs, file11, BLOCKSIZE, REPLICATION_1, seed); DFSTestUtil.createFile(hdfs, file12, BLOCKSIZE, REPLICATION_1, seed); DFSTestUtil.createFile(hdfs, file13, BLOCKSIZE, REPLICATION_1, seed); + // create link13 + hdfs.createSymlink(file13, link13, false); // create snapshot for (Path snapshotDir : snapshotDirs) { hdfs.allowSnapshot(snapshotDir); @@ -110,6 +113,8 @@ public class TestSnapshotDiffReport { hdfs.setReplication(file12, REPLICATION); // modify file13 hdfs.setReplication(file13, REPLICATION); + // delete link13 + hdfs.delete(link13, false); // create file14 DFSTestUtil.createFile(hdfs, file14, BLOCKSIZE, REPLICATION, seed); // create file15 @@ -126,6 +131,8 @@ public class TestSnapshotDiffReport { hdfs.delete(file12, true); // modify file13 hdfs.setReplication(file13, (short) (REPLICATION - 2)); + // create link13 again + hdfs.createSymlink(file13, link13, false); // delete file14 hdfs.delete(file14, true); // modify file15 @@ -222,7 +229,9 @@ 
public class TestSnapshotDiffReport { new DiffReportEntry(DiffType.DELETE, DFSUtil.string2Bytes("file12")), new DiffReportEntry(DiffType.DELETE, DFSUtil.string2Bytes("file11")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("file11")), - new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("file13"))); + new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("file13")), + new DiffReportEntry(DiffType.DELETE, DFSUtil.string2Bytes("link13")), + new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("link13"))); verifyDiffReport(sub1, "s0", "s5", new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")), @@ -232,6 +241,8 @@ public class TestSnapshotDiffReport { new DiffReportEntry(DiffType.DELETE, DFSUtil.string2Bytes("file11")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("file11")), new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("file13")), + new DiffReportEntry(DiffType.DELETE, DFSUtil.string2Bytes("link13")), + new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("link13")), new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("subsub1/subsubsub1")), new DiffReportEntry(DiffType.CREATE, @@ -240,6 +251,8 @@ public class TestSnapshotDiffReport { DFSUtil.string2Bytes("subsub1/subsubsub1/file11")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("subsub1/subsubsub1/file13")), + new DiffReportEntry(DiffType.CREATE, + DFSUtil.string2Bytes("subsub1/subsubsub1/link13")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("subsub1/subsubsub1/file15"))); @@ -253,6 +266,8 @@ public class TestSnapshotDiffReport { DFSUtil.string2Bytes("subsub1/subsubsub1/file11")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("subsub1/subsubsub1/file13")), + new DiffReportEntry(DiffType.CREATE, + DFSUtil.string2Bytes("subsub1/subsubsub1/link13")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("subsub1/subsubsub1/file15"))); @@ -270,7 +285,11 @@ public class TestSnapshotDiffReport { new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("subsub1/subsubsub1/file11")), new DiffReportEntry(DiffType.MODIFY, - DFSUtil.string2Bytes("subsub1/subsubsub1/file13"))); + DFSUtil.string2Bytes("subsub1/subsubsub1/file13")), + new DiffReportEntry(DiffType.CREATE, + DFSUtil.string2Bytes("subsub1/subsubsub1/link13")), + new DiffReportEntry(DiffType.DELETE, + DFSUtil.string2Bytes("subsub1/subsubsub1/link13"))); } /** @@ -300,7 +319,11 @@ public class TestSnapshotDiffReport { new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("subsub1/subsubsub1/file11")), new DiffReportEntry(DiffType.MODIFY, - DFSUtil.string2Bytes("subsub1/subsubsub1/file13"))); + DFSUtil.string2Bytes("subsub1/subsubsub1/file13")), + new DiffReportEntry(DiffType.CREATE, + DFSUtil.string2Bytes("subsub1/subsubsub1/link13")), + new DiffReportEntry(DiffType.DELETE, + DFSUtil.string2Bytes("subsub1/subsubsub1/link13"))); // check diff report between s0 and the current status verifyDiffReport(sub1, "s0", "", new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")), From b1d6574a113fcee54aefb74d7c701123ebdb990b Mon Sep 17 00:00:00 2001 From: Suresh Srinivas Date: Mon, 25 Nov 2013 19:42:19 +0000 Subject: [PATCH 04/27] HADOOP-10126. LightWeightGSet log message is confusing. Contributed by Vinay. 
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1545376 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 2 ++ .../main/java/org/apache/hadoop/util/LightWeightGSet.java | 7 +++++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index f6196af25e2..141834bd7a2 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -388,6 +388,8 @@ Release 2.3.0 - UNRELEASED HADOOP-10111. Allow DU to be initialized with an initial value (Kihwal Lee via jeagles) + HADOOP-10126. LightWeightGSet log message is confusing. (Vinay via suresh) + OPTIMIZATIONS HADOOP-9748. Reduce blocking on UGI.ensureInitialized (daryn) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LightWeightGSet.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LightWeightGSet.java index 50e291d46c7..f1661d750d7 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LightWeightGSet.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LightWeightGSet.java @@ -348,8 +348,11 @@ public class LightWeightGSet implements GSet { LOG.info("Computing capacity for map " + mapName); LOG.info("VM type = " + vmBit + "-bit"); - LOG.info(percentage + "% max memory = " - + StringUtils.TraditionalBinaryPrefix.long2String(maxMemory, "B", 1)); + LOG.info(percentage + "% max memory " + + StringUtils.TraditionalBinaryPrefix.long2String(maxMemory, "B", 1) + + " = " + + StringUtils.TraditionalBinaryPrefix.long2String((long) percentMemory, + "B", 1)); LOG.info("capacity = 2^" + exponent + " = " + c + " entries"); return c; } From 6c243fceac85701dd4d8fd4e7d2dc2442d6eebdc Mon Sep 17 00:00:00 2001 From: Vinod Kumar Vavilapalli Date: Mon, 25 Nov 2013 23:10:34 +0000 Subject: [PATCH 05/27] YARN-1416. Fixed a few invalid transitions in RMApp, RMAppAttempt and in some tests. Contributed by Jian He. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1545448 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 3 + .../resourcemanager/rmapp/RMAppImpl.java | 7 +- .../rmapp/attempt/RMAppAttemptImpl.java | 1 + .../rmapp/TestRMAppTransitions.java | 102 +++--------------- .../attempt/TestRMAppAttemptTransitions.java | 37 ++++++- 5 files changed, 56 insertions(+), 94 deletions(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 20c71fc4c16..a835a57ab92 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -176,6 +176,9 @@ Release 2.3.0 - UNRELEASED YARN-1320. Fixed Distributed Shell application to respect custom log4j properties file. (Xuan Gong via vinodkv) + YARN-1416. Fixed a few invalid transitions in RMApp, RMAppAttempt and in some + tests. 
(Jian He via vinodkv) + Release 2.2.1 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java index 16868369a73..76d59ec9608 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java @@ -130,7 +130,7 @@ public class RMAppImpl implements RMApp, Recoverable { .addTransition(RMAppState.NEW, RMAppState.NEW, RMAppEventType.NODE_UPDATE, new RMAppNodeUpdateTransition()) .addTransition(RMAppState.NEW, RMAppState.NEW_SAVING, - RMAppEventType.START, new RMAppSavingTransition()) + RMAppEventType.START, new RMAppNewlySavingTransition()) .addTransition(RMAppState.NEW, EnumSet.of(RMAppState.SUBMITTED, RMAppState.RUNNING, RMAppState.FINISHED, RMAppState.FAILED, RMAppState.KILLED, RMAppState.FINAL_SAVING), @@ -215,7 +215,8 @@ public class RMAppImpl implements RMApp, Recoverable { new AttemptFinishedAtFinalSavingTransition()) // ignorable transitions .addTransition(RMAppState.FINAL_SAVING, RMAppState.FINAL_SAVING, - EnumSet.of(RMAppEventType.NODE_UPDATE, RMAppEventType.KILL)) + EnumSet.of(RMAppEventType.NODE_UPDATE, RMAppEventType.KILL, + RMAppEventType.APP_NEW_SAVED)) // Transitions from FINISHING state .addTransition(RMAppState.FINISHING, RMAppState.FINISHED, @@ -760,7 +761,7 @@ public class RMAppImpl implements RMApp, Recoverable { return msg; } - private static final class RMAppSavingTransition extends RMAppTransition { + private static final class RMAppNewlySavingTransition extends RMAppTransition { @Override public void transition(RMAppImpl app, RMAppEvent event) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java index 5bfd178de21..ffa021f6b01 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java @@ -334,6 +334,7 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable { // Saving in scheduler RMAppAttemptEventType.CONTAINER_ALLOCATED, RMAppAttemptEventType.CONTAINER_ACQUIRED, + RMAppAttemptEventType.ATTEMPT_NEW_SAVED, RMAppAttemptEventType.KILL)) // Transitions from FAILED State diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java index b5f49926024..e3e0e69acb1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java @@ -18,19 +18,14 @@ package org.apache.hadoop.yarn.server.resourcemanager.rmapp; -import static org.junit.Assert.assertEquals; -import static org.junit.Assume.assumeTrue; import static org.mockito.Matchers.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; import java.io.IOException; import java.util.Arrays; import java.util.Collection; -import java.util.Collections; -import java.util.List; import junit.framework.Assert; @@ -41,13 +36,10 @@ import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; import org.apache.hadoop.yarn.MockApps; -import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationReport; import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; -import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; -import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationSubmissionContextPBImpl; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.event.DrainDispatcher; @@ -64,11 +56,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEvent; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEventType; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState; -import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptContainerAllocatedEvent; -import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptNewSavedEvent; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptUpdateSavedEvent; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAllocationExpirer; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Allocation; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEventType; @@ -76,7 +65,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.security.AMRMTokenSecretMan import org.apache.hadoop.yarn.server.resourcemanager.security.ClientToAMTokenSecretManagerInRM; import org.apache.hadoop.yarn.server.resourcemanager.security.NMTokenSecretManagerInRM; import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager; -import org.apache.hadoop.yarn.server.utils.BuilderUtils; import org.junit.Before; import 
org.junit.Test; import org.junit.runner.RunWith; @@ -290,7 +278,6 @@ public class TestRMAppTransitions { // test to make sure times are set when app finishes private void assertTimesAtFinish(RMApp application) { - sendAppUpdateSavedEvent(application); assertStartTimeSet(application); Assert.assertTrue("application finish time is not greater then 0", (application.getFinishTime() > 0)); @@ -303,7 +290,6 @@ public class TestRMAppTransitions { } private void assertKilled(RMApp application) { - sendAppUpdateSavedEvent(application); assertTimesAtFinish(application); assertAppState(RMAppState.KILLED, application); assertFinalAppStatus(FinalApplicationStatus.KILLED, application); @@ -314,6 +300,7 @@ public class TestRMAppTransitions { private void assertAppAndAttemptKilled(RMApp application) throws InterruptedException { + sendAppUpdateSavedEvent(application); assertKilled(application); // send attempt final state saved event. application.getCurrentAppAttempt().handle( @@ -325,7 +312,6 @@ public class TestRMAppTransitions { } private void assertFailed(RMApp application, String regex) { - sendAppUpdateSavedEvent(application); assertTimesAtFinish(application); assertAppState(RMAppState.FAILED, application); assertFinalAppStatus(FinalApplicationStatus.FAILED, application); @@ -475,6 +461,7 @@ public class TestRMAppTransitions { rmDispatcher.await(); RMAppAttempt appAttempt = application.getCurrentAppAttempt(); Assert.assertEquals(1, appAttempt.getAppAttemptId().getAttemptId()); + sendAppUpdateSavedEvent(application); assertFailed(application, ".*Unmanaged application.*Failing the application.*"); } @@ -504,6 +491,7 @@ public class TestRMAppTransitions { new RMAppEvent(application.getApplicationId(), RMAppEventType.KILL); application.handle(event); rmDispatcher.await(); + sendAppUpdateSavedEvent(application); assertKilled(application); } @@ -518,6 +506,7 @@ public class TestRMAppTransitions { new RMAppRejectedEvent(application.getApplicationId(), rejectedText); application.handle(event); rmDispatcher.await(); + sendAppUpdateSavedEvent(application); assertFailed(application, rejectedText); } @@ -531,6 +520,7 @@ public class TestRMAppTransitions { new RMAppEvent(application.getApplicationId(), RMAppEventType.KILL); application.handle(event); rmDispatcher.await(); + sendAppUpdateSavedEvent(application); assertKilled(application); } @@ -545,6 +535,7 @@ public class TestRMAppTransitions { new RMAppRejectedEvent(application.getApplicationId(), rejectedText); application.handle(event); rmDispatcher.await(); + sendAppUpdateSavedEvent(application); assertFailed(application, rejectedText); } @@ -559,6 +550,7 @@ public class TestRMAppTransitions { new RMAppRejectedEvent(application.getApplicationId(), rejectedText); application.handle(event); rmDispatcher.await(); + sendAppUpdateSavedEvent(application); assertFailed(application, rejectedText); } @@ -603,6 +595,7 @@ public class TestRMAppTransitions { RMAppEventType.ATTEMPT_FAILED, message); application.handle(event); rmDispatcher.await(); + sendAppUpdateSavedEvent(application); assertFailed(application, ".*" + message + ".*Failing the application.*"); } @@ -635,7 +628,7 @@ public class TestRMAppTransitions { new RMAppFinishedAttemptEvent(application.getApplicationId(), null); application.handle(finishEvent); assertAppState(RMAppState.FINAL_SAVING, application); - + sendAppUpdateSavedEvent(application); assertKilled(application); } @@ -681,6 +674,7 @@ public class TestRMAppTransitions { RMAppEventType.ATTEMPT_FAILED, ""); application.handle(event); 
rmDispatcher.await(); + sendAppUpdateSavedEvent(application); assertFailed(application, ".*Failing the application.*"); // FAILED => FAILED event RMAppEventType.KILL @@ -758,6 +752,7 @@ public class TestRMAppTransitions { new RMAppRejectedEvent(application.getApplicationId(), ""); application.handle(event); rmDispatcher.await(); + sendAppUpdateSavedEvent(application); assertTimesAtFinish(application); assertAppState(RMAppState.FAILED, application); @@ -769,10 +764,6 @@ public class TestRMAppTransitions { assertTimesAtFinish(application); assertAppState(RMAppState.FAILED, application); - // FAILED => FAILED event RMAppEventType.APP_SAVED - event = new RMAppNewSavedEvent(application.getApplicationId(), null); - application.handle(event); - rmDispatcher.await(); assertTimesAtFinish(application); assertAppState(RMAppState.FAILED, application); } @@ -788,6 +779,7 @@ public class TestRMAppTransitions { new RMAppEvent(application.getApplicationId(), RMAppEventType.KILL); application.handle(event); rmDispatcher.await(); + sendAppUpdateSavedEvent(application); assertTimesAtFinish(application); assertAppState(RMAppState.KILLED, application); @@ -824,10 +816,6 @@ public class TestRMAppTransitions { assertTimesAtFinish(application); assertAppState(RMAppState.KILLED, application); - // KILLED => KILLED event RMAppEventType.APP_SAVED - event = new RMAppNewSavedEvent(application.getApplicationId(), null); - application.handle(event); - rmDispatcher.await(); assertTimesAtFinish(application); assertAppState(RMAppState.KILLED, application); } @@ -841,70 +829,4 @@ public class TestRMAppTransitions { report = app.createAndGetApplicationReport("clientuser", true); Assert.assertNotNull(report.getApplicationResourceUsageReport()); } - - @Test - public void testClientTokens() throws Exception { - assumeTrue(isSecurityEnabled); - - RMApp app = createNewTestApp(null); - assertAppState(RMAppState.NEW, app); - ApplicationReport report = app.createAndGetApplicationReport(null, true); - Assert.assertNull(report.getClientToAMToken()); - report = app.createAndGetApplicationReport("clientuser", true); - Assert.assertNull(report.getClientToAMToken()); - - app = testCreateAppRunning(null); - rmDispatcher.await(); - assertAppState(RMAppState.RUNNING, app); - - report = app.createAndGetApplicationReport("clientuser", true); - Assert.assertNull(report.getClientToAMToken()); - - // this method is to make AMLaunchedTransition invoked inside which - // ClientTokenMasterKey is registered in ClientTokenSecretManager - moveCurrentAttemptToLaunchedState(app.getCurrentAppAttempt()); - - report = app.createAndGetApplicationReport(null, true); - Assert.assertNull(report.getClientToAMToken()); - report = app.createAndGetApplicationReport("clientuser", true); - Assert.assertNotNull(report.getClientToAMToken()); - - // kill the app attempt and verify client token is unavailable - app.handle(new RMAppEvent(app.getApplicationId(), RMAppEventType.KILL)); - rmDispatcher.await(); - assertAppAndAttemptKilled(app); - report = app.createAndGetApplicationReport(null, true); - Assert.assertNull(report.getClientToAMToken()); - report = app.createAndGetApplicationReport("clientuser", true); - Assert.assertNull(report.getClientToAMToken()); - } - - @SuppressWarnings("unchecked") - private void moveCurrentAttemptToLaunchedState(RMAppAttempt attempt) { - attempt.handle(new RMAppAttemptEvent(attempt.getAppAttemptId(), - RMAppAttemptEventType.APP_ACCEPTED)); - // Mock the allocation of AM container - Container container = mock(Container.class); - Resource 
resource = BuilderUtils.newResource(2048, 1); - when(container.getId()).thenReturn( - BuilderUtils.newContainerId(attempt.getAppAttemptId(), 1)); - when(container.getResource()).thenReturn(resource); - Allocation allocation = mock(Allocation.class); - when(allocation.getContainers()).thenReturn( - Collections.singletonList(container)); - when(allocation.getContainers()). - thenReturn(Collections.singletonList(container)); - when( - scheduler.allocate(any(ApplicationAttemptId.class), any(List.class), - any(List.class), any(List.class), any(List.class))).thenReturn( - allocation); - attempt.handle(new RMAppAttemptContainerAllocatedEvent(attempt - .getAppAttemptId(), container)); - attempt - .handle(new RMAppAttemptNewSavedEvent(attempt.getAppAttemptId(), null)); - attempt.handle(new RMAppAttemptEvent(attempt.getAppAttemptId(), - RMAppAttemptEventType.LAUNCHED)); - - assertEquals(RMAppAttemptState.LAUNCHED, attempt.getAppAttemptState()); - } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java index d391d950036..140b53e62ff 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java @@ -23,6 +23,7 @@ import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; +import static org.junit.Assume.assumeTrue; import static org.mockito.Matchers.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; @@ -44,6 +45,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; +import org.apache.hadoop.security.token.Token; import org.apache.hadoop.yarn.MockApps; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; @@ -57,6 +59,7 @@ import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.event.AsyncDispatcher; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.event.InlineDispatcher; +import org.apache.hadoop.yarn.security.client.ClientToAMTokenIdentifier; import org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl; @@ -74,9 +77,9 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAt import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptContainerAllocatedEvent; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptContainerFinishedEvent; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptLaunchFailedEvent; +import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptNewSavedEvent; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptRegistrationEvent; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptRejectedEvent; -import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptNewSavedEvent; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptUnregistrationEvent; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptUpdateSavedEvent; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAllocationExpirer; @@ -1068,6 +1071,38 @@ public class TestRMAppAttemptTransitions { diagnostics, 0, false); } + // this is to test user can get client tokens only after the client token + // master key is saved in the state store and also registered in + // ClientTokenSecretManager + @Test + public void testGetClientToken() throws Exception { + assumeTrue(isSecurityEnabled); + Container amContainer = allocateApplicationAttempt(); + + // before attempt is launched, can not get ClientToken + Token token = + applicationAttempt.createClientToken(null); + Assert.assertNull(token); + token = applicationAttempt.createClientToken("clientuser"); + Assert.assertNull(token); + + launchApplicationAttempt(amContainer); + // after attempt is launched , can get ClientToken + token = applicationAttempt.createClientToken(null); + Assert.assertNull(token); + token = applicationAttempt.createClientToken("clientuser"); + Assert.assertNotNull(token); + + applicationAttempt.handle(new RMAppAttemptEvent(applicationAttempt + .getAppAttemptId(), RMAppAttemptEventType.KILL)); + sendAttemptUpdateSavedEvent(applicationAttempt); + // after attempt is killed, can not get Client Token + token = applicationAttempt.createClientToken(null); + Assert.assertNull(token); + token = applicationAttempt.createClientToken("clientuser"); + Assert.assertNull(token); + } + private void verifyTokenCount(ApplicationAttemptId appAttemptId, int count) { verify(amRMTokenManager, times(count)).applicationMasterFinished(appAttemptId); if (UserGroupInformation.isSecurityEnabled()) { From c4bdddeab56287c8a8ae314fac238cbbc6c1bcf4 Mon Sep 17 00:00:00 2001 From: Vinod Kumar Vavilapalli Date: Tue, 26 Nov 2013 01:10:21 +0000 Subject: [PATCH 06/27] YARN-1314. Fixed DistributedShell to not fail with multiple arguments for a shell command separated by spaces. Contributed by Xuan Gong. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1545486 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 3 + .../distributedshell/ApplicationMaster.java | 40 ++++--- .../applications/distributedshell/Client.java | 106 ++++++++---------- .../TestDistributedShell.java | 34 ++++++ 4 files changed, 107 insertions(+), 76 deletions(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index a835a57ab92..20cf15bda48 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -120,6 +120,9 @@ Release 2.3.0 - UNRELEASED YARN-1423. Support queue placement by secondary group in the Fair Scheduler (Ted Malaska via Sandy Ryza) + YARN-1314. Fixed DistributedShell to not fail with multiple arguments for a + shell command separated by spaces. 
(Xuan Gong via vinodkv) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java index 1928003119c..9c49bdcf678 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java @@ -224,6 +224,7 @@ public class ApplicationMaster { private final String log4jPath = "log4j.properties"; private final String shellCommandPath = "shellCommands"; + private final String shellArgsPath = "shellArgs"; private volatile boolean done; private volatile boolean success; @@ -309,7 +310,6 @@ public class ApplicationMaster { "App Attempt ID. Not to be used unless for testing purposes"); opts.addOption("shell_script", true, "Location of the shell script to be executed"); - opts.addOption("shell_args", true, "Command line args for the shell script"); opts.addOption("shell_env", true, "Environment for shell script. Specified as env_key=env_val pairs"); opts.addOption("container_memory", true, @@ -331,10 +331,10 @@ public class ApplicationMaster { } //Check whether customer log4j.properties file exists - File customerLog4jFile = new File(log4jPath); - if (customerLog4jFile.exists()) { + if (fileExist(log4jPath)) { try { - Log4jPropertyHelper.updateLog4jConfiguration(ApplicationMaster.class, log4jPath); + Log4jPropertyHelper.updateLog4jConfiguration(ApplicationMaster.class, + log4jPath); } catch (Exception e) { LOG.warn("Can not set up custom log4j properties. 
" + e); } @@ -387,24 +387,16 @@ public class ApplicationMaster { + appAttemptID.getApplicationId().getClusterTimestamp() + ", attemptId=" + appAttemptID.getAttemptId()); - File shellCommandFile = new File(shellCommandPath); - if (!shellCommandFile.exists()) { + if (!fileExist(shellCommandPath)) { throw new IllegalArgumentException( "No shell command specified to be executed by application master"); } - FileInputStream fs = null; - DataInputStream ds = null; - try { - ds = new DataInputStream(new FileInputStream(shellCommandFile)); - shellCommand = ds.readUTF(); - } finally { - org.apache.commons.io.IOUtils.closeQuietly(ds); - org.apache.commons.io.IOUtils.closeQuietly(fs); + shellCommand = readContent(shellCommandPath); + + if (fileExist(shellArgsPath)) { + shellArgs = readContent(shellArgsPath); } - if (cliParser.hasOption("shell_args")) { - shellArgs = cliParser.getOptionValue("shell_args"); - } if (cliParser.hasOption("shell_env")) { String shellEnvs[] = cliParser.getOptionValues("shell_env"); for (String env : shellEnvs) { @@ -922,4 +914,18 @@ public class ApplicationMaster { LOG.info("Requested container ask: " + request.toString()); return request; } + + private boolean fileExist(String filePath) { + return new File(filePath).exists(); + } + + private String readContent(String filePath) throws IOException { + DataInputStream ds = null; + try { + ds = new DataInputStream(new FileInputStream(filePath)); + return ds.readUTF(); + } finally { + org.apache.commons.io.IOUtils.closeQuietly(ds); + } + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java index 067db0c9da1..46d4d44377b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java @@ -30,9 +30,11 @@ import java.util.Vector; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.GnuParser; import org.apache.commons.cli.HelpFormatter; +import org.apache.commons.cli.Option; import org.apache.commons.cli.Options; import org.apache.commons.cli.ParseException; import org.apache.commons.io.IOUtils; +import org.apache.commons.lang.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; @@ -137,7 +139,7 @@ public class Client { // Location of shell script private String shellScriptPath = ""; // Args to be passed to the shell command - private String shellArgs = ""; + private String[] shellArgs = new String[] {}; // Env variables to be setup for the shell command private Map shellEnv = new HashMap(); // Shell Command Container priority @@ -166,6 +168,8 @@ public class Client { private Options opts; private final String shellCommandPath = "shellCommands"; + private final String shellArgsPath = "shellArgs"; + private final String appMasterJarPath = "AppMaster.jar"; // Hardcoded path to custom log_properties private final String log4jPath = "log4j.properties"; @@ -223,7 +227,9 @@ public class Client { 
opts.addOption("jar", true, "Jar file containing the application master"); opts.addOption("shell_command", true, "Shell command to be executed by the Application Master"); opts.addOption("shell_script", true, "Location of the shell script to be executed"); - opts.addOption("shell_args", true, "Command line args for the shell script"); + opts.addOption("shell_args", true, "Command line args for the shell script." + + "Multiple args can be separated by empty space."); + opts.getOption("shell_args").setArgs(Option.UNLIMITED_VALUES); opts.addOption("shell_env", true, "Environment for shell script. Specified as env_key=env_val pairs"); opts.addOption("shell_cmd_priority", true, "Priority for the shell command containers"); opts.addOption("container_memory", true, "Amount of memory in MB to be requested to run the shell command"); @@ -311,7 +317,7 @@ public class Client { shellScriptPath = cliParser.getOptionValue("shell_script"); } if (cliParser.hasOption("shell_args")) { - shellArgs = cliParser.getOptionValue("shell_args"); + shellArgs = cliParser.getOptionValues("shell_args"); } if (cliParser.hasOption("shell_env")) { String envs[] = cliParser.getOptionValues("shell_env"); @@ -440,43 +446,13 @@ public class Client { // Copy the application master jar to the filesystem // Create a local resource to point to the destination jar path FileSystem fs = FileSystem.get(conf); - Path src = new Path(appMasterJar); - String pathSuffix = appName + "/" + appId.getId() + "/AppMaster.jar"; - Path dst = new Path(fs.getHomeDirectory(), pathSuffix); - fs.copyFromLocalFile(false, true, src, dst); - FileStatus destStatus = fs.getFileStatus(dst); - LocalResource amJarRsrc = Records.newRecord(LocalResource.class); - - // Set the type of resource - file or archive - // archives are untarred at destination - // we don't need the jar file to be untarred for now - amJarRsrc.setType(LocalResourceType.FILE); - // Set visibility of the resource - // Setting to most private option - amJarRsrc.setVisibility(LocalResourceVisibility.APPLICATION); - // Set the resource to be copied over - amJarRsrc.setResource(ConverterUtils.getYarnUrlFromPath(dst)); - // Set timestamp and length of file so that the framework - // can do basic sanity checks for the local resource - // after it has been copied over to ensure it is the same - // resource the client intended to use with the application - amJarRsrc.setTimestamp(destStatus.getModificationTime()); - amJarRsrc.setSize(destStatus.getLen()); - localResources.put("AppMaster.jar", amJarRsrc); + addToLocalResources(fs, appMasterJar, appMasterJarPath, appId.getId(), + localResources, null); // Set the log4j properties if needed if (!log4jPropFile.isEmpty()) { - Path log4jSrc = new Path(log4jPropFile); - String log4jPathSuffix = appName + "/" + appId.getId() + "/" + log4jPath; - Path log4jDst = new Path(fs.getHomeDirectory(), log4jPathSuffix); - fs.copyFromLocalFile(false, true, log4jSrc, log4jDst); - FileStatus log4jFileStatus = fs.getFileStatus(log4jDst); - LocalResource log4jRsrc = - LocalResource.newInstance( - ConverterUtils.getYarnUrlFromURI(log4jDst.toUri()), - LocalResourceType.FILE, LocalResourceVisibility.APPLICATION, - log4jFileStatus.getLen(), log4jFileStatus.getModificationTime()); - localResources.put(log4jPath, log4jRsrc); + addToLocalResources(fs, log4jPropFile, log4jPath, appId.getId(), + localResources, null); } // The shell script has to be made available on the final container(s) @@ -500,25 +476,13 @@ public class Client { } if (!shellCommand.isEmpty()) { - String 
shellCommandSuffix = - appName + "/" + appId.getId() + "/" + shellCommandPath; - Path shellCommandDst = - new Path(fs.getHomeDirectory(), shellCommandSuffix); - FSDataOutputStream ostream = null; - try { - ostream = FileSystem - .create(fs, shellCommandDst, new FsPermission((short) 0710)); - ostream.writeUTF(shellCommand); - } finally { - IOUtils.closeQuietly(ostream); - } - FileStatus scFileStatus = fs.getFileStatus(shellCommandDst); - LocalResource scRsrc = - LocalResource.newInstance( - ConverterUtils.getYarnUrlFromURI(shellCommandDst.toUri()), - LocalResourceType.FILE, LocalResourceVisibility.APPLICATION, - scFileStatus.getLen(), scFileStatus.getModificationTime()); - localResources.put(shellCommandPath, scRsrc); + addToLocalResources(fs, null, shellCommandPath, appId.getId(), + localResources, shellCommand); + } + + if (shellArgs.length > 0) { + addToLocalResources(fs, null, shellArgsPath, appId.getId(), + localResources, StringUtils.join(shellArgs, " ")); } // Set local resource info into app master container launch context amContainer.setLocalResources(localResources); @@ -579,9 +543,6 @@ public class Client { vargs.add("--num_containers " + String.valueOf(numContainers)); vargs.add("--priority " + String.valueOf(shellCmdPriority)); - if (!shellArgs.isEmpty()) { - vargs.add("--shell_args " + shellArgs + ""); - } for (Map.Entry entry : shellEnv.entrySet()) { vargs.add("--shell_env " + entry.getKey() + "=" + entry.getValue()); } @@ -750,4 +711,31 @@ public class Client { yarnClient.killApplication(appId); } + private void addToLocalResources(FileSystem fs, String fileSrcPath, + String fileDstPath, int appId, Map localResources, + String resources) throws IOException { + String suffix = + appName + "/" + appId + "/" + fileDstPath; + Path dst = + new Path(fs.getHomeDirectory(), suffix); + if (fileSrcPath == null) { + FSDataOutputStream ostream = null; + try { + ostream = FileSystem + .create(fs, dst, new FsPermission((short) 0710)); + ostream.writeUTF(resources); + } finally { + IOUtils.closeQuietly(ostream); + } + } else { + fs.copyFromLocalFile(new Path(fileSrcPath), dst); + } + FileStatus scFileStatus = fs.getFileStatus(dst); + LocalResource scRsrc = + LocalResource.newInstance( + ConverterUtils.getYarnUrlFromURI(dst.toUri()), + LocalResourceType.FILE, LocalResourceVisibility.APPLICATION, + scFileStatus.getLen(), scFileStatus.getModificationTime()); + localResources.put(fileDstPath, scRsrc); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java index 9df580d3f90..a11c805ca8b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java @@ -269,6 +269,40 @@ public class TestDistributedShell { verifyContainerLog(2, expectedContent, false, ""); } + @Test(timeout=90000) + public void testDSShellWithMultipleArgs() throws Exception { + String[] args = { + "--jar", + APPMASTER_JAR, + "--num_containers", + "4", + 
"--shell_command", + "echo", + "--shell_args", + "HADOOP YARN MAPREDUCE HDFS", + "--master_memory", + "512", + "--master_vcores", + "2", + "--container_memory", + "128", + "--container_vcores", + "1" + }; + + LOG.info("Initializing DS Client"); + final Client client = + new Client(new Configuration(yarnCluster.getConfig())); + boolean initSuccess = client.init(args); + Assert.assertTrue(initSuccess); + LOG.info("Running DS Client"); + boolean result = client.run(); + LOG.info("Client run completed. Result=" + result); + List expectedContent = new ArrayList(); + expectedContent.add("HADOOP YARN MAPREDUCE HDFS"); + verifyContainerLog(4, expectedContent, false, ""); + } + @Test(timeout=90000) public void testDSShellWithInvalidArgs() throws Exception { Client client = new Client(new Configuration(yarnCluster.getConfig())); From d8a23834614581a292aad214dddcbcc4bbe86d27 Mon Sep 17 00:00:00 2001 From: Jing Zhao Date: Tue, 26 Nov 2013 01:16:51 +0000 Subject: [PATCH 07/27] HDFS-5538. URLConnectionFactory should pick up the SSL related configuration by default. Contributed by Haohui Mai. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1545491 13f79535-47bb-0310-9956-ffa450edef68 --- .../apache/hadoop/security/SecurityUtil.java | 75 ++----------------- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + .../qjournal/client/QuorumJournalManager.java | 8 +- .../namenode/EditLogFileInputStream.java | 39 +++++++--- .../hdfs/server/namenode/TransferFsImage.java | 22 +++++- .../org/apache/hadoop/hdfs/tools/DFSck.java | 22 +++++- .../hdfs/tools/DelegationTokenFetcher.java | 2 +- .../hadoop/hdfs/web/HftpFileSystem.java | 9 ++- .../hadoop/hdfs/web/HsftpFileSystem.java | 13 +--- .../hadoop/hdfs/web/SWebHdfsFileSystem.java | 18 ----- .../hadoop/hdfs/web/URLConnectionFactory.java | 58 ++++++++------ .../hadoop/hdfs/web/WebHdfsFileSystem.java | 15 +--- .../namenode/TestEditLogFileInputStream.java | 73 +++++++----------- .../hdfs/web/TestURLConnectionFactory.java | 5 +- .../hadoop/hdfs/web/TestWebHdfsTimeouts.java | 14 +++- .../TestDelegationTokenRemoteFetcher.java | 2 +- 16 files changed, 169 insertions(+), 209 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java index 416a442f127..623c3ebbdfb 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java @@ -22,19 +22,14 @@ import java.io.IOException; import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.URI; -import java.net.URL; -import java.net.URLConnection; import java.net.UnknownHostException; -import java.security.AccessController; import java.security.PrivilegedAction; import java.security.PrivilegedExceptionAction; import java.util.Arrays; import java.util.List; import java.util.Locale; import java.util.ServiceLoader; -import java.util.Set; -import javax.security.auth.Subject; import javax.security.auth.kerberos.KerberosPrincipal; import javax.security.auth.kerberos.KerberosTicket; @@ -44,22 +39,19 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; -import org.apache.hadoop.http.HttpConfig; import org.apache.hadoop.io.Text; import 
org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; -import org.apache.hadoop.security.authentication.client.AuthenticatedURL; -import org.apache.hadoop.security.authentication.client.AuthenticationException; -import org.apache.hadoop.security.ssl.SSLFactory; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenInfo; -import com.google.common.annotations.VisibleForTesting; //this will need to be replaced someday when there is a suitable replacement import sun.net.dns.ResolverConfiguration; import sun.net.util.IPAddressUtil; +import com.google.common.annotations.VisibleForTesting; + @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"}) @InterfaceStability.Evolving public class SecurityUtil { @@ -73,24 +65,14 @@ public class SecurityUtil { @VisibleForTesting static HostResolver hostResolver; - private static SSLFactory sslFactory; - static { Configuration conf = new Configuration(); boolean useIp = conf.getBoolean( - CommonConfigurationKeys.HADOOP_SECURITY_TOKEN_SERVICE_USE_IP, - CommonConfigurationKeys.HADOOP_SECURITY_TOKEN_SERVICE_USE_IP_DEFAULT); + CommonConfigurationKeys.HADOOP_SECURITY_TOKEN_SERVICE_USE_IP, + CommonConfigurationKeys.HADOOP_SECURITY_TOKEN_SERVICE_USE_IP_DEFAULT); setTokenServiceUseIp(useIp); - if (HttpConfig.isSecure()) { - sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, conf); - try { - sslFactory.init(); - } catch (Exception ex) { - throw new RuntimeException(ex); - } - } } - + /** * For use only by tests and initialization */ @@ -102,29 +84,6 @@ public class SecurityUtil { : new StandardHostResolver(); } - /** - * Find the original TGT within the current subject's credentials. Cross-realm - * TGT's of the form "krbtgt/TWO.COM@ONE.COM" may be present. - * - * @return The TGT from the current subject - * @throws IOException - * if TGT can't be found - */ - private static KerberosTicket getTgtFromSubject() throws IOException { - Subject current = Subject.getSubject(AccessController.getContext()); - if (current == null) { - throw new IOException( - "Can't get TGT from current Subject, because it is null"); - } - Set tickets = current - .getPrivateCredentials(KerberosTicket.class); - for (KerberosTicket t : tickets) { - if (isOriginalTGT(t)) - return t; - } - throw new IOException("Failed to find TGT from current Subject:"+current); - } - /** * TGS must have the server principal of the form "krbtgt/FOO@FOO". * @param principal @@ -492,30 +451,6 @@ public class SecurityUtil { } } - /** - * Open a (if need be) secure connection to a URL in a secure environment - * that is using SPNEGO to authenticate its URLs. All Namenode and Secondary - * Namenode URLs that are protected via SPNEGO should be accessed via this - * method. - * - * @param url to authenticate via SPNEGO. 
- * @return A connection that has been authenticated via SPNEGO - * @throws IOException If unable to authenticate via SPNEGO - */ - public static URLConnection openSecureHttpConnection(URL url) throws IOException { - if (!HttpConfig.isSecure() && !UserGroupInformation.isSecurityEnabled()) { - return url.openConnection(); - } - - AuthenticatedURL.Token token = new AuthenticatedURL.Token(); - try { - return new AuthenticatedURL(null, sslFactory).openConnection(url, token); - } catch (AuthenticationException e) { - throw new IOException("Exception trying to open authenticated connection to " - + url, e); - } - } - /** * Resolves a host subject to the security requirements determined by * hadoop.security.token.service.use_ip. diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 335c67ae597..ac53b602ec3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -212,6 +212,9 @@ Trunk (Unreleased) and INodeFileUnderConstructionWithSnapshot with FileUnderContructionFeature. (jing9 via szetszwo) + HDFS-5538. URLConnectionFactory should pick up the SSL related configuration + by default. (Haohui Mai via jing9) + OPTIMIZATIONS HDFS-5349. DNA_CACHE and DNA_UNCACHE should be by blockId only. (cmccabe) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java index 4f1b96b6f42..e0fcb53ba0f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java @@ -46,6 +46,7 @@ import org.apache.hadoop.hdfs.server.namenode.JournalSet; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog; import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest; +import org.apache.hadoop.hdfs.web.URLConnectionFactory; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.util.StringUtils; @@ -87,6 +88,7 @@ public class QuorumJournalManager implements JournalManager { private final AsyncLoggerSet loggers; private int outputBufferCapacity = 512 * 1024; + private final URLConnectionFactory connectionFactory; public QuorumJournalManager(Configuration conf, URI uri, NamespaceInfo nsInfo) throws IOException { @@ -102,6 +104,8 @@ public class QuorumJournalManager implements JournalManager { this.uri = uri; this.nsInfo = nsInfo; this.loggers = new AsyncLoggerSet(createLoggers(loggerFactory)); + this.connectionFactory = URLConnectionFactory + .newDefaultURLConnectionFactory(conf); // Configure timeouts. 
this.startSegmentTimeoutMs = conf.getInt( @@ -475,8 +479,8 @@ public class QuorumJournalManager implements JournalManager { URL url = logger.buildURLToFetchLogs(remoteLog.getStartTxId()); EditLogInputStream elis = EditLogFileInputStream.fromUrl( - url, remoteLog.getStartTxId(), remoteLog.getEndTxId(), - remoteLog.isInProgress()); + connectionFactory, url, remoteLog.getStartTxId(), + remoteLog.getEndTxId(), remoteLog.isInProgress()); allStreams.add(elis); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java index 1ee581cde9f..ab1634b0c96 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java @@ -36,8 +36,11 @@ import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.namenode.TransferFsImage.HttpGetFailedException; +import org.apache.hadoop.hdfs.web.URLConnectionFactory; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.security.SecurityUtil; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.authentication.client.AuthenticationException; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; @@ -100,15 +103,22 @@ public class EditLogFileInputStream extends EditLogInputStream { /** * Open an EditLogInputStream for the given URL. * - * @param url the url hosting the log - * @param startTxId the expected starting txid - * @param endTxId the expected ending txid - * @param inProgress whether the log is in-progress + * @param connectionFactory + * the URLConnectionFactory used to create the connection. 
+ * @param url + * the url hosting the log + * @param startTxId + * the expected starting txid + * @param endTxId + * the expected ending txid + * @param inProgress + * whether the log is in-progress * @return a stream from which edits may be read */ - public static EditLogInputStream fromUrl(URL url, long startTxId, - long endTxId, boolean inProgress) { - return new EditLogFileInputStream(new URLLog(url), + public static EditLogInputStream fromUrl( + URLConnectionFactory connectionFactory, URL url, long startTxId, + long endTxId, boolean inProgress) { + return new EditLogFileInputStream(new URLLog(connectionFactory, url), startTxId, endTxId, inProgress); } @@ -365,8 +375,12 @@ public class EditLogFileInputStream extends EditLogInputStream { private long advertisedSize = -1; private final static String CONTENT_LENGTH = "Content-Length"; + private final URLConnectionFactory connectionFactory; + private final boolean isSpnegoEnabled; - public URLLog(URL url) { + public URLLog(URLConnectionFactory connectionFactory, URL url) { + this.connectionFactory = connectionFactory; + this.isSpnegoEnabled = UserGroupInformation.isSecurityEnabled(); this.url = url; } @@ -376,8 +390,13 @@ public class EditLogFileInputStream extends EditLogInputStream { new PrivilegedExceptionAction() { @Override public InputStream run() throws IOException { - HttpURLConnection connection = (HttpURLConnection) - SecurityUtil.openSecureHttpConnection(url); + HttpURLConnection connection; + try { + connection = (HttpURLConnection) + connectionFactory.openConnection(url, isSpnegoEnabled); + } catch (AuthenticationException e) { + throw new IOException(e); + } if (connection.getResponseCode() != HttpURLConnection.HTTP_OK) { throw new HttpGetFailedException( diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java index 8788d7570fc..59ef97dd027 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java @@ -35,7 +35,8 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.http.HttpConfig; -import org.apache.hadoop.security.SecurityUtil; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.apache.hadoop.util.Time; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.HdfsConfiguration; @@ -46,6 +47,7 @@ import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType; import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog; import org.apache.hadoop.hdfs.util.DataTransferThrottler; +import org.apache.hadoop.hdfs.web.URLConnectionFactory; import org.apache.hadoop.io.MD5Hash; import com.google.common.annotations.VisibleForTesting; @@ -62,6 +64,15 @@ public class TransferFsImage { public final static String MD5_HEADER = "X-MD5-Digest"; @VisibleForTesting static int timeout = 0; + private static URLConnectionFactory connectionFactory; + private static boolean isSpnegoEnabled; + + static { + Configuration conf = new Configuration(); + connectionFactory = URLConnectionFactory + 
.newDefaultURLConnectionFactory(conf); + isSpnegoEnabled = UserGroupInformation.isSecurityEnabled(); + } private static final Log LOG = LogFactory.getLog(TransferFsImage.class); @@ -250,8 +261,13 @@ public class TransferFsImage { public static MD5Hash doGetUrl(URL url, List localPaths, Storage dstStorage, boolean getChecksum) throws IOException { long startTime = Time.monotonicNow(); - HttpURLConnection connection = (HttpURLConnection) - SecurityUtil.openSecureHttpConnection(url); + HttpURLConnection connection; + try { + connection = (HttpURLConnection) + connectionFactory.openConnection(url, isSpnegoEnabled); + } catch (AuthenticationException e) { + throw new IOException(e); + } if (timeout <= 0) { Configuration conf = new HdfsConfiguration(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java index c3238f0de30..3b846c3a397 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java @@ -36,9 +36,10 @@ import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.HAUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.server.namenode.NamenodeFsck; +import org.apache.hadoop.hdfs.web.URLConnectionFactory; import org.apache.hadoop.http.HttpConfig; -import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; @@ -94,6 +95,8 @@ public class DFSck extends Configured implements Tool { private final UserGroupInformation ugi; private final PrintStream out; + private final URLConnectionFactory connectionFactory; + private final boolean isSpnegoEnabled; /** * Filesystem checker. 
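[Editor's note - illustration only, not part of the HDFS-5538 patch.] The hunks above and below all follow the same pattern: instead of calling the removed SecurityUtil.openSecureHttpConnection(), a caller builds a URLConnectionFactory from its Configuration (so SSL-related settings are picked up when present), asks UserGroupInformation whether security is enabled, and wraps any AuthenticationException in an IOException. A minimal sketch of that pattern, assuming the factory methods shown in this patch; the helper class name is hypothetical:

import java.io.IOException;
import java.io.InputStream;
import java.net.URL;
import java.net.URLConnection;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.web.URLConnectionFactory;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.AuthenticationException;

// Hypothetical helper, for illustration only.
class SpnegoUrlOpener {
  static InputStream open(Configuration conf, URL url) throws IOException {
    // The factory picks up SSL-related configuration by default (the point of HDFS-5538).
    URLConnectionFactory factory =
        URLConnectionFactory.newDefaultURLConnectionFactory(conf);
    boolean isSpnegoEnabled = UserGroupInformation.isSecurityEnabled();
    URLConnection connection;
    try {
      connection = factory.openConnection(url, isSpnegoEnabled);
    } catch (AuthenticationException e) {
      // Same wrapping as DFSck and TransferFsImage in the hunks above.
      throw new IOException(e);
    }
    return connection.getInputStream();
  }
}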
@@ -107,6 +110,9 @@ public class DFSck extends Configured implements Tool { super(conf); this.ugi = UserGroupInformation.getCurrentUser(); this.out = out; + this.connectionFactory = URLConnectionFactory + .newDefaultURLConnectionFactory(conf); + this.isSpnegoEnabled = UserGroupInformation.isSecurityEnabled(); } /** @@ -158,7 +164,12 @@ public class DFSck extends Configured implements Tool { url.append("&startblockafter=").append(String.valueOf(cookie)); } URL path = new URL(url.toString()); - URLConnection connection = SecurityUtil.openSecureHttpConnection(path); + URLConnection connection; + try { + connection = connectionFactory.openConnection(path, isSpnegoEnabled); + } catch (AuthenticationException e) { + throw new IOException(e); + } InputStream stream = connection.getInputStream(); BufferedReader input = new BufferedReader(new InputStreamReader( stream, "UTF-8")); @@ -278,7 +289,12 @@ public class DFSck extends Configured implements Tool { return listCorruptFileBlocks(dir, url.toString()); } URL path = new URL(url.toString()); - URLConnection connection = SecurityUtil.openSecureHttpConnection(path); + URLConnection connection; + try { + connection = connectionFactory.openConnection(path, isSpnegoEnabled); + } catch (AuthenticationException e) { + throw new IOException(e); + } InputStream stream = connection.getInputStream(); BufferedReader input = new BufferedReader(new InputStreamReader( stream, "UTF-8")); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java index d1e50cfebc1..bc76cc07879 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java @@ -145,7 +145,7 @@ public class DelegationTokenFetcher { // default to using the local file system FileSystem local = FileSystem.getLocal(conf); final Path tokenFile = new Path(local.getWorkingDirectory(), remaining[0]); - final URLConnectionFactory connectionFactory = URLConnectionFactory.DEFAULT_CONNECTION_FACTORY; + final URLConnectionFactory connectionFactory = URLConnectionFactory.DEFAULT_SYSTEM_CONNECTION_FACTORY; // Login the current user UserGroupInformation.getCurrentUser().doAs( diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/HftpFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/HftpFileSystem.java index ff882119e1a..076b4b51d86 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/HftpFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/HftpFileSystem.java @@ -176,10 +176,9 @@ public class HftpFileSystem extends FileSystem * Initialize connectionFactory and tokenAspect. This function is intended to * be overridden by HsFtpFileSystem. 
*/ - protected void initConnectionFactoryAndTokenAspect(Configuration conf) + protected void initTokenAspect(Configuration conf) throws IOException { tokenAspect = new TokenAspect(this, TOKEN_KIND); - connectionFactory = URLConnectionFactory.DEFAULT_CONNECTION_FACTORY; } @Override @@ -187,6 +186,8 @@ public class HftpFileSystem extends FileSystem throws IOException { super.initialize(name, conf); setConf(conf); + this.connectionFactory = URLConnectionFactory + .newDefaultURLConnectionFactory(conf); this.ugi = UserGroupInformation.getCurrentUser(); this.nnUri = getNamenodeUri(name); @@ -197,7 +198,7 @@ public class HftpFileSystem extends FileSystem throw new IllegalArgumentException(e); } - initConnectionFactoryAndTokenAspect(conf); + initTokenAspect(conf); if (UserGroupInformation.isSecurityEnabled()) { tokenAspect.initDelegationToken(ugi); } @@ -338,7 +339,7 @@ public class HftpFileSystem extends FileSystem } static class RangeHeaderUrlOpener extends ByteRangeInputStream.URLOpener { - URLConnectionFactory connectionFactory = URLConnectionFactory.DEFAULT_CONNECTION_FACTORY; + URLConnectionFactory connectionFactory = URLConnectionFactory.DEFAULT_SYSTEM_CONNECTION_FACTORY; RangeHeaderUrlOpener(final URL url) { super(url); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/HsftpFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/HsftpFileSystem.java index e64becd1813..3393fc5c180 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/HsftpFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/HsftpFileSystem.java @@ -19,7 +19,6 @@ package org.apache.hadoop.hdfs.web; import java.io.IOException; -import java.security.GeneralSecurityException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -61,18 +60,8 @@ public class HsftpFileSystem extends HftpFileSystem { } @Override - protected void initConnectionFactoryAndTokenAspect(Configuration conf) throws IOException { + protected void initTokenAspect(Configuration conf) throws IOException { tokenAspect = new TokenAspect(this, TOKEN_KIND); - - connectionFactory = new URLConnectionFactory( - URLConnectionFactory.DEFAULT_SOCKET_TIMEOUT); - try { - connectionFactory.setConnConfigurator(URLConnectionFactory - .newSslConnConfigurator(URLConnectionFactory.DEFAULT_SOCKET_TIMEOUT, - conf)); - } catch (GeneralSecurityException e) { - throw new IOException(e); - } } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/SWebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/SWebHdfsFileSystem.java index bce7b7c6cdb..ef603061d3a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/SWebHdfsFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/SWebHdfsFileSystem.java @@ -17,10 +17,6 @@ */ package org.apache.hadoop.hdfs.web; -import java.io.IOException; -import java.security.GeneralSecurityException; - -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.io.Text; @@ -44,20 +40,6 @@ public class SWebHdfsFileSystem extends WebHdfsFileSystem { tokenAspect = new TokenAspect(this, TOKEN_KIND); } - @Override - protected void initializeConnectionFactory(Configuration conf) - throws IOException { - connectionFactory = new 
URLConnectionFactory( - URLConnectionFactory.DEFAULT_SOCKET_TIMEOUT); - try { - connectionFactory.setConnConfigurator(URLConnectionFactory - .newSslConnConfigurator(URLConnectionFactory.DEFAULT_SOCKET_TIMEOUT, - conf)); - } catch (GeneralSecurityException e) { - throw new IOException(e); - } - } - @Override protected int getDefaultPort() { return getConf().getInt(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java index 9418c1aef4f..00e9e982bca 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java @@ -39,6 +39,8 @@ import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.apache.hadoop.security.authentication.client.ConnectionConfigurator; import org.apache.hadoop.security.ssl.SSLFactory; +import com.google.common.annotations.VisibleForTesting; + /** * Utilities for handling URLs */ @@ -54,26 +56,50 @@ public class URLConnectionFactory { * Timeout for socket connects and reads */ public final static int DEFAULT_SOCKET_TIMEOUT = 1 * 60 * 1000; // 1 minute + private final ConnectionConfigurator connConfigurator; - public static final URLConnectionFactory DEFAULT_CONNECTION_FACTORY = new URLConnectionFactory( - DEFAULT_SOCKET_TIMEOUT); - - private int socketTimeout; - - /** Configure connections for AuthenticatedURL */ - private ConnectionConfigurator connConfigurator = new ConnectionConfigurator() { + private static final ConnectionConfigurator DEFAULT_TIMEOUT_CONN_CONFIGURATOR = new ConnectionConfigurator() { @Override public HttpURLConnection configure(HttpURLConnection conn) throws IOException { - URLConnectionFactory.setTimeouts(conn, socketTimeout); + URLConnectionFactory.setTimeouts(conn, DEFAULT_SOCKET_TIMEOUT); return conn; } }; + /** + * The URLConnectionFactory that sets the default timeout and it only trusts + * Java's SSL certificates. + */ + public static final URLConnectionFactory DEFAULT_SYSTEM_CONNECTION_FACTORY = new URLConnectionFactory( + DEFAULT_TIMEOUT_CONN_CONFIGURATOR); + + /** + * Construct a new URLConnectionFactory based on the configuration. It will + * try to load SSL certificates when it is specified. + */ + public static URLConnectionFactory newDefaultURLConnectionFactory(Configuration conf) { + ConnectionConfigurator conn = null; + try { + conn = newSslConnConfigurator(DEFAULT_SOCKET_TIMEOUT, conf); + } catch (Exception e) { + LOG.debug( + "Cannot load customized ssl related configuration. 
Fallback to system-generic settings.", + e); + conn = DEFAULT_TIMEOUT_CONN_CONFIGURATOR; + } + return new URLConnectionFactory(conn); + } + + @VisibleForTesting + URLConnectionFactory(ConnectionConfigurator connConfigurator) { + this.connConfigurator = connConfigurator; + } + /** * Create a new ConnectionConfigurator for SSL connections */ - static ConnectionConfigurator newSslConnConfigurator(final int timeout, + private static ConnectionConfigurator newSslConnConfigurator(final int timeout, Configuration conf) throws IOException, GeneralSecurityException { final SSLFactory factory; final SSLSocketFactory sf; @@ -99,10 +125,6 @@ public class URLConnectionFactory { }; } - public URLConnectionFactory(int socketTimeout) { - this.socketTimeout = socketTimeout; - } - /** * Opens a url with read and connect timeouts * @@ -153,14 +175,6 @@ public class URLConnectionFactory { } } - public ConnectionConfigurator getConnConfigurator() { - return connConfigurator; - } - - public void setConnConfigurator(ConnectionConfigurator connConfigurator) { - this.connConfigurator = connConfigurator; - } - /** * Sets timeout parameters on the given URLConnection. * @@ -169,7 +183,7 @@ public class URLConnectionFactory { * @param socketTimeout * the connection and read timeout of the connection. */ - static void setTimeouts(URLConnection connection, int socketTimeout) { + private static void setTimeouts(URLConnection connection, int socketTimeout) { connection.setConnectTimeout(socketTimeout); connection.setReadTimeout(socketTimeout); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java index b5c21699634..ce4531c55ef 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java @@ -112,7 +112,7 @@ public class WebHdfsFileSystem extends FileSystem public static final String PATH_PREFIX = "/" + SCHEME + "/v" + VERSION; /** Default connection factory may be overridden in tests to use smaller timeout values */ - URLConnectionFactory connectionFactory = URLConnectionFactory.DEFAULT_CONNECTION_FACTORY; + protected URLConnectionFactory connectionFactory; /** Delegation token kind */ public static final Text TOKEN_KIND = new Text("WEBHDFS delegation"); @@ -152,22 +152,15 @@ public class WebHdfsFileSystem extends FileSystem tokenAspect = new TokenAspect(this, TOKEN_KIND); } - /** - * Initialize connectionFactory. This function is intended to - * be overridden by SWebHdfsFileSystem. 
- */ - protected void initializeConnectionFactory(Configuration conf) - throws IOException { - connectionFactory = URLConnectionFactory.DEFAULT_CONNECTION_FACTORY; - } - @Override public synchronized void initialize(URI uri, Configuration conf ) throws IOException { super.initialize(uri, conf); setConf(conf); + connectionFactory = URLConnectionFactory + .newDefaultURLConnectionFactory(conf); initializeTokenAspect(); - initializeConnectionFactory(conf); + ugi = UserGroupInformation.getCurrentUser(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileInputStream.java index c3497064c8a..c3d29971361 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileInputStream.java @@ -20,64 +20,47 @@ package org.apache.hadoop.hdfs.server.namenode; import static org.hamcrest.CoreMatchers.is; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertThat; +import static org.mockito.Matchers.anyBoolean; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.mock; -import java.io.IOException; -import java.io.OutputStream; +import java.io.ByteArrayInputStream; +import java.net.HttpURLConnection; import java.net.URL; import java.util.EnumMap; -import javax.servlet.ServletException; -import javax.servlet.http.HttpServlet; -import javax.servlet.http.HttpServletRequest; -import javax.servlet.http.HttpServletResponse; - import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.util.Holder; -import org.apache.hadoop.http.HttpServer; +import org.apache.hadoop.hdfs.web.URLConnectionFactory; import org.junit.Test; +import org.mockito.Mockito; public class TestEditLogFileInputStream { private static final byte[] FAKE_LOG_DATA = TestEditLog.HADOOP20_SOME_EDITS; @Test public void testReadURL() throws Exception { - // Start a simple web server which hosts the log data. - HttpServer server = new HttpServer.Builder().setName("test") - .setBindAddress("0.0.0.0").setPort(0).setFindPort(true).build(); - server.start(); - try { - server.addServlet("fakeLog", "/fakeLog", FakeLogServlet.class); - URL url = new URL("http://localhost:" + server.getPort() + "/fakeLog"); - EditLogInputStream elis = EditLogFileInputStream.fromUrl( - url, HdfsConstants.INVALID_TXID, HdfsConstants.INVALID_TXID, - false); - // Read the edit log and verify that we got all of the data. - EnumMap> counts = - FSImageTestUtil.countEditLogOpTypes(elis); - assertThat(counts.get(FSEditLogOpCodes.OP_ADD).held, is(1)); - assertThat(counts.get(FSEditLogOpCodes.OP_SET_GENSTAMP_V1).held, is(1)); - assertThat(counts.get(FSEditLogOpCodes.OP_CLOSE).held, is(1)); + HttpURLConnection conn = mock(HttpURLConnection.class); + doReturn(new ByteArrayInputStream(FAKE_LOG_DATA)).when(conn).getInputStream(); + doReturn(HttpURLConnection.HTTP_OK).when(conn).getResponseCode(); + doReturn(Integer.toString(FAKE_LOG_DATA.length)).when(conn).getHeaderField("Content-Length"); - // Check that length header was picked up. - assertEquals(FAKE_LOG_DATA.length, elis.length()); - elis.close(); - } finally { - server.stop(); - } + URLConnectionFactory factory = mock(URLConnectionFactory.class); + doReturn(conn).when(factory).openConnection(Mockito. 
any(), + anyBoolean()); + + URL url = new URL("http://localhost/fakeLog"); + EditLogInputStream elis = EditLogFileInputStream.fromUrl(factory, url, + HdfsConstants.INVALID_TXID, HdfsConstants.INVALID_TXID, false); + // Read the edit log and verify that we got all of the data. + EnumMap> counts = FSImageTestUtil + .countEditLogOpTypes(elis); + assertThat(counts.get(FSEditLogOpCodes.OP_ADD).held, is(1)); + assertThat(counts.get(FSEditLogOpCodes.OP_SET_GENSTAMP_V1).held, is(1)); + assertThat(counts.get(FSEditLogOpCodes.OP_CLOSE).held, is(1)); + + // Check that length header was picked up. + assertEquals(FAKE_LOG_DATA.length, elis.length()); + elis.close(); } - - @SuppressWarnings("serial") - public static class FakeLogServlet extends HttpServlet { - @Override - public void doGet(HttpServletRequest request, - HttpServletResponse response - ) throws ServletException, IOException { - response.setHeader("Content-Length", - String.valueOf(FAKE_LOG_DATA.length)); - OutputStream out = response.getOutputStream(); - out.write(FAKE_LOG_DATA); - out.close(); - } - } - } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java index e8593b62163..997e9ca90a0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java @@ -34,10 +34,7 @@ public final class TestURLConnectionFactory { public void testConnConfiguratior() throws IOException { final URL u = new URL("http://localhost"); final List conns = Lists.newArrayList(); - URLConnectionFactory fc = new URLConnectionFactory( - URLConnectionFactory.DEFAULT_SOCKET_TIMEOUT); - - fc.setConnConfigurator(new ConnectionConfigurator() { + URLConnectionFactory fc = new URLConnectionFactory(new ConnectionConfigurator() { @Override public HttpURLConnection configure(HttpURLConnection conn) throws IOException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java index 41e0b928689..63786930aec 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java @@ -26,6 +26,7 @@ import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; import java.io.OutputStream; +import java.net.HttpURLConnection; import java.net.InetSocketAddress; import java.net.ServerSocket; import java.net.Socket; @@ -41,6 +42,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.security.authentication.client.ConnectionConfigurator; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -66,7 +68,14 @@ public class TestWebHdfsTimeouts { private InetSocketAddress nnHttpAddress; private ServerSocket serverSocket; private Thread serverThread; - private URLConnectionFactory connectionFactory = new URLConnectionFactory(SHORT_SOCKET_TIMEOUT); + private URLConnectionFactory connectionFactory = new URLConnectionFactory(new ConnectionConfigurator() { + @Override + public HttpURLConnection 
configure(HttpURLConnection conn) throws IOException { + conn.setReadTimeout(SHORT_SOCKET_TIMEOUT); + conn.setConnectTimeout(SHORT_SOCKET_TIMEOUT); + return conn; + } + }); @Before public void setUp() throws Exception { @@ -82,7 +91,6 @@ public class TestWebHdfsTimeouts { @After public void tearDown() throws Exception { - fs.connectionFactory = URLConnectionFactory.DEFAULT_CONNECTION_FACTORY; IOUtils.cleanup(LOG, clients.toArray(new SocketChannel[clients.size()])); IOUtils.cleanup(LOG, fs); if (serverSocket != null) { @@ -242,7 +250,7 @@ public class TestWebHdfsTimeouts { */ private void startSingleTemporaryRedirectResponseThread( final boolean consumeConnectionBacklog) { - fs.connectionFactory = URLConnectionFactory.DEFAULT_CONNECTION_FACTORY; + fs.connectionFactory = URLConnectionFactory.DEFAULT_SYSTEM_CONNECTION_FACTORY; serverThread = new Thread() { @Override public void run() { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestDelegationTokenRemoteFetcher.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestDelegationTokenRemoteFetcher.java index 2b090d27170..e424681177a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestDelegationTokenRemoteFetcher.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestDelegationTokenRemoteFetcher.java @@ -83,7 +83,7 @@ public class TestDelegationTokenRemoteFetcher { private static final String EXP_DATE = "124123512361236"; private static final String tokenFile = "http.file.dta"; - private static final URLConnectionFactory connectionFactory = URLConnectionFactory.DEFAULT_CONNECTION_FACTORY; + private static final URLConnectionFactory connectionFactory = URLConnectionFactory.DEFAULT_SYSTEM_CONNECTION_FACTORY; private int httpPort; private URI serviceUrl; From 4a1acfc96fb7d418ff3fe538a3942834948f6d1c Mon Sep 17 00:00:00 2001 From: Brandon Li Date: Tue, 26 Nov 2013 18:13:04 +0000 Subject: [PATCH 08/27] HDFS-5548. Use ConcurrentHashMap in portmap. Contributed by Haohui Mai git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1545756 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop/portmap/PortmapInterface.java | 95 --------------- .../apache/hadoop/portmap/PortmapRequest.java | 3 +- .../hadoop/portmap/RpcProgramPortmap.java | 108 +++++++++++------- .../apache/hadoop/portmap/TestPortmap.java | 6 +- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 + 5 files changed, 72 insertions(+), 142 deletions(-) delete mode 100644 hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/PortmapInterface.java diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/PortmapInterface.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/PortmapInterface.java deleted file mode 100644 index ae968cb046d..00000000000 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/PortmapInterface.java +++ /dev/null @@ -1,95 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.portmap; - -import org.apache.hadoop.oncrpc.XDR; - -/** - * Methods that need to be implemented to provide Portmap RPC program. - * See RFC 1833 for details. - */ -public interface PortmapInterface { - public enum Procedure { - // the order of the values below are significant. - PMAPPROC_NULL, - PMAPPROC_SET, - PMAPPROC_UNSET, - PMAPPROC_GETPORT, - PMAPPROC_DUMP, - PMAPPROC_CALLIT, - PMAPPROC_GETTIME, - PMAPPROC_UADDR2TADDR, - PMAPPROC_TADDR2UADDR, - PMAPPROC_GETVERSADDR, - PMAPPROC_INDIRECT, - PMAPPROC_GETADDRLIST, - PMAPPROC_GETSTAT; - - public int getValue() { - return ordinal(); - } - - public static Procedure fromValue(int value) { - if (value < 0 || value >= values().length) { - return null; - } - return values()[value]; - } - } - - /** - * This procedure does no work. By convention, procedure zero of any protocol - * takes no parameters and returns no results. - */ - public XDR nullOp(int xidd, XDR in, XDR out); - - /** - * When a program first becomes available on a machine, it registers itself - * with the port mapper program on the same machine. The program passes its - * program number "prog", version number "vers", transport protocol number - * "prot", and the port "port" on which it awaits service request. The - * procedure returns a boolean reply whose value is "TRUE" if the procedure - * successfully established the mapping and "FALSE" otherwise. The procedure - * refuses to establish a mapping if one already exists for the tuple - * "(prog, vers, prot)". - */ - public XDR set(int xid, XDR in, XDR out); - - /** - * When a program becomes unavailable, it should unregister itself with the - * port mapper program on the same machine. The parameters and results have - * meanings identical to those of "PMAPPROC_SET". The protocol and port number - * fields of the argument are ignored. - */ - public XDR unset(int xid, XDR in, XDR out); - - /** - * Given a program number "prog", version number "vers", and transport - * protocol number "prot", this procedure returns the port number on which the - * program is awaiting call requests. A port value of zeros means the program - * has not been registered. The "port" field of the argument is ignored. - */ - public XDR getport(int xid, XDR in, XDR out); - - /** - * This procedure enumerates all entries in the port mapper's database. The - * procedure takes no parameters and returns a list of program, version, - * protocol, and port values. 
- */ - public XDR dump(int xid, XDR in, XDR out); -} diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/PortmapRequest.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/PortmapRequest.java index 943b4abc5c5..2932c78237f 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/PortmapRequest.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/PortmapRequest.java @@ -22,7 +22,6 @@ import org.apache.hadoop.oncrpc.RpcUtil; import org.apache.hadoop.oncrpc.XDR; import org.apache.hadoop.oncrpc.security.CredentialsNone; import org.apache.hadoop.oncrpc.security.VerifierNone; -import org.apache.hadoop.portmap.PortmapInterface.Procedure; /** * Helper utility for building portmap request @@ -37,7 +36,7 @@ public class PortmapRequest { RpcCall call = RpcCall.getInstance( RpcUtil.getNewXid(String.valueOf(RpcProgramPortmap.PROGRAM)), RpcProgramPortmap.PROGRAM, RpcProgramPortmap.VERSION, - Procedure.PMAPPROC_SET.getValue(), new CredentialsNone(), + RpcProgramPortmap.PMAPPROC_SET, new CredentialsNone(), new VerifierNone()); call.write(request); return mapping.serialize(request); diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/RpcProgramPortmap.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/RpcProgramPortmap.java index d68657cc42f..67175d0640d 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/RpcProgramPortmap.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/RpcProgramPortmap.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.portmap; -import java.util.HashMap; +import java.util.concurrent.ConcurrentHashMap; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -40,20 +40,26 @@ import org.jboss.netty.handler.timeout.IdleState; import org.jboss.netty.handler.timeout.IdleStateAwareChannelUpstreamHandler; import org.jboss.netty.handler.timeout.IdleStateEvent; -final class RpcProgramPortmap extends IdleStateAwareChannelUpstreamHandler implements PortmapInterface { +final class RpcProgramPortmap extends IdleStateAwareChannelUpstreamHandler { static final int PROGRAM = 100000; static final int VERSION = 2; + + static final int PMAPPROC_NULL = 0; + static final int PMAPPROC_SET = 1; + static final int PMAPPROC_UNSET = 2; + static final int PMAPPROC_GETPORT = 3; + static final int PMAPPROC_DUMP = 4; + static final int PMAPPROC_GETVERSADDR = 9; + private static final Log LOG = LogFactory.getLog(RpcProgramPortmap.class); - /** Map synchronized usis monitor lock of this instance */ - private final HashMap map; + private final ConcurrentHashMap map = new ConcurrentHashMap(); /** ChannelGroup that remembers all active channels for gracefully shutdown. */ private final ChannelGroup allChannels; RpcProgramPortmap(ChannelGroup allChannels) { this.allChannels = allChannels; - map = new HashMap(256); PortmapMapping m = new PortmapMapping(PROGRAM, VERSION, PortmapMapping.TRANSPORT_TCP, RpcProgram.RPCB_PORT); PortmapMapping m1 = new PortmapMapping(PROGRAM, VERSION, @@ -61,48 +67,66 @@ final class RpcProgramPortmap extends IdleStateAwareChannelUpstreamHandler imple map.put(PortmapMapping.key(m), m); map.put(PortmapMapping.key(m1), m1); } - - @Override - public XDR nullOp(int xid, XDR in, XDR out) { + + /** + * This procedure does no work. By convention, procedure zero of any protocol + * takes no parameters and returns no results. 
+ */ + private XDR nullOp(int xid, XDR in, XDR out) { return PortmapResponse.voidReply(out, xid); } - @Override - public XDR set(int xid, XDR in, XDR out) { + /** + * When a program first becomes available on a machine, it registers itself + * with the port mapper program on the same machine. The program passes its + * program number "prog", version number "vers", transport protocol number + * "prot", and the port "port" on which it awaits service request. The + * procedure returns a boolean reply whose value is "TRUE" if the procedure + * successfully established the mapping and "FALSE" otherwise. The procedure + * refuses to establish a mapping if one already exists for the tuple + * "(prog, vers, prot)". + */ + private XDR set(int xid, XDR in, XDR out) { PortmapMapping mapping = PortmapRequest.mapping(in); String key = PortmapMapping.key(mapping); if (LOG.isDebugEnabled()) { LOG.debug("Portmap set key=" + key); } - PortmapMapping value = null; - synchronized(this) { - map.put(key, mapping); - value = map.get(key); - } - return PortmapResponse.intReply(out, xid, value.getPort()); + map.put(key, mapping); + return PortmapResponse.intReply(out, xid, mapping.getPort()); } - @Override - public synchronized XDR unset(int xid, XDR in, XDR out) { + /** + * When a program becomes unavailable, it should unregister itself with the + * port mapper program on the same machine. The parameters and results have + * meanings identical to those of "PMAPPROC_SET". The protocol and port number + * fields of the argument are ignored. + */ + private XDR unset(int xid, XDR in, XDR out) { PortmapMapping mapping = PortmapRequest.mapping(in); - synchronized(this) { - map.remove(PortmapMapping.key(mapping)); - } + String key = PortmapMapping.key(mapping); + + if (LOG.isDebugEnabled()) + LOG.debug("Portmap remove key=" + key); + + map.remove(key); return PortmapResponse.booleanReply(out, xid, true); } - @Override - public synchronized XDR getport(int xid, XDR in, XDR out) { + /** + * Given a program number "prog", version number "vers", and transport + * protocol number "prot", this procedure returns the port number on which the + * program is awaiting call requests. A port value of zeros means the program + * has not been registered. The "port" field of the argument is ignored. + */ + private XDR getport(int xid, XDR in, XDR out) { PortmapMapping mapping = PortmapRequest.mapping(in); String key = PortmapMapping.key(mapping); if (LOG.isDebugEnabled()) { LOG.debug("Portmap GETPORT key=" + key + " " + mapping); } - PortmapMapping value = null; - synchronized(this) { - value = map.get(key); - } + PortmapMapping value = map.get(key); int res = 0; if (value != null) { res = value.getPort(); @@ -115,13 +139,13 @@ final class RpcProgramPortmap extends IdleStateAwareChannelUpstreamHandler imple return PortmapResponse.intReply(out, xid, res); } - @Override - public synchronized XDR dump(int xid, XDR in, XDR out) { - PortmapMapping[] pmapList = null; - synchronized(this) { - pmapList = new PortmapMapping[map.values().size()]; - map.values().toArray(pmapList); - } + /** + * This procedure enumerates all entries in the port mapper's database. The + * procedure takes no parameters and returns a list of program, version, + * protocol, and port values. 
+ */ + private XDR dump(int xid, XDR in, XDR out) { + PortmapMapping[] pmapList = map.values().toArray(new PortmapMapping[0]); return PortmapResponse.pmapList(out, xid, pmapList); } @@ -131,23 +155,23 @@ final class RpcProgramPortmap extends IdleStateAwareChannelUpstreamHandler imple RpcInfo info = (RpcInfo) e.getMessage(); RpcCall rpcCall = (RpcCall) info.header(); - final Procedure portmapProc = Procedure.fromValue(rpcCall.getProcedure()); + final int portmapProc = rpcCall.getProcedure(); int xid = rpcCall.getXid(); XDR in = new XDR(info.data().toByteBuffer().asReadOnlyBuffer(), XDR.State.READING); XDR out = new XDR(); - if (portmapProc == Procedure.PMAPPROC_NULL) { + if (portmapProc == PMAPPROC_NULL) { out = nullOp(xid, in, out); - } else if (portmapProc == Procedure.PMAPPROC_SET) { + } else if (portmapProc == PMAPPROC_SET) { out = set(xid, in, out); - } else if (portmapProc == Procedure.PMAPPROC_UNSET) { + } else if (portmapProc == PMAPPROC_UNSET) { out = unset(xid, in, out); - } else if (portmapProc == Procedure.PMAPPROC_DUMP) { + } else if (portmapProc == PMAPPROC_DUMP) { out = dump(xid, in, out); - } else if (portmapProc == Procedure.PMAPPROC_GETPORT) { + } else if (portmapProc == PMAPPROC_GETPORT) { out = getport(xid, in, out); - } else if (portmapProc == Procedure.PMAPPROC_GETVERSADDR) { + } else if (portmapProc == PMAPPROC_GETVERSADDR) { out = getport(xid, in, out); } else { LOG.info("PortmapHandler unknown rpc procedure=" + portmapProc); @@ -161,7 +185,7 @@ final class RpcProgramPortmap extends IdleStateAwareChannelUpstreamHandler imple RpcResponse rsp = new RpcResponse(buf, info.remoteAddress()); RpcUtil.sendRpcResponse(ctx, rsp); } - + @Override public void channelOpen(ChannelHandlerContext ctx, ChannelStateEvent e) throws Exception { diff --git a/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/portmap/TestPortmap.java b/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/portmap/TestPortmap.java index 2ed16bb13e6..cc88d34920a 100644 --- a/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/portmap/TestPortmap.java +++ b/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/portmap/TestPortmap.java @@ -23,7 +23,7 @@ import java.net.DatagramPacket; import java.net.DatagramSocket; import java.net.InetSocketAddress; import java.net.Socket; -import java.util.HashMap; +import java.util.Map; import junit.framework.Assert; @@ -80,7 +80,7 @@ public class TestPortmap { XDR req = new XDR(); RpcCall.getInstance(++xid, RpcProgramPortmap.PROGRAM, RpcProgramPortmap.VERSION, - PortmapInterface.Procedure.PMAPPROC_SET.getValue(), + RpcProgramPortmap.PMAPPROC_SET, new CredentialsNone(), new VerifierNone()).write(req); PortmapMapping sent = new PortmapMapping(90000, 1, @@ -101,7 +101,7 @@ public class TestPortmap { Thread.sleep(100); boolean found = false; @SuppressWarnings("unchecked") - HashMap map = (HashMap) Whitebox + Map map = (Map) Whitebox .getInternalState(pm.getHandler(), "map"); for (PortmapMapping m : map.values()) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index ac53b602ec3..b1d40f495b7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -733,6 +733,8 @@ Release 2.2.1 - UNRELEASED HDFS-5407. Fix typos in DFSClientCache (Haohui Mai via brandonli) + HDFS-5548. 
Use ConcurrentHashMap in portmap (Haohui Mai via brandonli) + Release 2.2.0 - 2013-10-13 INCOMPATIBLE CHANGES From 82ff2d3f2e569879500d851f4d67dfa2d02b5c9b Mon Sep 17 00:00:00 2001 From: Tsz-wo Sze Date: Tue, 26 Nov 2013 18:33:22 +0000 Subject: [PATCH 09/27] HDFS-5286. Flatten INodeDirectory hierarchy: Replace INodeDirectoryWithQuota with DirectoryWithQuotaFeature. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1545768 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + ...ta.java => DirectoryWithQuotaFeature.java} | 146 ++++++------------ .../hdfs/server/namenode/FSDirectory.java | 62 +++----- .../hadoop/hdfs/server/namenode/FSImage.java | 4 +- .../hdfs/server/namenode/FSImageFormat.java | 27 ++-- .../hadoop/hdfs/server/namenode/INode.java | 56 +++++++ .../hdfs/server/namenode/INodeDirectory.java | 145 +++++++++++++---- .../hdfs/server/namenode/INodeFile.java | 28 +--- .../snapshot/INodeDirectoryWithSnapshot.java | 9 +- .../server/namenode/snapshot/Snapshot.java | 2 +- .../org/apache/hadoop/hdfs/TestQuota.java | 5 +- .../namenode/TestFSImageWithSnapshot.java | 2 +- .../hdfs/server/namenode/TestFsLimits.java | 6 +- .../hdfs/server/namenode/TestINodeFile.java | 19 ++- .../snapshot/TestRenameWithSnapshots.java | 22 ++- .../snapshot/TestSnapshotDeletion.java | 17 +- .../org/apache/hadoop/hdfs/util/TestDiff.java | 2 +- 17 files changed, 324 insertions(+), 231 deletions(-) rename hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/{INodeDirectoryWithQuota.java => DirectoryWithQuotaFeature.java} (50%) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index b1d40f495b7..2b1e6972d0a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -215,6 +215,9 @@ Trunk (Unreleased) HDFS-5538. URLConnectionFactory should pick up the SSL related configuration by default. (Haohui Mai via jing9) + HDFS-5286. Flatten INodeDirectory hierarchy: Replace INodeDirectoryWithQuota + with DirectoryWithQuotaFeature. (szetszwo) + OPTIMIZATIONS HDFS-5349. DNA_CACHE and DNA_UNCACHE should be by blockId only. 
(cmccabe) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DirectoryWithQuotaFeature.java similarity index 50% rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java rename to hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DirectoryWithQuotaFeature.java index 41f1984f77e..c03a7971d11 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DirectoryWithQuotaFeature.java @@ -17,121 +17,76 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException; import org.apache.hadoop.hdfs.protocol.QuotaExceededException; -import com.google.common.annotations.VisibleForTesting; - /** - * Directory INode class that has a quota restriction + * Quota feature for {@link INodeDirectory}. */ -public class INodeDirectoryWithQuota extends INodeDirectory { +public final class DirectoryWithQuotaFeature extends INodeDirectory.Feature { + public static final long DEFAULT_NAMESPACE_QUOTA = Long.MAX_VALUE; + public static final long DEFAULT_DISKSPACE_QUOTA = HdfsConstants.QUOTA_RESET; + /** Name space quota */ - private long nsQuota = Long.MAX_VALUE; + private long nsQuota = DEFAULT_NAMESPACE_QUOTA; /** Name space count */ private long namespace = 1L; /** Disk space quota */ - private long dsQuota = HdfsConstants.QUOTA_RESET; + private long dsQuota = DEFAULT_DISKSPACE_QUOTA; /** Disk space count */ private long diskspace = 0L; - /** Convert an existing directory inode to one with the given quota - * - * @param nsQuota Namespace quota to be assigned to this inode - * @param dsQuota Diskspace quota to be assigned to this indoe - * @param other The other inode from which all other properties are copied - */ - INodeDirectoryWithQuota(INodeDirectory other, boolean adopt, - long nsQuota, long dsQuota) { - super(other, adopt); - final Quota.Counts counts = other.computeQuotaUsage(); - this.namespace = counts.get(Quota.NAMESPACE); - this.diskspace = counts.get(Quota.DISKSPACE); + DirectoryWithQuotaFeature(long nsQuota, long dsQuota) { this.nsQuota = nsQuota; this.dsQuota = dsQuota; } - - public INodeDirectoryWithQuota(INodeDirectory other, boolean adopt, - Quota.Counts quota) { - this(other, adopt, quota.get(Quota.NAMESPACE), quota.get(Quota.DISKSPACE)); - } - /** constructor with no quota verification */ - INodeDirectoryWithQuota(long id, byte[] name, PermissionStatus permissions, - long modificationTime, long nsQuota, long dsQuota) { - super(id, name, permissions, modificationTime); - this.nsQuota = nsQuota; - this.dsQuota = dsQuota; - } - - /** constructor with no quota verification */ - INodeDirectoryWithQuota(long id, byte[] name, PermissionStatus permissions) { - super(id, name, permissions, 0L); - } - - @Override - public Quota.Counts getQuotaCounts() { + /** @return the quota set or -1 if it is not set. 
*/ + Quota.Counts getQuota() { return Quota.Counts.newInstance(nsQuota, dsQuota); } /** Set this directory's quota * * @param nsQuota Namespace quota to be set - * @param dsQuota diskspace quota to be set + * @param dsQuota Diskspace quota to be set */ - public void setQuota(long nsQuota, long dsQuota) { + void setQuota(long nsQuota, long dsQuota) { this.nsQuota = nsQuota; this.dsQuota = dsQuota; } - @Override - public Quota.Counts computeQuotaUsage(Quota.Counts counts, boolean useCache, - int lastSnapshotId) { - if (useCache && isQuotaSet()) { - // use cache value - counts.add(Quota.NAMESPACE, namespace); - counts.add(Quota.DISKSPACE, diskspace); - } else { - super.computeQuotaUsage(counts, false, lastSnapshotId); - } + Quota.Counts addNamespaceDiskspace(Quota.Counts counts) { + counts.add(Quota.NAMESPACE, namespace); + counts.add(Quota.DISKSPACE, diskspace); return counts; } - @Override - public ContentSummaryComputationContext computeContentSummary( + ContentSummaryComputationContext computeContentSummary(final INodeDirectory dir, final ContentSummaryComputationContext summary) { final long original = summary.getCounts().get(Content.DISKSPACE); long oldYieldCount = summary.getYieldCount(); - super.computeContentSummary(summary); + dir.computeDirectoryContentSummary(summary); // Check only when the content has not changed in the middle. if (oldYieldCount == summary.getYieldCount()) { - checkDiskspace(summary.getCounts().get(Content.DISKSPACE) - original); + checkDiskspace(dir, summary.getCounts().get(Content.DISKSPACE) - original); } return summary; } - private void checkDiskspace(final long computed) { - if (-1 != getQuotaCounts().get(Quota.DISKSPACE) && diskspace != computed) { + private void checkDiskspace(final INodeDirectory dir, final long computed) { + if (-1 != getQuota().get(Quota.DISKSPACE) && diskspace != computed) { NameNode.LOG.error("BUG: Inconsistent diskspace for directory " - + getFullPathName() + ". Cached = " + diskspace + + dir.getFullPathName() + ". Cached = " + diskspace + " != Computed = " + computed); } } - /** Get the number of names in the subtree rooted at this directory - * @return the size of the subtree rooted at this directory - */ - long numItemsInTree() { - return namespace; - } - - @Override - public final void addSpaceConsumed(final long nsDelta, final long dsDelta, - boolean verify) throws QuotaExceededException { - if (isQuotaSet()) { + void addSpaceConsumed(final INodeDirectory dir, final long nsDelta, + final long dsDelta, boolean verify) throws QuotaExceededException { + if (dir.isQuotaSet()) { // The following steps are important: // check quotas in this inode and all ancestors before changing counts // so that no change is made if there is any quota violation. 
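The pattern this rename introduces is easier to see outside the diff: quota state becomes an optional object hung off a plain directory inode, and quota checks run only when that object is present, instead of the directory being swapped for a quota-holding subclass. The sketch below is a self-contained illustration of that shape; the class and method names are invented for this example and are not the Hadoop types themselves.

```java
// Minimal sketch of the composition pattern in HDFS-5286 (illustrative names,
// not the real Hadoop classes): quota is an optional feature, not a subclass.
public class QuotaFeatureSketch {

  /** Optional quota state that can be attached to a directory. */
  static final class QuotaFeature {
    private final long nsQuota;
    private long namespaceUsed;

    QuotaFeature(long nsQuota) {
      this.nsQuota = nsQuota;
    }

    /** Verify the namespace quota before applying the delta, then apply it. */
    void verifyAndAdd(long nsDelta) {
      if (namespaceUsed + nsDelta > nsQuota) {
        throw new IllegalStateException("namespace quota exceeded: "
            + (namespaceUsed + nsDelta) + " > " + nsQuota);
      }
      namespaceUsed += nsDelta;
    }

    long getNamespaceUsed() {
      return namespaceUsed;
    }
  }

  /** A directory that may or may not carry a quota feature. */
  static final class Dir {
    private QuotaFeature quota; // null means "no quota set"

    void setQuota(long nsQuota) {
      quota = new QuotaFeature(nsQuota);
    }

    void clearQuota() {
      quota = null; // no need to replace the directory object itself
    }

    void addSpaceConsumed(long nsDelta) {
      if (quota != null) {
        quota.verifyAndAdd(nsDelta); // quota-aware path
      }
      // otherwise only the ancestor accounting would run
    }

    long getNamespaceUsed() {
      return quota == null ? -1 : quota.getNamespaceUsed();
    }
  }

  public static void main(String[] args) {
    Dir dir = new Dir();
    dir.setQuota(2);
    dir.addSpaceConsumed(1);
    dir.addSpaceConsumed(1);
    System.out.println("used = " + dir.getNamespaceUsed()); // used = 2
    try {
      dir.addSpaceConsumed(1); // would exceed the quota of 2
    } catch (IllegalStateException e) {
      System.out.println(e.getMessage());
    }
  }
}
```

In the patch itself the same shape shows up as getDirectoryWithQuotaFeature(): callers check for the feature and delegate to it when present, so setting or clearing a quota no longer forces an inode replacement.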
@@ -141,11 +96,11 @@ public class INodeDirectoryWithQuota extends INodeDirectory { verifyQuota(nsDelta, dsDelta); } // (2) verify quota and then add count in ancestors - super.addSpaceConsumed(nsDelta, dsDelta, verify); + dir.addSpaceConsumed2Parent(nsDelta, dsDelta, verify); // (3) add count in this inode addSpaceConsumed2Cache(nsDelta, dsDelta); } else { - super.addSpaceConsumed(nsDelta, dsDelta, verify); + dir.addSpaceConsumed2Parent(nsDelta, dsDelta, verify); } } @@ -154,7 +109,7 @@ public class INodeDirectoryWithQuota extends INodeDirectory { * @param nsDelta the change of the tree size * @param dsDelta change to disk space occupied */ - protected void addSpaceConsumed2Cache(long nsDelta, long dsDelta) { + public void addSpaceConsumed2Cache(long nsDelta, long dsDelta) { namespace += nsDelta; diskspace += dsDelta; } @@ -172,41 +127,42 @@ public class INodeDirectoryWithQuota extends INodeDirectory { this.diskspace = diskspace; } + /** @return the namespace and diskspace consumed. */ + public Quota.Counts getSpaceConsumed() { + return Quota.Counts.newInstance(namespace, diskspace); + } + /** Verify if the namespace quota is violated after applying delta. */ - void verifyNamespaceQuota(long delta) throws NSQuotaExceededException { + private void verifyNamespaceQuota(long delta) throws NSQuotaExceededException { if (Quota.isViolated(nsQuota, namespace, delta)) { throw new NSQuotaExceededException(nsQuota, namespace + delta); } } - - /** Verify if the namespace count disk space satisfies the quota restriction - * @throws QuotaExceededException if the given quota is less than the count - */ - void verifyQuota(long nsDelta, long dsDelta) throws QuotaExceededException { - verifyNamespaceQuota(nsDelta); - - if (Quota.isViolated(dsQuota, diskspace, dsDelta)) { - throw new DSQuotaExceededException(dsQuota, diskspace + dsDelta); + /** Verify if the diskspace quota is violated after applying delta. */ + private void verifyDiskspaceQuota(long delta) throws DSQuotaExceededException { + if (Quota.isViolated(dsQuota, diskspace, delta)) { + throw new DSQuotaExceededException(dsQuota, diskspace + delta); } } - String namespaceString() { + /** + * @throws QuotaExceededException if namespace or diskspace quotas is + * violated after applying the deltas. + */ + void verifyQuota(long nsDelta, long dsDelta) throws QuotaExceededException { + verifyNamespaceQuota(nsDelta); + verifyDiskspaceQuota(dsDelta); + } + + private String namespaceString() { return "namespace: " + (nsQuota < 0? "-": namespace + "/" + nsQuota); } - String diskspaceString() { + private String diskspaceString() { return "diskspace: " + (dsQuota < 0? 
"-": diskspace + "/" + dsQuota); } - String quotaString() { - return ", Quota[" + namespaceString() + ", " + diskspaceString() + "]"; - } - @VisibleForTesting - public long getNamespace() { - return this.namespace; - } - - @VisibleForTesting - public long getDiskspace() { - return this.diskspace; + @Override + public String toString() { + return "Quota[" + namespaceString() + ", " + diskspaceString() + "]"; } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index cc5691b8e3c..07e2cdc02fd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -86,11 +86,15 @@ import com.google.common.base.Preconditions; * *************************************************/ public class FSDirectory implements Closeable { - private static INodeDirectoryWithQuota createRoot(FSNamesystem namesystem) { - final INodeDirectoryWithQuota r = new INodeDirectoryWithQuota( + private static INodeDirectorySnapshottable createRoot(FSNamesystem namesystem) { + final INodeDirectory r = new INodeDirectory( INodeId.ROOT_INODE_ID, INodeDirectory.ROOT_NAME, - namesystem.createFsOwnerPermissions(new FsPermission((short) 0755))); + namesystem.createFsOwnerPermissions(new FsPermission((short) 0755)), + 0L); + r.addDirectoryWithQuotaFeature( + DirectoryWithQuotaFeature.DEFAULT_NAMESPACE_QUOTA, + DirectoryWithQuotaFeature.DEFAULT_DISKSPACE_QUOTA); final INodeDirectorySnapshottable s = new INodeDirectorySnapshottable(r); s.setSnapshotQuota(0); return s; @@ -106,7 +110,7 @@ public class FSDirectory implements Closeable { public final static String DOT_INODES_STRING = ".inodes"; public final static byte[] DOT_INODES = DFSUtil.string2Bytes(DOT_INODES_STRING); - INodeDirectoryWithQuota rootDir; + INodeDirectory rootDir; FSImage fsImage; private final FSNamesystem namesystem; private volatile boolean ready = false; @@ -201,7 +205,7 @@ public class FSDirectory implements Closeable { } /** @return the root directory inode. 
*/ - public INodeDirectoryWithQuota getRoot() { + public INodeDirectory getRoot() { return rootDir; } @@ -1799,9 +1803,8 @@ public class FSDirectory implements Closeable { final INode[] inodes = inodesInPath.getINodes(); for(int i=0; i < numOfINodes; i++) { if (inodes[i].isQuotaSet()) { // a directory with quota - INodeDirectoryWithQuota node = (INodeDirectoryWithQuota) inodes[i] - .asDirectory(); - node.addSpaceConsumed2Cache(nsDelta, dsDelta); + inodes[i].asDirectory().getDirectoryWithQuotaFeature() + .addSpaceConsumed2Cache(nsDelta, dsDelta); } } } @@ -2034,10 +2037,11 @@ public class FSDirectory implements Closeable { // Stop checking for quota when common ancestor is reached return; } - if (inodes[i].isQuotaSet()) { // a directory with quota + final DirectoryWithQuotaFeature q + = inodes[i].asDirectory().getDirectoryWithQuotaFeature(); + if (q != null) { // a directory with quota try { - ((INodeDirectoryWithQuota) inodes[i].asDirectory()).verifyQuota( - nsDelta, dsDelta); + q.verifyQuota(nsDelta, dsDelta); } catch (QuotaExceededException e) { e.setPathName(getFullPathName(inodes, i)); throw e; @@ -2384,35 +2388,14 @@ public class FSDirectory implements Closeable { if (dsQuota == HdfsConstants.QUOTA_DONT_SET) { dsQuota = oldDsQuota; } + if (oldNsQuota == nsQuota && oldDsQuota == dsQuota) { + return null; + } final Snapshot latest = iip.getLatestSnapshot(); - if (dirNode instanceof INodeDirectoryWithQuota) { - INodeDirectoryWithQuota quotaNode = (INodeDirectoryWithQuota) dirNode; - Quota.Counts counts = null; - if (!quotaNode.isQuotaSet()) { - // dirNode must be an INodeDirectoryWithSnapshot whose quota has not - // been set yet - counts = quotaNode.computeQuotaUsage(); - } - // a directory with quota; so set the quota to the new value - quotaNode.setQuota(nsQuota, dsQuota); - if (quotaNode.isQuotaSet() && counts != null) { - quotaNode.setSpaceConsumed(counts.get(Quota.NAMESPACE), - counts.get(Quota.DISKSPACE)); - } else if (!quotaNode.isQuotaSet() && latest == null) { - // do not replace the node if the node is a snapshottable directory - // without snapshots - if (!(quotaNode instanceof INodeDirectoryWithSnapshot)) { - // will not come here for root because root is snapshottable and - // root's nsQuota is always set - return quotaNode.replaceSelf4INodeDirectory(inodeMap); - } - } - } else { - // a non-quota directory; so replace it with a directory with quota - return dirNode.replaceSelf4Quota(latest, nsQuota, dsQuota, inodeMap); - } - return (oldNsQuota != nsQuota || oldDsQuota != dsQuota) ? 
dirNode : null; + dirNode = dirNode.recordModification(latest, inodeMap); + dirNode.setQuota(nsQuota, dsQuota); + return dirNode; } } @@ -2441,7 +2424,8 @@ public class FSDirectory implements Closeable { long totalInodes() { readLock(); try { - return rootDir.numItemsInTree(); + return rootDir.getDirectoryWithQuotaFeature().getSpaceConsumed() + .get(Quota.NAMESPACE); } finally { readUnlock(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java index dd37cdad815..ee743fe65bb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java @@ -755,7 +755,7 @@ public class FSImage implements Closeable { * This is an update of existing state of the filesystem and does not * throw QuotaExceededException. */ - static void updateCountForQuota(INodeDirectoryWithQuota root) { + static void updateCountForQuota(INodeDirectory root) { updateCountForQuotaRecursively(root, Quota.Counts.newInstance()); } @@ -795,7 +795,7 @@ public class FSImage implements Closeable { + " quota = " + dsQuota + " < consumed = " + diskspace); } - ((INodeDirectoryWithQuota)dir).setSpaceConsumed(namespace, diskspace); + dir.getDirectoryWithQuotaFeature().setSpaceConsumed(namespace, diskspace); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java index f6db1529161..5ce0e3f0269 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java @@ -375,7 +375,7 @@ public class FSImageFormat { final long dsQuota = q.get(Quota.DISKSPACE); FSDirectory fsDir = namesystem.dir; if (nsQuota != -1 || dsQuota != -1) { - fsDir.rootDir.setQuota(nsQuota, dsQuota); + fsDir.rootDir.getDirectoryWithQuotaFeature().setQuota(nsQuota, dsQuota); } fsDir.rootDir.cloneModificationTime(root); fsDir.rootDir.clonePermissionStatus(root); @@ -729,10 +729,11 @@ public class FSImageFormat { if (counter != null) { counter.increment(); } - final INodeDirectory dir = nsQuota >= 0 || dsQuota >= 0? - new INodeDirectoryWithQuota(inodeId, localName, permissions, - modificationTime, nsQuota, dsQuota) - : new INodeDirectory(inodeId, localName, permissions, modificationTime); + final INodeDirectory dir = new INodeDirectory(inodeId, localName, + permissions, modificationTime); + if (nsQuota >= 0 || dsQuota >= 0) { + dir.addDirectoryWithQuotaFeature(nsQuota, dsQuota); + } return snapshottable ? new INodeDirectorySnapshottable(dir) : withSnapshot ? 
new INodeDirectoryWithSnapshot(dir) : dir; @@ -972,13 +973,14 @@ public class FSImageFormat { checkNotSaved(); final FSNamesystem sourceNamesystem = context.getSourceNamesystem(); - FSDirectory fsDir = sourceNamesystem.dir; + final INodeDirectory rootDir = sourceNamesystem.dir.rootDir; + final long numINodes = rootDir.getDirectoryWithQuotaFeature() + .getSpaceConsumed().get(Quota.NAMESPACE); String sdPath = newFile.getParentFile().getParentFile().getAbsolutePath(); Step step = new Step(StepType.INODES, sdPath); StartupProgress prog = NameNode.getStartupProgress(); prog.beginStep(Phase.SAVING_CHECKPOINT, step); - prog.setTotal(Phase.SAVING_CHECKPOINT, step, - fsDir.rootDir.numItemsInTree()); + prog.setTotal(Phase.SAVING_CHECKPOINT, step, numINodes); Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step); long startTime = now(); // @@ -997,7 +999,7 @@ public class FSImageFormat { // fairness-related deadlock. See the comments on HDFS-2223. out.writeInt(sourceNamesystem.unprotectedGetNamespaceInfo() .getNamespaceID()); - out.writeLong(fsDir.rootDir.numItemsInTree()); + out.writeLong(numINodes); out.writeLong(sourceNamesystem.getGenerationStampV1()); out.writeLong(sourceNamesystem.getGenerationStampV2()); out.writeLong(sourceNamesystem.getGenerationStampAtblockIdSwitch()); @@ -1014,14 +1016,13 @@ public class FSImageFormat { " using " + compression); // save the root - saveINode2Image(fsDir.rootDir, out, false, referenceMap, counter); + saveINode2Image(rootDir, out, false, referenceMap, counter); // save the rest of the nodes - saveImage(fsDir.rootDir, out, true, false, counter); + saveImage(rootDir, out, true, false, counter); prog.endStep(Phase.SAVING_CHECKPOINT, step); // Now that the step is finished, set counter equal to total to adjust // for possible under-counting due to reference inodes. - prog.setCount(Phase.SAVING_CHECKPOINT, step, - fsDir.rootDir.numItemsInTree()); + prog.setCount(Phase.SAVING_CHECKPOINT, step, numINodes); // save files under construction // TODO: for HDFS-5428, since we cannot break the compatibility of // fsimage, we store part of the under-construction files that are only diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java index e5f26b08b1e..b16a719eacf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java @@ -406,6 +406,15 @@ public abstract class INode implements INodeAttributes, Diff.Element { */ public void addSpaceConsumed(long nsDelta, long dsDelta, boolean verify) throws QuotaExceededException { + addSpaceConsumed2Parent(nsDelta, dsDelta, verify); + } + + /** + * Check and add namespace/diskspace consumed to itself and the ancestors. + * @throws QuotaExceededException if quote is violated. + */ + void addSpaceConsumed2Parent(long nsDelta, long dsDelta, boolean verify) + throws QuotaExceededException { if (parent != null) { parent.addSpaceConsumed(nsDelta, dsDelta, verify); } @@ -744,4 +753,51 @@ public abstract class INode implements INodeAttributes, Diff.Element { toDeleteList.clear(); } } + + /** INode feature such as {@link FileUnderConstructionFeature} + * and {@link DirectoryWithQuotaFeature}. + */ + interface Feature> { + /** @return the next feature. */ + public F getNextFeature(); + + /** Set the next feature. 
*/ + public void setNextFeature(F next); + + /** Utility methods such as addFeature and removeFeature. */ + static class Util { + /** + * Add a feature to the linked list. + * @return the new head. + */ + static > F addFeature(F feature, F head) { + feature.setNextFeature(head); + return feature; + } + + /** + * Remove a feature from the linked list. + * @return the new head. + */ + static > F removeFeature(F feature, F head) { + if (feature == head) { + final F newHead = head.getNextFeature(); + head.setNextFeature(null); + return newHead; + } else if (head != null) { + F prev = head; + F curr = head.getNextFeature(); + for (; curr != null && curr != feature; + prev = curr, curr = curr.getNextFeature()) + ; + if (curr != null) { + prev.setNextFeature(curr.getNextFeature()); + curr.setNextFeature(null); + return head; + } + } + throw new IllegalStateException("Feature " + feature + " not found."); + } + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java index 68ce1231385..ae5077af637 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java @@ -46,6 +46,21 @@ import com.google.common.base.Preconditions; */ public class INodeDirectory extends INodeWithAdditionalFields implements INodeDirectoryAttributes { + /** Directory related features such as quota and snapshots. */ + public static abstract class Feature implements INode.Feature { + private Feature nextFeature; + + @Override + public Feature getNextFeature() { + return nextFeature; + } + + @Override + public void setNextFeature(Feature next) { + this.nextFeature = next; + } + } + /** Cast INode to INodeDirectory. */ public static INodeDirectory valueOf(INode inode, Object path ) throws FileNotFoundException, PathIsNotDirectoryException { @@ -63,6 +78,9 @@ public class INodeDirectory extends INodeWithAdditionalFields final static byte[] ROOT_NAME = DFSUtil.string2Bytes(""); private List children = null; + + /** A linked list of {@link Feature}s. */ + private Feature headFeature = null; /** constructor */ public INodeDirectory(long id, byte[] name, PermissionStatus permissions, @@ -76,7 +94,7 @@ public class INodeDirectory extends INodeWithAdditionalFields * @param adopt Indicate whether or not need to set the parent field of child * INodes to the new node */ - public INodeDirectory(INodeDirectory other, boolean adopt) { + public INodeDirectory(INodeDirectory other, boolean adopt, boolean copyFeatures) { super(other); this.children = other.children; if (adopt && this.children != null) { @@ -84,6 +102,9 @@ public class INodeDirectory extends INodeWithAdditionalFields child.setParent(this); } } + if (copyFeatures) { + this.headFeature = other.headFeature; + } } /** @return true unconditionally. 
*/ @@ -103,6 +124,73 @@ public class INodeDirectory extends INodeWithAdditionalFields return false; } + void setQuota(long nsQuota, long dsQuota) { + DirectoryWithQuotaFeature quota = getDirectoryWithQuotaFeature(); + if (quota != null) { + // already has quota; so set the quota to the new values + quota.setQuota(nsQuota, dsQuota); + if (!isQuotaSet() && !isRoot()) { + removeFeature(quota); + } + } else { + final Quota.Counts c = computeQuotaUsage(); + quota = addDirectoryWithQuotaFeature(nsQuota, dsQuota); + quota.setSpaceConsumed(c.get(Quota.NAMESPACE), c.get(Quota.DISKSPACE)); + } + } + + @Override + public Quota.Counts getQuotaCounts() { + final DirectoryWithQuotaFeature q = getDirectoryWithQuotaFeature(); + return q != null? q.getQuota(): super.getQuotaCounts(); + } + + @Override + public void addSpaceConsumed(long nsDelta, long dsDelta, boolean verify) + throws QuotaExceededException { + final DirectoryWithQuotaFeature q = getDirectoryWithQuotaFeature(); + if (q != null) { + q.addSpaceConsumed(this, nsDelta, dsDelta, verify); + } else { + addSpaceConsumed2Parent(nsDelta, dsDelta, verify); + } + } + + /** + * If the directory contains a {@link DirectoryWithQuotaFeature}, return it; + * otherwise, return null. + */ + public final DirectoryWithQuotaFeature getDirectoryWithQuotaFeature() { + for(Feature f = headFeature; f != null; f = f.nextFeature) { + if (f instanceof DirectoryWithQuotaFeature) { + return (DirectoryWithQuotaFeature)f; + } + } + return null; + } + + /** Is this directory with quota? */ + final boolean isWithQuota() { + return getDirectoryWithQuotaFeature() != null; + } + + DirectoryWithQuotaFeature addDirectoryWithQuotaFeature( + long nsQuota, long dsQuota) { + Preconditions.checkState(!isWithQuota(), "Directory is already with quota"); + final DirectoryWithQuotaFeature quota = new DirectoryWithQuotaFeature( + nsQuota, dsQuota); + addFeature(quota); + return quota; + } + + private void addFeature(Feature f) { + headFeature = INode.Feature.Util.addFeature(f, headFeature); + } + + private void removeFeature(Feature f) { + headFeature = INode.Feature.Util.removeFeature(f, headFeature); + } + private int searchChildren(byte[] name) { return children == null? -1: Collections.binarySearch(children, name); } @@ -142,27 +230,6 @@ public class INodeDirectory extends INodeWithAdditionalFields return true; } - /** - * Replace itself with {@link INodeDirectoryWithQuota} or - * {@link INodeDirectoryWithSnapshot} depending on the latest snapshot. - */ - INodeDirectoryWithQuota replaceSelf4Quota(final Snapshot latest, - final long nsQuota, final long dsQuota, final INodeMap inodeMap) - throws QuotaExceededException { - Preconditions.checkState(!(this instanceof INodeDirectoryWithQuota), - "this is already an INodeDirectoryWithQuota, this=%s", this); - - if (!this.isInLatestSnapshot(latest)) { - final INodeDirectoryWithQuota q = new INodeDirectoryWithQuota( - this, true, nsQuota, dsQuota); - replaceSelf(q, inodeMap); - return q; - } else { - final INodeDirectoryWithSnapshot s = new INodeDirectoryWithSnapshot(this); - s.setQuota(nsQuota, dsQuota); - return replaceSelf(s, inodeMap).saveSelf2Snapshot(latest, this); - } - } /** Replace itself with an {@link INodeDirectorySnapshottable}. 
*/ public INodeDirectorySnapshottable replaceSelf4INodeDirectorySnapshottable( Snapshot latest, final INodeMap inodeMap) throws QuotaExceededException { @@ -183,7 +250,7 @@ public class INodeDirectory extends INodeWithAdditionalFields public INodeDirectory replaceSelf4INodeDirectory(final INodeMap inodeMap) { Preconditions.checkState(getClass() != INodeDirectory.class, "the class is already INodeDirectory, this=%s", this); - return replaceSelf(new INodeDirectory(this, true), inodeMap); + return replaceSelf(new INodeDirectory(this, true, true), inodeMap); } /** Replace itself with the given directory. */ @@ -439,6 +506,21 @@ public class INodeDirectory extends INodeWithAdditionalFields @Override public Quota.Counts computeQuotaUsage(Quota.Counts counts, boolean useCache, int lastSnapshotId) { + final DirectoryWithQuotaFeature q = getDirectoryWithQuotaFeature(); + if (q != null) { + if (useCache && isQuotaSet()) { + q.addNamespaceDiskspace(counts); + } else { + computeDirectoryQuotaUsage(counts, false, lastSnapshotId); + } + return counts; + } else { + return computeDirectoryQuotaUsage(counts, useCache, lastSnapshotId); + } + } + + Quota.Counts computeDirectoryQuotaUsage(Quota.Counts counts, boolean useCache, + int lastSnapshotId) { if (children != null) { for (INode child : children) { child.computeQuotaUsage(counts, useCache, lastSnapshotId); @@ -456,6 +538,16 @@ public class INodeDirectory extends INodeWithAdditionalFields @Override public ContentSummaryComputationContext computeContentSummary( ContentSummaryComputationContext summary) { + final DirectoryWithQuotaFeature q = getDirectoryWithQuotaFeature(); + if (q != null) { + return q.computeContentSummary(this, summary); + } else { + return computeDirectoryContentSummary(summary); + } + } + + ContentSummaryComputationContext computeDirectoryContentSummary( + ContentSummaryComputationContext summary) { ReadOnlyList childrenList = getChildrenList(null); // Explicit traversing is done to enable repositioning after relinquishing // and reacquiring locks. 
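The addFeature/removeFeature helpers used above keep every optional feature in a singly linked list headed by the inode. Here is a standalone sketch of that bookkeeping, with invented names rather than the actual INode.Feature.Util code, assuming only plain Java:

```java
// Illustrative sketch of the feature linked list (invented names): new
// features are pushed at the head, removal unlinks in place, and lookups
// scan the list for the first feature of a given type.
public class FeatureListSketch {

  interface Feature {
    Feature next();
    void setNext(Feature n);
  }

  static abstract class AbstractFeature implements Feature {
    private Feature next;
    @Override public Feature next() { return next; }
    @Override public void setNext(Feature n) { this.next = n; }
  }

  static final class QuotaFeature extends AbstractFeature { }
  static final class SnapshotFeature extends AbstractFeature { }

  /** Push a feature onto the head of the list; returns the new head. */
  static Feature addFeature(Feature f, Feature head) {
    f.setNext(head);
    return f;
  }

  /** Unlink a feature from the list; returns the (possibly new) head. */
  static Feature removeFeature(Feature f, Feature head) {
    if (f == head) {
      Feature newHead = head.next();
      head.setNext(null);
      return newHead;
    }
    for (Feature prev = head, curr = head == null ? null : head.next();
        curr != null; prev = curr, curr = curr.next()) {
      if (curr == f) {
        prev.setNext(curr.next());
        curr.setNext(null);
        return head;
      }
    }
    throw new IllegalStateException("Feature " + f + " not found.");
  }

  /** Scan for the first feature of the given type, or null if absent. */
  static <T extends Feature> T getFeature(Class<T> type, Feature head) {
    for (Feature f = head; f != null; f = f.next()) {
      if (type.isInstance(f)) {
        return type.cast(f);
      }
    }
    return null;
  }

  public static void main(String[] args) {
    Feature head = null;
    QuotaFeature quota = new QuotaFeature();
    head = addFeature(quota, head);
    head = addFeature(new SnapshotFeature(), head);
    System.out.println(getFeature(QuotaFeature.class, head) != null);  // true
    head = removeFeature(quota, head);
    System.out.println(getFeature(QuotaFeature.class, head) != null);  // false
  }
}
```

getDirectoryWithQuotaFeature() in the patch is exactly this kind of type scan over the list.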
@@ -570,7 +662,7 @@ public class INodeDirectory extends INodeWithAdditionalFields Quota.Counts counts = cleanSubtreeRecursively(snapshot, prior, collectedBlocks, removedINodes, null, countDiffChange); if (isQuotaSet()) { - ((INodeDirectoryWithQuota) this).addSpaceConsumed2Cache( + getDirectoryWithQuotaFeature().addSpaceConsumed2Cache( -counts.get(Quota.NAMESPACE), -counts.get(Quota.DISKSPACE)); } return counts; @@ -606,8 +698,9 @@ public class INodeDirectory extends INodeWithAdditionalFields final Snapshot snapshot) { super.dumpTreeRecursively(out, prefix, snapshot); out.print(", childrenSize=" + getChildrenList(snapshot).size()); - if (this instanceof INodeDirectoryWithQuota) { - out.print(((INodeDirectoryWithQuota)this).quotaString()); + final DirectoryWithQuotaFeature q = getDirectoryWithQuotaFeature(); + if (q != null) { + out.print(", " + q); } if (this instanceof Snapshot.Root) { out.print(", snapshotId=" + snapshot.getId()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java index bc5b8aa7736..5fc2095dad2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java @@ -50,13 +50,15 @@ public class INodeFile extends INodeWithAdditionalFields * A feature contains specific information for a type of INodeFile. E.g., * we can have separate features for Under-Construction and Snapshot. */ - public static abstract class Feature { + public static abstract class Feature implements INode.Feature { private Feature nextFeature; + @Override public Feature getNextFeature() { return nextFeature; } + @Override public void setNextFeature(Feature next) { this.nextFeature = next; } @@ -160,26 +162,12 @@ public class INodeFile extends INodeWithAdditionalFields return getFileUnderConstructionFeature() != null; } - void addFeature(Feature f) { - f.nextFeature = headFeature; - headFeature = f; + private void addFeature(Feature f) { + headFeature = INode.Feature.Util.addFeature(f, headFeature); } - void removeFeature(Feature f) { - if (f == headFeature) { - headFeature = headFeature.nextFeature; - return; - } else if (headFeature != null) { - Feature prev = headFeature; - Feature curr = headFeature.nextFeature; - for (; curr != null && curr != f; prev = curr, curr = curr.nextFeature) - ; - if (curr != null) { - prev.nextFeature = curr.nextFeature; - return; - } - } - throw new IllegalStateException("Feature " + f + " not found."); + private void removeFeature(Feature f) { + headFeature = INode.Feature.Util.removeFeature(f, headFeature); } /** @return true unconditionally. */ @@ -197,7 +185,7 @@ public class INodeFile extends INodeWithAdditionalFields /* Start of Under-Construction Feature */ /** Convert this file to an {@link INodeFileUnderConstruction}. 
*/ - public INodeFile toUnderConstruction(String clientName, String clientMachine, + INodeFile toUnderConstruction(String clientName, String clientMachine, DatanodeDescriptor clientNode) { Preconditions.checkState(!isUnderConstruction(), "file is already an INodeFileUnderConstruction"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java index 4680d08eaf9..5fcd65d875e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java @@ -37,7 +37,6 @@ import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization; import org.apache.hadoop.hdfs.server.namenode.INode; import org.apache.hadoop.hdfs.server.namenode.INodeDirectory; import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryAttributes; -import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryWithQuota; import org.apache.hadoop.hdfs.server.namenode.INodeMap; import org.apache.hadoop.hdfs.server.namenode.INodeReference; import org.apache.hadoop.hdfs.server.namenode.Quota; @@ -55,7 +54,7 @@ import com.google.common.base.Preconditions; * storing snapshot data. When there are modifications to the directory, the old * data is stored in the latest snapshot, if there is any. */ -public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota { +public class INodeDirectoryWithSnapshot extends INodeDirectory { /** * The difference between the current state and a previous snapshot * of the children list of an INodeDirectory. @@ -486,7 +485,7 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota { INodeDirectoryWithSnapshot(INodeDirectory that, boolean adopt, DirectoryDiffList diffs) { - super(that, adopt, that.getQuotaCounts()); + super(that, adopt, true); this.diffs = diffs != null? diffs: new DirectoryDiffList(); } @@ -771,8 +770,8 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota { removedINodes, priorDeleted, countDiffChange)); if (isQuotaSet()) { - this.addSpaceConsumed2Cache(-counts.get(Quota.NAMESPACE), - -counts.get(Quota.DISKSPACE)); + getDirectoryWithQuotaFeature().addSpaceConsumed2Cache( + -counts.get(Quota.NAMESPACE), -counts.get(Quota.DISKSPACE)); } return counts; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java index dad498756e4..5408830bfed 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java @@ -136,7 +136,7 @@ public class Snapshot implements Comparable { /** The root directory of the snapshot. 
*/ static public class Root extends INodeDirectory { Root(INodeDirectory other) { - super(other, false); + super(other, false, false); } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java index 13d54a3c995..d108d59233f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java @@ -368,10 +368,7 @@ public class TestQuota { // be identical. conf.setInt(DFSConfigKeys.DFS_CONTENT_SUMMARY_LIMIT_KEY, 2); final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); - final FileSystem fs = cluster.getFileSystem(); - assertTrue("Not a HDFS: "+fs.getUri(), - fs instanceof DistributedFileSystem); - final DistributedFileSystem dfs = (DistributedFileSystem)fs; + final DistributedFileSystem dfs = cluster.getFileSystem(); try { // 1: create directory /nqdir0/qdir1/qdir20/nqdir30 diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java index eb6191469d7..5cb047c89a3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java @@ -158,7 +158,7 @@ public class TestFSImageWithSnapshot { try { loader.load(imageFile); FSImage.updateCountForQuota( - (INodeDirectoryWithQuota)fsn.getFSDirectory().getINode("/")); + INodeDirectory.valueOf(fsn.getFSDirectory().getINode("/"), "/")); } finally { fsn.getFSDirectory().writeUnlock(); fsn.writeUnlock(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java index 0353cd101e9..0cb6c7d12e6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java @@ -49,7 +49,7 @@ public class TestFsLimits { static PermissionStatus perms = new PermissionStatus("admin", "admin", FsPermission.getDefault()); - static INodeDirectoryWithQuota rootInode; + static INodeDirectory rootInode; static private FSNamesystem getMockNamesystem() { FSNamesystem fsn = mock(FSNamesystem.class); @@ -75,8 +75,8 @@ public class TestFsLimits { fileAsURI(new File(MiniDFSCluster.getBaseDirectory(), "namenode")).toString()); - rootInode = new INodeDirectoryWithQuota(getMockNamesystem() - .allocateNewInodeId(), INodeDirectory.ROOT_NAME, perms); + rootInode = new INodeDirectory(getMockNamesystem().allocateNewInodeId(), + INodeDirectory.ROOT_NAME, perms, 0L); inodes = new INode[]{ rootInode, null }; fs = null; fsIsReady = true; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java index fd6c0c735f8..2f921907927 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java @@ -211,9 +211,9 @@ public class TestINodeFile { // Call FSDirectory#unprotectedSetQuota which calls // INodeDirectory#replaceChild dfs.setQuota(dir, Long.MAX_VALUE - 1, replication * fileLen * 10); - INode dirNode = fsdir.getINode(dir.toString()); + INodeDirectory dirNode = getDir(fsdir, dir); assertEquals(dir.toString(), dirNode.getFullPathName()); - assertTrue(dirNode instanceof INodeDirectoryWithQuota); + assertTrue(dirNode.isWithQuota()); final Path newDir = new Path("/newdir"); final Path newFile = new Path(newDir, "file"); @@ -871,6 +871,12 @@ public class TestINodeFile { } } + private static INodeDirectory getDir(final FSDirectory fsdir, final Path dir) + throws IOException { + final String dirStr = dir.toString(); + return INodeDirectory.valueOf(fsdir.getINode(dirStr), dirStr); + } + /** * Test whether the inode in inodeMap has been replaced after regular inode * replacement @@ -887,21 +893,20 @@ public class TestINodeFile { final Path dir = new Path("/dir"); hdfs.mkdirs(dir); - INode dirNode = fsdir.getINode(dir.toString()); + INodeDirectory dirNode = getDir(fsdir, dir); INode dirNodeFromNode = fsdir.getInode(dirNode.getId()); assertSame(dirNode, dirNodeFromNode); // set quota to dir, which leads to node replacement hdfs.setQuota(dir, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1); - dirNode = fsdir.getINode(dir.toString()); - assertTrue(dirNode instanceof INodeDirectoryWithQuota); + dirNode = getDir(fsdir, dir); + assertTrue(dirNode.isWithQuota()); // the inode in inodeMap should also be replaced dirNodeFromNode = fsdir.getInode(dirNode.getId()); assertSame(dirNode, dirNodeFromNode); hdfs.setQuota(dir, -1, -1); - dirNode = fsdir.getINode(dir.toString()); - assertTrue(dirNode instanceof INodeDirectory); + dirNode = getDir(fsdir, dir); // the inode in inodeMap should also be replaced dirNodeFromNode = fsdir.getInode(dirNode.getId()); assertSame(dirNode, dirNodeFromNode); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java index 14e9aba5bf3..c0bd91cd00a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java @@ -1190,13 +1190,15 @@ public class TestRenameWithSnapshots { assertFalse(hdfs.exists(bar_s2)); restartClusterAndCheckImage(true); // make sure the whole referred subtree has been destroyed - assertEquals(4, fsdir.getRoot().getNamespace()); - assertEquals(0, fsdir.getRoot().getDiskspace()); + Quota.Counts q = fsdir.getRoot().getDirectoryWithQuotaFeature().getSpaceConsumed(); + assertEquals(4, q.get(Quota.NAMESPACE)); + assertEquals(0, q.get(Quota.DISKSPACE)); hdfs.deleteSnapshot(sdir1, "s1"); restartClusterAndCheckImage(true); - assertEquals(3, fsdir.getRoot().getNamespace()); - assertEquals(0, fsdir.getRoot().getDiskspace()); + q = fsdir.getRoot().getDirectoryWithQuotaFeature().getSpaceConsumed(); + assertEquals(3, q.get(Quota.NAMESPACE)); + assertEquals(0, q.get(Quota.DISKSPACE)); } /** @@ -1938,10 +1940,12 @@ public class TestRenameWithSnapshots { // check final INodeDirectorySnapshottable dir1Node = (INodeDirectorySnapshottable) 
fsdir.getINode4Write(sdir1.toString()); - assertEquals(4, dir1Node.getNamespace()); + Quota.Counts q1 = dir1Node.getDirectoryWithQuotaFeature().getSpaceConsumed(); + assertEquals(4, q1.get(Quota.NAMESPACE)); final INodeDirectorySnapshottable dir2Node = (INodeDirectorySnapshottable) fsdir.getINode4Write(sdir2.toString()); - assertEquals(2, dir2Node.getNamespace()); + Quota.Counts q2 = dir2Node.getDirectoryWithQuotaFeature().getSpaceConsumed(); + assertEquals(2, q2.get(Quota.NAMESPACE)); final Path foo_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1", foo.getName()); @@ -2005,10 +2009,12 @@ public class TestRenameWithSnapshots { final INodeDirectorySnapshottable dir1Node = (INodeDirectorySnapshottable) fsdir.getINode4Write(sdir1.toString()); // sdir1 + s1 + foo_s1 (foo) + foo (foo + s1 + bar~bar3) - assertEquals(9, dir1Node.getNamespace()); + Quota.Counts q1 = dir1Node.getDirectoryWithQuotaFeature().getSpaceConsumed(); + assertEquals(9, q1.get(Quota.NAMESPACE)); final INodeDirectorySnapshottable dir2Node = (INodeDirectorySnapshottable) fsdir.getINode4Write(sdir2.toString()); - assertEquals(2, dir2Node.getNamespace()); + Quota.Counts q2 = dir2Node.getDirectoryWithQuotaFeature().getSpaceConsumed(); + assertEquals(2, q2.get(Quota.NAMESPACE)); final Path foo_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1", foo.getName()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java index 0acad2bdf6d..01417e594a7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java @@ -46,7 +46,6 @@ import org.apache.hadoop.hdfs.server.namenode.FSDirectory; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.INode; import org.apache.hadoop.hdfs.server.namenode.INodeDirectory; -import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryWithQuota; import org.apache.hadoop.hdfs.server.namenode.INodeFile; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; @@ -157,15 +156,21 @@ public class TestSnapshotDeletion { hdfs.delete(dir, true); } + private static INodeDirectory getDir(final FSDirectory fsdir, final Path dir) + throws IOException { + final String dirStr = dir.toString(); + return INodeDirectory.valueOf(fsdir.getINode(dirStr), dirStr); + } + private void checkQuotaUsageComputation(final Path dirPath, final long expectedNs, final long expectedDs) throws IOException { - INode node = fsdir.getINode(dirPath.toString()); - assertTrue(node.isDirectory() && node.isQuotaSet()); - INodeDirectoryWithQuota dirNode = (INodeDirectoryWithQuota) node; + INodeDirectory dirNode = getDir(fsdir, dirPath); + assertTrue(dirNode.isQuotaSet()); + Quota.Counts q = dirNode.getDirectoryWithQuotaFeature().getSpaceConsumed(); assertEquals(dirNode.dumpTreeRecursively().toString(), expectedNs, - dirNode.getNamespace()); + q.get(Quota.NAMESPACE)); assertEquals(dirNode.dumpTreeRecursively().toString(), expectedDs, - dirNode.getDiskspace()); + q.get(Quota.DISKSPACE)); Quota.Counts counts = Quota.Counts.newInstance(); dirNode.computeQuotaUsage(counts, false); assertEquals(dirNode.dumpTreeRecursively().toString(), expectedNs, 
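These test updates stop reading cached usage through getNamespace()/getDiskspace() accessors and instead pull a Quota.Counts-style pair of counters from the quota feature. As a rough, hypothetical stand-in for how such enum-indexed counters behave (simplified, not the real Quota.Counts class):

```java
import java.util.EnumMap;

// Hypothetical stand-in for an enum-indexed counter pair like Quota.Counts
// (simplified; not the actual Hadoop class).
public class QuotaCountsSketch {

  enum Quota { NAMESPACE, DISKSPACE }

  static final class Counts {
    private final EnumMap<Quota, Long> values = new EnumMap<>(Quota.class);

    static Counts newInstance(long namespace, long diskspace) {
      Counts c = new Counts();
      c.values.put(Quota.NAMESPACE, namespace);
      c.values.put(Quota.DISKSPACE, diskspace);
      return c;
    }

    void add(Quota which, long delta) {
      values.merge(which, delta, Long::sum);
    }

    long get(Quota which) {
      return values.getOrDefault(which, 0L);
    }
  }

  public static void main(String[] args) {
    // Cached usage as a quota feature might report it...
    Counts cached = Counts.newInstance(4, 0);
    // ...versus a fresh recomputation over the subtree (stand-in value here).
    Counts computed = Counts.newInstance(0, 0);
    computed.add(Quota.NAMESPACE, 4);

    // The assertions in these tests compare the two views the same way.
    System.out.println(
        cached.get(Quota.NAMESPACE) == computed.get(Quota.NAMESPACE)); // true
  }
}
```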
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestDiff.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestDiff.java index 22fc8998106..2705ab5252b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestDiff.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestDiff.java @@ -305,7 +305,7 @@ public class TestDiff { final int i = Diff.search(current, inode.getKey()); Assert.assertTrue(i >= 0); final INodeDirectory oldinode = (INodeDirectory)current.get(i); - final INodeDirectory newinode = new INodeDirectory(oldinode, false); + final INodeDirectory newinode = new INodeDirectory(oldinode, false, true); newinode.setModificationTime(oldinode.getModificationTime() + 1); current.set(i, newinode); From 740cf232bd6206a5d31c0afa7b3e7a1e7bc333a1 Mon Sep 17 00:00:00 2001 From: Jing Zhao Date: Tue, 26 Nov 2013 19:28:09 +0000 Subject: [PATCH 10/27] HDFS-5561. FSNameSystem#getNameJournalStatus() in JMX should return plain text instead of HTML. Contributed by Haohui Mai. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1545791 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 ++ .../hdfs/qjournal/client/AsyncLogger.java | 2 +- .../hdfs/qjournal/client/AsyncLoggerSet.java | 20 ++++--- .../qjournal/client/IPCLoggerChannel.java | 2 +- .../qjournal/client/QuorumOutputStream.java | 6 +-- .../server/namenode/EditLogOutputStream.java | 7 ++- .../hdfs/server/namenode/FSNamesystem.java | 2 +- .../server/namenode/NamenodeJspHelper.java | 2 +- .../qjournal/client/TestIPCLoggerChannel.java | 1 - .../client/TestQuorumJournalManagerUnit.java | 14 ++++- .../namenode/TestEditLogFileOutputStream.java | 53 +++++++++++-------- .../server/namenode/TestNameNodeMXBean.java | 4 +- 12 files changed, 68 insertions(+), 48 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 2b1e6972d0a..44e63318b7b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -542,6 +542,9 @@ Release 2.3.0 - UNRELEASED HDFS-5525. Inline dust templates for new Web UI. (Haohui Mai via jing9) + HDFS-5561. FSNameSystem#getNameJournalStatus() in JMX should return plain + text instead of HTML. (Haohui Mai via jing9) + OPTIMIZATIONS HDFS-5239. Allow FSNamesystem lock fairness to be configurable (daryn) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLogger.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLogger.java index 2501e009931..f98cf0251ae 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLogger.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLogger.java @@ -150,5 +150,5 @@ interface AsyncLogger { * Append an HTML-formatted report for this logger's status to the provided * StringBuilder. This is displayed on the NN web UI. 
*/ - public void appendHtmlReport(StringBuilder sb); + public void appendReport(StringBuilder sb); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java index 74131936bde..0abbd722189 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java @@ -31,7 +31,6 @@ import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRe import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest; -import org.apache.jasper.compiler.JspUtil; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Joiner; @@ -177,17 +176,16 @@ class AsyncLoggerSet { * state of the underlying loggers. * @param sb the StringBuilder to append to */ - void appendHtmlReport(StringBuilder sb) { - sb.append(""); - sb.append("\n"); - for (AsyncLogger l : loggers) { - sb.append(""); - sb.append(""); - sb.append("\n"); + void appendReport(StringBuilder sb) { + for (int i = 0, len = loggers.size(); i < len; ++i) { + AsyncLogger l = loggers.get(i); + if (i != 0) { + sb.append(", "); + } + sb.append(l).append(" ("); + l.appendReport(sb); + sb.append(")"); } - sb.append("
    JNStatus
    " + JspUtil.escapeXml(l.toString()) + ""); - l.appendHtmlReport(sb); - sb.append("
    "); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java index 4603dbd0207..323a5599d68 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java @@ -569,7 +569,7 @@ public class IPCLoggerChannel implements AsyncLogger { } @Override - public synchronized void appendHtmlReport(StringBuilder sb) { + public synchronized void appendReport(StringBuilder sb) { sb.append("Written txid ").append(highestAckedTxId); long behind = getLagTxns(); if (behind > 0) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumOutputStream.java index 7a9549d920b..f8dece05c80 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumOutputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumOutputStream.java @@ -114,10 +114,10 @@ class QuorumOutputStream extends EditLogOutputStream { } @Override - public String generateHtmlReport() { + public String generateReport() { StringBuilder sb = new StringBuilder(); - sb.append("Writing segment beginning at txid " + segmentTxId + "
    \n"); - loggers.appendHtmlReport(sb); + sb.append("Writing segment beginning at txid " + segmentTxId + ". \n"); + loggers.appendReport(sb); return sb.toString(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogOutputStream.java index d5b7bffd100..16ae5cd387e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogOutputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogOutputStream.java @@ -24,7 +24,6 @@ import static org.apache.hadoop.util.Time.now; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import org.apache.jasper.compiler.JspUtil; /** * A generic abstract class to support journaling of edits logs into @@ -141,10 +140,10 @@ public abstract class EditLogOutputStream implements Closeable { } /** - * @return a short HTML snippet suitable for describing the current + * @return a short text snippet suitable for describing the current * status of the stream */ - public String generateHtmlReport() { - return JspUtil.escapeXml(this.toString()); + public String generateReport() { + return toString(); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index abc770c9388..2576ce661a4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -6635,7 +6635,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats, } else if (openForWrite) { EditLogOutputStream elos = jas.getCurrentStream(); if (elos != null) { - jasMap.put("stream", elos.generateHtmlReport()); + jasMap.put("stream", elos.generateReport()); } else { jasMap.put("stream", "not currently writing"); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java index 6d9692de292..c58a1f32291 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java @@ -340,7 +340,7 @@ class NamenodeJspHelper { } else if (openForWrite) { EditLogOutputStream elos = jas.getCurrentStream(); if (elos != null) { - out.println(elos.generateHtmlReport()); + out.println(elos.generateReport()); } else { out.println("not currently writing"); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestIPCLoggerChannel.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestIPCLoggerChannel.java index 425bc507eb3..789e30842ae 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestIPCLoggerChannel.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestIPCLoggerChannel.java @@ -91,7 +91,6 @@ public class TestIPCLoggerChannel { */ @Test public void 
testQueueLimiting() throws Exception { - // Block the underlying fake proxy from actually completing any calls. DelayAnswer delayer = new DelayAnswer(LOG); Mockito.doAnswer(delayer).when(mockProxy).journal( diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManagerUnit.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManagerUnit.java index 295eab1986c..42f2f79f2c8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManagerUnit.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManagerUnit.java @@ -25,6 +25,8 @@ import java.io.IOException; import java.net.URI; import java.util.List; +import junit.framework.Assert; + import org.apache.commons.logging.impl.Log4JLogger; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.qjournal.client.AsyncLogger; @@ -124,7 +126,7 @@ public class TestQuorumJournalManagerUnit { .when(spyLoggers.get(2)).startLogSegment(Mockito.anyLong()); qjm.startLogSegment(1); } - + @Test public void testQuorumOfLoggersFail() throws Exception { futureReturns(null).when(spyLoggers.get(0)).startLogSegment(Mockito.anyLong()); @@ -140,6 +142,16 @@ public class TestQuorumJournalManagerUnit { } } + @Test + public void testQuorumOutputStreamReport() throws Exception { + futureReturns(null).when(spyLoggers.get(0)).startLogSegment(Mockito.anyLong()); + futureReturns(null).when(spyLoggers.get(1)).startLogSegment(Mockito.anyLong()); + futureReturns(null).when(spyLoggers.get(2)).startLogSegment(Mockito.anyLong()); + QuorumOutputStream os = (QuorumOutputStream) qjm.startLogSegment(1); + String report = os.generateReport(); + Assert.assertFalse("Report should be plain text", report.contains("<")); + } + @Test public void testWriteEdits() throws Exception { EditLogOutputStream stm = createLogSegment(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java index e230d5affc6..1b9825edc34 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java @@ -26,24 +26,27 @@ import java.io.IOException; import org.apache.hadoop.test.PathUtils; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.util.StringUtils; import org.junit.After; import org.junit.Before; +import org.junit.BeforeClass; import org.junit.Test; /** * Test the EditLogFileOutputStream */ public class TestEditLogFileOutputStream { - private final static File TEST_DIR = PathUtils.getTestDir(TestEditLogFileOutputStream.class); - private static final File TEST_EDITS = - new File(TEST_DIR, "testEditLogFileOutput.log"); - final static int MIN_PREALLOCATION_LENGTH = - EditLogFileOutputStream.MIN_PREALLOCATION_LENGTH; + private final static File TEST_DIR = PathUtils + .getTestDir(TestEditLogFileOutputStream.class); + private static final File TEST_EDITS = new File(TEST_DIR, + "testEditLogFileOutput.log"); + final static int MIN_PREALLOCATION_LENGTH = EditLogFileOutputStream.MIN_PREALLOCATION_LENGTH; private Configuration conf; - static 
{ + @BeforeClass + public static void disableFsync() { // No need to fsync for the purposes of tests. This makes // the tests run much faster. EditLogFileOutputStream.setShouldSkipFsyncForTesting(true); @@ -52,7 +55,8 @@ public class TestEditLogFileOutputStream { @Before @After public void deleteEditsFile() { - if (TEST_EDITS.exists()) TEST_EDITS.delete(); + if (TEST_EDITS.exists()) + TEST_EDITS.delete(); } @Before @@ -66,17 +70,17 @@ public class TestEditLogFileOutputStream { elos.flushAndSync(true); assertEquals(expectedLength, elos.getFile().length()); } - + /** - * Tests writing to the EditLogFileOutputStream. Due to preallocation, the + * Tests writing to the EditLogFileOutputStream. Due to preallocation, the * length of the edit log will usually be longer than its valid contents. */ @Test public void testRawWrites() throws IOException { - EditLogFileOutputStream elos = new EditLogFileOutputStream(conf, TEST_EDITS, - 0); + EditLogFileOutputStream elos = new EditLogFileOutputStream(conf, + TEST_EDITS, 0); try { - byte[] small = new byte[] {1,2,3,4,5,8,7}; + byte[] small = new byte[] { 1, 2, 3, 4, 5, 8, 7 }; elos.create(); // The first (small) write we make extends the file by 1 MB due to // preallocation. @@ -101,7 +105,8 @@ public class TestEditLogFileOutputStream { } flushAndCheckLength(elos, 4 * MIN_PREALLOCATION_LENGTH); } finally { - if (elos != null) elos.close(); + if (elos != null) + elos.close(); } } @@ -112,8 +117,8 @@ public class TestEditLogFileOutputStream { @Test public void testEditLogFileOutputStreamCloseAbort() throws IOException { // abort after a close should just ignore - EditLogFileOutputStream editLogStream = - new EditLogFileOutputStream(conf, TEST_EDITS, 0); + EditLogFileOutputStream editLogStream = new EditLogFileOutputStream(conf, + TEST_EDITS, 0); editLogStream.close(); editLogStream.abort(); } @@ -125,8 +130,8 @@ public class TestEditLogFileOutputStream { @Test public void testEditLogFileOutputStreamCloseClose() throws IOException { // close after a close should result in an IOE - EditLogFileOutputStream editLogStream = - new EditLogFileOutputStream(conf, TEST_EDITS, 0); + EditLogFileOutputStream editLogStream = new EditLogFileOutputStream(conf, + TEST_EDITS, 0); editLogStream.close(); try { editLogStream.close(); @@ -135,7 +140,7 @@ public class TestEditLogFileOutputStream { assertTrue(msg, msg.contains("Trying to use aborted output stream")); } } - + /** * Tests EditLogFileOutputStream doesn't throw NullPointerException on being * abort/abort sequence. See HDFS-2011. 
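Note on the TestEditLogFileOutputStream hunk above: it replaces a static initializer with a JUnit 4 @BeforeClass hook. A minimal sketch of that pattern, assuming the hadoop-hdfs test classpath; the example class name is illustrative and not part of the patch:

    package org.apache.hadoop.hdfs.server.namenode;

    import org.junit.BeforeClass;

    public class ExampleEditLogTest {
      // Runs once before any test in the class, like the static block it replaces,
      // but under JUnit's control so setup failures are reported per class.
      @BeforeClass
      public static void disableFsync() {
        // Skipping fsync keeps edit-log tests fast; durability is not under test here.
        EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
      }
    }
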
@@ -143,9 +148,13 @@ public class TestEditLogFileOutputStream { @Test public void testEditLogFileOutputStreamAbortAbort() throws IOException { // abort after a close should just ignore - EditLogFileOutputStream editLogStream = - new EditLogFileOutputStream(conf, TEST_EDITS, 0); - editLogStream.abort(); - editLogStream.abort(); + EditLogFileOutputStream editLogStream = null; + try { + editLogStream = new EditLogFileOutputStream(conf, TEST_EDITS, 0); + editLogStream.abort(); + editLogStream.abort(); + } finally { + IOUtils.cleanup(null, editLogStream); + } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java index 8d188d7b651..7538be09eb0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java @@ -46,7 +46,7 @@ public class TestNameNodeMXBean { */ private static final double DELTA = 0.000001; - @SuppressWarnings({ "unchecked", "deprecation" }) + @SuppressWarnings({ "unchecked" }) @Test public void testNameNodeMXBeanInfo() throws Exception { Configuration conf = new Configuration(); @@ -152,7 +152,7 @@ public class TestNameNodeMXBean { assertEquals(0, statusMap.get("failed").size()); // This will cause the first dir to fail. - File failedNameDir = new File(nameDirUris.toArray(new URI[0])[0]); + File failedNameDir = new File(nameDirUris.iterator().next()); assertEquals(0, FileUtil.chmod( new File(failedNameDir, "current").getAbsolutePath(), "000")); cluster.getNameNodeRpc().rollEditLog(); From 05a9a80bd42f2f0ee2205e11bc4d74208de2a9b1 Mon Sep 17 00:00:00 2001 From: Colin McCabe Date: Tue, 26 Nov 2013 21:38:46 +0000 Subject: [PATCH 11/27] HDFS-5565. CacheAdmin help should match against non-dashed commands (wang via cmccabe) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1545850 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ .../main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java | 7 ++++--- .../hadoop-hdfs/src/test/resources/testCacheAdminConf.xml | 2 +- 3 files changed, 8 insertions(+), 4 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 44e63318b7b..65bae09ca3a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -405,6 +405,9 @@ Trunk (Unreleased) HDFS-5543. Fix narrow race condition in TestPathBasedCacheRequests (cmccabe) + HDFS-5565. 
CacheAdmin help should match against non-dashed commands + (wang via cmccabe) + Release 2.3.0 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java index 8c81c1cafd9..271ff681a67 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java @@ -822,14 +822,15 @@ public class CacheAdmin extends Configured implements Tool { return 0; } String commandName = args.get(0); - Command command = determineCommand(commandName); + // prepend a dash to match against the command names + Command command = determineCommand("-"+commandName); if (command == null) { System.err.print("Sorry, I don't know the command '" + commandName + "'.\n"); - System.err.print("Valid command names are:\n"); + System.err.print("Valid help command names are:\n"); String separator = ""; for (Command c : COMMANDS) { - System.err.print(separator + c.getName()); + System.err.print(separator + c.getName().substring(1)); separator = ", "; } System.err.print("\n"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCacheAdminConf.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCacheAdminConf.xml index 0662a6fb2bb..a6828b0bb35 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCacheAdminConf.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCacheAdminConf.xml @@ -387,7 +387,7 @@ Testing the help usage - -help -addPool + -help addPool From b6d483b1221296be408df66bc56b37765ce196de Mon Sep 17 00:00:00 2001 From: Uma Maheswara Rao G Date: Wed, 27 Nov 2013 10:59:54 +0000 Subject: [PATCH 12/27] HDFS-5568. Support includeSnapshots option with Fsck command. Contributed by Vinay git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1545987 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 ++ .../hdfs/server/namenode/NamenodeFsck.java | 24 +++++++++++++++++ .../org/apache/hadoop/hdfs/tools/DFSck.java | 16 ++++++++--- .../hadoop/hdfs/server/namenode/TestFsck.java | 27 +++++++++++++++++++ 4 files changed, 66 insertions(+), 3 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 65bae09ca3a..dbf5539f3e8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -649,6 +649,8 @@ Release 2.2.1 - UNRELEASED HDFS-5544. Adding Test case For Checking dfs.checksum type as NULL value. (Sathish via umamahesh) + HDFS-5568. Support includeSnapshots option with Fsck command. 
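To make the HDFS-5565 change above concrete: help arguments are now matched after prepending the dash that registered command names carry, and the dash is stripped again when listing valid names. The following self-contained sketch mimics that normalization; the command list and helper names are illustrative stand-ins, not CacheAdmin's real internals:

    import java.util.Arrays;
    import java.util.List;

    public class HelpLookupSketch {
      // Stand-in for CacheAdmin's registered command names, which include the dash.
      private static final List<String> COMMANDS =
          Arrays.asList("-addPool", "-removePool", "-listPools");

      // Match "help addPool" against "-addPool" by prepending a dash.
      static String determineCommand(String helpArg) {
        String dashed = "-" + helpArg;
        return COMMANDS.contains(dashed) ? dashed : null;
      }

      public static void main(String[] args) {
        System.out.println(determineCommand("addPool"));   // -addPool
        System.out.println(determineCommand("bogus"));     // null
        // When printing valid names, drop the leading dash, as the patch does.
        StringBuilder valid = new StringBuilder();
        String sep = "";
        for (String c : COMMANDS) {
          valid.append(sep).append(c.substring(1));
          sep = ", ";
        }
        System.out.println("Valid help command names are: " + valid);
      }
    }

With the change, a help invocation such as "hdfs cacheadmin -help addPool" resolves to the -addPool command, which is what the updated testCacheAdminConf.xml case exercises.
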
(Vinayakumar B via umamahesh) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java index b933387a31b..7ed77585853 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java @@ -36,6 +36,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.UnresolvedLinkException; import org.apache.hadoop.hdfs.BlockReader; import org.apache.hadoop.hdfs.BlockReaderFactory; @@ -46,9 +47,11 @@ import org.apache.hadoop.hdfs.net.TcpPeerServer; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.DirectoryListing; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; +import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus; import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy; import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementStatus; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; @@ -139,6 +142,7 @@ public class NamenodeFsck { private final Configuration conf; private final PrintWriter out; + private List snapshottableDirs = null; /** * Filesystem checker. @@ -178,6 +182,8 @@ public class NamenodeFsck { } else if (key.equals("startblockafter")) { this.currentCookie[0] = pmap.get("startblockafter")[0]; + } else if (key.equals("includeSnapshots")) { + this.snapshottableDirs = new ArrayList(); } } } @@ -194,6 +200,16 @@ public class NamenodeFsck { out.println(msg); namenode.getNamesystem().logFsckEvent(path, remoteAddress); + if (snapshottableDirs != null) { + SnapshottableDirectoryStatus[] snapshotDirs = namenode.getRpcServer() + .getSnapshottableDirListing(); + if (snapshotDirs != null) { + for (SnapshottableDirectoryStatus dir : snapshotDirs) { + snapshottableDirs.add(dir.getFullPath().toString()); + } + } + } + final HdfsFileStatus file = namenode.getRpcServer().getFileInfo(path); if (file != null) { @@ -272,6 +288,14 @@ public class NamenodeFsck { boolean isOpen = false; if (file.isDir()) { + if (snapshottableDirs != null && snapshottableDirs.contains(path)) { + String snapshotPath = (path.endsWith(Path.SEPARATOR) ? 
path : path + + Path.SEPARATOR) + + HdfsConstants.DOT_SNAPSHOT_DIR; + HdfsFileStatus snapshotFileInfo = namenode.getRpcServer().getFileInfo( + snapshotPath); + check(snapshotPath, snapshotFileInfo, res); + } byte[] lastReturnedName = HdfsFileStatus.EMPTY_NAME; DirectoryListing thisListing; if (showFiles) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java index 3b846c3a397..fd7fe067806 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java @@ -83,15 +83,23 @@ public class DFSck extends Configured implements Tool { + "\t-delete\tdelete corrupted files\n" + "\t-files\tprint out files being checked\n" + "\t-openforwrite\tprint out files opened for write\n" + + "\t-includeSnapshots\tinclude snapshot data if the given path" + + " indicates a snapshottable directory or there are " + + "snapshottable directories under it\n" + "\t-list-corruptfileblocks\tprint out list of missing " + "blocks and files they belong to\n" + "\t-blocks\tprint out block report\n" + "\t-locations\tprint out locations for every block\n" - + "\t-racks\tprint out network topology for data-node locations\n" - + "\t\tBy default fsck ignores files opened for write, " + + "\t-racks\tprint out network topology for data-node locations\n\n" + + "Please Note:\n" + + "\t1. By default fsck ignores files opened for write, " + "use -openforwrite to report such files. They are usually " + " tagged CORRUPT or HEALTHY depending on their block " - + "allocation status"; + + "allocation status\n" + + "\t2. Option -includeSnapshots should not be used for comparing stats," + + " should be used only for HEALTH check, as this may contain duplicates" + + " if the same file present in both original fs tree " + + "and inside snapshots."; private final UserGroupInformation ugi; private final PrintStream out; @@ -266,6 +274,8 @@ public class DFSck extends Configured implements Tool { else if (args[idx].equals("-list-corruptfileblocks")) { url.append("&listcorruptfileblocks=1"); doListCorruptFileBlocks = true; + } else if (args[idx].equals("-includeSnapshots")) { + url.append("&includeSnapshots=1"); } else if (!args[idx].startsWith("-")) { if (null == dir) { dir = args[idx]; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java index bcebce4e201..a6dd4fea1b1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java @@ -1058,4 +1058,31 @@ public class TestFsck { if (cluster != null) { cluster.shutdown(); } } } + + /** + * Test for including the snapshot files in fsck report + */ + @Test + public void testFsckForSnapshotFiles() throws Exception { + final Configuration conf = new HdfsConfiguration(); + MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1) + .build(); + try { + String runFsck = runFsck(conf, 0, true, "/", "-includeSnapshots", + "-files"); + assertTrue(runFsck.contains("HEALTHY")); + final String fileName = "/srcdat"; + DistributedFileSystem hdfs = cluster.getFileSystem(); + Path file1 = new Path(fileName); + DFSTestUtil.createFile(hdfs, 
file1, 1024, (short) 1, 1000L); + hdfs.allowSnapshot(new Path("/")); + hdfs.createSnapshot(new Path("/"), "mySnapShot"); + runFsck = runFsck(conf, 0, true, "/", "-includeSnapshots", "-files"); + assertTrue(runFsck.contains("/.snapshot/mySnapShot/srcdat")); + runFsck = runFsck(conf, 0, true, "/", "-files"); + assertFalse(runFsck.contains("mySnapShot")); + } finally { + cluster.shutdown(); + } + } } From 13edb391d06c479720202eb5ac81f1c71fe64748 Mon Sep 17 00:00:00 2001 From: Colin McCabe Date: Wed, 27 Nov 2013 17:55:52 +0000 Subject: [PATCH 13/27] HDFS-5556. Add some more NameNode cache statistics, cache pool stats (cmccabe) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1546143 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + .../dev-support/findbugsExcludeFile.xml | 5 + .../org/apache/hadoop/hdfs/DFSClient.java | 3 +- .../hadoop/hdfs/DistributedFileSystem.java | 5 +- .../apache/hadoop/hdfs/client/HdfsAdmin.java | 5 +- .../hadoop/hdfs/protocol/CacheDirective.java | 87 +++++-- .../hdfs/protocol/CacheDirectiveStats.java | 6 +- .../hadoop/hdfs/protocol/CachePoolEntry.java | 45 ++++ .../hadoop/hdfs/protocol/CachePoolInfo.java | 6 +- .../hadoop/hdfs/protocol/CachePoolStats.java | 87 +++++++ .../hadoop/hdfs/protocol/ClientProtocol.java | 2 +- ...amenodeProtocolServerSideTranslatorPB.java | 15 +- .../ClientNamenodeProtocolTranslatorPB.java | 31 +-- .../hadoop/hdfs/protocolPB/PBHelper.java | 33 +++ .../CacheReplicationMonitor.java | 4 +- .../blockmanagement/DatanodeStatistics.java | 6 + .../blockmanagement/HeartbeatManager.java | 17 ++ .../fsdataset/impl/FsDatasetCache.java | 11 +- .../fsdataset/impl/FsDatasetImpl.java | 9 +- .../datanode/metrics/FSDatasetMBean.java | 5 + .../hdfs/server/namenode/CacheManager.java | 242 +++++++++--------- .../hdfs/server/namenode/CachePool.java | 46 +++- .../hdfs/server/namenode/FSNamesystem.java | 15 +- .../hdfs/server/namenode/NameNodeMXBean.java | 10 + .../server/namenode/NameNodeRpcServer.java | 11 +- .../apache/hadoop/hdfs/tools/CacheAdmin.java | 6 +- .../main/proto/ClientNamenodeProtocol.proto | 11 +- .../server/datanode/SimulatedFSDataset.java | 5 + .../server/datanode/TestFsDatasetCache.java | 42 +-- .../server/namenode/TestCacheDirectives.java | 30 ++- .../server/namenode/TestNameNodeMXBean.java | 13 + .../namenode/ha/TestRetryCacheWithHA.java | 9 +- 32 files changed, 600 insertions(+), 225 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolEntry.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolStats.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index dbf5539f3e8..9ed3e805159 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -218,6 +218,9 @@ Trunk (Unreleased) HDFS-5286. Flatten INodeDirectory hierarchy: Replace INodeDirectoryWithQuota with DirectoryWithQuotaFeature. (szetszwo) + HDFS-5556. Add some more NameNode cache statistics, cache pool stats + (cmccabe) + OPTIMIZATIONS HDFS-5349. DNA_CACHE and DNA_UNCACHE should be by blockId only. 
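A note on the NamenodeFsck hunk above: with -includeSnapshots, each snapshottable directory is additionally checked under its .snapshot subpath, which is why the test expects to see /.snapshot/mySnapShot/srcdat in the report. A tiny standalone sketch of that path construction; the string literals stand in for Path.SEPARATOR and HdfsConstants.DOT_SNAPSHOT_DIR:

    public class SnapshotPathSketch {
      // Mirrors the patch: append ".snapshot" to a snapshottable directory,
      // taking care not to double the separator for paths like "/".
      static String snapshotPath(String path) {
        String sep = "/";                  // Path.SEPARATOR
        String dotSnapshot = ".snapshot";  // HdfsConstants.DOT_SNAPSHOT_DIR
        return (path.endsWith(sep) ? path : path + sep) + dotSnapshot;
      }

      public static void main(String[] args) {
        System.out.println(snapshotPath("/"));      // /.snapshot
        System.out.println(snapshotPath("/data"));  // /data/.snapshot
      }
    }
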
(cmccabe) diff --git a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml index 1c645050793..1245ebba61c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml @@ -352,6 +352,11 @@ + + + + + diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java index 7a5f5a48d94..11cdb4f26d9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java @@ -109,6 +109,7 @@ import org.apache.hadoop.hdfs.client.ClientMmapManager; import org.apache.hadoop.hdfs.client.HdfsDataInputStream; import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry; +import org.apache.hadoop.hdfs.protocol.CachePoolEntry; import org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks; @@ -2358,7 +2359,7 @@ public class DFSClient implements java.io.Closeable { } } - public RemoteIterator listCachePools() throws IOException { + public RemoteIterator listCachePools() throws IOException { checkOpen(); try { return namenode.listCachePools(""); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java index 5a454ce2472..be44c13aa52 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java @@ -58,6 +58,7 @@ import org.apache.hadoop.hdfs.client.HdfsAdmin; import org.apache.hadoop.hdfs.client.HdfsDataInputStream; import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry; +import org.apache.hadoop.hdfs.protocol.CachePoolEntry; import org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.DirectoryListing; @@ -1713,12 +1714,12 @@ public class DistributedFileSystem extends FileSystem { /** * List all cache pools. * - * @return A remote iterator from which you can get CachePoolInfo objects. + * @return A remote iterator from which you can get CachePoolEntry objects. * Requests will be made as needed. * @throws IOException * If there was an error listing cache pools. 
*/ - public RemoteIterator listCachePools() throws IOException { + public RemoteIterator listCachePools() throws IOException { return dfs.listCachePools(); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java index 36249822a9e..da6fa9c0de7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java @@ -29,6 +29,7 @@ import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry; import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo; +import org.apache.hadoop.hdfs.protocol.CachePoolEntry; import org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.tools.DFSAdmin; @@ -213,12 +214,12 @@ public class HdfsAdmin { /** * List all cache pools. * - * @return A remote iterator from which you can get CachePoolInfo objects. + * @return A remote iterator from which you can get CachePoolEntry objects. * Requests will be made as needed. * @throws IOException * If there was an error listing cache pools. */ - public RemoteIterator listCachePools() throws IOException { + public RemoteIterator listCachePools() throws IOException { return dfs.listCachePools(); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirective.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirective.java index f30ce026302..1fa1c289d7f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirective.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirective.java @@ -21,6 +21,8 @@ import org.apache.commons.lang.builder.HashCodeBuilder; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.server.namenode.CachePool; +import org.apache.hadoop.util.IntrusiveCollection; +import org.apache.hadoop.util.IntrusiveCollection.Element; import com.google.common.base.Preconditions; @@ -30,32 +32,32 @@ import com.google.common.base.Preconditions; * This is an implementation class, not part of the public API. 
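A hypothetical caller of the revised listCachePools() shown above, where DistributedFileSystem and HdfsAdmin now return CachePoolEntry objects pairing a CachePoolInfo with its CachePoolStats; the HdfsAdmin instance is assumed to have been constructed elsewhere:

    import java.io.IOException;
    import org.apache.hadoop.fs.RemoteIterator;
    import org.apache.hadoop.hdfs.client.HdfsAdmin;
    import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
    import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
    import org.apache.hadoop.hdfs.protocol.CachePoolStats;

    public class ListPoolsSketch {
      // Walk all cache pools and print each pool's name plus cached/needed bytes.
      static void printPools(HdfsAdmin hdfsAdmin) throws IOException {
        RemoteIterator<CachePoolEntry> it = hdfsAdmin.listCachePools();
        while (it.hasNext()) {
          CachePoolEntry entry = it.next();
          CachePoolInfo info = entry.getInfo();
          CachePoolStats stats = entry.getStats();
          System.out.println(info.getPoolName() + ": " + stats.getBytesCached()
              + "/" + stats.getBytesNeeded() + " bytes cached");
        }
      }
    }
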
*/ @InterfaceAudience.Private -public final class CacheDirective { - private final long entryId; +public final class CacheDirective implements IntrusiveCollection.Element { + private final long id; private final String path; private final short replication; - private final CachePool pool; + private CachePool pool; private long bytesNeeded; private long bytesCached; private long filesAffected; + private Element prev; + private Element next; - public CacheDirective(long entryId, String path, - short replication, CachePool pool) { - Preconditions.checkArgument(entryId > 0); - this.entryId = entryId; + public CacheDirective(long id, String path, + short replication) { + Preconditions.checkArgument(id > 0); + this.id = id; Preconditions.checkArgument(replication > 0); this.path = path; - Preconditions.checkNotNull(pool); this.replication = replication; Preconditions.checkNotNull(path); - this.pool = pool; this.bytesNeeded = 0; this.bytesCached = 0; this.filesAffected = 0; } - public long getEntryId() { - return entryId; + public long getId() { + return id; } public String getPath() { @@ -70,9 +72,9 @@ public final class CacheDirective { return replication; } - public CacheDirectiveInfo toDirective() { + public CacheDirectiveInfo toInfo() { return new CacheDirectiveInfo.Builder(). - setId(entryId). + setId(id). setPath(new Path(path)). setReplication(replication). setPool(pool.getPoolName()). @@ -88,13 +90,13 @@ public final class CacheDirective { } public CacheDirectiveEntry toEntry() { - return new CacheDirectiveEntry(toDirective(), toStats()); + return new CacheDirectiveEntry(toInfo(), toStats()); } @Override public String toString() { StringBuilder builder = new StringBuilder(); - builder.append("{ entryId:").append(entryId). + builder.append("{ id:").append(id). append(", path:").append(path). append(", replication:").append(replication). append(", pool:").append(pool). 
@@ -113,12 +115,12 @@ public final class CacheDirective { return false; } CacheDirective other = (CacheDirective)o; - return entryId == other.entryId; + return id == other.id; } @Override public int hashCode() { - return new HashCodeBuilder().append(entryId).toHashCode(); + return new HashCodeBuilder().append(id).toHashCode(); } public long getBytesNeeded() { @@ -156,4 +158,55 @@ public final class CacheDirective { public void incrementFilesAffected() { this.filesAffected++; } + + @SuppressWarnings("unchecked") + @Override // IntrusiveCollection.Element + public void insertInternal(IntrusiveCollection list, + Element prev, Element next) { + assert this.pool == null; + this.pool = ((CachePool.DirectiveList)list).getCachePool(); + this.prev = prev; + this.next = next; + } + + @Override // IntrusiveCollection.Element + public void setPrev(IntrusiveCollection list, Element prev) { + assert list == pool.getDirectiveList(); + this.prev = prev; + } + + @Override // IntrusiveCollection.Element + public void setNext(IntrusiveCollection list, Element next) { + assert list == pool.getDirectiveList(); + this.next = next; + } + + @Override // IntrusiveCollection.Element + public void removeInternal(IntrusiveCollection list) { + assert list == pool.getDirectiveList(); + this.pool = null; + this.prev = null; + this.next = null; + } + + @Override // IntrusiveCollection.Element + public Element getPrev(IntrusiveCollection list) { + if (list != pool.getDirectiveList()) { + return null; + } + return this.prev; + } + + @Override // IntrusiveCollection.Element + public Element getNext(IntrusiveCollection list) { + if (list != pool.getDirectiveList()) { + return null; + } + return this.next; + } + + @Override // IntrusiveCollection.Element + public boolean isInList(IntrusiveCollection list) { + return pool == null ? false : list == pool.getDirectiveList(); + } }; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveStats.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveStats.java index a69d175d923..b0f58b51bac 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveStats.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveStats.java @@ -94,21 +94,21 @@ public class CacheDirectiveStats { /** * @return The bytes needed. */ - public Long getBytesNeeded() { + public long getBytesNeeded() { return bytesNeeded; } /** * @return The bytes cached. */ - public Long getBytesCached() { + public long getBytesCached() { return bytesCached; } /** * @return The files affected. */ - public Long getFilesAffected() { + public long getFilesAffected() { return filesAffected; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolEntry.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolEntry.java new file mode 100644 index 00000000000..3c1e345724a --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolEntry.java @@ -0,0 +1,45 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdfs.protocol; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * Describes a Cache Pool entry. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class CachePoolEntry { + private final CachePoolInfo info; + private final CachePoolStats stats; + + public CachePoolEntry(CachePoolInfo info, CachePoolStats stats) { + this.info = info; + this.stats = stats; + } + + public CachePoolInfo getInfo() { + return info; + } + + public CachePoolStats getStats() { + return stats; + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java index 198785f8ad1..a7c15ecfb22 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java @@ -30,6 +30,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.fs.InvalidRequestException; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp; @@ -150,7 +151,10 @@ public class CachePoolInfo { public static void validate(CachePoolInfo info) throws IOException { if (info == null) { - throw new IOException("CachePoolInfo is null"); + throw new InvalidRequestException("CachePoolInfo is null"); + } + if ((info.getWeight() != null) && (info.getWeight() < 0)) { + throw new InvalidRequestException("CachePool weight is negative."); } validateName(info.poolName); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolStats.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolStats.java new file mode 100644 index 00000000000..2235447b6f8 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolStats.java @@ -0,0 +1,87 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdfs.protocol; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * CachePoolStats describes cache pool statistics. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class CachePoolStats { + public static class Builder { + private long bytesNeeded; + private long bytesCached; + private long filesAffected; + + public Builder() { + } + + public Builder setBytesNeeded(long bytesNeeded) { + this.bytesNeeded = bytesNeeded; + return this; + } + + public Builder setBytesCached(long bytesCached) { + this.bytesCached = bytesCached; + return this; + } + + public Builder setFilesAffected(long filesAffected) { + this.filesAffected = filesAffected; + return this; + } + + public CachePoolStats build() { + return new CachePoolStats(bytesNeeded, bytesCached, filesAffected); + } + }; + + private final long bytesNeeded; + private final long bytesCached; + private final long filesAffected; + + private CachePoolStats(long bytesNeeded, long bytesCached, long filesAffected) { + this.bytesNeeded = bytesNeeded; + this.bytesCached = bytesCached; + this.filesAffected = filesAffected; + } + + public long getBytesNeeded() { + return bytesNeeded; + } + + public long getBytesCached() { + return bytesCached; + } + + public long getFilesAffected() { + return filesAffected; + } + + public String toString() { + return new StringBuilder().append("{"). + append("bytesNeeded:").append(bytesNeeded). + append(", bytesCached:").append(bytesCached). + append(", filesAffected:").append(filesAffected). + append("}").toString(); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java index 8a0ae1cf3a9..8852f818f87 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java @@ -1178,6 +1178,6 @@ public interface ClientProtocol { * @return A RemoteIterator which returns CachePool objects. 
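The new CachePoolStats class above is constructed through its nested Builder. A short usage sketch with made-up numbers:

    import org.apache.hadoop.hdfs.protocol.CachePoolStats;

    public class CachePoolStatsSketch {
      public static void main(String[] args) {
        // Builder-style construction, matching the class added by this patch.
        CachePoolStats stats = new CachePoolStats.Builder()
            .setBytesNeeded(4096L)    // illustrative numbers only
            .setBytesCached(1024L)
            .setFilesAffected(3L)
            .build();
        System.out.println(stats);  // {bytesNeeded:4096, bytesCached:1024, filesAffected:3}
      }
    }
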
*/ @Idempotent - public RemoteIterator listCachePools(String prevPool) + public RemoteIterator listCachePools(String prevPool) throws IOException; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java index bff2066cb95..6529ca51b8b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java @@ -30,6 +30,7 @@ import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry; import org.apache.hadoop.hdfs.protocol.CachePoolInfo; +import org.apache.hadoop.hdfs.protocol.CachePoolEntry; import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks; import org.apache.hadoop.hdfs.protocol.DirectoryListing; @@ -51,6 +52,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowS import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto; @@ -103,7 +106,6 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSna import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseElementProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto; @@ -1136,18 +1138,15 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements public ListCachePoolsResponseProto listCachePools(RpcController controller, ListCachePoolsRequestProto request) throws ServiceException { try { - RemoteIterator iter = + RemoteIterator iter = server.listCachePools(request.getPrevPoolName()); ListCachePoolsResponseProto.Builder responseBuilder = ListCachePoolsResponseProto.newBuilder(); String prevPoolName = null; while (iter.hasNext()) { - CachePoolInfo pool = iter.next(); - ListCachePoolsResponseElementProto.Builder elemBuilder = - ListCachePoolsResponseElementProto.newBuilder(); - 
elemBuilder.setInfo(PBHelper.convert(pool)); - responseBuilder.addElements(elemBuilder.build()); - prevPoolName = pool.getPoolName(); + CachePoolEntry entry = iter.next(); + responseBuilder.addEntries(PBHelper.convert(entry)); + prevPoolName = entry.getInfo().getPoolName(); } // fill in hasNext if (prevPoolName == null) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java index c1e5b3df8f9..1cf25d36074 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java @@ -38,6 +38,7 @@ import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException; import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry; import org.apache.hadoop.hdfs.protocol.CachePoolInfo; +import org.apache.hadoop.hdfs.protocol.CachePoolEntry; import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks; import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException; @@ -61,6 +62,7 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCac import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto; @@ -96,7 +98,6 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSna import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseElementProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto; @@ -1138,23 +1139,23 @@ public class ClientNamenodeProtocolTranslatorPB implements } } - private static class BatchedCachePoolInfo - implements BatchedEntries { + private static class BatchedCachePoolEntries + implements BatchedEntries { private final ListCachePoolsResponseProto proto; - public BatchedCachePoolInfo(ListCachePoolsResponseProto proto) { + public BatchedCachePoolEntries(ListCachePoolsResponseProto proto) { this.proto = proto; } @Override - public CachePoolInfo get(int i) { - ListCachePoolsResponseElementProto elem = proto.getElements(i); - return 
PBHelper.convert(elem.getInfo()); + public CachePoolEntry get(int i) { + CachePoolEntryProto elem = proto.getEntries(i); + return PBHelper.convert(elem); } @Override public int size() { - return proto.getElementsCount(); + return proto.getEntriesCount(); } @Override @@ -1162,19 +1163,19 @@ public class ClientNamenodeProtocolTranslatorPB implements return proto.getHasMore(); } } - + private class CachePoolIterator - extends BatchedRemoteIterator { + extends BatchedRemoteIterator { public CachePoolIterator(String prevKey) { super(prevKey); } @Override - public BatchedEntries makeRequest(String prevKey) + public BatchedEntries makeRequest(String prevKey) throws IOException { try { - return new BatchedCachePoolInfo( + return new BatchedCachePoolEntries( rpcProxy.listCachePools(null, ListCachePoolsRequestProto.newBuilder(). setPrevPoolName(prevKey).build())); @@ -1184,13 +1185,13 @@ public class ClientNamenodeProtocolTranslatorPB implements } @Override - public String elementToPrevKey(CachePoolInfo element) { - return element.getPoolName(); + public String elementToPrevKey(CachePoolEntry entry) { + return entry.getInfo().getPoolName(); } } @Override - public RemoteIterator listCachePools(String prevKey) + public RemoteIterator listCachePools(String prevKey) throws IOException { return new CachePoolIterator(prevKey); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java index 4884cde2750..1aff12a605d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java @@ -38,7 +38,9 @@ import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry; import org.apache.hadoop.hdfs.protocol.CacheDirectiveStats; +import org.apache.hadoop.hdfs.protocol.CachePoolEntry; import org.apache.hadoop.hdfs.protocol.CachePoolInfo; +import org.apache.hadoop.hdfs.protocol.CachePoolStats; import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks; import org.apache.hadoop.hdfs.protocol.DatanodeID; @@ -60,7 +62,9 @@ import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateFlagProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto; @@ -1678,6 +1682,35 @@ public class PBHelper { return info; } + public static CachePoolStatsProto convert(CachePoolStats stats) { + CachePoolStatsProto.Builder builder = CachePoolStatsProto.newBuilder(); + builder.setBytesNeeded(stats.getBytesNeeded()); + 
builder.setBytesCached(stats.getBytesCached()); + builder.setFilesAffected(stats.getFilesAffected()); + return builder.build(); + } + + public static CachePoolStats convert (CachePoolStatsProto proto) { + CachePoolStats.Builder builder = new CachePoolStats.Builder(); + builder.setBytesNeeded(proto.getBytesNeeded()); + builder.setBytesCached(proto.getBytesCached()); + builder.setFilesAffected(proto.getFilesAffected()); + return builder.build(); + } + + public static CachePoolEntryProto convert(CachePoolEntry entry) { + CachePoolEntryProto.Builder builder = CachePoolEntryProto.newBuilder(); + builder.setInfo(PBHelper.convert(entry.getInfo())); + builder.setStats(PBHelper.convert(entry.getStats())); + return builder.build(); + } + + public static CachePoolEntry convert (CachePoolEntryProto proto) { + CachePoolInfo info = PBHelper.convert(proto.getInfo()); + CachePoolStats stats = PBHelper.convert(proto.getStats()); + return new CachePoolEntry(info, stats); + } + public static HdfsProtos.ChecksumTypeProto convert(DataChecksum.Type type) { return HdfsProtos.ChecksumTypeProto.valueOf(type.id); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java index 351cfa1422a..86e71fb1c18 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java @@ -208,8 +208,6 @@ public class CacheReplicationMonitor extends Thread implements Closeable { /** * Scan all CacheDirectives. Use the information to figure out * what cache replication factor each block should have. - * - * @param mark Whether the current scan is setting or clearing the mark */ private void rescanCacheDirectives() { FSDirectory fsDir = namesystem.getFSDirectory(); @@ -301,7 +299,7 @@ public class CacheReplicationMonitor extends Thread implements Closeable { pce.addBytesNeeded(neededTotal); pce.addBytesCached(cachedTotal); if (LOG.isTraceEnabled()) { - LOG.debug("Directive " + pce.getEntryId() + " is caching " + + LOG.debug("Directive " + pce.getId() + " is caching " + file.getFullPathName() + ": " + cachedTotal + "/" + neededTotal); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStatistics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStatistics.java index 24ec9b15524..9ccc5b1ff4a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStatistics.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStatistics.java @@ -42,6 +42,12 @@ public interface DatanodeStatistics { /** @return the percentage of the block pool used space over the total capacity. 
*/ public float getPercentBlockPoolUsed(); + + /** @return the total cache capacity of all DataNodes */ + public long getCacheCapacity(); + + /** @return the total cache used by all DataNodes */ + public long getCacheUsed(); /** @return the xceiver count */ public int getXceiverCount(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java index f9c28e99692..cef70f0a794 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java @@ -148,6 +148,17 @@ class HeartbeatManager implements DatanodeStatistics { public synchronized int getXceiverCount() { return stats.xceiverCount; } + + @Override + public synchronized long getCacheCapacity() { + return stats.cacheCapacity; + } + + @Override + public synchronized long getCacheUsed() { + return stats.cacheUsed; + } + @Override public synchronized long[] getStats() { @@ -308,6 +319,8 @@ class HeartbeatManager implements DatanodeStatistics { private long capacityRemaining = 0L; private long blockPoolUsed = 0L; private int xceiverCount = 0; + private long cacheCapacity = 0L; + private long cacheUsed = 0L; private int expiredHeartbeats = 0; @@ -321,6 +334,8 @@ class HeartbeatManager implements DatanodeStatistics { } else { capacityTotal += node.getDfsUsed(); } + cacheCapacity += node.getCacheCapacity(); + cacheUsed += node.getCacheUsed(); } private void subtract(final DatanodeDescriptor node) { @@ -333,6 +348,8 @@ class HeartbeatManager implements DatanodeStatistics { } else { capacityTotal -= node.getDfsUsed(); } + cacheCapacity -= node.getCacheCapacity(); + cacheUsed -= node.getCacheUsed(); } /** Increment expired heartbeat counter. */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java index b4d7e5a4e8d..fc77b0570fb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java @@ -145,6 +145,8 @@ public class FsDatasetCache { */ private final HashMap mappableBlockMap = new HashMap(); + private final AtomicLong numBlocksCached = new AtomicLong(0); + private final FsDatasetImpl dataset; private final ThreadPoolExecutor uncachingExecutor; @@ -417,6 +419,7 @@ public class FsDatasetCache { LOG.debug("Successfully cached block " + key.id + " in " + key.bpid + ". We are now caching " + newUsedBytes + " bytes in total."); } + numBlocksCached.addAndGet(1); success = true; } finally { if (!success) { @@ -465,6 +468,7 @@ public class FsDatasetCache { } long newUsedBytes = usedBytesCount.release(value.mappableBlock.getLength()); + numBlocksCached.addAndGet(-1); if (LOG.isDebugEnabled()) { LOG.debug("Uncaching of block " + key.id + " in " + key.bpid + " completed. usedBytes = " + newUsedBytes); @@ -477,14 +481,14 @@ public class FsDatasetCache { /** * Get the approximate amount of cache space used. 
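The HeartbeatManager hunks above fold per-datanode cache capacity and usage into the same add/subtract bookkeeping used for the other cluster-wide statistics. A generic, standalone sketch of that aggregation pattern; Node is a stand-in for DatanodeDescriptor, not the real type:

    public class CacheStatsAggregator {
      // Stand-in for the per-datanode values reported via heartbeats.
      static class Node {
        final long cacheCapacity;
        final long cacheUsed;
        Node(long cacheCapacity, long cacheUsed) {
          this.cacheCapacity = cacheCapacity;
          this.cacheUsed = cacheUsed;
        }
      }

      private long cacheCapacity;
      private long cacheUsed;

      // Called when a node joins the live set (or after a fresh heartbeat is applied).
      synchronized void add(Node n) {
        cacheCapacity += n.cacheCapacity;
        cacheUsed += n.cacheUsed;
      }

      // Called when a node is removed, or before re-adding it with updated values.
      synchronized void subtract(Node n) {
        cacheCapacity -= n.cacheCapacity;
        cacheUsed -= n.cacheUsed;
      }

      synchronized long getCacheCapacity() { return cacheCapacity; }
      synchronized long getCacheUsed() { return cacheUsed; }
    }
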
*/ - public long getDnCacheUsed() { + public long getCacheUsed() { return usedBytesCount.get(); } /** * Get the maximum amount of bytes we can cache. This is a constant. */ - public long getDnCacheCapacity() { + public long getCacheCapacity() { return maxBytes; } @@ -496,4 +500,7 @@ public class FsDatasetCache { return numBlocksFailedToUncache.get(); } + public long getNumBlocksCached() { + return numBlocksCached.get(); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java index d8c0155f719..a9740b8c93c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java @@ -292,12 +292,12 @@ class FsDatasetImpl implements FsDatasetSpi { @Override // FSDatasetMBean public long getCacheUsed() { - return cacheManager.getDnCacheUsed(); + return cacheManager.getCacheUsed(); } @Override // FSDatasetMBean public long getCacheCapacity() { - return cacheManager.getDnCacheCapacity(); + return cacheManager.getCacheCapacity(); } @Override // FSDatasetMBean @@ -310,6 +310,11 @@ class FsDatasetImpl implements FsDatasetSpi { return cacheManager.getNumBlocksFailedToUncache(); } + @Override // FSDatasetMBean + public long getNumBlocksCached() { + return cacheManager.getNumBlocksCached(); + } + /** * Find the block's on-disk length */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/FSDatasetMBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/FSDatasetMBean.java index 40ccefb6c3a..8388c0b38df 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/FSDatasetMBean.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/FSDatasetMBean.java @@ -88,6 +88,11 @@ public interface FSDatasetMBean { */ public long getCacheCapacity(); + /** + * Returns the number of blocks cached. + */ + public long getNumBlocksCached(); + /** * Returns the number of blocks that the datanode was unable to cache */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java index fe8052135ff..af549c50b5a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java @@ -49,6 +49,7 @@ import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry; +import org.apache.hadoop.hdfs.protocol.CachePoolEntry; import org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.LocatedBlock; @@ -99,24 +100,24 @@ public final class CacheManager { private final BlockManager blockManager; /** - * Cache entries, sorted by ID. + * Cache directives, sorted by ID. 
* * listCacheDirectives relies on the ordering of elements in this map * to track what has already been listed by the client. */ - private final TreeMap entriesById = + private final TreeMap directivesById = new TreeMap(); /** - * The entry ID to use for a new entry. Entry IDs always increase, and are + * The directive ID to use for a new directive. IDs always increase, and are * never reused. */ - private long nextEntryId; + private long nextDirectiveId; /** - * Cache entries, sorted by path + * Cache directives, sorted by path */ - private final TreeMap> entriesByPath = + private final TreeMap> directivesByPath = new TreeMap>(); /** @@ -177,7 +178,7 @@ public final class CacheManager { BlockManager blockManager) { this.namesystem = namesystem; this.blockManager = blockManager; - this.nextEntryId = 1; + this.nextDirectiveId = 1; this.maxListCachePoolsResponses = conf.getInt( DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES, DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES_DEFAULT); @@ -239,7 +240,7 @@ public final class CacheManager { public TreeMap getEntriesById() { assert namesystem.hasReadLock(); - return entriesById; + return directivesById; } @VisibleForTesting @@ -250,10 +251,10 @@ public final class CacheManager { private long getNextEntryId() throws IOException { assert namesystem.hasWriteLock(); - if (nextEntryId >= Long.MAX_VALUE - 1) { + if (nextDirectiveId >= Long.MAX_VALUE - 1) { throw new IOException("No more available IDs."); } - return nextEntryId++; + return nextDirectiveId++; } // Helper getter / validation methods @@ -301,7 +302,7 @@ public final class CacheManager { } /** - * Get a CacheDirective by ID, validating the ID and that the entry + * Get a CacheDirective by ID, validating the ID and that the directive * exists. */ private CacheDirective getById(long id) throws InvalidRequestException { @@ -309,13 +310,13 @@ public final class CacheManager { if (id <= 0) { throw new InvalidRequestException("Invalid negative ID."); } - // Find the entry. - CacheDirective entry = entriesById.get(id); - if (entry == null) { + // Find the directive. 
+ CacheDirective directive = directivesById.get(id); + if (directive == null) { throw new InvalidRequestException("No directive with ID " + id + " found."); } - return entry; + return directive; } /** @@ -332,32 +333,34 @@ public final class CacheManager { // RPC handlers - private void addInternal(CacheDirective entry) { - entriesById.put(entry.getEntryId(), entry); - String path = entry.getPath(); - List entryList = entriesByPath.get(path); - if (entryList == null) { - entryList = new ArrayList(1); - entriesByPath.put(path, entryList); + private void addInternal(CacheDirective directive, CachePool pool) { + boolean addedDirective = pool.getDirectiveList().add(directive); + assert addedDirective; + directivesById.put(directive.getId(), directive); + String path = directive.getPath(); + List directives = directivesByPath.get(path); + if (directives == null) { + directives = new ArrayList(1); + directivesByPath.put(path, directives); } - entryList.add(entry); + directives.add(directive); } public CacheDirectiveInfo addDirective( - CacheDirectiveInfo directive, FSPermissionChecker pc) + CacheDirectiveInfo info, FSPermissionChecker pc) throws IOException { assert namesystem.hasWriteLock(); - CacheDirective entry; + CacheDirective directive; try { - CachePool pool = getCachePool(validatePoolName(directive)); + CachePool pool = getCachePool(validatePoolName(info)); checkWritePermission(pc, pool); - String path = validatePath(directive); - short replication = validateReplication(directive, (short)1); + String path = validatePath(info); + short replication = validateReplication(info, (short)1); long id; - if (directive.getId() != null) { - // We are loading an entry from the edit log. + if (info.getId() != null) { + // We are loading a directive from the edit log. // Use the ID from the edit log. - id = directive.getId(); + id = info.getId(); if (id <= 0) { throw new InvalidRequestException("can't add an ID " + "of " + id + ": it is not positive."); @@ -366,88 +369,90 @@ public final class CacheManager { throw new InvalidRequestException("can't add an ID " + "of " + id + ": it is too big."); } - if (nextEntryId <= id) { - nextEntryId = id + 1; + if (nextDirectiveId <= id) { + nextDirectiveId = id + 1; } } else { - // Add a new entry with the next available ID. + // Add a new directive with the next available ID. id = getNextEntryId(); } - entry = new CacheDirective(id, path, replication, pool); - addInternal(entry); + directive = new CacheDirective(id, path, replication); + addInternal(directive, pool); } catch (IOException e) { - LOG.warn("addDirective of " + directive + " failed: ", e); + LOG.warn("addDirective of " + info + " failed: ", e); throw e; } - LOG.info("addDirective of " + directive + " successful."); + LOG.info("addDirective of " + info + " successful."); if (monitor != null) { monitor.kick(); } - return entry.toDirective(); + return directive.toInfo(); } - public void modifyDirective(CacheDirectiveInfo directive, + public void modifyDirective(CacheDirectiveInfo info, FSPermissionChecker pc) throws IOException { assert namesystem.hasWriteLock(); String idString = - (directive.getId() == null) ? - "(null)" : directive.getId().toString(); + (info.getId() == null) ? + "(null)" : info.getId().toString(); try { // Check for invalid IDs. 
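addInternal() above has to keep three views of the same directive consistent: the ID-sorted map, the per-path list, and the owning pool's directive list that this patch introduces. A simplified sketch of that bookkeeping with ordinary collections, using an ArrayList where the patch uses an IntrusiveCollection (class and field names are illustrative):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;

// Illustrative sketch, not part of the patch: the three indexes addInternal keeps in step.
public class DirectiveIndexSketch {
  static class Pool {
    final List<Directive> directives = new ArrayList<Directive>();  // stands in for the intrusive list
  }

  static class Directive {
    final long id;
    final String path;
    final Pool pool;
    Directive(long id, String path, Pool pool) {
      this.id = id; this.path = path; this.pool = pool;
    }
  }

  private final TreeMap<Long, Directive> byId = new TreeMap<Long, Directive>();
  private final Map<String, List<Directive>> byPath = new HashMap<String, List<Directive>>();

  void add(Directive d) {
    d.pool.directives.add(d);              // pool -> its directives
    byId.put(d.id, d);                     // global lookup and ID-ordered listing
    List<Directive> samePath = byPath.get(d.path);
    if (samePath == null) {
      samePath = new ArrayList<Directive>(1);
      byPath.put(d.path, samePath);
    }
    samePath.add(d);                       // all directives caching the same path
  }
}

removeInternal() has to undo all three insertions, which is why the patch also detaches the directive from its pool's list there.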
- Long id = directive.getId(); + Long id = info.getId(); if (id == null) { throw new InvalidRequestException("Must supply an ID."); } CacheDirective prevEntry = getById(id); checkWritePermission(pc, prevEntry.getPool()); String path = prevEntry.getPath(); - if (directive.getPath() != null) { - path = validatePath(directive); + if (info.getPath() != null) { + path = validatePath(info); } short replication = prevEntry.getReplication(); - if (directive.getReplication() != null) { - replication = validateReplication(directive, replication); + if (info.getReplication() != null) { + replication = validateReplication(info, replication); } CachePool pool = prevEntry.getPool(); - if (directive.getPool() != null) { - pool = getCachePool(validatePoolName(directive)); + if (info.getPool() != null) { + pool = getCachePool(validatePoolName(info)); checkWritePermission(pc, pool); } removeInternal(prevEntry); CacheDirective newEntry = - new CacheDirective(id, path, replication, pool); - addInternal(newEntry); + new CacheDirective(id, path, replication); + addInternal(newEntry, pool); } catch (IOException e) { LOG.warn("modifyDirective of " + idString + " failed: ", e); throw e; } LOG.info("modifyDirective of " + idString + " successfully applied " + - directive + "."); + info+ "."); } - public void removeInternal(CacheDirective existing) + public void removeInternal(CacheDirective directive) throws InvalidRequestException { assert namesystem.hasWriteLock(); - // Remove the corresponding entry in entriesByPath. - String path = existing.getPath(); - List entries = entriesByPath.get(path); - if (entries == null || !entries.remove(existing)) { + // Remove the corresponding entry in directivesByPath. + String path = directive.getPath(); + List directives = directivesByPath.get(path); + if (directives == null || !directives.remove(directive)) { throw new InvalidRequestException("Failed to locate entry " + - existing.getEntryId() + " by path " + existing.getPath()); + directive.getId() + " by path " + directive.getPath()); } - if (entries.size() == 0) { - entriesByPath.remove(path); + if (directives.size() == 0) { + directivesByPath.remove(path); } - entriesById.remove(existing.getEntryId()); + directivesById.remove(directive.getId()); + directive.getPool().getDirectiveList().remove(directive); + assert directive.getPool() == null; } public void removeDirective(long id, FSPermissionChecker pc) throws IOException { assert namesystem.hasWriteLock(); try { - CacheDirective existing = getById(id); - checkWritePermission(pc, existing.getPool()); - removeInternal(existing); + CacheDirective directive = getById(id); + checkWritePermission(pc, directive.getPool()); + removeInternal(directive); } catch (IOException e) { LOG.warn("removeDirective of " + id + " failed: ", e); throw e; @@ -478,13 +483,13 @@ public final class CacheManager { new ArrayList(NUM_PRE_ALLOCATED_ENTRIES); int numReplies = 0; SortedMap tailMap = - entriesById.tailMap(prevId + 1); + directivesById.tailMap(prevId + 1); for (Entry cur : tailMap.entrySet()) { if (numReplies >= maxListCacheDirectivesNumResponses) { return new BatchedListEntries(replies, true); } - CacheDirective curEntry = cur.getValue(); - CacheDirectiveInfo info = cur.getValue().toDirective(); + CacheDirective curDirective = cur.getValue(); + CacheDirectiveInfo info = cur.getValue().toInfo(); if (filter.getPool() != null && !info.getPool().equals(filter.getPool())) { continue; @@ -496,7 +501,7 @@ public final class CacheManager { boolean hasPermission = true; if (pc != null) { try { - 
pc.checkPermission(curEntry.getPool(), FsAction.READ); + pc.checkPermission(curDirective.getPool(), FsAction.READ); } catch (AccessControlException e) { hasPermission = false; } @@ -530,7 +535,7 @@ public final class CacheManager { pool = CachePool.createFromInfoAndDefaults(info); cachePools.put(pool.getPoolName(), pool); LOG.info("Created new cache pool " + pool); - return pool.getInfo(null); + return pool.getInfo(true); } /** @@ -599,39 +604,34 @@ public final class CacheManager { throw new InvalidRequestException( "Cannot remove non-existent cache pool " + poolName); } - - // Remove entries using this pool - // TODO: could optimize this somewhat to avoid the need to iterate - // over all entries in entriesById - Iterator> iter = - entriesById.entrySet().iterator(); + // Remove all directives in this pool. + Iterator iter = pool.getDirectiveList().iterator(); while (iter.hasNext()) { - Entry entry = iter.next(); - if (entry.getValue().getPool() == pool) { - entriesByPath.remove(entry.getValue().getPath()); - iter.remove(); - } + CacheDirective directive = iter.next(); + directivesByPath.remove(directive.getPath()); + directivesById.remove(directive.getId()); + iter.remove(); } if (monitor != null) { monitor.kick(); } } - public BatchedListEntries + public BatchedListEntries listCachePools(FSPermissionChecker pc, String prevKey) { assert namesystem.hasReadLock(); final int NUM_PRE_ALLOCATED_ENTRIES = 16; - ArrayList results = - new ArrayList(NUM_PRE_ALLOCATED_ENTRIES); + ArrayList results = + new ArrayList(NUM_PRE_ALLOCATED_ENTRIES); SortedMap tailMap = cachePools.tailMap(prevKey, false); int numListed = 0; for (Entry cur : tailMap.entrySet()) { if (numListed++ >= maxListCachePoolsResponses) { - return new BatchedListEntries(results, true); + return new BatchedListEntries(results, true); } - results.add(cur.getValue().getInfo(pc)); + results.add(cur.getValue().getEntry(pc)); } - return new BatchedListEntries(results, false); + return new BatchedListEntries(results, false); } public void setCachedLocations(LocatedBlock block) { @@ -693,13 +693,6 @@ public final class CacheManager { for (Iterator iter = blockIds.iterator(); iter.hasNext(); ) { Block block = new Block(iter.next()); BlockInfo blockInfo = blockManager.getStoredBlock(block); - if (blockInfo.getGenerationStamp() < block.getGenerationStamp()) { - // The NameNode will eventually remove or update the out-of-date block. - // Until then, we pretend that it isn't cached. - LOG.warn("Genstamp in cache report disagrees with our genstamp for " + - block + ": expected genstamp " + blockInfo.getGenerationStamp()); - continue; - } if (!blockInfo.isComplete()) { LOG.warn("Ignoring block id " + block.getBlockId() + ", because " + "it is in not complete yet. 
It is in state " + @@ -743,9 +736,9 @@ public final class CacheManager { */ public void saveState(DataOutput out, String sdPath) throws IOException { - out.writeLong(nextEntryId); + out.writeLong(nextDirectiveId); savePools(out, sdPath); - saveEntries(out, sdPath); + saveDirectives(out, sdPath); } /** @@ -755,10 +748,10 @@ public final class CacheManager { * @throws IOException */ public void loadState(DataInput in) throws IOException { - nextEntryId = in.readLong(); - // pools need to be loaded first since entries point to their parent pool + nextDirectiveId = in.readLong(); + // pools need to be loaded first since directives point to their parent pool loadPools(in); - loadEntries(in); + loadDirectives(in); } /** @@ -773,7 +766,7 @@ public final class CacheManager { Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step); out.writeInt(cachePools.size()); for (CachePool pool: cachePools.values()) { - pool.getInfo(null).writeTo(out); + pool.getInfo(true).writeTo(out); counter.increment(); } prog.endStep(Phase.SAVING_CHECKPOINT, step); @@ -782,19 +775,19 @@ public final class CacheManager { /* * Save cache entries to fsimage */ - private void saveEntries(DataOutput out, String sdPath) + private void saveDirectives(DataOutput out, String sdPath) throws IOException { StartupProgress prog = NameNode.getStartupProgress(); Step step = new Step(StepType.CACHE_ENTRIES, sdPath); prog.beginStep(Phase.SAVING_CHECKPOINT, step); - prog.setTotal(Phase.SAVING_CHECKPOINT, step, entriesById.size()); + prog.setTotal(Phase.SAVING_CHECKPOINT, step, directivesById.size()); Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step); - out.writeInt(entriesById.size()); - for (CacheDirective entry: entriesById.values()) { - out.writeLong(entry.getEntryId()); - Text.writeString(out, entry.getPath()); - out.writeShort(entry.getReplication()); - Text.writeString(out, entry.getPool().getPoolName()); + out.writeInt(directivesById.size()); + for (CacheDirective directive : directivesById.values()) { + out.writeLong(directive.getId()); + Text.writeString(out, directive.getPath()); + out.writeShort(directive.getReplication()); + Text.writeString(out, directive.getPool().getPoolName()); counter.increment(); } prog.endStep(Phase.SAVING_CHECKPOINT, step); @@ -819,38 +812,41 @@ public final class CacheManager { } /** - * Load cache entries from the fsimage + * Load cache directives from the fsimage */ - private void loadEntries(DataInput in) throws IOException { + private void loadDirectives(DataInput in) throws IOException { StartupProgress prog = NameNode.getStartupProgress(); Step step = new Step(StepType.CACHE_ENTRIES); prog.beginStep(Phase.LOADING_FSIMAGE, step); - int numberOfEntries = in.readInt(); - prog.setTotal(Phase.LOADING_FSIMAGE, step, numberOfEntries); + int numDirectives = in.readInt(); + prog.setTotal(Phase.LOADING_FSIMAGE, step, numDirectives); Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, step); - for (int i = 0; i < numberOfEntries; i++) { - long entryId = in.readLong(); + for (int i = 0; i < numDirectives; i++) { + long directiveId = in.readLong(); String path = Text.readString(in); short replication = in.readShort(); String poolName = Text.readString(in); // Get pool reference by looking it up in the map CachePool pool = cachePools.get(poolName); if (pool == null) { - throw new IOException("Entry refers to pool " + poolName + + throw new IOException("Directive refers to pool " + poolName + ", which does not exist."); } - CacheDirective entry = - new CacheDirective(entryId, path, 
replication, pool); - if (entriesById.put(entry.getEntryId(), entry) != null) { - throw new IOException("An entry with ID " + entry.getEntryId() + + CacheDirective directive = + new CacheDirective(directiveId, path, replication); + boolean addedDirective = pool.getDirectiveList().add(directive); + assert addedDirective; + if (directivesById.put(directive.getId(), directive) != null) { + throw new IOException("A directive with ID " + directive.getId() + " already exists"); } - List entries = entriesByPath.get(entry.getPath()); - if (entries == null) { - entries = new LinkedList(); - entriesByPath.put(entry.getPath(), entries); + List directives = + directivesByPath.get(directive.getPath()); + if (directives == null) { + directives = new LinkedList(); + directivesByPath.put(directive.getPath(), directives); } - entries.add(entry); + directives.add(directive); counter.increment(); } prog.endStep(Phase.LOADING_FSIMAGE, step); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java index c8d4221c386..af5a3ae093c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java @@ -26,9 +26,13 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hdfs.protocol.CacheDirective; +import org.apache.hadoop.hdfs.protocol.CachePoolEntry; import org.apache.hadoop.hdfs.protocol.CachePoolInfo; +import org.apache.hadoop.hdfs.protocol.CachePoolStats; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.IntrusiveCollection; import com.google.common.base.Preconditions; @@ -69,6 +73,22 @@ public final class CachePool { private int weight; + public final static class DirectiveList + extends IntrusiveCollection { + private CachePool cachePool; + + private DirectiveList(CachePool cachePool) { + this.cachePool = cachePool; + } + + public CachePool getCachePool() { + return cachePool; + } + } + + @Nonnull + private final DirectiveList directiveList = new DirectiveList(this); + /** * Create a new cache pool based on a CachePoolInfo object and the defaults. * We will fill in information that was not supplied according to the @@ -171,7 +191,7 @@ public final class CachePool { * @return * Cache pool information. */ - private CachePoolInfo getInfo(boolean fullInfo) { + CachePoolInfo getInfo(boolean fullInfo) { CachePoolInfo info = new CachePoolInfo(poolName); if (!fullInfo) { return info; @@ -182,6 +202,19 @@ public final class CachePool { setWeight(weight); } + /** + * Get statistics about this CachePool. + * + * @return Cache pool statistics. + */ + private CachePoolStats getStats() { + return new CachePoolStats.Builder(). + setBytesNeeded(0). + setBytesCached(0). + setFilesAffected(0). + build(); + } + /** * Returns a CachePoolInfo describing this CachePool based on the permissions * of the calling user. 
Unprivileged users will see only minimal descriptive @@ -189,9 +222,9 @@ public final class CachePool { * * @param pc Permission checker to be used to validate the user's permissions, * or null - * @return CachePoolInfo describing this CachePool + * @return CachePoolEntry describing this CachePool */ - public CachePoolInfo getInfo(FSPermissionChecker pc) { + public CachePoolEntry getEntry(FSPermissionChecker pc) { boolean hasPermission = true; if (pc != null) { try { @@ -200,7 +233,8 @@ public final class CachePool { hasPermission = false; } } - return getInfo(hasPermission); + return new CachePoolEntry(getInfo(hasPermission), + hasPermission ? getStats() : new CachePoolStats.Builder().build()); } public String toString() { @@ -212,4 +246,8 @@ public final class CachePool { append(", weight:").append(weight). append(" }").toString(); } + + public DirectiveList getDirectiveList() { + return directiveList; + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 2576ce661a4..6f9e1a48a1b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -164,6 +164,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; +import org.apache.hadoop.hdfs.protocol.CachePoolEntry; import org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.hdfs.protocol.QuotaExceededException; import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException; @@ -6429,6 +6430,16 @@ public class FSNamesystem implements Namesystem, FSClusterStats, return datanodeStatistics.getCapacityRemainingPercent(); } + @Override // NameNodeMXBean + public long getCacheCapacity() { + return datanodeStatistics.getCacheCapacity(); + } + + @Override // NameNodeMXBean + public long getCacheUsed() { + return datanodeStatistics.getCacheUsed(); + } + @Override // NameNodeMXBean public long getTotalBlocks() { return getBlocksTotal(); @@ -7285,11 +7296,11 @@ public class FSNamesystem implements Namesystem, FSClusterStats, getEditLog().logSync(); } - public BatchedListEntries listCachePools(String prevKey) + public BatchedListEntries listCachePools(String prevKey) throws IOException { final FSPermissionChecker pc = isPermissionEnabled ? getPermissionChecker() : null; - BatchedListEntries results; + BatchedListEntries results; checkOperation(OperationCategory.READ); boolean success = false; readLock(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java index ff2e3ea10dd..fd46d546226 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java @@ -101,6 +101,16 @@ public interface NameNodeMXBean { * @return the percentage of the remaining space on the cluster */ public float getPercentRemaining(); + + /** + * Returns the amount of cache used by the datanode (in bytes). 
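getEntry() above is where the caller's visibility is decided: when the permission check on the pool passes (or no checker is supplied), the returned CachePoolEntry carries the full info plus stats, otherwise only the minimal view. A rough sketch of that pattern on its own; the PermissionChecker interface and the fields shown are assumptions made for illustration:

// Illustrative sketch, not part of the patch: a permission-gated view of a cache pool.
public class PoolEntrySketch {
  interface PermissionChecker {               // assumed interface, for illustration only
    boolean canRead(String poolName);
  }

  static class PoolEntry {                    // assumed fields, for illustration only
    final String name;
    final String owner;
    final long bytesCached;
    PoolEntry(String name, String owner, long bytesCached) {
      this.name = name; this.owner = owner; this.bytesCached = bytesCached;
    }
  }

  /** Full view only when the caller may read the pool; a null checker means privileged access. */
  static PoolEntry describe(String poolName, String owner, long bytesCached,
      PermissionChecker pc) {
    boolean hasPermission = (pc == null) || pc.canRead(poolName);
    return hasPermission
        ? new PoolEntry(poolName, owner, bytesCached)
        : new PoolEntry(poolName, null, 0L);  // minimal, unprivileged view
  }
}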
+ */ + public long getCacheUsed(); + + /** + * Returns the total cache capacity of the datanode (in bytes). + */ + public long getCacheCapacity(); /** * Get the total space used by the block pools of this namenode diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index e9981ebfd6b..aa42ec676e9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -64,6 +64,7 @@ import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry; import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo; import org.apache.hadoop.hdfs.protocol.CachePoolInfo; +import org.apache.hadoop.hdfs.protocol.CachePoolEntry; import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; @@ -1298,26 +1299,26 @@ class NameNodeRpcServer implements NamenodeProtocols { } private class ServerSideCachePoolIterator - extends BatchedRemoteIterator { + extends BatchedRemoteIterator { public ServerSideCachePoolIterator(String prevKey) { super(prevKey); } @Override - public BatchedEntries makeRequest(String prevKey) + public BatchedEntries makeRequest(String prevKey) throws IOException { return namesystem.listCachePools(prevKey); } @Override - public String elementToPrevKey(CachePoolInfo element) { - return element.getPoolName(); + public String elementToPrevKey(CachePoolEntry entry) { + return entry.getInfo().getPoolName(); } } @Override - public RemoteIterator listCachePools(String prevKey) + public RemoteIterator listCachePools(String prevKey) throws IOException { return new ServerSideCachePoolIterator(prevKey); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java index 271ff681a67..d814fa560bd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java @@ -32,6 +32,7 @@ import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry; import org.apache.hadoop.hdfs.protocol.CacheDirectiveStats; +import org.apache.hadoop.hdfs.protocol.CachePoolEntry; import org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo; import org.apache.hadoop.hdfs.server.namenode.CachePool; @@ -755,9 +756,10 @@ public class CacheAdmin extends Configured implements Tool { build(); int numResults = 0; try { - RemoteIterator iter = dfs.listCachePools(); + RemoteIterator iter = dfs.listCachePools(); while (iter.hasNext()) { - CachePoolInfo info = iter.next(); + CachePoolEntry entry = iter.next(); + CachePoolInfo info = entry.getInfo(); String[] row = new String[5]; if (name == null || info.getPoolName().equals(name)) { row[0] = info.getPoolName(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto index 
4eed2d6302e..a2b1b735732 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto @@ -421,6 +421,12 @@ message CachePoolInfoProto { optional int32 weight = 5; } +message CachePoolStatsProto { + required int64 bytesNeeded = 1; + required int64 bytesCached = 2; + required int64 filesAffected = 3; +} + message AddCachePoolRequestProto { required CachePoolInfoProto info = 1; } @@ -447,12 +453,13 @@ message ListCachePoolsRequestProto { } message ListCachePoolsResponseProto { - repeated ListCachePoolsResponseElementProto elements = 1; + repeated CachePoolEntryProto entries = 1; required bool hasMore = 2; } -message ListCachePoolsResponseElementProto { +message CachePoolEntryProto { required CachePoolInfoProto info = 1; + required CachePoolStatsProto stats = 2; } message GetFileLinkInfoRequestProto { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java index a855f126420..61bca7db197 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java @@ -505,6 +505,11 @@ public class SimulatedFSDataset implements FsDatasetSpi { return 0l; } + @Override // FSDatasetMBean + public long getNumBlocksCached() { + return 0l; + } + @Override public long getNumBlocksFailedToCache() { return 0l; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java index 7e3a32742d8..d439fab6aa8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java @@ -49,8 +49,8 @@ import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB; import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; +import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetCache; import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetCache.PageRounder; -import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.MappableBlock; import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream; import org.apache.hadoop.hdfs.server.namenode.FSImage; import org.apache.hadoop.hdfs.server.namenode.NameNode; @@ -72,6 +72,8 @@ import org.junit.After; import org.junit.Assert; import org.junit.Before; import org.junit.Test; +import org.apache.log4j.Level; +import org.apache.log4j.LogManager; import com.google.common.base.Supplier; @@ -95,6 +97,7 @@ public class TestFsDatasetCache { static { EditLogFileOutputStream.setShouldSkipFsyncForTesting(false); + LogManager.getLogger(FsDatasetCache.class).setLevel(Level.DEBUG); } @Before @@ -201,17 +204,21 @@ public class TestFsDatasetCache { /** * Blocks until cache usage hits the expected new value. 
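The helper whose javadoc begins above does not assert immediately; because the datanode caches and uncaches blocks asynchronously, the test polls until both the cached-byte count and the cached-block count reach their expected values or a timeout expires. A condensed sketch of the same polling pattern, assuming the FSDatasetMBean getters added earlier in this patch (class and method names here are placeholders):

import java.util.concurrent.TimeoutException;

import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.test.GenericTestUtils;

import com.google.common.base.Supplier;

// Illustrative sketch, not part of the patch; class and method names are placeholders.
public class CacheUsagePollSketch {
  /** Poll every 100 ms, for at most 60 s, until the dataset reports the expected usage. */
  static void waitForCacheUsage(final FsDatasetSpi<?> fsd,
      final long expectedBytes, final long expectedBlocks)
      throws TimeoutException, InterruptedException {
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        // Both values come from the FSDatasetMBean getters added earlier in this patch.
        return fsd.getCacheUsed() == expectedBytes
            && fsd.getNumBlocksCached() == expectedBlocks;
      }
    }, 100, 60000);
  }
}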
*/ - private long verifyExpectedCacheUsage(final long expected) throws Exception { + private long verifyExpectedCacheUsage(final long expectedCacheUsed, + final long expectedBlocks) throws Exception { GenericTestUtils.waitFor(new Supplier() { private int tries = 0; @Override public Boolean get() { - long curDnCacheUsed = fsd.getCacheUsed(); - if (curDnCacheUsed != expected) { + long curCacheUsed = fsd.getCacheUsed(); + long curBlocks = fsd.getNumBlocksCached(); + if ((curCacheUsed != expectedCacheUsed) || + (curBlocks != expectedBlocks)) { if (tries++ > 10) { - LOG.info("verifyExpectedCacheUsage: expected " + - expected + ", got " + curDnCacheUsed + "; " + + LOG.info("verifyExpectedCacheUsage: have " + + curCacheUsed + "/" + expectedCacheUsed + " bytes cached; " + + curBlocks + "/" + expectedBlocks + " blocks cached. " + "memlock limit = " + NativeIO.POSIX.getCacheManipulator().getMemlockLimit() + ". Waiting..."); @@ -221,14 +228,15 @@ public class TestFsDatasetCache { return true; } }, 100, 60000); - return expected; + return expectedCacheUsed; } private void testCacheAndUncacheBlock() throws Exception { LOG.info("beginning testCacheAndUncacheBlock"); final int NUM_BLOCKS = 5; - verifyExpectedCacheUsage(0); + verifyExpectedCacheUsage(0, 0); + assertEquals(0, fsd.getNumBlocksCached()); // Write a test file final Path testFile = new Path("/testCacheBlock"); @@ -255,7 +263,7 @@ public class TestFsDatasetCache { // Cache each block in succession, checking each time for (int i=0; i iter = dfs.listCachePools(); - CachePoolInfo info = iter.next(); + RemoteIterator iter = dfs.listCachePools(); + CachePoolInfo info = iter.next().getInfo(); assertEquals(poolName, info.getPoolName()); assertEquals(ownerName, info.getOwnerName()); assertEquals(groupName, info.getGroupName()); @@ -278,7 +279,7 @@ public class TestCacheDirectives { setMode(mode).setWeight(weight)); iter = dfs.listCachePools(); - info = iter.next(); + info = iter.next().getInfo(); assertEquals(poolName, info.getPoolName()); assertEquals(ownerName, info.getOwnerName()); assertEquals(groupName, info.getGroupName()); @@ -507,9 +508,9 @@ public class TestCacheDirectives { .setGroupName(groupName) .setMode(mode) .setWeight(weight)); - RemoteIterator pit = dfs.listCachePools(); + RemoteIterator pit = dfs.listCachePools(); assertTrue("No cache pools found", pit.hasNext()); - CachePoolInfo info = pit.next(); + CachePoolInfo info = pit.next().getInfo(); assertEquals(pool, info.getPoolName()); assertEquals(groupName, info.getGroupName()); assertEquals(mode, info.getMode()); @@ -542,7 +543,7 @@ public class TestCacheDirectives { // Check that state came back up pit = dfs.listCachePools(); assertTrue("No cache pools found", pit.hasNext()); - info = pit.next(); + info = pit.next().getInfo(); assertEquals(pool, info.getPoolName()); assertEquals(pool, info.getPoolName()); assertEquals(groupName, info.getGroupName()); @@ -713,7 +714,16 @@ public class TestCacheDirectives { try { cluster.waitActive(); DistributedFileSystem dfs = cluster.getFileSystem(); - NameNode namenode = cluster.getNameNode(); + final NameNode namenode = cluster.getNameNode(); + GenericTestUtils.waitFor(new Supplier() { + @Override + public Boolean get() { + return ((namenode.getNamesystem().getCacheCapacity() == + (NUM_DATANODES * CACHE_CAPACITY)) && + (namenode.getNamesystem().getCacheUsed() == 0)); + } + }, 500, 60000); + NamenodeProtocols nnRpc = namenode.getRpcServer(); Path rootDir = helper.getDefaultWorkingDirectory(dfs); // Create the pool @@ -967,8 +977,8 @@ public class 
TestCacheDirectives { dfs.addCachePool(new CachePoolInfo(poolName) .setMode(new FsPermission((short)0700))); // Should only see partial info - RemoteIterator it = myDfs.listCachePools(); - CachePoolInfo info = it.next(); + RemoteIterator it = myDfs.listCachePools(); + CachePoolInfo info = it.next().getInfo(); assertFalse(it.hasNext()); assertEquals("Expected pool name", poolName, info.getPoolName()); assertNull("Unexpected owner name", info.getOwnerName()); @@ -981,7 +991,7 @@ public class TestCacheDirectives { .setWeight(99)); // Should see full info it = myDfs.listCachePools(); - info = it.next(); + info = it.next().getInfo(); assertFalse(it.hasNext()); assertEquals("Expected pool name", poolName, info.getPoolName()); assertEquals("Mismatched owner name", myUser.getShortUserName(), diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java index 7538be09eb0..d459d30dc55 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java @@ -31,7 +31,10 @@ import javax.management.ObjectName; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.io.nativeio.NativeIO; +import org.apache.hadoop.io.nativeio.NativeIO.POSIX.NoMlockCacheManipulator; import org.apache.hadoop.util.VersionInfo; import org.junit.Test; import org.mortbay.util.ajax.JSON; @@ -46,10 +49,16 @@ public class TestNameNodeMXBean { */ private static final double DELTA = 0.000001; + static { + NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator()); + } + @SuppressWarnings({ "unchecked" }) @Test public void testNameNodeMXBeanInfo() throws Exception { Configuration conf = new Configuration(); + conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY, + NativeIO.POSIX.getCacheManipulator().getMemlockLimit()); MiniDFSCluster cluster = null; try { @@ -171,6 +180,10 @@ public class TestNameNodeMXBean { } assertEquals(1, statusMap.get("active").size()); assertEquals(1, statusMap.get("failed").size()); + assertEquals(0L, mbs.getAttribute(mxbeanName, "CacheUsed")); + assertEquals(NativeIO.POSIX.getCacheManipulator().getMemlockLimit() * + cluster.getDataNodes().size(), + mbs.getAttribute(mxbeanName, "CacheCapacity")); } finally { if (cluster != null) { for (URI dir : cluster.getNameDirs(0)) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java index aeaee8d036b..b4755a97453 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java @@ -54,6 +54,7 @@ import org.apache.hadoop.hdfs.MiniDFSNNTopology; import org.apache.hadoop.hdfs.NameNodeProxies; import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag; +import org.apache.hadoop.hdfs.protocol.CachePoolEntry; import 
org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; @@ -904,7 +905,7 @@ public class TestRetryCacheWithHA { @Override boolean checkNamenodeBeforeReturn() throws Exception { for (int i = 0; i < CHECKTIMES; i++) { - RemoteIterator iter = dfs.listCachePools(); + RemoteIterator iter = dfs.listCachePools(); if (iter.hasNext()) { return true; } @@ -941,8 +942,8 @@ public class TestRetryCacheWithHA { @Override boolean checkNamenodeBeforeReturn() throws Exception { for (int i = 0; i < CHECKTIMES; i++) { - RemoteIterator iter = dfs.listCachePools(); - if (iter.hasNext() && iter.next().getWeight() == 99) { + RemoteIterator iter = dfs.listCachePools(); + if (iter.hasNext() && iter.next().getInfo().getWeight() == 99) { return true; } Thread.sleep(1000); @@ -978,7 +979,7 @@ public class TestRetryCacheWithHA { @Override boolean checkNamenodeBeforeReturn() throws Exception { for (int i = 0; i < CHECKTIMES; i++) { - RemoteIterator iter = dfs.listCachePools(); + RemoteIterator iter = dfs.listCachePools(); if (!iter.hasNext()) { return true; } From 2214871d916fdcae62aa51afbb5fd571f2808745 Mon Sep 17 00:00:00 2001 From: Jing Zhao Date: Wed, 27 Nov 2013 18:20:14 +0000 Subject: [PATCH 14/27] HDFS-5545. Allow specifying endpoints for listeners in HttpServer. Contributed by Haohui Mai. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1546151 13f79535-47bb-0310-9956-ffa450edef68 --- .../org/apache/hadoop/http/HttpServer.java | 512 ++++++++++-------- .../hadoop/http/HttpServerFunctionalTest.java | 29 +- .../apache/hadoop/http/TestGlobalFilter.java | 4 +- .../apache/hadoop/http/TestHttpServer.java | 48 +- .../apache/hadoop/http/TestPathFilter.java | 4 +- .../apache/hadoop/http/TestSSLHttpServer.java | 118 ++-- .../apache/hadoop/http/TestServletFilter.java | 6 +- .../org/apache/hadoop/log/TestLogLevel.java | 13 +- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + .../java/org/apache/hadoop/hdfs/DFSUtil.java | 16 + .../server/JournalNodeHttpServer.java | 15 +- .../hadoop/hdfs/server/datanode/DataNode.java | 29 +- .../hadoop/hdfs/server/namenode/NameNode.java | 1 + .../server/namenode/NameNodeHttpServer.java | 58 +- .../server/namenode/SecondaryNameNode.java | 14 +- .../mapreduce/v2/app/TestJobEndNotifier.java | 6 +- .../hadoop/mapred/TestJobEndNotifier.java | 5 +- .../org/apache/hadoop/yarn/webapp/WebApp.java | 6 +- .../apache/hadoop/yarn/webapp/WebApps.java | 46 +- .../apache/hadoop/yarn/webapp/TestWebApp.java | 14 - .../server/nodemanager/webapp/WebServer.java | 2 +- .../yarn/server/webproxy/WebAppProxy.java | 5 +- .../webproxy/TestWebAppProxyServlet.java | 8 +- 23 files changed, 550 insertions(+), 412 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java index f940ac99b16..50a8d47260d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java @@ -19,12 +19,13 @@ package org.apache.hadoop.http; import java.io.FileNotFoundException; import java.io.IOException; -import java.io.PrintWriter; import java.io.InterruptedIOException; +import java.io.PrintWriter; import java.net.BindException; import java.net.InetSocketAddress; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; -import 
java.security.GeneralSecurityException; import java.util.ArrayList; import java.util.Collections; import java.util.Enumeration; @@ -32,7 +33,6 @@ import java.util.HashMap; import java.util.List; import java.util.Map; -import javax.net.ssl.SSLServerSocketFactory; import javax.servlet.Filter; import javax.servlet.FilterChain; import javax.servlet.FilterConfig; @@ -60,7 +60,6 @@ import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authentication.server.AuthenticationFilter; import org.apache.hadoop.security.authorize.AccessControlList; -import org.apache.hadoop.security.ssl.SSLFactory; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.Shell; import org.mortbay.io.Buffer; @@ -71,8 +70,8 @@ import org.mortbay.jetty.RequestLog; import org.mortbay.jetty.Server; import org.mortbay.jetty.handler.ContextHandler; import org.mortbay.jetty.handler.ContextHandlerCollection; -import org.mortbay.jetty.handler.RequestLogHandler; import org.mortbay.jetty.handler.HandlerCollection; +import org.mortbay.jetty.handler.RequestLogHandler; import org.mortbay.jetty.nio.SelectChannelConnector; import org.mortbay.jetty.security.SslSocketConnector; import org.mortbay.jetty.servlet.Context; @@ -86,6 +85,7 @@ import org.mortbay.thread.QueuedThreadPool; import org.mortbay.util.MultiException; import com.google.common.base.Preconditions; +import com.google.common.collect.Lists; import com.sun.jersey.spi.container.servlet.ServletContainer; /** @@ -114,11 +114,25 @@ public class HttpServer implements FilterContainer { public static final String BIND_ADDRESS = "bind.address"; - private AccessControlList adminsAcl; + private final AccessControlList adminsAcl; - private SSLFactory sslFactory; protected final Server webServer; - protected final Connector listener; + + private static class ListenerInfo { + /** + * Boolean flag to determine whether the HTTP server should clean up the + * listener in stop(). + */ + private final boolean isManaged; + private final Connector listener; + private ListenerInfo(boolean isManaged, Connector listener) { + this.isManaged = isManaged; + this.listener = listener; + } + } + + private final List listeners = Lists.newArrayList(); + protected final WebAppContext webAppContext; protected final boolean findPort; protected final Map defaultContexts = @@ -127,34 +141,111 @@ public class HttpServer implements FilterContainer { static final String STATE_DESCRIPTION_ALIVE = " - alive"; static final String STATE_DESCRIPTION_NOT_LIVE = " - not live"; - private final boolean listenerStartedExternally; - /** * Class to construct instances of HTTP server with specific options. 
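The ListenerInfo holder introduced above exists so that, later in this patch, stop() can distinguish connectors the HttpServer created itself (and must close) from connectors handed in by the caller, whose lifecycle stays outside the server. A stripped-down sketch of that ownership flag, with java.io.Closeable standing in for the Jetty Connector:

import java.io.Closeable;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

// Illustrative sketch, not part of the patch: only managed listeners are closed on stop().
public class ManagedListenerSketch {
  static class ListenerInfo {
    final boolean isManaged;     // true if this server owns the listener's lifecycle
    final Closeable listener;    // Closeable stands in for the Jetty Connector
    ListenerInfo(boolean isManaged, Closeable listener) {
      this.isManaged = isManaged;
      this.listener = listener;
    }
  }

  private final List<ListenerInfo> listeners = new ArrayList<ListenerInfo>();

  void addManagedListener(Closeable l)   { listeners.add(new ListenerInfo(true, l)); }
  void addUnmanagedListener(Closeable l) { listeners.add(new ListenerInfo(false, l)); }

  void stop() throws IOException {
    for (ListenerInfo li : listeners) {
      if (li.isManaged) {
        li.listener.close();     // externally supplied listeners are left alone
      }
    }
  }
}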
*/ public static class Builder { - String name; - String bindAddress; - Integer port; - Boolean findPort; - Configuration conf; - Connector connector; - String[] pathSpecs; - AccessControlList adminsAcl; - boolean securityEnabled = false; - String usernameConfKey = null; - String keytabConfKey = null; - + private ArrayList endpoints = Lists.newArrayList(); + private Connector connector; + private String name; + private Configuration conf; + private String[] pathSpecs; + private AccessControlList adminsAcl; + private boolean securityEnabled = false; + private String usernameConfKey; + private String keytabConfKey; + private boolean needsClientAuth; + private String trustStore; + private String trustStorePassword; + private String trustStoreType; + + private String keyStore; + private String keyStorePassword; + private String keyStoreType; + + // The -keypass option in keytool + private String keyPassword; + + @Deprecated + private String bindAddress; + @Deprecated + private int port = -1; + + private boolean findPort; + + private String hostName; + public Builder setName(String name){ this.name = name; return this; } + + /** + * Add an endpoint that the HTTP server should listen to. + * + * @param endpoint + * the endpoint of that the HTTP server should listen to. The + * scheme specifies the protocol (i.e. HTTP / HTTPS), the host + * specifies the binding address, and the port specifies the + * listening port. Unspecified or zero port means that the server + * can listen to any port. + */ + public Builder addEndpoint(URI endpoint) { + endpoints.add(endpoint); + return this; + } + + /** + * Set the hostname of the http server. The host name is used to resolve the + * _HOST field in Kerberos principals. The hostname of the first listener + * will be used if the name is unspecified. + */ + public Builder hostName(String hostName) { + this.hostName = hostName; + return this; + } + public Builder trustStore(String location, String password, String type) { + this.trustStore = location; + this.trustStorePassword = password; + this.trustStoreType = type; + return this; + } + + public Builder keyStore(String location, String password, String type) { + this.keyStore = location; + this.keyStorePassword = password; + this.keyStoreType = type; + return this; + } + + public Builder keyPassword(String password) { + this.keyPassword = password; + return this; + } + + /** + * Specify whether the server should authorize the client in SSL + * connections. + */ + public Builder needsClientAuth(boolean value) { + this.needsClientAuth = value; + return this; + } + + /** + * Use addEndpoint() instead. + */ + @Deprecated public Builder setBindAddress(String bindAddress){ this.bindAddress = bindAddress; return this; } - + + /** + * Use addEndpoint() instead. 
+ */ + @Deprecated public Builder setPort(int port) { this.port = port; return this; @@ -204,25 +295,70 @@ public class HttpServer implements FilterContainer { if (this.name == null) { throw new HadoopIllegalArgumentException("name is not set"); } - if (this.bindAddress == null) { - throw new HadoopIllegalArgumentException("bindAddress is not set"); + + // Make the behavior compatible with deprecated interfaces + if (bindAddress != null && port != -1) { + try { + endpoints.add(0, new URI("http", "", bindAddress, port, "", "", "")); + } catch (URISyntaxException e) { + throw new HadoopIllegalArgumentException("Invalid endpoint: "+ e); + } } - if (this.port == null) { - throw new HadoopIllegalArgumentException("port is not set"); + + if (endpoints.size() == 0) { + throw new HadoopIllegalArgumentException("No endpoints specified"); } - if (this.findPort == null) { - throw new HadoopIllegalArgumentException("findPort is not set"); + + if (hostName == null) { + hostName = endpoints.get(0).getHost(); } if (this.conf == null) { conf = new Configuration(); } - HttpServer server = new HttpServer(this.name, this.bindAddress, this.port, - this.findPort, this.conf, this.adminsAcl, this.connector, this.pathSpecs); + HttpServer server = new HttpServer(this); + if (this.securityEnabled) { - server.initSpnego(this.conf, this.usernameConfKey, this.keytabConfKey); + server.initSpnego(conf, hostName, usernameConfKey, keytabConfKey); } + + if (connector != null) { + server.addUnmanagedListener(connector); + } + + for (URI ep : endpoints) { + Connector listener = null; + String scheme = ep.getScheme(); + if ("http".equals(scheme)) { + listener = HttpServer.createDefaultChannelConnector(); + } else if ("https".equals(scheme)) { + SslSocketConnector c = new SslSocketConnector(); + c.setNeedClientAuth(needsClientAuth); + c.setKeyPassword(keyPassword); + + if (keyStore != null) { + c.setKeystore(keyStore); + c.setKeystoreType(keyStoreType); + c.setPassword(keyStorePassword); + } + + if (trustStore != null) { + c.setTruststore(trustStore); + c.setTruststoreType(trustStoreType); + c.setTrustPassword(trustStorePassword); + } + listener = c; + + } else { + throw new HadoopIllegalArgumentException( + "unknown scheme for endpoint:" + ep); + } + listener.setHost(ep.getHost()); + listener.setPort(ep.getPort() == -1 ? 0 : ep.getPort()); + server.addManagedListener(listener); + } + server.loadListeners(); return server; } } @@ -233,7 +369,7 @@ public class HttpServer implements FilterContainer { ) throws IOException { this(name, bindAddress, port, findPort, new Configuration()); } - + @Deprecated public HttpServer(String name, String bindAddress, int port, boolean findPort, Configuration conf, Connector connector) throws IOException { @@ -314,51 +450,39 @@ public class HttpServer implements FilterContainer { * @param pathSpecs Path specifications that this httpserver will be serving. * These will be added to any filters. 
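With the build() method above in place, callers describe listeners as URIs rather than a single bind address and port, so one server can carry several endpoints. A small usage sketch along the lines of the test helpers changed later in this patch; the server name is a placeholder:

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.HttpServer;

// Illustrative usage sketch, not part of the patch; the server name is a placeholder.
public class EndpointBuilderSketch {
  public static void main(String[] args) throws Exception {
    HttpServer server = new HttpServer.Builder()
        .setName("sketch")
        .addEndpoint(URI.create("http://localhost:0"))  // port 0: let the server pick a free port
        .setFindPort(true)
        .setConf(new Configuration())
        .build();
    server.start();
    // getConnectorAddress(0) reports the address the first listener actually bound to.
    System.out.println("listening at " + server.getConnectorAddress(0));
    server.stop();
  }
}

Adding an https URI together with keyStore(), keyPassword() and, where client certificates are required, trustStore() and needsClientAuth(true) would put an SSL connector on the same server; that listener needs a real keystore file before it can open.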
*/ + @Deprecated public HttpServer(String name, String bindAddress, int port, boolean findPort, Configuration conf, AccessControlList adminsAcl, Connector connector, String[] pathSpecs) throws IOException { - webServer = new Server(); - this.findPort = findPort; - this.adminsAcl = adminsAcl; - - if(connector == null) { - listenerStartedExternally = false; - if (HttpConfig.isSecure()) { - sslFactory = new SSLFactory(SSLFactory.Mode.SERVER, conf); - try { - sslFactory.init(); - } catch (GeneralSecurityException ex) { - throw new IOException(ex); - } - SslSocketConnector sslListener = new SslSocketConnector() { - @Override - protected SSLServerSocketFactory createFactory() throws Exception { - return sslFactory.createSSLServerSocketFactory(); - } - }; - listener = sslListener; - } else { - listener = createBaseListener(conf); - } - listener.setHost(bindAddress); - listener.setPort(port); - LOG.info("SSL is enabled on " + toString()); - } else { - listenerStartedExternally = true; - listener = connector; - } - - webServer.addConnector(listener); + this(new Builder().setName(name) + .addEndpoint(URI.create("http://" + bindAddress + ":" + port)) + .setFindPort(findPort).setConf(conf).setACL(adminsAcl) + .setConnector(connector).setPathSpec(pathSpecs)); + } + + private HttpServer(final Builder b) throws IOException { + final String appDir = getWebAppsPath(b.name); + this.webServer = new Server(); + this.adminsAcl = b.adminsAcl; + this.webAppContext = createWebAppContext(b.name, b.conf, adminsAcl, appDir); + this.findPort = b.findPort; + initializeWebServer(b.name, b.hostName, b.conf, b.pathSpecs); + } + + private void initializeWebServer(String name, String hostName, + Configuration conf, String[] pathSpecs) + throws FileNotFoundException, IOException { + + Preconditions.checkNotNull(webAppContext); int maxThreads = conf.getInt(HTTP_MAX_THREADS, -1); // If HTTP_MAX_THREADS is not configured, QueueThreadPool() will use the // default value (currently 250). - QueuedThreadPool threadPool = maxThreads == -1 ? - new QueuedThreadPool() : new QueuedThreadPool(maxThreads); + QueuedThreadPool threadPool = maxThreads == -1 ? 
new QueuedThreadPool() + : new QueuedThreadPool(maxThreads); threadPool.setDaemon(true); webServer.setThreadPool(threadPool); - final String appDir = getWebAppsPath(name); ContextHandlerCollection contexts = new ContextHandlerCollection(); RequestLog requestLog = HttpRequestLog.getRequestLog(name); @@ -366,30 +490,24 @@ public class HttpServer implements FilterContainer { RequestLogHandler requestLogHandler = new RequestLogHandler(); requestLogHandler.setRequestLog(requestLog); HandlerCollection handlers = new HandlerCollection(); - handlers.setHandlers(new Handler[] {requestLogHandler, contexts}); + handlers.setHandlers(new Handler[] { requestLogHandler, contexts }); webServer.setHandler(handlers); - } - else { + } else { webServer.setHandler(contexts); } - webAppContext = new WebAppContext(); - webAppContext.setDisplayName(name); - webAppContext.setContextPath("/"); - webAppContext.setWar(appDir + "/" + name); - webAppContext.getServletContext().setAttribute(CONF_CONTEXT_ATTRIBUTE, conf); - webAppContext.getServletContext().setAttribute(ADMINS_ACL, adminsAcl); - addNoCacheFilter(webAppContext); + final String appDir = getWebAppsPath(name); + webServer.addHandler(webAppContext); addDefaultApps(contexts, appDir, conf); - + addGlobalFilter("safety", QuotingInputFilter.class.getName(), null); - final FilterInitializer[] initializers = getFilterInitializers(conf); + final FilterInitializer[] initializers = getFilterInitializers(conf); if (initializers != null) { conf = new Configuration(conf); - conf.set(BIND_ADDRESS, bindAddress); - for(FilterInitializer c : initializers) { + conf.set(BIND_ADDRESS, hostName); + for (FilterInitializer c : initializers) { c.initFilter(this, conf); } } @@ -404,10 +522,29 @@ public class HttpServer implements FilterContainer { } } - @SuppressWarnings("unchecked") - private void addNoCacheFilter(WebAppContext ctxt) { - defineFilter(ctxt, NO_CACHE_FILTER, - NoCacheFilter.class.getName(), Collections.EMPTY_MAP, new String[] { "/*"}); + private void addUnmanagedListener(Connector connector) { + listeners.add(new ListenerInfo(false, connector)); + } + + private void addManagedListener(Connector connector) { + listeners.add(new ListenerInfo(true, connector)); + } + + private static WebAppContext createWebAppContext(String name, + Configuration conf, AccessControlList adminsAcl, final String appDir) { + WebAppContext ctx = new WebAppContext(); + ctx.setDisplayName(name); + ctx.setContextPath("/"); + ctx.setWar(appDir + "/" + name); + ctx.getServletContext().setAttribute(CONF_CONTEXT_ATTRIBUTE, conf); + ctx.getServletContext().setAttribute(ADMINS_ACL, adminsAcl); + addNoCacheFilter(ctx); + return ctx; + } + + private static void addNoCacheFilter(WebAppContext ctxt) { + defineFilter(ctxt, NO_CACHE_FILTER, NoCacheFilter.class.getName(), + Collections. emptyMap(), new String[] { "/*" }); } /** @@ -651,7 +788,7 @@ public class HttpServer implements FilterContainer { /** * Define a filter for a context and set up default url mappings. */ - public void defineFilter(Context ctx, String name, + public static void defineFilter(Context ctx, String name, String classname, Map parameters, String[] urls) { FilterHolder holder = new FilterHolder(); @@ -715,93 +852,47 @@ public class HttpServer implements FilterContainer { * Get the port that the server is on * @return the port */ + @Deprecated public int getPort() { return webServer.getConnectors()[0].getLocalPort(); } /** - * Get the port that corresponds to a particular connector. 
In the case of - * HDFS, the second connector corresponds to the HTTPS connector. + * Get the address that corresponds to a particular connector. * - * @return the corresponding port for the connector, or -1 if there's no such - * connector. + * @return the corresponding address for the connector, or null if there's no + * such connector or the connector is not bounded. */ - public int getConnectorPort(int index) { + public InetSocketAddress getConnectorAddress(int index) { Preconditions.checkArgument(index >= 0); - return index < webServer.getConnectors().length ? - webServer.getConnectors()[index].getLocalPort() : -1; + if (index > webServer.getConnectors().length) + return null; + + Connector c = webServer.getConnectors()[index]; + if (c.getLocalPort() == -1) { + // The connector is not bounded + return null; + } + + return new InetSocketAddress(c.getHost(), c.getLocalPort()); } /** * Set the min, max number of worker threads (simultaneous connections). */ public void setThreads(int min, int max) { - QueuedThreadPool pool = (QueuedThreadPool) webServer.getThreadPool() ; + QueuedThreadPool pool = (QueuedThreadPool) webServer.getThreadPool(); pool.setMinThreads(min); pool.setMaxThreads(max); } - /** - * Configure an ssl listener on the server. - * @param addr address to listen on - * @param keystore location of the keystore - * @param storPass password for the keystore - * @param keyPass password for the key - * @deprecated Use {@link #addSslListener(InetSocketAddress, Configuration, boolean)} - */ - @Deprecated - public void addSslListener(InetSocketAddress addr, String keystore, - String storPass, String keyPass) throws IOException { - if (webServer.isStarted()) { - throw new IOException("Failed to add ssl listener"); - } - SslSocketConnector sslListener = new SslSocketConnector(); - sslListener.setHost(addr.getHostName()); - sslListener.setPort(addr.getPort()); - sslListener.setKeystore(keystore); - sslListener.setPassword(storPass); - sslListener.setKeyPassword(keyPass); - webServer.addConnector(sslListener); - } - - /** - * Configure an ssl listener on the server. 
- * @param addr address to listen on - * @param sslConf conf to retrieve ssl options - * @param needCertsAuth whether x509 certificate authentication is required - */ - public void addSslListener(InetSocketAddress addr, Configuration sslConf, - boolean needCertsAuth) throws IOException { - if (webServer.isStarted()) { - throw new IOException("Failed to add ssl listener"); - } - if (needCertsAuth) { - // setting up SSL truststore for authenticating clients - System.setProperty("javax.net.ssl.trustStore", sslConf.get( - "ssl.server.truststore.location", "")); - System.setProperty("javax.net.ssl.trustStorePassword", sslConf.get( - "ssl.server.truststore.password", "")); - System.setProperty("javax.net.ssl.trustStoreType", sslConf.get( - "ssl.server.truststore.type", "jks")); - } - SslSocketConnector sslListener = new SslSocketConnector(); - sslListener.setHost(addr.getHostName()); - sslListener.setPort(addr.getPort()); - sslListener.setKeystore(sslConf.get("ssl.server.keystore.location")); - sslListener.setPassword(sslConf.get("ssl.server.keystore.password", "")); - sslListener.setKeyPassword(sslConf.get("ssl.server.keystore.keypassword", "")); - sslListener.setKeystoreType(sslConf.get("ssl.server.keystore.type", "jks")); - sslListener.setNeedClientAuth(needCertsAuth); - webServer.addConnector(sslListener); - } - - protected void initSpnego(Configuration conf, + private void initSpnego(Configuration conf, String hostName, String usernameConfKey, String keytabConfKey) throws IOException { Map params = new HashMap(); String principalInConf = conf.get(usernameConfKey); if (principalInConf != null && !principalInConf.isEmpty()) { - params.put("kerberos.principal", - SecurityUtil.getServerPrincipal(principalInConf, listener.getHost())); + params.put("kerberos.principal", SecurityUtil.getServerPrincipal( + principalInConf, hostName)); } String httpKeytab = conf.get(keytabConfKey); if (httpKeytab != null && !httpKeytab.isEmpty()) { @@ -819,8 +910,7 @@ public class HttpServer implements FilterContainer { public void start() throws IOException { try { try { - openListener(); - LOG.info("Jetty bound to port " + listener.getLocalPort()); + openListeners(); webServer.start(); } catch (IOException ex) { LOG.info("HttpServer.start() threw a non Bind IOException", ex); @@ -856,73 +946,65 @@ public class HttpServer implements FilterContainer { } } + private void loadListeners() { + for (ListenerInfo li : listeners) { + webServer.addConnector(li.listener); + } + } + /** * Open the main listener for the server * @throws Exception */ - void openListener() throws Exception { - if (listener.getLocalPort() != -1) { // it's already bound - return; - } - if (listenerStartedExternally) { // Expect that listener was started securely - throw new Exception("Expected webserver's listener to be started " + - "previously but wasn't"); - } - int port = listener.getPort(); - while (true) { - // jetty has a bug where you can't reopen a listener that previously - // failed to open w/o issuing a close first, even if the port is changed - try { - listener.close(); - listener.open(); - break; - } catch (BindException ex) { - if (port == 0 || !findPort) { - BindException be = new BindException( - "Port in use: " + listener.getHost() + ":" + listener.getPort()); - be.initCause(ex); - throw be; - } + void openListeners() throws Exception { + for (ListenerInfo li : listeners) { + Connector listener = li.listener; + if (!li.isManaged || li.listener.getLocalPort() != -1) { + // This listener is either started externally or has been 
bound + continue; + } + int port = listener.getPort(); + while (true) { + // jetty has a bug where you can't reopen a listener that previously + // failed to open w/o issuing a close first, even if the port is changed + try { + listener.close(); + listener.open(); + LOG.info("Jetty bound to port " + listener.getLocalPort()); + break; + } catch (BindException ex) { + if (port == 0 || !findPort) { + BindException be = new BindException("Port in use: " + + listener.getHost() + ":" + listener.getPort()); + be.initCause(ex); + throw be; + } + } + // try the next port number + listener.setPort(++port); + Thread.sleep(100); } - // try the next port number - listener.setPort(++port); - Thread.sleep(100); } } - /** - * Return the bind address of the listener. - * @return InetSocketAddress of the listener - */ - public InetSocketAddress getListenerAddress() { - int port = listener.getLocalPort(); - if (port == -1) { // not bound, return requested port - port = listener.getPort(); - } - return new InetSocketAddress(listener.getHost(), port); - } - /** * stop the server */ public void stop() throws Exception { MultiException exception = null; - try { - listener.close(); - } catch (Exception e) { - LOG.error("Error while stopping listener for webapp" - + webAppContext.getDisplayName(), e); - exception = addMultiException(exception, e); - } - - try { - if (sslFactory != null) { - sslFactory.destroy(); + for (ListenerInfo li : listeners) { + if (!li.isManaged) { + continue; + } + + try { + li.listener.close(); + } catch (Exception e) { + LOG.error( + "Error while stopping listener for webapp" + + webAppContext.getDisplayName(), e); + exception = addMultiException(exception, e); } - } catch (Exception e) { - LOG.error("Error while destroying the SSLFactory" - + webAppContext.getDisplayName(), e); - exception = addMultiException(exception, e); } try { @@ -934,6 +1016,7 @@ public class HttpServer implements FilterContainer { + webAppContext.getDisplayName(), e); exception = addMultiException(exception, e); } + try { webServer.stop(); } catch (Exception e) { @@ -974,10 +1057,17 @@ public class HttpServer implements FilterContainer { */ @Override public String toString() { - return listener != null ? - ("HttpServer at http://" + listener.getHost() + ":" + listener.getLocalPort() + "/" - + (isAlive() ? STATE_DESCRIPTION_ALIVE : STATE_DESCRIPTION_NOT_LIVE)) - : "Inactive HttpServer"; + if (listeners.size() == 0) { + return "Inactive HttpServer"; + } else { + StringBuilder sb = new StringBuilder("HttpServer (") + .append(isAlive() ? 
STATE_DESCRIPTION_ALIVE : STATE_DESCRIPTION_NOT_LIVE).append("), listening at:"); + for (ListenerInfo li : listeners) { + Connector l = li.listener; + sb.append(l.getHost()).append(":").append(l.getPort()).append("/,"); + } + return sb.toString(); + } } /** diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/HttpServerFunctionalTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/HttpServerFunctionalTest.java index ee86ebcdd0d..e03059a3470 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/HttpServerFunctionalTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/HttpServerFunctionalTest.java @@ -19,13 +19,16 @@ package org.apache.hadoop.http; +import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.authorize.AccessControlList; import org.junit.Assert; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.http.HttpServer.Builder; import java.io.File; import java.io.IOException; import java.io.InputStream; +import java.net.URI; import java.net.URL; import java.net.MalformedURLException; @@ -120,8 +123,9 @@ public class HttpServerFunctionalTest extends Assert { public static HttpServer createServer(String host, int port) throws IOException { prepareTestWebapp(); - return new HttpServer.Builder().setName(TEST).setBindAddress(host) - .setPort(port).setFindPort(true).build(); + return new HttpServer.Builder().setName(TEST) + .addEndpoint(URI.create("http://" + host + ":" + port)) + .setFindPort(true).build(); } /** @@ -131,8 +135,7 @@ public class HttpServerFunctionalTest extends Assert { * @throws IOException if it could not be created */ public static HttpServer createServer(String webapp) throws IOException { - return new HttpServer.Builder().setName(webapp).setBindAddress("0.0.0.0") - .setPort(0).setFindPort(true).build(); + return localServerBuilder(webapp).setFindPort(true).build(); } /** * Create an HttpServer instance for the given webapp @@ -143,14 +146,17 @@ public class HttpServerFunctionalTest extends Assert { */ public static HttpServer createServer(String webapp, Configuration conf) throws IOException { - return new HttpServer.Builder().setName(webapp).setBindAddress("0.0.0.0") - .setPort(0).setFindPort(true).setConf(conf).build(); + return localServerBuilder(webapp).setFindPort(true).setConf(conf).build(); } public static HttpServer createServer(String webapp, Configuration conf, AccessControlList adminsAcl) throws IOException { - return new HttpServer.Builder().setName(webapp).setBindAddress("0.0.0.0") - .setPort(0).setFindPort(true).setConf(conf).setACL(adminsAcl).build(); + return localServerBuilder(webapp).setFindPort(true).setConf(conf).setACL(adminsAcl).build(); + } + + private static Builder localServerBuilder(String webapp) { + return new HttpServer.Builder().setName(webapp).addEndpoint( + URI.create("http://localhost:0")); } /** @@ -163,8 +169,7 @@ public class HttpServerFunctionalTest extends Assert { */ public static HttpServer createServer(String webapp, Configuration conf, String[] pathSpecs) throws IOException { - return new HttpServer.Builder().setName(webapp).setBindAddress("0.0.0.0") - .setPort(0).setFindPort(true).setConf(conf).setPathSpec(pathSpecs).build(); + return localServerBuilder(webapp).setFindPort(true).setConf(conf).setPathSpec(pathSpecs).build(); } /** @@ -201,8 +206,8 @@ public class HttpServerFunctionalTest extends Assert { public static URL getServerURL(HttpServer server) throws 
MalformedURLException { assertNotNull("No server", server); - int port = server.getPort(); - return new URL("http://localhost:" + port + "/"); + return new URL("http://" + + NetUtils.getHostPortString(server.getConnectorAddress(0))); } /** diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestGlobalFilter.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestGlobalFilter.java index eef6d7de41f..70db923284c 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestGlobalFilter.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestGlobalFilter.java @@ -36,6 +36,7 @@ import javax.servlet.http.HttpServletRequest; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.net.NetUtils; import org.junit.Test; public class TestGlobalFilter extends HttpServerFunctionalTest { @@ -125,7 +126,8 @@ public class TestGlobalFilter extends HttpServerFunctionalTest { dataURL, streamFile, rootURL, allURL, outURL, logURL}; //access the urls - final String prefix = "http://localhost:" + http.getPort(); + final String prefix = "http://" + + NetUtils.getHostPortString(http.getConnectorAddress(0)); try { for(int i = 0; i < urls.length; i++) { access(prefix + urls[i]); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java index 9dfaf3ec2a4..13627988b15 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java @@ -20,7 +20,7 @@ package org.apache.hadoop.http; import java.io.IOException; import java.io.PrintWriter; import java.net.HttpURLConnection; -import java.net.InetSocketAddress; +import java.net.URI; import java.net.URL; import java.util.Arrays; import java.util.Enumeration; @@ -53,6 +53,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.http.HttpServer.QuotingInputFilter.RequestQuoter; import org.apache.hadoop.http.resource.JerseyResource; +import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.Groups; import org.apache.hadoop.security.ShellBasedUnixGroupsMapping; import org.apache.hadoop.security.UserGroupInformation; @@ -61,6 +62,8 @@ import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; import org.mockito.Mockito; +import org.mockito.internal.util.reflection.Whitebox; +import org.mortbay.jetty.Connector; import org.mortbay.util.ajax.JSON; public class TestHttpServer extends HttpServerFunctionalTest { @@ -362,11 +365,10 @@ public class TestHttpServer extends HttpServerFunctionalTest { MyGroupsProvider.mapping.put("userB", Arrays.asList("groupB")); HttpServer myServer = new HttpServer.Builder().setName("test") - .setBindAddress("0.0.0.0").setPort(0).setFindPort(true).build(); + .addEndpoint(new URI("http://localhost:0")).setFindPort(true).build(); myServer.setAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE, conf); myServer.start(); - int port = myServer.getPort(); - String serverURL = "http://localhost:" + port + "/"; + String serverURL = "http://" + NetUtils.getHostPortString(myServer.getConnectorAddress(0)) + "/"; for (String servlet : new String[] { "conf", "logs", "stacks", 
"logLevel", "metrics" }) { for (String user : new String[] { "userA", "userB" }) { @@ -404,12 +406,13 @@ public class TestHttpServer extends HttpServerFunctionalTest { MyGroupsProvider.mapping.put("userE", Arrays.asList("groupE")); HttpServer myServer = new HttpServer.Builder().setName("test") - .setBindAddress("0.0.0.0").setPort(0).setFindPort(true).setConf(conf) + .addEndpoint(new URI("http://localhost:0")).setFindPort(true).setConf(conf) .setACL(new AccessControlList("userA,userB groupC,groupD")).build(); myServer.setAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE, conf); myServer.start(); - int port = myServer.getPort(); - String serverURL = "http://localhost:" + port + "/"; + + String serverURL = "http://" + + NetUtils.getHostPortString(myServer.getConnectorAddress(0)) + "/"; for (String servlet : new String[] { "conf", "logs", "stacks", "logLevel", "metrics" }) { for (String user : new String[] { "userA", "userB", "userC", "userD" }) { @@ -520,20 +523,20 @@ public class TestHttpServer extends HttpServerFunctionalTest { } @Test public void testBindAddress() throws Exception { - checkBindAddress("0.0.0.0", 0, false).stop(); + checkBindAddress("localhost", 0, false).stop(); // hang onto this one for a bit more testing HttpServer myServer = checkBindAddress("localhost", 0, false); HttpServer myServer2 = null; try { - int port = myServer.getListenerAddress().getPort(); + int port = myServer.getConnectorAddress(0).getPort(); // it's already in use, true = expect a higher port myServer2 = checkBindAddress("localhost", port, true); // try to reuse the port - port = myServer2.getListenerAddress().getPort(); + port = myServer2.getConnectorAddress(0).getPort(); myServer2.stop(); - assertEquals(-1, myServer2.getPort()); // not bound - myServer2.openListener(); - assertEquals(port, myServer2.getPort()); // expect same port + assertNull(myServer2.getConnectorAddress(0)); // not bound + myServer2.openListeners(); + assertEquals(port, myServer2.getConnectorAddress(0).getPort()); // expect same port } finally { myServer.stop(); if (myServer2 != null) { @@ -547,21 +550,24 @@ public class TestHttpServer extends HttpServerFunctionalTest { HttpServer server = createServer(host, port); try { // not bound, ephemeral should return requested port (0 for ephemeral) - InetSocketAddress addr = server.getListenerAddress(); - assertEquals(port, addr.getPort()); - // verify hostname is what was given - server.openListener(); - addr = server.getListenerAddress(); - assertEquals(host, addr.getHostName()); + List listeners = (List) Whitebox.getInternalState(server, + "listeners"); + Connector listener = (Connector) Whitebox.getInternalState( + listeners.get(0), "listener"); - int boundPort = addr.getPort(); + assertEquals(port, listener.getPort()); + // verify hostname is what was given + server.openListeners(); + assertEquals(host, server.getConnectorAddress(0).getHostName()); + + int boundPort = server.getConnectorAddress(0).getPort(); if (port == 0) { assertTrue(boundPort != 0); // ephemeral should now return bound port } else if (findPort) { assertTrue(boundPort > port); // allow a little wiggle room to prevent random test failures if // some consecutive ports are already in use - assertTrue(addr.getPort() - port < 8); + assertTrue(boundPort - port < 8); } } catch (Exception e) { server.stop(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestPathFilter.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestPathFilter.java index 
3bd77f039c8..be5900e64cd 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestPathFilter.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestPathFilter.java @@ -36,6 +36,7 @@ import javax.servlet.http.HttpServletRequest; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.net.NetUtils; import org.junit.Test; public class TestPathFilter extends HttpServerFunctionalTest { @@ -126,7 +127,8 @@ public class TestPathFilter extends HttpServerFunctionalTest { // access the urls and verify our paths specs got added to the // filters - final String prefix = "http://localhost:" + http.getPort(); + final String prefix = "http://" + + NetUtils.getHostPortString(http.getConnectorAddress(0)); try { for(int i = 0; i < filteredUrls.length; i++) { access(prefix + filteredUrls[i]); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java index e5fd4b0a087..cb4b66b4c31 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java @@ -17,105 +17,101 @@ */ package org.apache.hadoop.http; +import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.InputStream; +import java.net.URI; +import java.net.URL; + +import javax.net.ssl.HttpsURLConnection; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.ssl.KeyStoreTestUtil; import org.apache.hadoop.security.ssl.SSLFactory; -import org.junit.After; -import org.junit.Before; +import org.junit.AfterClass; +import org.junit.BeforeClass; import org.junit.Test; -import javax.net.ssl.HttpsURLConnection; -import java.io.File; -import java.io.FileWriter; -import java.io.InputStream; -import java.io.Writer; -import java.net.URL; - /** * This testcase issues SSL certificates configures the HttpServer to serve * HTTPS using the created certficates and calls an echo servlet using the * corresponding HTTPS URL. 
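A minimal client-side sketch of the pattern the rewritten test below relies on (not part of the patch; it assumes the ssl-client.xml generated by KeyStoreTestUtil.setupSSLConfig is on the classpath, and the class name is illustrative):

import java.net.URL;
import javax.net.ssl.HttpsURLConnection;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.ssl.SSLFactory;

// Illustrative client-side sketch, not part of the patch: open an HTTPS
// connection to a test server using the generated ssl-client.xml settings.
class SslClientExample {
  static HttpsURLConnection open(URL url) throws Exception {
    Configuration sslConf = new Configuration(false);
    sslConf.addResource("ssl-client.xml");
    SSLFactory clientSslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, sslConf);
    clientSslFactory.init();
    HttpsURLConnection conn = (HttpsURLConnection) url.openConnection();
    conn.setSSLSocketFactory(clientSslFactory.createSSLSocketFactory());
    return conn;
  }
}

Keeping a single SSLFactory for all requests is consistent with the hunk below, which makes clientSslFactory static and moves setup/cleanup to @BeforeClass/@AfterClass.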
*/ public class TestSSLHttpServer extends HttpServerFunctionalTest { - private static final String CONFIG_SITE_XML = "sslhttpserver-site.xml"; + private static final String BASEDIR = System.getProperty("test.build.dir", + "target/test-dir") + "/" + TestSSLHttpServer.class.getSimpleName(); - private static final String BASEDIR = - System.getProperty("test.build.dir", "target/test-dir") + "/" + - TestSSLHttpServer.class.getSimpleName(); - - static final Log LOG = LogFactory.getLog(TestSSLHttpServer.class); + private static final Log LOG = LogFactory.getLog(TestSSLHttpServer.class); + private static Configuration conf; private static HttpServer server; private static URL baseUrl; + private static String keystoresDir; + private static String sslConfDir; + private static SSLFactory clientSslFactory; + @BeforeClass + public static void setup() throws Exception { + conf = new Configuration(); + conf.setInt(HttpServer.HTTP_MAX_THREADS, 10); - @Before - public void setup() throws Exception { - HttpConfig.setPolicy(HttpConfig.Policy.HTTPS_ONLY); File base = new File(BASEDIR); FileUtil.fullyDelete(base); base.mkdirs(); - String classpathDir = - KeyStoreTestUtil.getClasspathDir(TestSSLHttpServer.class); - Configuration conf = new Configuration(); - String keystoresDir = new File(BASEDIR).getAbsolutePath(); - String sslConfsDir = - KeyStoreTestUtil.getClasspathDir(TestSSLHttpServer.class); - KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfsDir, conf, false); - conf.setBoolean(CommonConfigurationKeysPublic.HADOOP_SSL_ENABLED_KEY, true); + keystoresDir = new File(BASEDIR).getAbsolutePath(); + sslConfDir = KeyStoreTestUtil.getClasspathDir(TestSSLHttpServer.class); - //we do this trick because the MR AppMaster is started in another VM and - //the HttpServer configuration is not loaded from the job.xml but from the - //site.xml files in the classpath - Writer writer = new FileWriter(new File(classpathDir, CONFIG_SITE_XML)); - conf.writeXml(writer); - writer.close(); + KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false); + Configuration sslConf = new Configuration(false); + sslConf.addResource("ssl-server.xml"); + sslConf.addResource("ssl-client.xml"); - conf.setInt(HttpServer.HTTP_MAX_THREADS, 10); - conf.addResource(CONFIG_SITE_XML); - server = createServer("test", conf); + clientSslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, sslConf); + clientSslFactory.init(); + + server = new HttpServer.Builder() + .setName("test") + .addEndpoint(new URI("https://localhost")) + .setConf(conf) + .keyPassword(sslConf.get("ssl.server.keystore.keypassword")) + .keyStore(sslConf.get("ssl.server.keystore.location"), + sslConf.get("ssl.server.keystore.password"), + sslConf.get("ssl.server.keystore.type", "jks")) + .trustStore(sslConf.get("ssl.server.truststore.location"), + sslConf.get("ssl.server.truststore.password"), + sslConf.get("ssl.server.truststore.type", "jks")).build(); server.addServlet("echo", "/echo", TestHttpServer.EchoServlet.class); server.start(); - baseUrl = new URL("https://localhost:" + server.getPort() + "/"); - LOG.info("HTTP server started: "+ baseUrl); + baseUrl = new URL("https://" + + NetUtils.getHostPortString(server.getConnectorAddress(0))); + LOG.info("HTTP server started: " + baseUrl); } - @After - public void cleanup() throws Exception { + @AfterClass + public static void cleanup() throws Exception { server.stop(); - String classpathDir = - KeyStoreTestUtil.getClasspathDir(TestSSLHttpServer.class); - new File(classpathDir, CONFIG_SITE_XML).delete(); - 
HttpConfig.setPolicy(HttpConfig.Policy.HTTP_ONLY); + FileUtil.fullyDelete(new File(BASEDIR)); + KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir); + clientSslFactory.destroy(); } - @Test public void testEcho() throws Exception { - assertEquals("a:b\nc:d\n", - readOut(new URL(baseUrl, "/echo?a=b&c=d"))); - assertEquals("a:b\nc<:d\ne:>\n", - readOut(new URL(baseUrl, "/echo?a=b&c<=d&e=>"))); + assertEquals("a:b\nc:d\n", readOut(new URL(baseUrl, "/echo?a=b&c=d"))); + assertEquals("a:b\nc<:d\ne:>\n", readOut(new URL(baseUrl, + "/echo?a=b&c<=d&e=>"))); } private static String readOut(URL url) throws Exception { - StringBuilder out = new StringBuilder(); HttpsURLConnection conn = (HttpsURLConnection) url.openConnection(); - Configuration conf = new Configuration(); - conf.addResource(CONFIG_SITE_XML); - SSLFactory sslf = new SSLFactory(SSLFactory.Mode.CLIENT, conf); - sslf.init(); - conn.setSSLSocketFactory(sslf.createSSLSocketFactory()); + conn.setSSLSocketFactory(clientSslFactory.createSSLSocketFactory()); InputStream in = conn.getInputStream(); - byte[] buffer = new byte[64 * 1024]; - int len = in.read(buffer); - while (len > 0) { - out.append(new String(buffer, 0, len)); - len = in.read(buffer); - } + ByteArrayOutputStream out = new ByteArrayOutputStream(); + IOUtils.copyBytes(in, out, 1024); return out.toString(); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestServletFilter.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestServletFilter.java index e5392396d5f..9b6e4119609 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestServletFilter.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestServletFilter.java @@ -35,6 +35,7 @@ import javax.servlet.http.HttpServletRequest; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.test.GenericTestUtils; import org.junit.Test; @@ -125,7 +126,8 @@ public class TestServletFilter extends HttpServerFunctionalTest { } //access the urls as the sequence - final String prefix = "http://localhost:" + http.getPort(); + final String prefix = "http://" + + NetUtils.getHostPortString(http.getConnectorAddress(0)); try { for(int i = 0; i < sequence.length; i++) { access(prefix + urls[sequence[i]]); @@ -185,7 +187,7 @@ public class TestServletFilter extends HttpServerFunctionalTest { throws Exception { Configuration conf = new Configuration(); HttpServer http = createTestServer(conf); - http.defineFilter(http.webAppContext, + HttpServer.defineFilter(http.webAppContext, "ErrorFilter", ErrorFilter.class.getName(), null, null); try { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogLevel.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogLevel.java index c5a0d0bc04c..c9b2a09f513 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogLevel.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogLevel.java @@ -21,8 +21,10 @@ import java.io.*; import java.net.*; import org.apache.hadoop.http.HttpServer; +import org.apache.hadoop.net.NetUtils; import junit.framework.TestCase; + import org.apache.commons.logging.*; import org.apache.commons.logging.impl.*; import org.apache.log4j.*; @@ -43,15 +45,16 @@ public class TestLogLevel 
extends TestCase { assertTrue(!Level.ERROR.equals(log.getEffectiveLevel())); HttpServer server = new HttpServer.Builder().setName("..") - .setBindAddress("localhost").setPort(22222).setFindPort(true) + .addEndpoint(new URI("http://localhost:0")).setFindPort(true) .build(); server.start(); - int port = server.getPort(); + String authority = NetUtils.getHostPortString(server + .getConnectorAddress(0)); //servlet - URL url = new URL("http://localhost:" + port - + "/logLevel?log=" + logName + "&level=" + Level.ERROR); + URL url = new URL("http://" + authority + "/logLevel?log=" + logName + + "&level=" + Level.ERROR); out.println("*** Connecting to " + url); URLConnection connection = url.openConnection(); connection.connect(); @@ -67,7 +70,7 @@ public class TestLogLevel extends TestCase { assertTrue(Level.ERROR.equals(log.getEffectiveLevel())); //command line - String[] args = {"-setlevel", "localhost:"+port, logName,""+Level.DEBUG}; + String[] args = {"-setlevel", authority, logName, Level.DEBUG.toString()}; LogLevel.main(args); log.debug("log.debug3"); log.info("log.info3"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 9ed3e805159..fbc455aeee9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -221,6 +221,9 @@ Trunk (Unreleased) HDFS-5556. Add some more NameNode cache statistics, cache pool stats (cmccabe) + HDFS-5545. Allow specifying endpoints for listeners in HttpServer. (Haohui + Mai via jing9) + OPTIMIZATIONS HDFS-5349. DNA_CACHE and DNA_UNCACHE should be by blockId only. (cmccabe) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java index f43969360f9..4ebc2bb0c00 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java @@ -75,6 +75,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSDirectory; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem; import org.apache.hadoop.hdfs.web.WebHdfsFileSystem; +import org.apache.hadoop.http.HttpServer; import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.net.NetUtils; @@ -1410,4 +1411,19 @@ public class DFSUtil { return (value == null || value.isEmpty()) ? 
defaultKey : DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY; } + + public static HttpServer.Builder loadSslConfToHttpServerBuilder( + HttpServer.Builder builder, Configuration sslConf) { + return builder + .needsClientAuth( + sslConf.getBoolean(DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY, + DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT)) + .keyPassword(sslConf.get("ssl.server.keystore.keypassword")) + .keyStore(sslConf.get("ssl.server.keystore.location"), + sslConf.get("ssl.server.keystore.password"), + sslConf.get("ssl.server.keystore.type", "jks")) + .trustStore(sslConf.get("ssl.server.truststore.location"), + sslConf.get("ssl.server.truststore.password"), + sslConf.get("ssl.server.truststore.type", "jks")); + } } \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeHttpServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeHttpServer.java index 6c26dd75fc0..b5537120d26 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeHttpServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeHttpServer.java @@ -23,6 +23,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_JOURNALNODE_INTERNAL_SPNE import java.io.IOException; import java.net.InetSocketAddress; +import java.net.URI; +import java.net.URISyntaxException; import javax.servlet.ServletContext; @@ -69,8 +71,15 @@ public class JournalNodeHttpServer { bindAddr.getHostName())); int tmpInfoPort = bindAddr.getPort(); + URI httpEndpoint; + try { + httpEndpoint = new URI("http://" + NetUtils.getHostPortString(bindAddr)); + } catch (URISyntaxException e) { + throw new IOException(e); + } + httpServer = new HttpServer.Builder().setName("journal") - .setBindAddress(bindAddr.getHostName()).setPort(tmpInfoPort) + .addEndpoint(httpEndpoint) .setFindPort(tmpInfoPort == 0).setConf(conf).setACL( new AccessControlList(conf.get(DFS_ADMIN, " "))) .setSecurityEnabled(UserGroupInformation.isSecurityEnabled()) @@ -85,7 +94,7 @@ public class JournalNodeHttpServer { httpServer.start(); // The web-server port can be ephemeral... ensure we have the correct info - infoPort = httpServer.getPort(); + infoPort = httpServer.getConnectorAddress(0).getPort(); LOG.info("Journal Web-server up at: " + bindAddr + ":" + infoPort); } @@ -104,7 +113,7 @@ public class JournalNodeHttpServer { * Return the actual address bound to by the running server. 
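A minimal sketch of how the new loadSslConfToHttpServerBuilder helper above is meant to be combined with an HTTPS endpoint on the builder (not part of the patch; the server name and endpoint URI are placeholders, and sslConf is assumed to have been loaded from ssl-server.xml as in the DataNode/NameNode hunks later in this patch):

import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.http.HttpServer;

// Illustrative sketch, not part of the patch: wire an HTTPS endpoint through
// the new builder, mirroring the DataNode/NameNode changes in this patch.
class HttpsEndpointExample {
  static HttpServer build(Configuration sslConf) throws IOException {
    HttpServer.Builder builder = new HttpServer.Builder()
        .setName("example")                              // placeholder name
        .addEndpoint(URI.create("https://localhost:0"))  // placeholder endpoint
        .setFindPort(true);
    // Copies keystore/truststore settings and the client-auth flag from sslConf.
    DFSUtil.loadSslConfToHttpServerBuilder(builder, sslConf);
    return builder.build();
  }
}

The DataNode and NameNode changes later in this patch follow the same shape: add the https:// endpoint, load the SSL settings into the builder, then build and start.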
*/ public InetSocketAddress getAddress() { - InetSocketAddress addr = httpServer.getListenerAddress(); + InetSocketAddress addr = httpServer.getConnectorAddress(0); assert addr.getPort() != 0; return addr; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java index 49a1995f482..d146b51b833 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java @@ -22,6 +22,7 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Joiner; import com.google.common.base.Preconditions; import com.google.protobuf.BlockingService; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; @@ -181,6 +182,7 @@ public class DataNode extends Configured private volatile boolean heartbeatsDisabledForTests = false; private DataStorage storage = null; private HttpServer infoServer = null; + private int infoPort; private int infoSecurePort; DataNodeMetrics metrics; private InetSocketAddress streamingAddr; @@ -310,27 +312,33 @@ public class DataNode extends Configured String infoHost = infoSocAddr.getHostName(); int tmpInfoPort = infoSocAddr.getPort(); HttpServer.Builder builder = new HttpServer.Builder().setName("datanode") - .setBindAddress(infoHost).setPort(tmpInfoPort) + .addEndpoint(URI.create("http://" + NetUtils.getHostPortString(infoSocAddr))) .setFindPort(tmpInfoPort == 0).setConf(conf) .setACL(new AccessControlList(conf.get(DFS_ADMIN, " "))); - this.infoServer = (secureResources == null) ? builder.build() : - builder.setConnector(secureResources.getListener()).build(); LOG.info("Opened info server at " + infoHost + ":" + tmpInfoPort); if (conf.getBoolean(DFS_HTTPS_ENABLE_KEY, false)) { - boolean needClientAuth = conf.getBoolean(DFS_CLIENT_HTTPS_NEED_AUTH_KEY, - DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT); InetSocketAddress secInfoSocAddr = NetUtils.createSocketAddr(conf.get( DFS_DATANODE_HTTPS_ADDRESS_KEY, infoHost + ":" + 0)); - Configuration sslConf = new HdfsConfiguration(false); - sslConf.addResource(conf.get(DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY, - "ssl-server.xml")); - this.infoServer.addSslListener(secInfoSocAddr, sslConf, needClientAuth); + builder.addEndpoint(URI.create("https://" + + NetUtils.getHostPortString(secInfoSocAddr))); + Configuration sslConf = new Configuration(false); + sslConf.setBoolean(DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY, conf + .getBoolean(DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY, + DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT)); + sslConf.addResource(conf.get( + DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY, + DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT)); + DFSUtil.loadSslConfToHttpServerBuilder(builder, sslConf); + if(LOG.isDebugEnabled()) { LOG.debug("Datanode listening for SSL on " + secInfoSocAddr); } infoSecurePort = secInfoSocAddr.getPort(); } + + this.infoServer = (secureResources == null) ? 
builder.build() : + builder.setConnector(secureResources.getListener()).build(); this.infoServer.addInternalServlet(null, "/streamFile/*", StreamFile.class); this.infoServer.addInternalServlet(null, "/getFileChecksum/*", FileChecksumServlets.GetServlet.class); @@ -346,6 +354,7 @@ public class DataNode extends Configured WebHdfsFileSystem.PATH_PREFIX + "/*"); } this.infoServer.start(); + this.infoPort = infoServer.getConnectorAddress(0).getPort(); } private void startPlugins(Configuration conf) { @@ -2276,7 +2285,7 @@ public class DataNode extends Configured * @return the datanode's http port */ public int getInfoPort() { - return infoServer.getPort(); + return infoPort; } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java index 9261a4a62f3..ea07b065926 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java @@ -41,6 +41,7 @@ import org.apache.hadoop.ha.HealthCheckFailedException; import org.apache.hadoop.ha.ServiceFailedException; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Trash; + import static org.apache.hadoop.hdfs.DFSConfigKeys.*; import static org.apache.hadoop.util.ExitUtil.terminate; import static org.apache.hadoop.util.ToolRunner.confirmPrompt; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java index 07762ed4d70..b2072a648ad 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java @@ -21,6 +21,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ADMIN; import java.io.IOException; import java.net.InetSocketAddress; +import java.net.URI; import java.util.HashMap; import java.util.Map; @@ -69,25 +70,45 @@ public class NameNodeHttpServer { this.bindAddress = bindAddress; } - public void start() throws IOException { + void start() throws IOException { final String infoHost = bindAddress.getHostName(); int infoPort = bindAddress.getPort(); - httpServer = new HttpServer.Builder().setName("hdfs") - .setBindAddress(infoHost).setPort(infoPort) + HttpServer.Builder builder = new HttpServer.Builder().setName("hdfs") + .addEndpoint(URI.create(("http://" + NetUtils.getHostPortString(bindAddress)))) .setFindPort(infoPort == 0).setConf(conf).setACL( new AccessControlList(conf.get(DFS_ADMIN, " "))) .setSecurityEnabled(UserGroupInformation.isSecurityEnabled()) .setUsernameConfKey( DFSConfigKeys.DFS_NAMENODE_INTERNAL_SPNEGO_USER_NAME_KEY) .setKeytabConfKey(DFSUtil.getSpnegoKeytabKey(conf, - DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY)).build(); + DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY)); + + boolean certSSL = conf.getBoolean(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY, false); + if (certSSL) { + httpsAddress = NetUtils.createSocketAddr(conf.get( + DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, + DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT)); + + builder.addEndpoint(URI.create("https://" + + NetUtils.getHostPortString(httpsAddress))); + Configuration sslConf = new Configuration(false); + 
sslConf.setBoolean(DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY, conf + .getBoolean(DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY, + DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT)); + sslConf.addResource(conf.get( + DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY, + DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT)); + DFSUtil.loadSslConfToHttpServerBuilder(builder, sslConf); + } + + httpServer = builder.build(); if (WebHdfsFileSystem.isEnabled(conf, HttpServer.LOG)) { //add SPNEGO authentication filter for webhdfs final String name = "SPNEGO"; final String classname = AuthFilter.class.getName(); final String pathSpec = WebHdfsFileSystem.PATH_PREFIX + "/*"; Map params = getAuthFilterParams(conf); - httpServer.defineFilter(httpServer.getWebAppContext(), name, classname, params, + HttpServer.defineFilter(httpServer.getWebAppContext(), name, classname, params, new String[]{pathSpec}); HttpServer.LOG.info("Added filter '" + name + "' (class=" + classname + ")"); @@ -97,34 +118,19 @@ public class NameNodeHttpServer { + ";" + Param.class.getPackage().getName(), pathSpec); } - boolean certSSL = conf.getBoolean(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY, false); + httpServer.setAttribute(NAMENODE_ATTRIBUTE_KEY, nn); + httpServer.setAttribute(JspHelper.CURRENT_CONF, conf); + setupServlets(httpServer, conf); + httpServer.start(); + httpAddress = httpServer.getConnectorAddress(0); if (certSSL) { - boolean needClientAuth = conf.getBoolean("dfs.https.need.client.auth", false); - httpsAddress = NetUtils.createSocketAddr(conf.get( - DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, - DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT)); - - Configuration sslConf = new Configuration(false); - sslConf.addResource(conf.get( - DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY, - DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT)); - httpServer.addSslListener(httpsAddress, sslConf, needClientAuth); + httpsAddress = httpServer.getConnectorAddress(1); // assume same ssl port for all datanodes InetSocketAddress datanodeSslPort = NetUtils.createSocketAddr(conf.get( DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, infoHost + ":" + 50475)); httpServer.setAttribute(DFSConfigKeys.DFS_DATANODE_HTTPS_PORT_KEY, datanodeSslPort .getPort()); } - httpServer.setAttribute(NAMENODE_ATTRIBUTE_KEY, nn); - httpServer.setAttribute(JspHelper.CURRENT_CONF, conf); - setupServlets(httpServer, conf); - httpServer.start(); - httpAddress = new InetSocketAddress(bindAddress.getAddress(), - httpServer.getPort()); - if (certSSL) { - httpsAddress = new InetSocketAddress(bindAddress.getAddress(), - httpServer.getConnectorPort(1)); - } } private Map getAuthFilterParams(Configuration conf) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java index cfd3ffe1b29..022d6e21ac2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java @@ -30,6 +30,7 @@ import java.io.FilenameFilter; import java.io.IOException; import java.net.InetSocketAddress; import java.net.URI; +import java.net.URISyntaxException; import java.security.PrivilegedAction; import java.security.PrivilegedExceptionAction; import java.util.Collection; @@ -214,7 +215,7 @@ public class SecondaryNameNode implements Runnable { /** * 
Initialize SecondaryNameNode. - * @param commandLineOpts + * @param commandLineOpts */ private void initialize(final Configuration conf, CommandLineOpts commandLineOpts) throws IOException { @@ -256,8 +257,15 @@ public class SecondaryNameNode implements Runnable { // initialize the webserver for uploading files. int tmpInfoPort = infoSocAddr.getPort(); + URI httpEndpoint; + try { + httpEndpoint = new URI("http://" + NetUtils.getHostPortString(infoSocAddr)); + } catch (URISyntaxException e) { + throw new IOException(e); + } + infoServer = new HttpServer.Builder().setName("secondary") - .setBindAddress(infoBindAddress).setPort(tmpInfoPort) + .addEndpoint(httpEndpoint) .setFindPort(tmpInfoPort == 0).setConf(conf).setACL( new AccessControlList(conf.get(DFS_ADMIN, " "))) .setSecurityEnabled(UserGroupInformation.isSecurityEnabled()) @@ -275,7 +283,7 @@ public class SecondaryNameNode implements Runnable { LOG.info("Web server init done"); // The web-server port can be ephemeral... ensure we have the correct info - infoPort = infoServer.getPort(); + infoPort = infoServer.getConnectorAddress(0).getPort(); conf.set(DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, infoBindAddress + ":" + infoPort); LOG.info("Secondary Web-server up at: " + infoBindAddress + ":" + infoPort); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestJobEndNotifier.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestJobEndNotifier.java index fd5bbf138e7..2a62bde29ca 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestJobEndNotifier.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestJobEndNotifier.java @@ -278,13 +278,15 @@ public class TestJobEndNotifier extends JobEndNotifier { new File(System.getProperty( "build.webapps", "build/webapps") + "/test").mkdirs(); HttpServer server = new HttpServer.Builder().setName("test") - .setBindAddress("0.0.0.0").setPort(0).setFindPort(true).build(); + .addEndpoint(URI.create("http://localhost:0")) + .setFindPort(true).build(); server.addServlet("jobend", "/jobend", JobEndServlet.class); server.start(); JobEndServlet.calledTimes = 0; JobEndServlet.requestUri = null; - JobEndServlet.baseUrl = "http://localhost:" + server.getPort() + "/"; + JobEndServlet.baseUrl = "http://localhost:" + + server.getConnectorAddress(0).getPort() + "/"; JobEndServlet.foundJobState = null; return server; } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestJobEndNotifier.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestJobEndNotifier.java index 9e7ffc18003..16b0e10e833 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestJobEndNotifier.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestJobEndNotifier.java @@ -103,12 +103,13 @@ public class TestJobEndNotifier extends TestCase { new File(System.getProperty("build.webapps", "build/webapps") + "/test" ).mkdirs(); server = new HttpServer.Builder().setName("test") - 
.setBindAddress("0.0.0.0").setPort(0).setFindPort(true).build(); + .addEndpoint(URI.create("http://localhost:0")) + .setFindPort(true).build(); server.addServlet("delay", "/delay", DelayServlet.class); server.addServlet("jobend", "/jobend", JobEndServlet.class); server.addServlet("fail", "/fail", FailServlet.class); server.start(); - int port = server.getPort(); + int port = server.getConnectorAddress(0).getPort(); baseUrl = new URL("http://localhost:" + port + "/"); JobEndServlet.calledTimes = 0; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApp.java index 4d3dd63e299..90323ee089d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApp.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApp.java @@ -83,11 +83,13 @@ public abstract class WebApp extends ServletModule { * @return InetSocketAddress */ public InetSocketAddress getListenerAddress() { - return checkNotNull(httpServer, "httpServer").getListenerAddress(); + return checkNotNull(httpServer, "httpServer").getConnectorAddress(0); } public int port() { - return checkNotNull(httpServer, "httpServer").getPort(); + InetSocketAddress addr = checkNotNull(httpServer, "httpServer") + .getConnectorAddress(0); + return addr == null ? -1 : addr.getPort(); } public void stop() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java index d2ce2f22bbc..7f70d98ded8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java @@ -22,6 +22,7 @@ import static com.google.common.base.Preconditions.checkNotNull; import java.io.IOException; import java.net.ConnectException; +import java.net.URI; import java.net.URL; import java.util.ArrayList; import java.util.HashMap; @@ -36,7 +37,6 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.http.HttpServer; import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.security.AdminACLsManager; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -216,46 +216,34 @@ public class WebApps { System.exit(1); } } - HttpServer server = - new HttpServer(name, bindAddress, port, findPort, conf, - new AdminACLsManager(conf).getAdminAcl(), null, - pathList.toArray(new String[0])) { + HttpServer.Builder builder = new HttpServer.Builder().setName(name) + .addEndpoint(URI.create("http://" + bindAddress + ":" + port)) + .setConf(conf).setFindPort(findPort) + .setACL(new AdminACLsManager(conf).getAdminAcl()) + .setPathSpec(pathList.toArray(new String[0])); + + boolean hasSpnegoConf = spnegoPrincipalKey != null + && spnegoKeytabKey != null; + if (hasSpnegoConf) { + builder.setUsernameConfKey(conf.get(spnegoPrincipalKey)) + .setKeytabConfKey(conf.get(spnegoKeytabKey)) + .setSecurityEnabled(UserGroupInformation.isSecurityEnabled()); + } + HttpServer server = builder.build(); - { - if (UserGroupInformation.isSecurityEnabled()) { - boolean 
initSpnego = true; - if (spnegoPrincipalKey == null - || conf.get(spnegoPrincipalKey, "").isEmpty()) { - LOG.warn("Principal for spnego filter is not set"); - initSpnego = false; - } - if (spnegoKeytabKey == null - || conf.get(spnegoKeytabKey, "").isEmpty()) { - LOG.warn("Keytab for spnego filter is not set"); - initSpnego = false; - } - if (initSpnego) { - LOG.info("Initializing spnego filter with principal key : " - + spnegoPrincipalKey + " keytab key : " - + spnegoKeytabKey); - initSpnego(conf, spnegoPrincipalKey, spnegoKeytabKey); - } - } - } - }; for(ServletStruct struct: servlets) { server.addServlet(struct.name, struct.spec, struct.clazz); } for(Map.Entry entry : attributes.entrySet()) { server.setAttribute(entry.getKey(), entry.getValue()); } - server.defineFilter(server.getWebAppContext(), "guice", + HttpServer.defineFilter(server.getWebAppContext(), "guice", GuiceFilter.class.getName(), null, new String[] { "/*" }); webapp.setConf(conf); webapp.setHttpServer(server); server.start(); - LOG.info("Web app /"+ name +" started at "+ server.getPort()); + LOG.info("Web app /"+ name +" started at "+ server.getConnectorAddress(0).getPort()); } catch (ClassNotFoundException e) { throw new WebAppException("Error starting http server", e); } catch (IOException e) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/TestWebApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/TestWebApp.java index 3d8acf29523..6eaeb2b0c13 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/TestWebApp.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/TestWebApp.java @@ -33,17 +33,6 @@ import static org.junit.Assert.assertTrue; import java.io.InputStream; import java.net.HttpURLConnection; import java.net.URL; -import java.util.Arrays; -import java.util.HashSet; -import java.util.Set; - -import javax.ws.rs.GET; -import javax.ws.rs.Path; -import javax.ws.rs.Produces; -import javax.ws.rs.core.MediaType; -import javax.ws.rs.ext.ContextResolver; -import javax.ws.rs.ext.Provider; -import javax.xml.bind.JAXBContext; import org.apache.commons.lang.ArrayUtils; import org.apache.hadoop.yarn.MockApps; @@ -55,9 +44,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.google.inject.Inject; -import com.google.inject.Singleton; -import com.sun.jersey.api.json.JSONConfiguration; -import com.sun.jersey.api.json.JSONJAXBContext; public class TestWebApp { static final Logger LOG = LoggerFactory.getLogger(TestWebApp.class); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/WebServer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/WebServer.java index b155d9def6d..2f78ec4cb27 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/WebServer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/WebServer.java @@ -69,7 +69,7 @@ public class WebServer extends AbstractService { .withHttpSpnegoKeytabKey( YarnConfiguration.NM_WEBAPP_SPNEGO_KEYTAB_FILE_KEY) .start(this.nmWebApp); - this.port = 
this.webApp.httpServer().getPort(); + this.port = this.webApp.httpServer().getConnectorAddress(0).getPort(); } catch (Exception e) { String msg = "NMWebapps failed to start."; LOG.error(msg, e); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxy.java index 919eb8fd5c7..d66571e762c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxy.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxy.java @@ -19,6 +19,7 @@ package org.apache.hadoop.yarn.server.webproxy; import java.io.IOException; +import java.net.URI; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -30,7 +31,6 @@ import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.webapp.util.WebAppUtils; - import org.apache.hadoop.fs.CommonConfigurationKeys; public class WebAppProxy extends AbstractService { @@ -89,7 +89,8 @@ public class WebAppProxy extends AbstractService { protected void serviceStart() throws Exception { try { proxyServer = new HttpServer.Builder().setName("proxy") - .setBindAddress(bindAddress).setPort(port).setFindPort(port == 0) + .addEndpoint(URI.create("http://" + bindAddress + ":" + port)) + .setFindPort(port == 0) .setConf(getConfig()).setACL(acl).build(); proxyServer.addServlet(ProxyUriUtils.PROXY_SERVLET_NAME, ProxyUriUtils.PROXY_PATH_SPEC, WebAppProxyServlet.class); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/TestWebAppProxyServlet.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/TestWebAppProxyServlet.java index 7d07b6b0f7e..47f4e09d75a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/TestWebAppProxyServlet.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/TestWebAppProxyServlet.java @@ -29,6 +29,7 @@ import java.io.InputStream; import java.io.OutputStream; import java.net.HttpCookie; import java.net.HttpURLConnection; +import java.net.URI; import java.net.URL; import java.util.List; import java.util.Map; @@ -126,7 +127,7 @@ public class TestWebAppProxyServlet { proxy.init(configuration); proxy.start(); - int proxyPort = proxy.proxy.proxyServer.getPort(); + int proxyPort = proxy.proxy.proxyServer.getConnectorAddress(0).getPort(); AppReportFetcherForTest appReportFetcher = proxy.proxy.appReportFetcher; // wrong url @@ -285,8 +286,7 @@ public class TestWebAppProxyServlet { YarnConfiguration.DEFAULT_YARN_ADMIN_ACL)); proxyServer = new HttpServer.Builder() .setName("proxy") - .setBindAddress(bindAddress) - .setPort(0) + .addEndpoint(URI.create("http://" + bindAddress + ":0")) .setFindPort(true) .setConf(conf) .setACL(acl) @@ -306,7 +306,7 @@ public class TestWebAppProxyServlet { 
proxyServer.setAttribute(PROXY_HOST_ATTRIBUTE, proxyHost); proxyServer.start(); System.out.println("Proxy server is started at port " + - proxyServer.getPort()); + proxyServer.getConnectorAddress(0).getPort()); } catch (Exception e) { LOG.fatal("Could not start proxy web server", e); throw new YarnRuntimeException("Could not start proxy web server", e); From 8df119da214babde03e73243c7ca4cfe6d0ca562 Mon Sep 17 00:00:00 2001 From: Tsz-wo Sze Date: Wed, 27 Nov 2013 20:32:16 +0000 Subject: [PATCH 15/27] HDFS-5537. Remove FileWithSnapshot interface. Contributed by jing9 git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1546184 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 + .../hdfs/server/namenode/FSDirectory.java | 2 +- .../hdfs/server/namenode/FSImageFormat.java | 2 +- .../FileUnderConstructionFeature.java | 2 +- .../hadoop/hdfs/server/namenode/INode.java | 5 +- .../hdfs/server/namenode/INodeFile.java | 46 ++-- .../hdfs/server/namenode/INodeReference.java | 18 +- .../server/namenode/snapshot/FileDiff.java | 92 +++++++ .../namenode/snapshot/FileDiffList.java | 35 +++ .../namenode/snapshot/FileWithSnapshot.java | 227 ------------------ .../snapshot/INodeDirectorySnapshottable.java | 6 +- .../snapshot/INodeDirectoryWithSnapshot.java | 8 +- .../snapshot/INodeFileWithSnapshot.java | 117 ++++++++- .../snapshot/SnapshotFSImageFormat.java | 6 +- .../snapshot/TestRenameWithSnapshots.java | 1 - 15 files changed, 280 insertions(+), 289 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiff.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java delete mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshot.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index fbc455aeee9..c3cb8a74f4d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -224,6 +224,8 @@ Trunk (Unreleased) HDFS-5545. Allow specifying endpoints for listeners in HttpServer. (Haohui Mai via jing9) + HDFS-5537. Remove FileWithSnapshot interface. (jing9 via szetszwo) + OPTIMIZATIONS HDFS-5349. DNA_CACHE and DNA_UNCACHE should be by blockId only. 
(cmccabe) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index 07e2cdc02fd..8d0cf83be12 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -455,8 +455,8 @@ public class FSDirectory implements Closeable { boolean unprotectedRemoveBlock(String path, INodeFile fileNode, Block block) throws IOException { - Preconditions.checkArgument(fileNode.isUnderConstruction()); // modify file-> block and blocksMap + // fileNode should be under construction boolean removed = fileNode.removeLastBlock(block); if (!removed) { return false; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java index 5ce0e3f0269..62cd807a4cc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java @@ -52,7 +52,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException; -import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot.FileDiffList; +import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList; import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable; import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot; import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileWithSnapshot; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileUnderConstructionFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileUnderConstructionFeature.java index a6947d7fe4f..0a7371dc485 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileUnderConstructionFeature.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileUnderConstructionFeature.java @@ -26,7 +26,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo; /** - * I-node for file being written. + * Feature for under-construction file. 
*/ @InterfaceAudience.Private public class FileUnderConstructionFeature extends INodeFile.Feature { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java index b16a719eacf..1ba48a012e0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java @@ -35,7 +35,6 @@ import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.QuotaExceededException; import org.apache.hadoop.hdfs.server.namenode.INodeReference.DstReference; import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithName; -import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot; import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot; import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; import org.apache.hadoop.hdfs.util.ChunkedArrayList; @@ -315,7 +314,7 @@ public abstract class INode implements INodeAttributes, Diff.Element { * 1.2.2 Else do nothing with the current INode. Recursively clean its * children. * - * 1.3 The current inode is a {@link FileWithSnapshot}. + * 1.3 The current inode is a file with snapshot. * Call recordModification(..) to capture the current states. * Mark the INode as deleted. * @@ -328,7 +327,7 @@ public abstract class INode implements INodeAttributes, Diff.Element { * 2. When deleting a snapshot. * 2.1 To clean {@link INodeFile}: do nothing. * 2.2 To clean {@link INodeDirectory}: recursively clean its children. - * 2.3 To clean {@link FileWithSnapshot}: delete the corresponding snapshot in + * 2.3 To clean INodeFile with snapshot: delete the corresponding snapshot in * its diff list. * 2.4 To clean {@link INodeDirectoryWithSnapshot}: delete the corresponding * snapshot in its diff list. Recursively clean its children. 
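The subtree-cleaning contract documented above rests on the diff list that this patch moves from the deleted FileWithSnapshot interface into INodeFileWithSnapshot: each snapshot contributes one FileDiff recording the file size (and, via the snapshot inode, the replication) seen at snapshot time, deleting a snapshot removes only that diff, and the effective block replication is the maximum over the live file and its remaining diffs. The following self-contained Java sketch mirrors that bookkeeping with illustrative names (SnapshottedFile, Diff); it is not the HDFS implementation.

import java.util.ArrayList;
import java.util.List;

/** Minimal stand-in for a file that keeps per-snapshot diffs. */
class SnapshottedFile {

  /** One diff records the state observed when a snapshot was taken. */
  static final class Diff {
    final int snapshotId;
    final long fileSize;
    final short replication;

    Diff(int snapshotId, long fileSize, short replication) {
      this.snapshotId = snapshotId;
      this.fileSize = fileSize;
      this.replication = replication;
    }
  }

  private final List<Diff> diffs = new ArrayList<>();
  private short currentReplication = 3;
  private long currentFileSize = 0;
  private boolean currentFileDeleted = false;

  /** Taking a snapshot appends a diff capturing the current state. */
  void takeSnapshot(int snapshotId) {
    diffs.add(new Diff(snapshotId, currentFileSize, currentReplication));
  }

  /** Deleting a snapshot removes only the matching diff (case 2.3 above). */
  void deleteSnapshot(int snapshotId) {
    diffs.removeIf(d -> d.snapshotId == snapshotId);
  }

  /**
   * Effective replication is the maximum over the live file and all remaining
   * diffs, so blocks still referenced by a snapshot stay replicated enough.
   */
  short blockReplication() {
    short max = currentFileDeleted ? 0 : currentReplication;
    for (Diff d : diffs) {
      if (d.replication > max) {
        max = d.replication;
      }
    }
    return max;
  }
}

Folding this logic into INodeFileWithSnapshot itself (rather than the static Util helpers on the old interface) is what lets the rest of the patch drop the asINodeFile() indirection.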
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java index 5fc2095dad2..55b29ba06bb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java @@ -32,10 +32,8 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState; -import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot; -import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot.FileDiff; -import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot.FileDiffList; -import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot.Util; +import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiff; +import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList; import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileWithSnapshot; import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; @@ -188,7 +186,7 @@ public class INodeFile extends INodeWithAdditionalFields INodeFile toUnderConstruction(String clientName, String clientMachine, DatanodeDescriptor clientNode) { Preconditions.checkState(!isUnderConstruction(), - "file is already an INodeFileUnderConstruction"); + "file is already under construction"); FileUnderConstructionFeature uc = new FileUnderConstructionFeature( clientName, clientMachine, clientNode); addFeature(uc); @@ -200,6 +198,8 @@ public class INodeFile extends INodeWithAdditionalFields * feature. */ public INodeFile toCompleteFile(long mtime) { + Preconditions.checkState(isUnderConstruction(), + "file is no longer under construction"); FileUnderConstructionFeature uc = getFileUnderConstructionFeature(); if (uc != null) { assertAllBlocksComplete(); @@ -221,15 +221,16 @@ public class INodeFile extends INodeWithAdditionalFields } } - @Override //BlockCollection + @Override // BlockCollection public void setBlock(int index, BlockInfo blk) { this.blocks[index] = blk; } - @Override // BlockCollection + @Override // BlockCollection, the file should be under construction public BlockInfoUnderConstruction setLastBlock(BlockInfo lastBlock, DatanodeDescriptor[] locations) throws IOException { - Preconditions.checkState(isUnderConstruction()); + Preconditions.checkState(isUnderConstruction(), + "file is no longer under construction"); if (numBlocks() == 0) { throw new IOException("Failed to set last block: File is empty."); @@ -247,6 +248,8 @@ public class INodeFile extends INodeWithAdditionalFields * the last one on the list. */ boolean removeLastBlock(Block oldblock) { + Preconditions.checkState(isUnderConstruction(), + "file is no longer under construction"); if (blocks == null || blocks.length == 0) { return false; } @@ -298,10 +301,8 @@ public class INodeFile extends INodeWithAdditionalFields } @Override - public final short getBlockReplication() { - return this instanceof FileWithSnapshot? - Util.getBlockReplication((FileWithSnapshot)this) - : getFileReplication(null); + public short getBlockReplication() { + return getFileReplication(null); } /** Set the replication factor of this file. 
*/ @@ -421,8 +422,8 @@ public class INodeFile extends INodeWithAdditionalFields clear(); removedINodes.add(this); - if (this instanceof FileWithSnapshot) { - ((FileWithSnapshot) this).getDiffs().clear(); + if (this instanceof INodeFileWithSnapshot) { + ((INodeFileWithSnapshot) this).getDiffs().clear(); } } @@ -437,8 +438,8 @@ public class INodeFile extends INodeWithAdditionalFields boolean useCache, int lastSnapshotId) { long nsDelta = 1; final long dsDelta; - if (this instanceof FileWithSnapshot) { - FileDiffList fileDiffList = ((FileWithSnapshot) this).getDiffs(); + if (this instanceof INodeFileWithSnapshot) { + FileDiffList fileDiffList = ((INodeFileWithSnapshot) this).getDiffs(); Snapshot last = fileDiffList.getLastSnapshot(); List diffs = fileDiffList.asList(); @@ -470,8 +471,8 @@ public class INodeFile extends INodeWithAdditionalFields private void computeContentSummary4Snapshot(final Content.Counts counts) { // file length and diskspace only counted for the latest state of the file // i.e. either the current state or the last snapshot - if (this instanceof FileWithSnapshot) { - final FileWithSnapshot withSnapshot = (FileWithSnapshot)this; + if (this instanceof INodeFileWithSnapshot) { + final INodeFileWithSnapshot withSnapshot = (INodeFileWithSnapshot) this; final FileDiffList diffs = withSnapshot.getDiffs(); final int n = diffs.asList().size(); counts.add(Content.FILE, n); @@ -487,8 +488,8 @@ public class INodeFile extends INodeWithAdditionalFields } private void computeContentSummary4Current(final Content.Counts counts) { - if (this instanceof FileWithSnapshot - && ((FileWithSnapshot)this).isCurrentFileDeleted()) { + if (this instanceof INodeFileWithSnapshot + && ((INodeFileWithSnapshot) this).isCurrentFileDeleted()) { return; } @@ -507,8 +508,9 @@ public class INodeFile extends INodeWithAdditionalFields * otherwise, get the file size from the given snapshot. 
*/ public final long computeFileSize(Snapshot snapshot) { - if (snapshot != null && this instanceof FileWithSnapshot) { - final FileDiff d = ((FileWithSnapshot)this).getDiffs().getDiff(snapshot); + if (snapshot != null && this instanceof INodeFileWithSnapshot) { + final FileDiff d = ((INodeFileWithSnapshot) this).getDiffs().getDiff( + snapshot); if (d != null) { return d.getFileSize(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java index f77863ada85..24707a6c011 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java @@ -26,8 +26,8 @@ import java.util.List; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.hdfs.protocol.QuotaExceededException; -import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot; import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot; +import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileWithSnapshot; import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; import com.google.common.base.Preconditions; @@ -102,8 +102,8 @@ public abstract class INodeReference extends INode { } if (wn != null) { INode referred = wc.getReferredINode(); - if (referred instanceof FileWithSnapshot) { - return ((FileWithSnapshot) referred).getDiffs().getPrior( + if (referred instanceof INodeFileWithSnapshot) { + return ((INodeFileWithSnapshot) referred).getDiffs().getPrior( wn.lastSnapshotId); } else if (referred instanceof INodeDirectoryWithSnapshot) { return ((INodeDirectoryWithSnapshot) referred).getDiffs().getPrior( @@ -547,8 +547,8 @@ public abstract class INodeReference extends INode { private Snapshot getSelfSnapshot() { INode referred = getReferredINode().asReference().getReferredINode(); Snapshot snapshot = null; - if (referred instanceof FileWithSnapshot) { - snapshot = ((FileWithSnapshot) referred).getDiffs().getPrior( + if (referred instanceof INodeFileWithSnapshot) { + snapshot = ((INodeFileWithSnapshot) referred).getDiffs().getPrior( lastSnapshotId); } else if (referred instanceof INodeDirectoryWithSnapshot) { snapshot = ((INodeDirectoryWithSnapshot) referred).getDiffs().getPrior( @@ -637,10 +637,10 @@ public abstract class INodeReference extends INode { Snapshot snapshot = getSelfSnapshot(prior); INode referred = getReferredINode().asReference().getReferredINode(); - if (referred instanceof FileWithSnapshot) { + if (referred instanceof INodeFileWithSnapshot) { // if referred is a file, it must be a FileWithSnapshot since we did // recordModification before the rename - FileWithSnapshot sfile = (FileWithSnapshot) referred; + INodeFileWithSnapshot sfile = (INodeFileWithSnapshot) referred; // make sure we mark the file as deleted sfile.deleteCurrentFile(); try { @@ -671,8 +671,8 @@ public abstract class INodeReference extends INode { WithCount wc = (WithCount) getReferredINode().asReference(); INode referred = wc.getReferredINode(); Snapshot lastSnapshot = null; - if (referred instanceof FileWithSnapshot) { - lastSnapshot = ((FileWithSnapshot) referred).getDiffs() + if (referred instanceof INodeFileWithSnapshot) { + lastSnapshot = ((INodeFileWithSnapshot) referred).getDiffs() 
.getLastSnapshot(); } else if (referred instanceof INodeDirectoryWithSnapshot) { lastSnapshot = ((INodeDirectoryWithSnapshot) referred) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiff.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiff.java new file mode 100644 index 00000000000..a5b7bcf2aa5 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiff.java @@ -0,0 +1,92 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode.snapshot; + +import java.io.DataOutput; +import java.io.IOException; +import java.util.List; + +import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization; +import org.apache.hadoop.hdfs.server.namenode.INode; +import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo; +import org.apache.hadoop.hdfs.server.namenode.INodeFile; +import org.apache.hadoop.hdfs.server.namenode.INodeFileAttributes; +import org.apache.hadoop.hdfs.server.namenode.Quota; +import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat.ReferenceMap; + +/** + * The difference of an {@link INodeFile} between two snapshots. + */ +public class FileDiff extends + AbstractINodeDiff { + + /** The file size at snapshot creation time. */ + private final long fileSize; + + FileDiff(Snapshot snapshot, INodeFile file) { + super(snapshot, null, null); + fileSize = file.computeFileSize(); + } + + /** Constructor used by FSImage loading */ + FileDiff(Snapshot snapshot, INodeFileAttributes snapshotINode, + FileDiff posteriorDiff, long fileSize) { + super(snapshot, snapshotINode, posteriorDiff); + this.fileSize = fileSize; + } + + /** @return the file size in the snapshot. */ + public long getFileSize() { + return fileSize; + } + + @Override + Quota.Counts combinePosteriorAndCollectBlocks( + INodeFileWithSnapshot currentINode, FileDiff posterior, + BlocksMapUpdateInfo collectedBlocks, final List removedINodes) { + return currentINode.updateQuotaAndCollectBlocks(posterior, collectedBlocks, + removedINodes); + } + + @Override + public String toString() { + return super.toString() + " fileSize=" + fileSize + ", rep=" + + (snapshotINode == null? 
"?": snapshotINode.getFileReplication()); + } + + @Override + void write(DataOutput out, ReferenceMap referenceMap) throws IOException { + writeSnapshot(out); + out.writeLong(fileSize); + + // write snapshotINode + if (snapshotINode != null) { + out.writeBoolean(true); + FSImageSerialization.writeINodeFileAttributes(snapshotINode, out); + } else { + out.writeBoolean(false); + } + } + + @Override + Quota.Counts destroyDiffAndCollectBlocks(INodeFileWithSnapshot currentINode, + BlocksMapUpdateInfo collectedBlocks, final List removedINodes) { + return currentINode.updateQuotaAndCollectBlocks(this, collectedBlocks, + removedINodes); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java new file mode 100644 index 00000000000..8a166e69b8a --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java @@ -0,0 +1,35 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode.snapshot; + +import org.apache.hadoop.hdfs.server.namenode.INodeFileAttributes; + +/** A list of FileDiffs for storing snapshot data. */ +public class FileDiffList extends + AbstractINodeDiffList { + + @Override + FileDiff createDiff(Snapshot snapshot, INodeFileWithSnapshot file) { + return new FileDiff(snapshot, file); + } + + @Override + INodeFileAttributes createSnapshotCopy(INodeFileWithSnapshot currentINode) { + return new INodeFileAttributes.SnapshotCopy(currentINode); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshot.java deleted file mode 100644 index 31a672f1e70..00000000000 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshot.java +++ /dev/null @@ -1,227 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdfs.server.namenode.snapshot; - -import java.io.DataOutput; -import java.io.IOException; -import java.util.List; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; -import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization; -import org.apache.hadoop.hdfs.server.namenode.INode; -import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo; -import org.apache.hadoop.hdfs.server.namenode.INodeFile; -import org.apache.hadoop.hdfs.server.namenode.INodeFileAttributes; -import org.apache.hadoop.hdfs.server.namenode.Quota; -import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat.ReferenceMap; - -/** - * An interface for {@link INodeFile} to support snapshot. - */ -@InterfaceAudience.Private -public interface FileWithSnapshot { - /** - * The difference of an {@link INodeFile} between two snapshots. - */ - public static class FileDiff extends AbstractINodeDiff { - /** The file size at snapshot creation time. */ - private final long fileSize; - - private FileDiff(Snapshot snapshot, INodeFile file) { - super(snapshot, null, null); - fileSize = file.computeFileSize(); - } - - /** Constructor used by FSImage loading */ - FileDiff(Snapshot snapshot, INodeFileAttributes snapshotINode, - FileDiff posteriorDiff, long fileSize) { - super(snapshot, snapshotINode, posteriorDiff); - this.fileSize = fileSize; - } - - /** @return the file size in the snapshot. */ - public long getFileSize() { - return fileSize; - } - - private static Quota.Counts updateQuotaAndCollectBlocks( - INodeFile currentINode, FileDiff removed, - BlocksMapUpdateInfo collectedBlocks, final List removedINodes) { - FileWithSnapshot sFile = (FileWithSnapshot) currentINode; - long oldDiskspace = currentINode.diskspaceConsumed(); - if (removed.snapshotINode != null) { - short replication = removed.snapshotINode.getFileReplication(); - short currentRepl = currentINode.getBlockReplication(); - if (currentRepl == 0) { - oldDiskspace = currentINode.computeFileSize(true, true) * replication; - } else if (replication > currentRepl) { - oldDiskspace = oldDiskspace / currentINode.getBlockReplication() - * replication; - } - } - - Util.collectBlocksAndClear(sFile, collectedBlocks, removedINodes); - - long dsDelta = oldDiskspace - currentINode.diskspaceConsumed(); - return Quota.Counts.newInstance(0, dsDelta); - } - - @Override - Quota.Counts combinePosteriorAndCollectBlocks(INodeFile currentINode, - FileDiff posterior, BlocksMapUpdateInfo collectedBlocks, - final List removedINodes) { - return updateQuotaAndCollectBlocks(currentINode, posterior, - collectedBlocks, removedINodes); - } - - @Override - public String toString() { - return super.toString() + " fileSize=" + fileSize + ", rep=" - + (snapshotINode == null? 
"?": snapshotINode.getFileReplication()); - } - - @Override - void write(DataOutput out, ReferenceMap referenceMap) throws IOException { - writeSnapshot(out); - out.writeLong(fileSize); - - // write snapshotINode - if (snapshotINode != null) { - out.writeBoolean(true); - FSImageSerialization.writeINodeFileAttributes(snapshotINode, out); - } else { - out.writeBoolean(false); - } - } - - @Override - Quota.Counts destroyDiffAndCollectBlocks(INodeFile currentINode, - BlocksMapUpdateInfo collectedBlocks, final List removedINodes) { - return updateQuotaAndCollectBlocks(currentINode, this, - collectedBlocks, removedINodes); - } - } - - /** A list of FileDiffs for storing snapshot data. */ - public static class FileDiffList - extends AbstractINodeDiffList { - - @Override - FileDiff createDiff(Snapshot snapshot, INodeFile file) { - return new FileDiff(snapshot, file); - } - - @Override - INodeFileAttributes createSnapshotCopy(INodeFile currentINode) { - return new INodeFileAttributes.SnapshotCopy(currentINode); - } - } - - /** @return the {@link INodeFile} view of this object. */ - public INodeFile asINodeFile(); - - /** @return the file diff list. */ - public FileDiffList getDiffs(); - - /** Is the current file deleted? */ - public boolean isCurrentFileDeleted(); - - /** Delete the file from the current tree */ - public void deleteCurrentFile(); - - /** Utility methods for the classes which implement the interface. */ - public static class Util { - /** - * @return block replication, which is the max file replication among - * the file and the diff list. - */ - public static short getBlockReplication(final FileWithSnapshot file) { - short max = file.isCurrentFileDeleted()? 0 - : file.asINodeFile().getFileReplication(); - for(FileDiff d : file.getDiffs()) { - if (d.snapshotINode != null) { - final short replication = d.snapshotINode.getFileReplication(); - if (replication > max) { - max = replication; - } - } - } - return max; - } - - /** - * If some blocks at the end of the block list no longer belongs to - * any inode, collect them and update the block list. - */ - static void collectBlocksAndClear(final FileWithSnapshot file, - final BlocksMapUpdateInfo info, final List removedINodes) { - // check if everything is deleted. - if (file.isCurrentFileDeleted() - && file.getDiffs().asList().isEmpty()) { - file.asINodeFile().destroyAndCollectBlocks(info, removedINodes); - return; - } - - // find max file size. - final long max; - if (file.isCurrentFileDeleted()) { - final FileDiff last = file.getDiffs().getLast(); - max = last == null? 0: last.fileSize; - } else { - max = file.asINodeFile().computeFileSize(); - } - - collectBlocksBeyondMax(file, max, info); - } - - private static void collectBlocksBeyondMax(final FileWithSnapshot file, - final long max, final BlocksMapUpdateInfo collectedBlocks) { - final BlockInfo[] oldBlocks = file.asINodeFile().getBlocks(); - if (oldBlocks != null) { - //find the minimum n such that the size of the first n blocks > max - int n = 0; - for(long size = 0; n < oldBlocks.length && max > size; n++) { - size += oldBlocks[n].getNumBytes(); - } - - // starting from block n, the data is beyond max. - if (n < oldBlocks.length) { - // resize the array. - final BlockInfo[] newBlocks; - if (n == 0) { - newBlocks = null; - } else { - newBlocks = new BlockInfo[n]; - System.arraycopy(oldBlocks, 0, newBlocks, 0, n); - } - - // set new blocks - file.asINodeFile().setBlocks(newBlocks); - - // collect the blocks beyond max. 
- if (collectedBlocks != null) { - for(; n < oldBlocks.length; n++) { - collectedBlocks.addDeleteBlock(oldBlocks[n]); - } - } - } - } - } - } -} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java index 1e2c5dd3db2..e31e5796e88 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java @@ -432,8 +432,8 @@ public class INodeDirectorySnapshottable extends INodeDirectoryWithSnapshot { parentPath.remove(parentPath.size() - 1); } } - } else if (node.isFile() && node.asFile() instanceof FileWithSnapshot) { - FileWithSnapshot file = (FileWithSnapshot) node.asFile(); + } else if (node.isFile() && node.asFile() instanceof INodeFileWithSnapshot) { + INodeFileWithSnapshot file = (INodeFileWithSnapshot) node.asFile(); Snapshot earlierSnapshot = diffReport.isFromEarlier() ? diffReport.from : diffReport.to; Snapshot laterSnapshot = diffReport.isFromEarlier() ? diffReport.to @@ -441,7 +441,7 @@ public class INodeDirectorySnapshottable extends INodeDirectoryWithSnapshot { boolean change = file.getDiffs().changedBetweenSnapshots(earlierSnapshot, laterSnapshot); if (change) { - diffReport.addFileDiff(file.asINodeFile(), relativePath); + diffReport.addFileDiff(file, relativePath); } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java index 5fcd65d875e..33261a23296 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java @@ -804,10 +804,10 @@ public class INodeDirectoryWithSnapshot extends INodeDirectory { // For DstReference node, since the node is not in the created list of // prior, we should treat it as regular file/dir } else if (topNode.isFile() - && topNode.asFile() instanceof FileWithSnapshot) { - FileWithSnapshot fs = (FileWithSnapshot) topNode.asFile(); - counts.add(fs.getDiffs().deleteSnapshotDiff(post, prior, - topNode.asFile(), collectedBlocks, removedINodes, countDiffChange)); + && topNode.asFile() instanceof INodeFileWithSnapshot) { + INodeFileWithSnapshot fs = (INodeFileWithSnapshot) topNode.asFile(); + counts.add(fs.getDiffs().deleteSnapshotDiff(post, prior, fs, + collectedBlocks, removedINodes, countDiffChange)); } else if (topNode.isDirectory()) { INodeDirectory dir = topNode.asDirectory(); ChildrenDiff priorChildrenDiff = null; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java index 7c98cabc4ce..c3dc95bc3e3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java @@ -21,6 +21,7 @@ import java.util.List; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdfs.protocol.QuotaExceededException; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.namenode.INode; import org.apache.hadoop.hdfs.server.namenode.INodeFile; import org.apache.hadoop.hdfs.server.namenode.INodeFileAttributes; @@ -31,14 +32,13 @@ import org.apache.hadoop.hdfs.server.namenode.Quota; * Represent an {@link INodeFile} that is snapshotted. */ @InterfaceAudience.Private -public class INodeFileWithSnapshot extends INodeFile - implements FileWithSnapshot { +public class INodeFileWithSnapshot extends INodeFile { private final FileDiffList diffs; private boolean isCurrentFileDeleted = false; public INodeFileWithSnapshot(INodeFile f) { - this(f, f instanceof FileWithSnapshot? - ((FileWithSnapshot)f).getDiffs(): null); + this(f, f instanceof INodeFileWithSnapshot ? + ((INodeFileWithSnapshot) f).getDiffs() : null); } public INodeFileWithSnapshot(INodeFile f, FileDiffList diffs) { @@ -46,12 +46,12 @@ public class INodeFileWithSnapshot extends INodeFile this.diffs = diffs != null? diffs: new FileDiffList(); } - @Override + /** Is the current file deleted? */ public boolean isCurrentFileDeleted() { return isCurrentFileDeleted; } - @Override + /** Delete the file from the current tree */ public void deleteCurrentFile() { isCurrentFileDeleted = true; } @@ -70,12 +70,7 @@ public class INodeFileWithSnapshot extends INodeFile return this; } - @Override - public INodeFile asINodeFile() { - return this; - } - - @Override + /** @return the file diff list. */ public FileDiffList getDiffs() { return diffs; } @@ -90,7 +85,7 @@ public class INodeFileWithSnapshot extends INodeFile recordModification(prior, null); deleteCurrentFile(); } - Util.collectBlocksAndClear(this, collectedBlocks, removedINodes); + this.collectBlocksAndClear(collectedBlocks, removedINodes); return Quota.Counts.newInstance(); } else { // delete a snapshot prior = getDiffs().updatePrior(snapshot, prior); @@ -104,4 +99,100 @@ public class INodeFileWithSnapshot extends INodeFile return super.toDetailString() + (isCurrentFileDeleted()? "(DELETED), ": ", ") + diffs; } + + /** + * @return block replication, which is the max file replication among + * the file and the diff list. + */ + @Override + public short getBlockReplication() { + short max = isCurrentFileDeleted() ? 0 : getFileReplication(); + for(FileDiff d : getDiffs()) { + if (d.snapshotINode != null) { + final short replication = d.snapshotINode.getFileReplication(); + if (replication > max) { + max = replication; + } + } + } + return max; + } + + /** + * If some blocks at the end of the block list no longer belongs to + * any inode, collect them and update the block list. + */ + void collectBlocksAndClear(final BlocksMapUpdateInfo info, + final List removedINodes) { + // check if everything is deleted. + if (isCurrentFileDeleted() && getDiffs().asList().isEmpty()) { + destroyAndCollectBlocks(info, removedINodes); + return; + } + + // find max file size. + final long max; + if (isCurrentFileDeleted()) { + final FileDiff last = getDiffs().getLast(); + max = last == null? 
0: last.getFileSize(); + } else { + max = computeFileSize(); + } + + collectBlocksBeyondMax(max, info); + } + + private void collectBlocksBeyondMax(final long max, + final BlocksMapUpdateInfo collectedBlocks) { + final BlockInfo[] oldBlocks = getBlocks(); + if (oldBlocks != null) { + //find the minimum n such that the size of the first n blocks > max + int n = 0; + for(long size = 0; n < oldBlocks.length && max > size; n++) { + size += oldBlocks[n].getNumBytes(); + } + + // starting from block n, the data is beyond max. + if (n < oldBlocks.length) { + // resize the array. + final BlockInfo[] newBlocks; + if (n == 0) { + newBlocks = null; + } else { + newBlocks = new BlockInfo[n]; + System.arraycopy(oldBlocks, 0, newBlocks, 0, n); + } + + // set new blocks + setBlocks(newBlocks); + + // collect the blocks beyond max. + if (collectedBlocks != null) { + for(; n < oldBlocks.length; n++) { + collectedBlocks.addDeleteBlock(oldBlocks[n]); + } + } + } + } + } + + Quota.Counts updateQuotaAndCollectBlocks(FileDiff removed, + BlocksMapUpdateInfo collectedBlocks, final List removedINodes) { + long oldDiskspace = this.diskspaceConsumed(); + if (removed.snapshotINode != null) { + short replication = removed.snapshotINode.getFileReplication(); + short currentRepl = getBlockReplication(); + if (currentRepl == 0) { + oldDiskspace = computeFileSize(true, true) * replication; + } else if (replication > currentRepl) { + oldDiskspace = oldDiskspace / getBlockReplication() + * replication; + } + } + + this.collectBlocksAndClear(collectedBlocks, removedINodes); + + long dsDelta = oldDiskspace - diskspaceConsumed(); + return Quota.Counts.newInstance(0, dsDelta); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java index 01233dd7eca..d66b254802e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java @@ -36,8 +36,6 @@ import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryAttributes; import org.apache.hadoop.hdfs.server.namenode.INodeFile; import org.apache.hadoop.hdfs.server.namenode.INodeFileAttributes; import org.apache.hadoop.hdfs.server.namenode.INodeReference; -import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot.FileDiff; -import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot.FileDiffList; import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.DirectoryDiff; import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.DirectoryDiffList; import org.apache.hadoop.hdfs.tools.snapshot.SnapshotDiff; @@ -99,8 +97,8 @@ public class SnapshotFSImageFormat { public static void saveFileDiffList(final INodeFile file, final DataOutput out) throws IOException { - saveINodeDiffs(file instanceof FileWithSnapshot? - ((FileWithSnapshot)file).getDiffs(): null, out, null); + saveINodeDiffs(file instanceof INodeFileWithSnapshot? 
+ ((INodeFileWithSnapshot) file).getDiffs(): null, out, null); } public static FileDiffList loadFileDiffList(DataInput in, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java index c0bd91cd00a..5111176c7c3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java @@ -63,7 +63,6 @@ import org.apache.hadoop.hdfs.server.namenode.INodeMap; import org.apache.hadoop.hdfs.server.namenode.INodeReference; import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount; import org.apache.hadoop.hdfs.server.namenode.Quota; -import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot.FileDiff; import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.ChildrenDiff; import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.DirectoryDiff; import org.apache.hadoop.hdfs.util.Diff.ListType; From eed47f20ef548a9673709699fb256c47f28ca681 Mon Sep 17 00:00:00 2001 From: Uma Maheswara Rao G Date: Wed, 27 Nov 2013 20:56:22 +0000 Subject: [PATCH 16/27] HADOOP-10132. RPC#stopProxy() should log the class of proxy when IllegalArgumentException is encountered. Contributed by Ted Yu git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1546192 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++ .../hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 141834bd7a2..8dff925d861 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -469,6 +469,9 @@ Release 2.2.1 - UNRELEASED HADOOP-9623 Update jets3t dependency to 0.9.0. (Amandeep Khurana via Colin Patrick McCabe) + HADOOP-10132. RPC#stopProxy() should log the class of proxy when IllegalArgumentException + is encountered (Ted yu via umamahesh) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java index 214b936743f..4ae7956c68e 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java @@ -634,7 +634,7 @@ public class RPC { } catch (IOException e) { LOG.error("Closing proxy or invocation handler caused exception", e); } catch (IllegalArgumentException e) { - LOG.error("RPC.stopProxy called on non proxy.", e); + LOG.error("RPC.stopProxy called on non proxy: class=" + proxy.getClass().getName(), e); } // If you see this error on a mock object in a unit test you're From ec5c8a9865e5491dce9545909729be6eb1effd96 Mon Sep 17 00:00:00 2001 From: Brandon Li Date: Wed, 27 Nov 2013 22:07:37 +0000 Subject: [PATCH 17/27] HDFS-5577. NFS user guide update. 
Contributed by Brandon Li git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1546210 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 ++ .../hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm | 5 +++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index c3cb8a74f4d..5b2ab62e616 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -754,6 +754,8 @@ Release 2.2.1 - UNRELEASED HDFS-5548. Use ConcurrentHashMap in portmap (Haohui Mai via brandonli) + HDFS-5577. NFS user guide update (brandonli) + Release 2.2.0 - 2013-10-13 INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm b/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm index c8de842510d..e976ebdd8e3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm +++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm @@ -53,11 +53,12 @@ HDFS NFS Gateway * If the client mounts the export with access time update allowed, make sure the following property is not disabled in the configuration file. Only NameNode needs to restart after this property is changed. On some Unix systems, the user can disable access time update - by mounting the export with "noatime". + by mounting the export with "noatime". If the export is mounted with "noatime", the user + doesn't need to change the following property and thus no need to restart namenode. ---- - dfs.access.time.precision + dfs.namenode.accesstime.precision 3600000 The access time for HDFS file is precise upto this value. The default value is 1 hour. Setting a value of 0 disables From 6369c8d81972a9a0b6ef41f4508fcb60d34e3d78 Mon Sep 17 00:00:00 2001 From: Vinod Kumar Vavilapalli Date: Wed, 27 Nov 2013 23:22:33 +0000 Subject: [PATCH 18/27] YARN-1239. Modified ResourceManager state-store implementations to start storing version numbers. Contributed by Jian He. 
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1546229 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 3 + ...erver_resourcemanager_service_protos.proto | 11 ++- .../resourcemanager/ResourceManager.java | 1 + .../recovery/FileSystemRMStateStore.java | 40 +++++++++- .../recovery/MemoryRMStateStore.java | 22 ++++- .../recovery/NullRMStateStore.java | 22 +++++ .../recovery/RMStateStore.java | 54 ++++++++++++- .../RMStateVersionIncompatibleException.java | 42 ++++++++++ .../recovery/ZKRMStateStore.java | 41 +++++++++- .../recovery/records/RMStateVersion.java | 80 +++++++++++++++++++ .../records/impl/pb/RMStateVersionPBImpl.java | 76 ++++++++++++++++++ .../recovery/RMStateStoreTestBase.java | 36 +++++++++ .../recovery/TestFSRMStateStore.java | 24 +++++- .../recovery/TestZKRMStateStore.java | 24 +++++- 14 files changed, 459 insertions(+), 17 deletions(-) create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateVersionIncompatibleException.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/RMStateVersion.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/impl/pb/RMStateVersionPBImpl.java diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 20cf15bda48..eab6fcf5eb7 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -123,6 +123,9 @@ Release 2.3.0 - UNRELEASED YARN-1314. Fixed DistributedShell to not fail with multiple arguments for a shell command separated by spaces. (Xuan Gong via vinodkv) + YARN-1239. Modified ResourceManager state-store implementations to start + storing version numbers. 
(Jian He via vinodkv) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto index 6fc82322099..df77486ca60 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto @@ -103,9 +103,9 @@ enum RMAppStateProto { message ApplicationStateDataProto { optional int64 submit_time = 1; - optional int64 start_time = 2; - optional ApplicationSubmissionContextProto application_submission_context = 3; - optional string user = 4; + optional ApplicationSubmissionContextProto application_submission_context = 2; + optional string user = 3; + optional int64 start_time = 4; optional RMAppStateProto application_state = 5; optional string diagnostics = 6 [default = "N/A"]; optional int64 finish_time = 7; @@ -121,3 +121,8 @@ message ApplicationAttemptStateDataProto { optional int64 start_time = 7; optional FinalApplicationStatusProto final_application_status = 8; } + +message RMStateVersionProto { + optional int32 major_version = 1; + optional int32 minor_version = 2; +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java index 82a1f649736..3c187f90e0d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java @@ -457,6 +457,7 @@ public class ResourceManager extends CompositeService implements Recoverable { if(recoveryEnabled) { try { + rmStore.checkVersion(); RMState state = rmStore.loadState(); recover(state); } catch (Exception e) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java index 46a58fc96ab..2ef6bcd62c0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java @@ -44,9 +44,12 @@ import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ApplicationAttemptStateDataProto; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ApplicationStateDataProto; +import 
org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RMStateVersionProto; import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier; +import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.RMStateVersion; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.ApplicationAttemptStateDataPBImpl; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.ApplicationStateDataPBImpl; +import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.RMStateVersionPBImpl; import org.apache.hadoop.yarn.util.ConverterUtils; import com.google.common.annotations.VisibleForTesting; @@ -63,7 +66,9 @@ public class FileSystemRMStateStore extends RMStateStore { public static final Log LOG = LogFactory.getLog(FileSystemRMStateStore.class); - private static final String ROOT_DIR_NAME = "FSRMStateRoot"; + protected static final String ROOT_DIR_NAME = "FSRMStateRoot"; + protected static final RMStateVersion CURRENT_VERSION_INFO = RMStateVersion + .newInstance(1, 0); protected FileSystem fs; @@ -78,7 +83,6 @@ public class FileSystemRMStateStore extends RMStateStore { @Override public synchronized void initInternal(Configuration conf) throws Exception{ - fsWorkingPath = new Path(conf.get(YarnConfiguration.FS_RM_STATE_STORE_URI)); rootDirPath = new Path(fsWorkingPath, ROOT_DIR_NAME); rmDTSecretManagerRoot = new Path(rootDirPath, RM_DT_SECRET_MANAGER_ROOT); @@ -100,6 +104,36 @@ public class FileSystemRMStateStore extends RMStateStore { fs.close(); } + @Override + protected RMStateVersion getCurrentVersion() { + return CURRENT_VERSION_INFO; + } + + @Override + protected synchronized RMStateVersion loadVersion() throws Exception { + Path versionNodePath = getNodePath(rootDirPath, VERSION_NODE); + if (fs.exists(versionNodePath)) { + FileStatus status = fs.getFileStatus(versionNodePath); + byte[] data = readFile(versionNodePath, status.getLen()); + RMStateVersion version = + new RMStateVersionPBImpl(RMStateVersionProto.parseFrom(data)); + return version; + } + return null; + } + + @Override + protected synchronized void storeVersion() throws Exception { + Path versionNodePath = getNodePath(rootDirPath, VERSION_NODE); + byte[] data = + ((RMStateVersionPBImpl) CURRENT_VERSION_INFO).getProto().toByteArray(); + if (fs.exists(versionNodePath)) { + updateFile(versionNodePath, data); + } else { + writeFile(versionNodePath, data); + } + } + @Override public synchronized RMState loadState() throws Exception { RMState rmState = new RMState(); @@ -430,7 +464,7 @@ public class FileSystemRMStateStore extends RMStateStore { fs.rename(tempPath, outputPath); } - private void updateFile(Path outputPath, byte[] data) throws Exception { + protected void updateFile(Path outputPath, byte[] data) throws Exception { if (fs.exists(outputPath)) { deleteFile(outputPath); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/MemoryRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/MemoryRMStateStore.java index 495c292dd1e..d5ff5ededd4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/MemoryRMStateStore.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/MemoryRMStateStore.java @@ -32,6 +32,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier; +import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.RMStateVersion; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.ApplicationAttemptStateDataPBImpl; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.ApplicationStateDataPBImpl; import org.apache.hadoop.yarn.util.ConverterUtils; @@ -43,12 +44,15 @@ import com.google.common.annotations.VisibleForTesting; public class MemoryRMStateStore extends RMStateStore { RMState state = new RMState(); - @VisibleForTesting public RMState getState() { return state; } - + + @Override + public void checkVersion() throws Exception { + } + @Override public synchronized RMState loadState() throws Exception { // return a copy of the state to allow for modification of the real state @@ -224,4 +228,18 @@ public class MemoryRMStateStore extends RMStateStore { state.rmSecretManagerState.getMasterKeyState(); rmDTMasterKeyState.remove(delegationKey); } + + @Override + protected RMStateVersion loadVersion() throws Exception { + return null; + } + + @Override + protected void storeVersion() throws Exception { + } + + @Override + protected RMStateVersion getCurrentVersion() { + return null; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/NullRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/NullRMStateStore.java index c8ad1c42ca9..c212c1fe855 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/NullRMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/NullRMStateStore.java @@ -23,6 +23,7 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.token.delegation.DelegationKey; import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier; +import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.RMStateVersion; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.ApplicationAttemptStateDataPBImpl; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.ApplicationStateDataPBImpl; @@ -99,6 +100,27 @@ public class NullRMStateStore extends RMStateStore { @Override protected void updateApplicationAttemptStateInternal(String attemptId, ApplicationAttemptStateDataPBImpl attemptStateData) throws Exception { + } + + @Override + public void checkVersion() throws Exception { // Do nothing } + + @Override + protected RMStateVersion loadVersion() throws Exception { + // Do nothing + return null; + } + + @Override + protected void storeVersion() throws Exception { + // Do nothing + } + + @Override + protected 
RMStateVersion getCurrentVersion() { + // Do nothing + return null; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java index b9724d210bb..5e0e94429c4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java @@ -43,18 +43,18 @@ import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationSubmissionContextPBImpl; - import org.apache.hadoop.yarn.event.AsyncDispatcher; import org.apache.hadoop.yarn.event.Dispatcher; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.security.AMRMTokenIdentifier; import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier; +import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.RMStateVersion; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.ApplicationAttemptStateDataPBImpl; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.ApplicationStateDataPBImpl; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppNewSavedEvent; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppRemovedEvent; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState; -import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppNewSavedEvent; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppUpdateSavedEvent; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState; @@ -78,6 +78,7 @@ public abstract class RMStateStore extends AbstractService { protected static final String DELEGATION_TOKEN_PREFIX = "RMDelegationToken_"; protected static final String DELEGATION_TOKEN_SEQUENCE_NUMBER_PREFIX = "RMDTSequenceNumber_"; + protected static final String VERSION_NODE = "RMVersionNode"; public static final Log LOG = LogFactory.getLog(RMStateStore.class); @@ -304,7 +305,54 @@ public abstract class RMStateStore extends AbstractService { * after this */ protected abstract void closeInternal() throws Exception; - + + /** + * 1) Versioning scheme: major.minor. For e.g. 1.0, 1.1, 1.2...1.25, 2.0 etc. + * 2) Any incompatible change of state-store is a major upgrade, and any + * compatible change of state-store is a minor upgrade. + * 3) If theres's no version, treat it as 1.0. + * 4) Within a minor upgrade, say 1.1 to 1.2: + * overwrite the version info and proceed as normal. + * 5) Within a major upgrade, say 1.2 to 2.0: + * throw exception and indicate user to use a separate upgrade tool to + * upgrade RM state. 
+ */ + public void checkVersion() throws Exception { + RMStateVersion loadedVersion = loadVersion(); + LOG.info("Loaded RM state version info " + loadedVersion); + if (loadedVersion != null && loadedVersion.equals(getCurrentVersion())) { + return; + } + // if there is no version info, treat it as 1.0; + if (loadedVersion == null) { + loadedVersion = RMStateVersion.newInstance(1, 0); + } + if (loadedVersion.isCompatibleTo(getCurrentVersion())) { + LOG.info("Storing RM state version info " + getCurrentVersion()); + storeVersion(); + } else { + throw new RMStateVersionIncompatibleException( + "Expecting RM state version " + getCurrentVersion() + + ", but loading version " + loadedVersion); + } + } + + /** + * Derived class use this method to load the version information from state + * store. + */ + protected abstract RMStateVersion loadVersion() throws Exception; + + /** + * Derived class use this method to store the version information. + */ + protected abstract void storeVersion() throws Exception; + + /** + * Get the current version of the underlying state store. + */ + protected abstract RMStateVersion getCurrentVersion(); + /** * Blocking API * The derived class must recover state from the store and return a new diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateVersionIncompatibleException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateVersionIncompatibleException.java new file mode 100644 index 00000000000..135868f1a57 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateVersionIncompatibleException.java @@ -0,0 +1,42 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.resourcemanager.recovery; + +import org.apache.hadoop.yarn.exceptions.YarnException; + +/** + * This exception is thrown by ResourceManager if it's loading an incompatible + * version of state from state store on recovery. 
+ */ +public class RMStateVersionIncompatibleException extends YarnException { + + private static final long serialVersionUID = 1364408L; + + public RMStateVersionIncompatibleException(Throwable cause) { + super(cause); + } + + public RMStateVersionIncompatibleException(String message) { + super(message); + } + + public RMStateVersionIncompatibleException(String message, Throwable cause) { + super(message, cause); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java index 45afb4e5d9f..1621d8327a5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java @@ -33,7 +33,6 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ha.HAServiceProtocol; import org.apache.hadoop.io.DataInputByteBuffer; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.token.delegation.DelegationKey; @@ -41,16 +40,18 @@ import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.ZKUtil; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; -import org.apache.hadoop.yarn.client.RMHAServiceTarget; import org.apache.hadoop.yarn.conf.HAUtil; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ApplicationAttemptStateDataProto; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ApplicationStateDataProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RMStateVersionProto; import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; +import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.RMStateVersion; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.ApplicationAttemptStateDataPBImpl; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.ApplicationStateDataPBImpl; +import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.RMStateVersionPBImpl; import org.apache.hadoop.yarn.util.ConverterUtils; import org.apache.zookeeper.CreateMode; import org.apache.zookeeper.KeeperException; @@ -64,9 +65,9 @@ import org.apache.zookeeper.ZooKeeper; import org.apache.zookeeper.data.ACL; import org.apache.zookeeper.data.Id; import org.apache.zookeeper.data.Stat; +import org.apache.zookeeper.server.auth.DigestAuthenticationProvider; import com.google.common.annotations.VisibleForTesting; -import org.apache.zookeeper.server.auth.DigestAuthenticationProvider; @Private @Unstable @@ -74,7 +75,9 @@ public class ZKRMStateStore extends RMStateStore { public static 
final Log LOG = LogFactory.getLog(ZKRMStateStore.class); - private static final String ROOT_ZNODE_NAME = "ZKRMStateRoot"; + protected static final String ROOT_ZNODE_NAME = "ZKRMStateRoot"; + protected static final RMStateVersion CURRENT_VERSION_INFO = RMStateVersion + .newInstance(1, 0); private int numRetries; private String zkHostPort = null; @@ -301,6 +304,36 @@ public class ZKRMStateStore extends RMStateStore { closeZkClients(); } + @Override + protected RMStateVersion getCurrentVersion() { + return CURRENT_VERSION_INFO; + } + + @Override + protected synchronized void storeVersion() throws Exception { + String versionNodePath = getNodePath(zkRootNodePath, VERSION_NODE); + byte[] data = + ((RMStateVersionPBImpl) CURRENT_VERSION_INFO).getProto().toByteArray(); + if (zkClient.exists(versionNodePath, true) != null) { + setDataWithRetries(versionNodePath, data, -1); + } else { + createWithRetries(versionNodePath, data, zkAcl, CreateMode.PERSISTENT); + } + } + + @Override + protected synchronized RMStateVersion loadVersion() throws Exception { + String versionNodePath = getNodePath(zkRootNodePath, VERSION_NODE); + + if (zkClient.exists(versionNodePath, true) != null) { + byte[] data = getDataWithRetries(versionNodePath, true); + RMStateVersion version = + new RMStateVersionPBImpl(RMStateVersionProto.parseFrom(data)); + return version; + } + return null; + } + @Override public synchronized RMState loadState() throws Exception { RMState rmState = new RMState(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/RMStateVersion.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/RMStateVersion.java new file mode 100644 index 00000000000..cfee512b5d4 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/RMStateVersion.java @@ -0,0 +1,80 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.resourcemanager.recovery.records; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.util.Records; + +/** + * The version information of RM state. 
+ */ +@Private +@Unstable +public abstract class RMStateVersion { + + public static RMStateVersion newInstance(int majorVersion, int minorVersion) { + RMStateVersion version = Records.newRecord(RMStateVersion.class); + version.setMajorVersion(majorVersion); + version.setMinorVersion(minorVersion); + return version; + } + + public abstract int getMajorVersion(); + + public abstract void setMajorVersion(int majorVersion); + + public abstract int getMinorVersion(); + + public abstract void setMinorVersion(int minorVersion); + + public String toString() { + return getMajorVersion() + "." + getMinorVersion(); + } + + public boolean isCompatibleTo(RMStateVersion version) { + return getMajorVersion() == version.getMajorVersion(); + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + getMajorVersion(); + result = prime * result + getMinorVersion(); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) + return true; + if (obj == null) + return false; + if (getClass() != obj.getClass()) + return false; + RMStateVersion other = (RMStateVersion) obj; + if (this.getMajorVersion() == other.getMajorVersion() + && this.getMinorVersion() == other.getMinorVersion()) { + return true; + } else { + return false; + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/impl/pb/RMStateVersionPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/impl/pb/RMStateVersionPBImpl.java new file mode 100644 index 00000000000..f960413ce64 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/impl/pb/RMStateVersionPBImpl.java @@ -0,0 +1,76 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb; + +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RMStateVersionProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RMStateVersionProtoOrBuilder; +import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.RMStateVersion; + +public class RMStateVersionPBImpl extends RMStateVersion { + + RMStateVersionProto proto = RMStateVersionProto.getDefaultInstance(); + RMStateVersionProto.Builder builder = null; + boolean viaProto = false; + + public RMStateVersionPBImpl() { + builder = RMStateVersionProto.newBuilder(); + } + + public RMStateVersionPBImpl(RMStateVersionProto proto) { + this.proto = proto; + viaProto = true; + } + + public RMStateVersionProto getProto() { + proto = viaProto ? proto : builder.build(); + viaProto = true; + return proto; + } + + private void maybeInitBuilder() { + if (viaProto || builder == null) { + builder = RMStateVersionProto.newBuilder(proto); + } + viaProto = false; + } + + @Override + public int getMajorVersion() { + RMStateVersionProtoOrBuilder p = viaProto ? proto : builder; + return p.getMajorVersion(); + } + + @Override + public void setMajorVersion(int major) { + maybeInitBuilder(); + builder.setMajorVersion(major); + } + + @Override + public int getMinorVersion() { + RMStateVersionProtoOrBuilder p = viaProto ? proto : builder; + return p.getMinorVersion(); + } + + @Override + public void setMinorVersion(int minor) { + maybeInitBuilder(); + builder.setMinorVersion(minor); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStoreTestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStoreTestBase.java index 95c14bfbf69..417fdb147cc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStoreTestBase.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStoreTestBase.java @@ -58,6 +58,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.Appli import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.ApplicationState; import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.RMDTSecretManagerState; import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.RMState; +import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.RMStateVersion; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt; @@ -106,6 +107,8 @@ public class RMStateStoreTestBase extends ClientBaseWithFixes{ interface RMStateStoreHelper { RMStateStore getRMStateStore() throws Exception; boolean isFinalStateValid() throws Exception; + void writeVersion(RMStateVersion version) throws Exception; + RMStateVersion getCurrentVersion() throws Exception; } void waitNotify(TestDispatcher dispatcher) { @@ -379,4 +382,37 @@ public class RMStateStoreTestBase extends ClientBaseWithFixes{ appToken.setService(new Text("appToken 
service")); return appToken; } + + public void testCheckVersion(RMStateStoreHelper stateStoreHelper) + throws Exception { + RMStateStore store = stateStoreHelper.getRMStateStore(); + store.setRMDispatcher(new TestDispatcher()); + + // default version + RMStateVersion defaultVersion = stateStoreHelper.getCurrentVersion(); + store.checkVersion(); + Assert.assertEquals(defaultVersion, store.loadVersion()); + + // compatible version + RMStateVersion compatibleVersion = + RMStateVersion.newInstance(defaultVersion.getMajorVersion(), + defaultVersion.getMinorVersion() + 2); + stateStoreHelper.writeVersion(compatibleVersion); + Assert.assertEquals(compatibleVersion, store.loadVersion()); + store.checkVersion(); + // overwrite the compatible version + Assert.assertEquals(defaultVersion, store.loadVersion()); + + // incompatible version + RMStateVersion incompatibleVersion = + RMStateVersion.newInstance(defaultVersion.getMajorVersion() + 2, + defaultVersion.getMinorVersion()); + stateStoreHelper.writeVersion(incompatibleVersion); + try { + store.checkVersion(); + Assert.fail("Invalid version, should fail."); + } catch (Throwable t) { + Assert.assertTrue(t instanceof RMStateVersionIncompatibleException); + } + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestFSRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestFSRMStateStore.java index a1a6eab3fd3..63fe97557c5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestFSRMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestFSRMStateStore.java @@ -32,6 +32,8 @@ import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.RMStateVersion; +import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.RMStateVersionPBImpl; import org.apache.hadoop.yarn.util.ConverterUtils; import org.junit.Test; @@ -42,7 +44,7 @@ public class TestFSRMStateStore extends RMStateStoreTestBase { class TestFSRMStateStoreTester implements RMStateStoreHelper { Path workingDirPathURI; - FileSystemRMStateStore store; + TestFileSystemRMStore store; MiniDFSCluster cluster; class TestFileSystemRMStore extends FileSystemRMStateStore { @@ -54,6 +56,14 @@ public class TestFSRMStateStore extends RMStateStoreTestBase { start(); Assert.assertNotNull(fs); } + + public Path getVersionNode() { + return new Path(new Path(workingDirPathURI, ROOT_DIR_NAME), VERSION_NODE); + } + + public RMStateVersion getCurrentVersion() { + return CURRENT_VERSION_INFO; + } } public TestFSRMStateStoreTester(MiniDFSCluster cluster) throws Exception { @@ -81,6 +91,17 @@ public class TestFSRMStateStore extends RMStateStoreTestBase { FileStatus[] files = fs.listStatus(workingDirPathURI); return files.length == 1; } + + @Override + public void writeVersion(RMStateVersion version) throws Exception { + store.updateFile(store.getVersionNode(), ((RMStateVersionPBImpl) version) + 
.getProto().toByteArray()); + } + + @Override + public RMStateVersion getCurrentVersion() throws Exception { + return store.getCurrentVersion(); + } } @Test @@ -113,6 +134,7 @@ public class TestFSRMStateStore extends RMStateStoreTestBase { Assert.assertFalse(fileSystemRMStateStore.fsWorkingPath .getFileSystem(conf).exists(tempAppAttemptFile)); testRMDTSecretManagerStateStore(fsTester); + testCheckVersion(fsTester); } finally { cluster.shutdown(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java index 4138cfaec12..eceeecc6854 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java @@ -44,6 +44,8 @@ import org.apache.hadoop.yarn.conf.HAUtil; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.resourcemanager.ClientRMService; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; +import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.RMStateVersion; +import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.RMStateVersionPBImpl; import org.apache.zookeeper.ZooKeeper; import org.junit.Test; @@ -54,7 +56,7 @@ public class TestZKRMStateStore extends RMStateStoreTestBase { class TestZKRMStateStoreTester implements RMStateStoreHelper { ZooKeeper client; - ZKRMStateStore store; + TestZKRMStateStoreInternal store; class TestZKRMStateStoreInternal extends ZKRMStateStore { @@ -69,6 +71,14 @@ public class TestZKRMStateStore extends RMStateStoreTestBase { public ZooKeeper getNewZooKeeper() throws IOException { return client; } + + public String getVersionNode() { + return znodeWorkingPath + "/" + ROOT_ZNODE_NAME + "/" + VERSION_NODE; + } + + public RMStateVersion getCurrentVersion() { + return CURRENT_VERSION_INFO; + } } public RMStateStore getRMStateStore() throws Exception { @@ -86,6 +96,17 @@ public class TestZKRMStateStore extends RMStateStoreTestBase { List nodes = client.getChildren(store.znodeWorkingPath, false); return nodes.size() == 1; } + + @Override + public void writeVersion(RMStateVersion version) throws Exception { + client.setData(store.getVersionNode(), ((RMStateVersionPBImpl) version) + .getProto().toByteArray(), -1); + } + + @Override + public RMStateVersion getCurrentVersion() throws Exception { + return store.getCurrentVersion(); + } } @Test @@ -93,6 +114,7 @@ public class TestZKRMStateStore extends RMStateStoreTestBase { TestZKRMStateStoreTester zkTester = new TestZKRMStateStoreTester(); testRMAppStateStore(zkTester); testRMDTSecretManagerStateStore(zkTester); + testCheckVersion(zkTester); } private Configuration createHARMConf( From 5ea533c2bfc72fd3adbfd972d18806fbc397e0f8 Mon Sep 17 00:00:00 2001 From: Brandon Li Date: Wed, 27 Nov 2013 23:41:15 +0000 Subject: [PATCH 19/27] HDFS-5563. NFS gateway should commit the buffered data when read request comes after write to the same file. 
Contributed by Brandon Li git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1546233 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop/hdfs/nfs/nfs3/OpenFileCtx.java | 51 ++++++---- .../hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java | 8 ++ .../hadoop/hdfs/nfs/nfs3/WriteManager.java | 52 ++++++++++- .../hadoop/hdfs/nfs/nfs3/TestWrites.java | 92 +++++++++++++++++-- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + 5 files changed, 173 insertions(+), 33 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java index ce2a7d35923..219660c6fa7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java @@ -708,15 +708,28 @@ class OpenFileCtx { } return response; } - + + /** + * Check the commit status with the given offset + * @param commitOffset the offset to commit + * @param channel the channel to return response + * @param xid the xid of the commit request + * @param preOpAttr the preOp attribute + * @param fromRead whether the commit is triggered from read request + * @return one commit status: COMMIT_FINISHED, COMMIT_WAIT, + * COMMIT_INACTIVE_CTX, COMMIT_INACTIVE_WITH_PENDING_WRITE, COMMIT_ERROR + */ public COMMIT_STATUS checkCommit(DFSClient dfsClient, long commitOffset, - Channel channel, int xid, Nfs3FileAttributes preOpAttr) { - // Keep stream active - updateLastAccessTime(); + Channel channel, int xid, Nfs3FileAttributes preOpAttr, boolean fromRead) { + if (!fromRead) { + Preconditions.checkState(channel != null && preOpAttr != null); + // Keep stream active + updateLastAccessTime(); + } Preconditions.checkState(commitOffset >= 0); COMMIT_STATUS ret = checkCommitInternal(commitOffset, channel, xid, - preOpAttr); + preOpAttr, fromRead); if (LOG.isDebugEnabled()) { LOG.debug("Got commit status: " + ret.name()); } @@ -743,14 +756,10 @@ class OpenFileCtx { } return ret; } - - /** - * return one commit status: COMMIT_FINISHED, COMMIT_WAIT, - * COMMIT_INACTIVE_CTX, COMMIT_INACTIVE_WITH_PENDING_WRITE, COMMIT_ERROR - */ + @VisibleForTesting synchronized COMMIT_STATUS checkCommitInternal(long commitOffset, - Channel channel, int xid, Nfs3FileAttributes preOpAttr) { + Channel channel, int xid, Nfs3FileAttributes preOpAttr, boolean fromRead) { if (!activeState) { if (pendingWrites.isEmpty()) { return COMMIT_STATUS.COMMIT_INACTIVE_CTX; @@ -767,9 +776,11 @@ class OpenFileCtx { if (commitOffset > 0) { if (commitOffset > flushed) { - CommitCtx commitCtx = new CommitCtx(commitOffset, channel, xid, - preOpAttr); - pendingCommits.put(commitOffset, commitCtx); + if (!fromRead) { + CommitCtx commitCtx = new CommitCtx(commitOffset, channel, xid, + preOpAttr); + pendingCommits.put(commitOffset, commitCtx); + } return COMMIT_STATUS.COMMIT_WAIT; } else { return COMMIT_STATUS.COMMIT_DO_SYNC; @@ -784,11 +795,13 @@ class OpenFileCtx { // do a sync here though the output stream might be closed. 
return COMMIT_STATUS.COMMIT_FINISHED; } else { - // Insert commit - long maxOffset = key.getKey().getMax() - 1; - Preconditions.checkState(maxOffset > 0); - CommitCtx commitCtx = new CommitCtx(maxOffset, channel, xid, preOpAttr); - pendingCommits.put(maxOffset, commitCtx); + if (!fromRead) { + // Insert commit + long maxOffset = key.getKey().getMax() - 1; + Preconditions.checkState(maxOffset > 0); + CommitCtx commitCtx = new CommitCtx(maxOffset, channel, xid, preOpAttr); + pendingCommits.put(maxOffset, commitCtx); + } return COMMIT_STATUS.COMMIT_WAIT; } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java index 91ce8ef24dc..17670a9afe8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java @@ -628,6 +628,14 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } } + // In case there is buffered data for the same file, flush it. This can be + // optimized later by reading from the cache. + int ret = writeManager.commitBeforeRead(dfsClient, handle, offset + count); + if (ret != Nfs3Status.NFS3_OK) { + LOG.warn("commitBeforeRead didn't succeed with ret=" + ret + + ". Read may not get most recent data."); + } + try { int buffSize = Math.min(MAX_READ_TRANSFER_SIZE, count); byte[] readbuffer = new byte[buffSize]; diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java index aa6a8a3650b..01b3dac8648 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hdfs.nfs.nfs3; import java.io.IOException; -import java.util.concurrent.ConcurrentMap; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -41,11 +40,9 @@ import org.apache.hadoop.nfs.nfs3.response.WRITE3Response; import org.apache.hadoop.nfs.nfs3.response.WccData; import org.apache.hadoop.oncrpc.XDR; import org.apache.hadoop.oncrpc.security.VerifierNone; -import org.apache.hadoop.util.Daemon; import org.jboss.netty.channel.Channel; import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.Maps; /** * Manage the writes and responds asynchronously. @@ -207,6 +204,51 @@ public class WriteManager { return; } + // Do a possible commit before read request in case there is buffered data + // inside DFSClient which has been flushed but not synced. + int commitBeforeRead(DFSClient dfsClient, FileHandle fileHandle, + long commitOffset) { + int status; + OpenFileCtx openFileCtx = fileContextCache.get(fileHandle); + + if (openFileCtx == null) { + if (LOG.isDebugEnabled()) { + LOG.debug("No opened stream for fileId:" + fileHandle.getFileId() + + " commitOffset=" + commitOffset + + ". 
Return success in this case."); + } + status = Nfs3Status.NFS3_OK; + + } else { + COMMIT_STATUS ret = openFileCtx.checkCommit(dfsClient, commitOffset, + null, 0, null, true); + switch (ret) { + case COMMIT_FINISHED: + case COMMIT_INACTIVE_CTX: + status = Nfs3Status.NFS3_OK; + break; + case COMMIT_INACTIVE_WITH_PENDING_WRITE: + case COMMIT_ERROR: + status = Nfs3Status.NFS3ERR_IO; + break; + case COMMIT_WAIT: + /** + * This should happen rarely in some possible cases, such as read + * request arrives before DFSClient is able to quickly flush data to DN, + * or Prerequisite writes is not available. Won't wait since we don't + * want to block read. + */ + status = Nfs3Status.NFS3ERR_JUKEBOX; + break; + default: + LOG.error("Should not get commit return code:" + ret.name()); + throw new RuntimeException("Should not get commit return code:" + + ret.name()); + } + } + return status; + } + void handleCommit(DFSClient dfsClient, FileHandle fileHandle, long commitOffset, Channel channel, int xid, Nfs3FileAttributes preOpAttr) { int status; @@ -219,9 +261,8 @@ public class WriteManager { } else { COMMIT_STATUS ret = openFileCtx.checkCommit(dfsClient, commitOffset, - channel, xid, preOpAttr); + channel, xid, preOpAttr, false); switch (ret) { - case COMMIT_DO_SYNC: case COMMIT_FINISHED: case COMMIT_INACTIVE_CTX: status = Nfs3Status.NFS3_OK; @@ -234,6 +275,7 @@ public class WriteManager { // Do nothing. Commit is async now. return; default: + LOG.error("Should not get commit return code:" + ret.name()); throw new RuntimeException("Should not get commit return code:" + ret.name()); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java index d16b268382c..2ef614a1edf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hdfs.nfs.nfs3; +import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -26,6 +27,7 @@ import java.nio.ByteBuffer; import java.util.Arrays; import java.util.concurrent.ConcurrentNavigableMap; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSClient; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; @@ -39,6 +41,7 @@ import org.apache.hadoop.nfs.nfs3.IdUserGroup; import org.apache.hadoop.nfs.nfs3.Nfs3Constant; import org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow; import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes; +import org.apache.hadoop.nfs.nfs3.Nfs3Status; import org.apache.hadoop.nfs.nfs3.request.CREATE3Request; import org.apache.hadoop.nfs.nfs3.request.READ3Request; import org.apache.hadoop.nfs.nfs3.request.SetAttr3; @@ -47,6 +50,7 @@ import org.apache.hadoop.nfs.nfs3.response.CREATE3Response; import org.apache.hadoop.nfs.nfs3.response.READ3Response; import org.apache.hadoop.oncrpc.XDR; import org.apache.hadoop.oncrpc.security.SecurityHandler; +import org.jboss.netty.channel.Channel; import org.junit.Assert; import org.junit.Test; import org.mockito.Mockito; @@ -139,32 +143,33 @@ public class TestWrites { // Test inactive open file context ctx.setActiveStatusForTest(false); - ret = ctx.checkCommit(dfsClient, 0, null, 1, attr); + Channel ch = Mockito.mock(Channel.class); + 
ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, false); Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_INACTIVE_CTX); ctx.getPendingWritesForTest().put(new OffsetRange(5, 10), new WriteCtx(null, 0, 0, 0, null, null, null, 0, false, null)); - ret = ctx.checkCommit(dfsClient, 0, null, 1, attr); + ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, false); Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_INACTIVE_WITH_PENDING_WRITE); // Test request with non zero commit offset ctx.setActiveStatusForTest(true); Mockito.when(fos.getPos()).thenReturn((long) 10); - COMMIT_STATUS status = ctx.checkCommitInternal(5, null, 1, attr); + COMMIT_STATUS status = ctx.checkCommitInternal(5, null, 1, attr, false); Assert.assertTrue(status == COMMIT_STATUS.COMMIT_DO_SYNC); // Do_SYNC state will be updated to FINISHED after data sync - ret = ctx.checkCommit(dfsClient, 5, null, 1, attr); + ret = ctx.checkCommit(dfsClient, 5, ch, 1, attr, false); Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_FINISHED); - status = ctx.checkCommitInternal(10, null, 1, attr); + status = ctx.checkCommitInternal(10, ch, 1, attr, false); Assert.assertTrue(status == COMMIT_STATUS.COMMIT_DO_SYNC); - ret = ctx.checkCommit(dfsClient, 10, null, 1, attr); + ret = ctx.checkCommit(dfsClient, 10, ch, 1, attr, false); Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_FINISHED); ConcurrentNavigableMap commits = ctx .getPendingCommitsForTest(); Assert.assertTrue(commits.size() == 0); - ret = ctx.checkCommit(dfsClient, 11, null, 1, attr); + ret = ctx.checkCommit(dfsClient, 11, ch, 1, attr, false); Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_WAIT); Assert.assertTrue(commits.size() == 1); long key = commits.firstKey(); @@ -173,7 +178,7 @@ public class TestWrites { // Test request with zero commit offset commits.remove(new Long(11)); // There is one pending write [5,10] - ret = ctx.checkCommit(dfsClient, 0, null, 1, attr); + ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, false); Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_WAIT); Assert.assertTrue(commits.size() == 1); key = commits.firstKey(); @@ -181,10 +186,79 @@ public class TestWrites { // Empty pending writes ctx.getPendingWritesForTest().remove(new OffsetRange(5, 10)); - ret = ctx.checkCommit(dfsClient, 0, null, 1, attr); + ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, false); Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_FINISHED); } + @Test + // Validate all the commit check return codes OpenFileCtx.COMMIT_STATUS, which + // includes COMMIT_FINISHED, COMMIT_WAIT, COMMIT_INACTIVE_CTX, + // COMMIT_INACTIVE_WITH_PENDING_WRITE, COMMIT_ERROR, and COMMIT_DO_SYNC. 
+ public void testCheckCommitFromRead() throws IOException { + DFSClient dfsClient = Mockito.mock(DFSClient.class); + Nfs3FileAttributes attr = new Nfs3FileAttributes(); + HdfsDataOutputStream fos = Mockito.mock(HdfsDataOutputStream.class); + Mockito.when(fos.getPos()).thenReturn((long) 0); + + OpenFileCtx ctx = new OpenFileCtx(fos, attr, "/dumpFilePath", dfsClient, + new IdUserGroup()); + + FileHandle h = new FileHandle(1); // fake handle for "/dumpFilePath" + COMMIT_STATUS ret; + WriteManager wm = new WriteManager(new IdUserGroup(), new Configuration()); + assertTrue(wm.addOpenFileStream(h, ctx)); + + // Test inactive open file context + ctx.setActiveStatusForTest(false); + Channel ch = Mockito.mock(Channel.class); + ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, true); + assertEquals( COMMIT_STATUS.COMMIT_INACTIVE_CTX, ret); + assertEquals(Nfs3Status.NFS3_OK, wm.commitBeforeRead(dfsClient, h, 0)); + + ctx.getPendingWritesForTest().put(new OffsetRange(5, 10), + new WriteCtx(null, 0, 0, 0, null, null, null, 0, false, null)); + ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, true); + assertEquals(COMMIT_STATUS.COMMIT_INACTIVE_WITH_PENDING_WRITE, ret); + assertEquals(Nfs3Status.NFS3ERR_IO, wm.commitBeforeRead(dfsClient, h, 0)); + + // Test request with non zero commit offset + ctx.setActiveStatusForTest(true); + Mockito.when(fos.getPos()).thenReturn((long) 10); + COMMIT_STATUS status = ctx.checkCommitInternal(5, ch, 1, attr, false); + assertEquals(COMMIT_STATUS.COMMIT_DO_SYNC, status); + // Do_SYNC state will be updated to FINISHED after data sync + ret = ctx.checkCommit(dfsClient, 5, ch, 1, attr, true); + assertEquals(COMMIT_STATUS.COMMIT_FINISHED, ret); + assertEquals(Nfs3Status.NFS3_OK, wm.commitBeforeRead(dfsClient, h, 5)); + + status = ctx.checkCommitInternal(10, ch, 1, attr, true); + assertTrue(status == COMMIT_STATUS.COMMIT_DO_SYNC); + ret = ctx.checkCommit(dfsClient, 10, ch, 1, attr, true); + assertEquals(COMMIT_STATUS.COMMIT_FINISHED, ret); + assertEquals(Nfs3Status.NFS3_OK, wm.commitBeforeRead(dfsClient, h, 10)); + + ConcurrentNavigableMap commits = ctx + .getPendingCommitsForTest(); + assertTrue(commits.size() == 0); + ret = ctx.checkCommit(dfsClient, 11, ch, 1, attr, true); + assertEquals(COMMIT_STATUS.COMMIT_WAIT, ret); + assertEquals(0, commits.size()); // commit triggered by read doesn't wait + assertEquals(Nfs3Status.NFS3ERR_JUKEBOX, wm.commitBeforeRead(dfsClient, h, 11)); + + // Test request with zero commit offset + // There is one pending write [5,10] + ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, true); + assertEquals(COMMIT_STATUS.COMMIT_WAIT, ret); + assertEquals(0, commits.size()); + assertEquals(Nfs3Status.NFS3ERR_JUKEBOX, wm.commitBeforeRead(dfsClient, h, 0)); + + // Empty pending writes + ctx.getPendingWritesForTest().remove(new OffsetRange(5, 10)); + ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, true); + assertEquals(COMMIT_STATUS.COMMIT_FINISHED, ret); + assertEquals(Nfs3Status.NFS3_OK, wm.commitBeforeRead(dfsClient, h, 0)); + } + private void waitWrite(RpcProgramNfs3 nfsd, FileHandle handle, int maxWaitTime) throws InterruptedException { int waitedTime = 0; diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 5b2ab62e616..527b659fe7e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -756,6 +756,9 @@ Release 2.2.1 - UNRELEASED HDFS-5577. NFS user guide update (brandonli) + HDFS-5563. 
NFS gateway should commit the buffered data when read request comes + after write to the same file (brandonli) + Release 2.2.0 - 2013-10-13 INCOMPATIBLE CHANGES From bb11d47758a7e5845ceb196a936fbad1814faf07 Mon Sep 17 00:00:00 2001 From: Andrew Wang Date: Thu, 28 Nov 2013 00:44:54 +0000 Subject: [PATCH 20/27] HDFS-5562. TestCacheDirectives and TestFsDatasetCache should stub out native mlock. Contributed by Colin Patrick McCabe and Akira Ajisaka. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1546246 13f79535-47bb-0310-9956-ffa450edef68 --- .../main/java/org/apache/hadoop/io/nativeio/NativeIO.java | 8 ++++++++ hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ .../org/apache/hadoop/hdfs/server/datanode/DataNode.java | 2 +- .../hadoop/hdfs/server/datanode/TestFsDatasetCache.java | 5 +++-- 4 files changed, 15 insertions(+), 3 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java index c7eeb1c3969..7ea7e59151b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java @@ -142,6 +142,10 @@ public class NativeIO { NativeIO.POSIX.posixFadviseIfPossible(identifier, fd, offset, len, flags); } + + public boolean verifyCanMlock() { + return NativeIO.isAvailable(); + } } /** @@ -163,6 +167,10 @@ public class NativeIO { public long getOperatingSystemPageSize() { return 4096; } + + public boolean verifyCanMlock() { + return true; + } } static { diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 527b659fe7e..d44113b9f8b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -416,6 +416,9 @@ Trunk (Unreleased) HDFS-5565. CacheAdmin help should match against non-dashed commands (wang via cmccabe) + HDFS-5562. TestCacheDirectives and TestFsDatasetCache should stub out + native mlock. 
(Colin McCabe and Akira Ajisaka via wang) + Release 2.3.0 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java index d146b51b833..a4834f28286 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java @@ -673,7 +673,7 @@ public class DataNode extends Configured this.dnConf = new DNConf(conf); if (dnConf.maxLockedMemory > 0) { - if (!NativeIO.isAvailable()) { + if (!NativeIO.POSIX.getCacheManipulator().verifyCanMlock()) { throw new RuntimeException(String.format( "Cannot start datanode because the configured max locked memory" + " size (%s) is greater than zero and native code is not available.", diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java index d439fab6aa8..7c5ab7dbd57 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java @@ -113,6 +113,9 @@ public class TestFsDatasetCache { conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1); conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_CACHING_ENABLED_KEY, true); + prevCacheManipulator = NativeIO.POSIX.getCacheManipulator(); + NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator()); + cluster = new MiniDFSCluster.Builder(conf) .numDataNodes(1).build(); cluster.waitActive(); @@ -125,8 +128,6 @@ public class TestFsDatasetCache { spyNN = DataNodeTestUtils.spyOnBposToNN(dn, nn); - prevCacheManipulator = NativeIO.POSIX.getCacheManipulator(); - NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator()); } @After From 9da451cac57f3cd64c2c047675e5b60ca88ecf83 Mon Sep 17 00:00:00 2001 From: Andrew Wang Date: Thu, 28 Nov 2013 07:20:21 +0000 Subject: [PATCH 21/27] HDFS-5430. Support TTL on CacheDirectives. Contributed by Andrew Wang. 
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1546301 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 + .../java/org/apache/hadoop/hdfs/DFSUtil.java | 65 +++++- .../hadoop/hdfs/protocol/CacheDirective.java | 45 +++- .../hdfs/protocol/CacheDirectiveInfo.java | 148 +++++++++++- .../hdfs/protocol/CacheDirectiveStats.java | 33 ++- .../hadoop/hdfs/protocolPB/PBHelper.java | 27 ++- .../CacheReplicationMonitor.java | 27 ++- .../hdfs/server/namenode/CacheManager.java | 106 ++++++--- .../hdfs/server/namenode/FSEditLog.java | 6 +- .../hdfs/server/namenode/FSEditLogLoader.java | 8 +- .../hdfs/server/namenode/FSEditLogOp.java | 69 ++++-- .../server/namenode/FSEditLogOpCodes.java | 6 +- .../apache/hadoop/hdfs/tools/CacheAdmin.java | 81 +++++-- .../main/proto/ClientNamenodeProtocol.proto | 7 + .../org/apache/hadoop/hdfs/TestDFSUtil.java | 41 ++++ .../server/namenode/TestCacheDirectives.java | 67 +++++- .../src/test/resources/editsStored | Bin 4647 -> 4592 bytes .../src/test/resources/editsStored.xml | 213 ++++++++---------- .../src/test/resources/testCacheAdminConf.xml | 30 +-- 19 files changed, 743 insertions(+), 238 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index d44113b9f8b..10027631333 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -226,6 +226,8 @@ Trunk (Unreleased) HDFS-5537. Remove FileWithSnapshot interface. (jing9 via szetszwo) + HDFS-5430. Support TTL on CacheDirectives. (wang) + OPTIMIZATIONS HDFS-5349. DNA_CACHE and DNA_UNCACHE should be by blockId only. (cmccabe) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java index 4ebc2bb0c00..e978ddfd06c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java @@ -38,12 +38,15 @@ import java.net.InetSocketAddress; import java.net.URI; import java.net.URISyntaxException; import java.security.SecureRandom; +import java.text.SimpleDateFormat; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.Comparator; +import java.util.Date; import java.util.HashSet; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Random; import java.util.Set; @@ -1426,4 +1429,64 @@ public class DFSUtil { sslConf.get("ssl.server.truststore.password"), sslConf.get("ssl.server.truststore.type", "jks")); } -} \ No newline at end of file + + /** + * Converts a Date into an ISO-8601 formatted datetime string. + */ + public static String dateToIso8601String(Date date) { + SimpleDateFormat df = + new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ssZ", Locale.ENGLISH); + return df.format(date); + } + + /** + * Converts a time duration in milliseconds into DDD:HH:MM:SS format. 
+ */ + public static String durationToString(long durationMs) { + Preconditions.checkArgument(durationMs >= 0, "Invalid negative duration"); + // Chop off the milliseconds + long durationSec = durationMs / 1000; + final int secondsPerMinute = 60; + final int secondsPerHour = 60*60; + final int secondsPerDay = 60*60*24; + final long days = durationSec / secondsPerDay; + durationSec -= days * secondsPerDay; + final long hours = durationSec / secondsPerHour; + durationSec -= hours * secondsPerHour; + final long minutes = durationSec / secondsPerMinute; + durationSec -= minutes * secondsPerMinute; + final long seconds = durationSec; + return String.format("%03d:%02d:%02d:%02d", days, hours, minutes, seconds); + } + + /** + * Converts a relative time string into a duration in milliseconds. + */ + public static long parseRelativeTime(String relTime) throws IOException { + if (relTime.length() < 2) { + throw new IOException("Unable to parse relative time value of " + relTime + + ": too short"); + } + String ttlString = relTime.substring(0, relTime.length()-1); + int ttl; + try { + ttl = Integer.parseInt(ttlString); + } catch (NumberFormatException e) { + throw new IOException("Unable to parse relative time value of " + relTime + + ": " + ttlString + " is not a number"); + } + if (relTime.endsWith("s")) { + // pass + } else if (relTime.endsWith("m")) { + ttl *= 60; + } else if (relTime.endsWith("h")) { + ttl *= 60*60; + } else if (relTime.endsWith("d")) { + ttl *= 60*60*24; + } else { + throw new IOException("Unable to parse relative time value of " + relTime + + ": unknown time unit " + relTime.charAt(relTime.length() - 1)); + } + return ttl*1000; + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirective.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirective.java index 1fa1c289d7f..99024491c9c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirective.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirective.java @@ -17,9 +17,14 @@ */ package org.apache.hadoop.hdfs.protocol; +import static com.google.common.base.Preconditions.checkNotNull; + +import java.util.Date; + import org.apache.commons.lang.builder.HashCodeBuilder; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.server.namenode.CachePool; import org.apache.hadoop.util.IntrusiveCollection; import org.apache.hadoop.util.IntrusiveCollection.Element; @@ -27,7 +32,7 @@ import org.apache.hadoop.util.IntrusiveCollection.Element; import com.google.common.base.Preconditions; /** - * Represents an entry in the PathBasedCache on the NameNode. + * Namenode class that tracks state related to a cached path. * * This is an implementation class, not part of the public API. 
*/ @@ -37,6 +42,8 @@ public final class CacheDirective implements IntrusiveCollection.Element { private final String path; private final short replication; private CachePool pool; + private final long expiryTime; + private long bytesNeeded; private long bytesCached; private long filesAffected; @@ -44,13 +51,13 @@ public final class CacheDirective implements IntrusiveCollection.Element { private Element next; public CacheDirective(long id, String path, - short replication) { + short replication, long expiryTime) { Preconditions.checkArgument(id > 0); this.id = id; + this.path = checkNotNull(path); Preconditions.checkArgument(replication > 0); - this.path = path; this.replication = replication; - Preconditions.checkNotNull(path); + this.expiryTime = expiryTime; this.bytesNeeded = 0; this.bytesCached = 0; this.filesAffected = 0; @@ -64,20 +71,40 @@ public final class CacheDirective implements IntrusiveCollection.Element { return path; } - public CachePool getPool() { - return pool; - } - public short getReplication() { return replication; } + public CachePool getPool() { + return pool; + } + + /** + * @return When this directive expires, in milliseconds since Unix epoch + */ + public long getExpiryTime() { + return expiryTime; + } + + /** + * @return When this directive expires, as an ISO-8601 formatted string. + */ + public String getExpiryTimeString() { + return DFSUtil.dateToIso8601String(new Date(expiryTime)); + } + + /** + * Returns a {@link CacheDirectiveInfo} based on this CacheDirective. + *
<p>
    + * This always sets an absolute expiry time, never a relative TTL. + */ public CacheDirectiveInfo toInfo() { return new CacheDirectiveInfo.Builder(). setId(id). setPath(new Path(path)). setReplication(replication). setPool(pool.getPoolName()). + setExpiration(CacheDirectiveInfo.Expiration.newAbsolute(expiryTime)). build(); } @@ -86,6 +113,7 @@ public final class CacheDirective implements IntrusiveCollection.Element { setBytesNeeded(bytesNeeded). setBytesCached(bytesCached). setFilesAffected(filesAffected). + setHasExpired(new Date().getTime() > expiryTime). build(); } @@ -100,6 +128,7 @@ public final class CacheDirective implements IntrusiveCollection.Element { append(", path:").append(path). append(", replication:").append(replication). append(", pool:").append(pool). + append(", expiryTime: ").append(getExpiryTimeString()). append(", bytesNeeded:").append(bytesNeeded). append(", bytesCached:").append(bytesCached). append(", filesAffected:").append(filesAffected). diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveInfo.java index 620026c8e84..d7a911123e9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveInfo.java @@ -17,11 +17,14 @@ */ package org.apache.hadoop.hdfs.protocol; +import java.util.Date; + import org.apache.commons.lang.builder.EqualsBuilder; import org.apache.commons.lang.builder.HashCodeBuilder; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.DFSUtil; /** * Describes a path-based cache directive. @@ -37,6 +40,7 @@ public class CacheDirectiveInfo { private Path path; private Short replication; private String pool; + private Expiration expiration; /** * Builds a new CacheDirectiveInfo populated with the set properties. @@ -44,7 +48,7 @@ public class CacheDirectiveInfo { * @return New CacheDirectiveInfo. */ public CacheDirectiveInfo build() { - return new CacheDirectiveInfo(id, path, replication, pool); + return new CacheDirectiveInfo(id, path, replication, pool, expiration); } /** @@ -62,6 +66,7 @@ public class CacheDirectiveInfo { this.path = directive.getPath(); this.replication = directive.getReplication(); this.pool = directive.getPool(); + this.expiration = directive.getExpiration(); } /** @@ -107,18 +112,134 @@ public class CacheDirectiveInfo { this.pool = pool; return this; } + + /** + * Sets when the CacheDirective should expire. A + * {@link CacheDirectiveInfo.Expiration} can specify either an absolute or + * relative expiration time. + * + * @param expiration when this CacheDirective should expire + * @return This builder, for call chaining + */ + public Builder setExpiration(Expiration expiration) { + this.expiration = expiration; + return this; + } + } + + /** + * Denotes a relative or absolute expiration time for a CacheDirective. Use + * factory methods {@link CacheDirectiveInfo.Expiration#newAbsolute(Date)} and + * {@link CacheDirectiveInfo.Expiration#newRelative(long)} to create an + * Expiration. + *
<p>
    + * In either case, the server-side clock is used to determine when a + * CacheDirective expires. + */ + public static class Expiration { + + /** Denotes a CacheDirectiveInfo that never expires **/ + public static final int EXPIRY_NEVER = -1; + + /** + * Create a new relative Expiration. + * + * @param ms how long until the CacheDirective expires, in milliseconds + * @return A relative Expiration + */ + public static Expiration newRelative(long ms) { + return new Expiration(ms, true); + } + + /** + * Create a new absolute Expiration. + * + * @param date when the CacheDirective expires + * @return An absolute Expiration + */ + public static Expiration newAbsolute(Date date) { + return new Expiration(date.getTime(), false); + } + + /** + * Create a new absolute Expiration. + * + * @param ms when the CacheDirective expires, in milliseconds since the Unix + * epoch. + * @return An absolute Expiration + */ + public static Expiration newAbsolute(long ms) { + return new Expiration(ms, false); + } + + private final long ms; + private final boolean isRelative; + + private Expiration(long ms, boolean isRelative) { + this.ms = ms; + this.isRelative = isRelative; + } + + /** + * @return true if Expiration was specified as a relative duration, false if + * specified as an absolute time. + */ + public boolean isRelative() { + return isRelative; + } + + /** + * @return The raw underlying millisecond value, either a relative duration + * or an absolute time as milliseconds since the Unix epoch. + */ + public long getMillis() { + return ms; + } + + /** + * @return Expiration time as a {@link Date} object. This converts a + * relative Expiration into an absolute Date based on the local + * clock. + */ + public Date getAbsoluteDate() { + return new Date(getAbsoluteMillis()); + } + + /** + * @return Expiration time in milliseconds from the Unix epoch. This + * converts a relative Expiration into an absolute time based on the + * local clock. + */ + public long getAbsoluteMillis() { + if (!isRelative) { + return ms; + } else { + return new Date().getTime() + ms; + } + } + + @Override + public String toString() { + if (isRelative) { + return DFSUtil.durationToString(ms); + } + return DFSUtil.dateToIso8601String(new Date(ms)); + } } private final Long id; private final Path path; private final Short replication; private final String pool; + private final Expiration expiration; - CacheDirectiveInfo(Long id, Path path, Short replication, String pool) { + CacheDirectiveInfo(Long id, Path path, Short replication, String pool, + Expiration expiration) { this.id = id; this.path = path; this.replication = replication; this.pool = pool; + this.expiration = expiration; } /** @@ -148,7 +269,14 @@ public class CacheDirectiveInfo { public String getPool() { return pool; } - + + /** + * @return When this directive expires. + */ + public Expiration getExpiration() { + return expiration; + } + @Override public boolean equals(Object o) { if (o == null) { @@ -162,6 +290,7 @@ public class CacheDirectiveInfo { append(getPath(), other.getPath()). append(getReplication(), other.getReplication()). append(getPool(), other.getPool()). + append(getExpiration(), other.getExpiration()). isEquals(); } @@ -171,6 +300,7 @@ public class CacheDirectiveInfo { append(path). append(replication). append(pool). + append(expiration). 
hashCode(); } @@ -181,19 +311,23 @@ public class CacheDirectiveInfo { String prefix = ""; if (id != null) { builder.append(prefix).append("id: ").append(id); - prefix = ","; + prefix = ", "; } if (path != null) { builder.append(prefix).append("path: ").append(path); - prefix = ","; + prefix = ", "; } if (replication != null) { builder.append(prefix).append("replication: ").append(replication); - prefix = ","; + prefix = ", "; } if (pool != null) { builder.append(prefix).append("pool: ").append(pool); - prefix = ","; + prefix = ", "; + } + if (expiration != null) { + builder.append(prefix).append("expiration: ").append(expiration); + prefix = ", "; } builder.append("}"); return builder.toString(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveStats.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveStats.java index b0f58b51bac..b1c3ed48687 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveStats.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveStats.java @@ -30,6 +30,7 @@ public class CacheDirectiveStats { private long bytesNeeded; private long bytesCached; private long filesAffected; + private boolean hasExpired; /** * Builds a new CacheDirectiveStats populated with the set properties. @@ -37,7 +38,8 @@ public class CacheDirectiveStats { * @return New CacheDirectiveStats. */ public CacheDirectiveStats build() { - return new CacheDirectiveStats(bytesNeeded, bytesCached, filesAffected); + return new CacheDirectiveStats(bytesNeeded, bytesCached, filesAffected, + hasExpired); } /** @@ -52,7 +54,7 @@ public class CacheDirectiveStats { * @param bytesNeeded The bytes needed. * @return This builder, for call chaining. */ - public Builder setBytesNeeded(Long bytesNeeded) { + public Builder setBytesNeeded(long bytesNeeded) { this.bytesNeeded = bytesNeeded; return this; } @@ -63,7 +65,7 @@ public class CacheDirectiveStats { * @param bytesCached The bytes cached. * @return This builder, for call chaining. */ - public Builder setBytesCached(Long bytesCached) { + public Builder setBytesCached(long bytesCached) { this.bytesCached = bytesCached; return this; } @@ -74,21 +76,34 @@ public class CacheDirectiveStats { * @param filesAffected The files affected. * @return This builder, for call chaining. */ - public Builder setFilesAffected(Long filesAffected) { + public Builder setFilesAffected(long filesAffected) { this.filesAffected = filesAffected; return this; } + + /** + * Sets whether this directive has expired. + * + * @param hasExpired if this directive has expired + * @return This builder, for call chaining. + */ + public Builder setHasExpired(boolean hasExpired) { + this.hasExpired = hasExpired; + return this; + } } private final long bytesNeeded; private final long bytesCached; private final long filesAffected; + private final boolean hasExpired; private CacheDirectiveStats(long bytesNeeded, long bytesCached, - long filesAffected) { + long filesAffected, boolean hasExpired) { this.bytesNeeded = bytesNeeded; this.bytesCached = bytesCached; this.filesAffected = filesAffected; + this.hasExpired = hasExpired; } /** @@ -112,6 +127,13 @@ public class CacheDirectiveStats { return filesAffected; } + /** + * @return Whether this directive has expired. 
+ */ + public boolean hasExpired() { + return hasExpired; + } + @Override public String toString() { StringBuilder builder = new StringBuilder(); @@ -119,6 +141,7 @@ public class CacheDirectiveStats { builder.append("bytesNeeded: ").append(bytesNeeded); builder.append(", ").append("bytesCached: ").append(bytesCached); builder.append(", ").append("filesAffected: ").append(filesAffected); + builder.append(", ").append("hasExpired: ").append(hasExpired); builder.append("}"); return builder.toString(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java index 1aff12a605d..8f340b4bf43 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java @@ -61,6 +61,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto; @@ -1591,6 +1592,9 @@ public class PBHelper { if (info.getPool() != null) { builder.setPool(info.getPool()); } + if (info.getExpiration() != null) { + builder.setExpiration(convert(info.getExpiration())); + } return builder.build(); } @@ -1611,15 +1615,35 @@ public class PBHelper { if (proto.hasPool()) { builder.setPool(proto.getPool()); } + if (proto.hasExpiration()) { + builder.setExpiration(convert(proto.getExpiration())); + } return builder.build(); } - + + public static CacheDirectiveInfoExpirationProto convert( + CacheDirectiveInfo.Expiration expiration) { + return CacheDirectiveInfoExpirationProto.newBuilder() + .setIsRelative(expiration.isRelative()) + .setMillis(expiration.getMillis()) + .build(); + } + + public static CacheDirectiveInfo.Expiration convert( + CacheDirectiveInfoExpirationProto proto) { + if (proto.getIsRelative()) { + return CacheDirectiveInfo.Expiration.newRelative(proto.getMillis()); + } + return CacheDirectiveInfo.Expiration.newAbsolute(proto.getMillis()); + } + public static CacheDirectiveStatsProto convert(CacheDirectiveStats stats) { CacheDirectiveStatsProto.Builder builder = CacheDirectiveStatsProto.newBuilder(); builder.setBytesNeeded(stats.getBytesNeeded()); builder.setBytesCached(stats.getBytesCached()); builder.setFilesAffected(stats.getFilesAffected()); + builder.setHasExpired(stats.hasExpired()); return builder.build(); } @@ -1628,6 +1652,7 @@ public class PBHelper { builder.setBytesNeeded(proto.getBytesNeeded()); builder.setBytesCached(proto.getBytesCached()); builder.setFilesAffected(proto.getFilesAffected()); + builder.setHasExpired(proto.getHasExpired()); return builder.build(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java 
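As a quick sanity check of the wire format introduced above, the following sketch (illustrative only, not part of the patch) round-trips an Expiration through the new CacheDirectiveInfoExpirationProto using the PBHelper overloads added here; the proto carries only the raw millis plus the isRelative flag:

import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;

public class ExpirationPbRoundTrip {
  public static void main(String[] args) {
    CacheDirectiveInfo.Expiration original =
        CacheDirectiveInfo.Expiration.newRelative(30 * 60 * 1000L); // 30 minutes

    // Serialize to the protobuf representation.
    CacheDirectiveInfoExpirationProto proto = PBHelper.convert(original);

    // Deserialize: the isRelative flag selects newRelative() vs newAbsolute().
    CacheDirectiveInfo.Expiration copy = PBHelper.convert(proto);

    System.out.println(copy.isRelative() == original.isRelative()); // true
    System.out.println(copy.getMillis() == original.getMillis());   // true
  }
}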
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java index 86e71fb1c18..d0e35680d8c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java @@ -22,6 +22,7 @@ import static org.apache.hadoop.util.ExitUtil.terminate; import java.io.Closeable; import java.io.IOException; import java.util.Collection; +import java.util.Date; import java.util.Iterator; import java.util.LinkedList; import java.util.List; @@ -211,12 +212,24 @@ public class CacheReplicationMonitor extends Thread implements Closeable { */ private void rescanCacheDirectives() { FSDirectory fsDir = namesystem.getFSDirectory(); - for (CacheDirective pce : cacheManager.getEntriesById().values()) { + final long now = new Date().getTime(); + for (CacheDirective directive : cacheManager.getEntriesById().values()) { + // Reset the directive + directive.clearBytesNeeded(); + directive.clearBytesCached(); + directive.clearFilesAffected(); + // Skip processing this entry if it has expired + LOG.info("Directive expiry is at " + directive.getExpiryTime()); + if (directive.getExpiryTime() > 0 && directive.getExpiryTime() <= now) { + if (LOG.isDebugEnabled()) { + LOG.debug("Skipping directive id " + directive.getId() + + " because it has expired (" + directive.getExpiryTime() + ">=" + + now); + } + continue; + } scannedDirectives++; - pce.clearBytesNeeded(); - pce.clearBytesCached(); - pce.clearFilesAffected(); - String path = pce.getPath(); + String path = directive.getPath(); INode node; try { node = fsDir.getINode(path); @@ -233,11 +246,11 @@ public class CacheReplicationMonitor extends Thread implements Closeable { ReadOnlyList children = dir.getChildrenList(null); for (INode child : children) { if (child.isFile()) { - rescanFile(pce, child.asFile()); + rescanFile(directive, child.asFile()); } } } else if (node.isFile()) { - rescanFile(pce, node.asFile()); + rescanFile(directive, node.asFile()); } else { if (LOG.isDebugEnabled()) { LOG.debug("Ignoring non-directory, non-file inode " + node + diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java index af549c50b5a..a4712b6d104 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java @@ -17,12 +17,12 @@ */ package org.apache.hadoop.hdfs.server.namenode; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CACHING_ENABLED_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CACHING_ENABLED_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES_DEFAULT; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES; -import static 
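The expiry check in the rescan loop above reduces to a small predicate; the standalone sketch below restates it (assuming, as in the patch, that the stored expiry time is an absolute epoch-millis value and that "never expires" is stored as -1), which also makes clear why a directive with no expiration is never skipped:

import java.util.Date;

public final class ExpiryCheck {
  public static final long EXPIRY_NEVER = -1;

  /** True when an absolute expiry time is set and has already passed. */
  public static boolean hasExpired(long expiryTimeMs, long nowMs) {
    return expiryTimeMs > 0 && expiryTimeMs <= nowMs;
  }

  public static void main(String[] args) {
    long now = new Date().getTime();
    System.out.println(hasExpired(EXPIRY_NEVER, now)); // false: never expires
    System.out.println(hasExpired(now - 1000, now));   // true: already passed
    System.out.println(hasExpired(now + 1000, now));   // false: still valid
  }
}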
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CACHING_ENABLED_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CACHING_ENABLED_DEFAULT; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS_DEFAULT; @@ -43,18 +43,18 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.InvalidRequestException; import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries; +import org.apache.hadoop.fs.InvalidRequestException; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.hdfs.protocol.CacheDirective; import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry; +import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo; import org.apache.hadoop.hdfs.protocol.CachePoolEntry; import org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.LocatedBlock; -import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo; -import org.apache.hadoop.hdfs.protocol.CacheDirective; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor; @@ -249,7 +249,7 @@ public final class CacheManager { return cachedBlocks; } - private long getNextEntryId() throws IOException { + private long getNextDirectiveId() throws IOException { assert namesystem.hasWriteLock(); if (nextDirectiveId >= Long.MAX_VALUE - 1) { throw new IOException("No more available IDs."); @@ -301,6 +301,34 @@ public final class CacheManager { return repl; } + /** + * Calculates the absolute expiry time of the directive from the + * {@link CacheDirectiveInfo.Expiration}. This converts a relative Expiration + * into an absolute time based on the local clock. + * + * @param directive from which to get the expiry time + * @param defaultValue to use if Expiration is not set + * @return Absolute expiry time in milliseconds since Unix epoch + * @throws InvalidRequestException if the Expiration is invalid + */ + private static long validateExpiryTime(CacheDirectiveInfo directive, + long defaultValue) throws InvalidRequestException { + long expiryTime; + CacheDirectiveInfo.Expiration expiration = directive.getExpiration(); + if (expiration != null) { + if (expiration.getMillis() < 0) { + throw new InvalidRequestException("Cannot set a negative expiration: " + + expiration.getMillis()); + } + // Converts a relative duration into an absolute time based on the local + // clock + expiryTime = expiration.getAbsoluteMillis(); + } else { + expiryTime = defaultValue; + } + return expiryTime; + } + /** * Get a CacheDirective by ID, validating the ID and that the directive * exists. 
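For readers skimming the diff, here is a condensed restatement of the defaulting behaviour implemented by validateExpiryTime above (the class name is illustrative; the logic mirrors the patch): addDirective passes EXPIRY_NEVER as the default, while modifyDirective passes the directive's previous expiry, so omitting an expiration on a modify leaves it untouched.

import org.apache.hadoop.fs.InvalidRequestException;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;

final class ExpiryDefaulting {
  /**
   * Use the request's expiration if present (rejecting negatives),
   * otherwise fall back to the supplied default.
   */
  static long resolve(CacheDirectiveInfo request, long defaultValue)
      throws InvalidRequestException {
    CacheDirectiveInfo.Expiration expiration = request.getExpiration();
    if (expiration == null) {
      return defaultValue; // add: EXPIRY_NEVER; modify: previous expiry time
    }
    if (expiration.getMillis() < 0) {
      throw new InvalidRequestException(
          "Cannot set a negative expiration: " + expiration.getMillis());
    }
    // Relative durations are resolved against the local (NameNode) clock.
    return expiration.getAbsoluteMillis();
  }
}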
@@ -346,6 +374,26 @@ public final class CacheManager { directives.add(directive); } + /** + * To be called only from the edit log loading code + */ + CacheDirectiveInfo addDirectiveFromEditLog(CacheDirectiveInfo directive) + throws InvalidRequestException { + long id = directive.getId(); + CacheDirective entry = + new CacheDirective( + directive.getId(), + directive.getPath().toUri().getPath(), + directive.getReplication(), + directive.getExpiration().getAbsoluteMillis()); + CachePool pool = cachePools.get(directive.getPool()); + addInternal(entry, pool); + if (nextDirectiveId <= id) { + nextDirectiveId = id + 1; + } + return entry.toInfo(); + } + public CacheDirectiveInfo addDirective( CacheDirectiveInfo info, FSPermissionChecker pc) throws IOException { @@ -356,27 +404,12 @@ public final class CacheManager { checkWritePermission(pc, pool); String path = validatePath(info); short replication = validateReplication(info, (short)1); - long id; - if (info.getId() != null) { - // We are loading a directive from the edit log. - // Use the ID from the edit log. - id = info.getId(); - if (id <= 0) { - throw new InvalidRequestException("can't add an ID " + - "of " + id + ": it is not positive."); - } - if (id >= Long.MAX_VALUE) { - throw new InvalidRequestException("can't add an ID " + - "of " + id + ": it is too big."); - } - if (nextDirectiveId <= id) { - nextDirectiveId = id + 1; - } - } else { - // Add a new directive with the next available ID. - id = getNextEntryId(); - } - directive = new CacheDirective(id, path, replication); + long expiryTime = validateExpiryTime(info, + CacheDirectiveInfo.Expiration.EXPIRY_NEVER); + // All validation passed + // Add a new entry with the next available ID. + long id = getNextDirectiveId(); + directive = new CacheDirective(id, path, replication, expiryTime); addInternal(directive, pool); } catch (IOException e) { LOG.warn("addDirective of " + info + " failed: ", e); @@ -407,10 +440,13 @@ public final class CacheManager { if (info.getPath() != null) { path = validatePath(info); } + short replication = prevEntry.getReplication(); - if (info.getReplication() != null) { - replication = validateReplication(info, replication); - } + replication = validateReplication(info, replication); + + long expiryTime = prevEntry.getExpiryTime(); + expiryTime = validateExpiryTime(info, expiryTime); + CachePool pool = prevEntry.getPool(); if (info.getPool() != null) { pool = getCachePool(validatePoolName(info)); @@ -418,7 +454,7 @@ public final class CacheManager { } removeInternal(prevEntry); CacheDirective newEntry = - new CacheDirective(id, path, replication); + new CacheDirective(id, path, replication, expiryTime); addInternal(newEntry, pool); } catch (IOException e) { LOG.warn("modifyDirective of " + idString + " failed: ", e); @@ -788,6 +824,7 @@ public final class CacheManager { Text.writeString(out, directive.getPath()); out.writeShort(directive.getReplication()); Text.writeString(out, directive.getPool().getPoolName()); + out.writeLong(directive.getExpiryTime()); counter.increment(); } prog.endStep(Phase.SAVING_CHECKPOINT, step); @@ -826,6 +863,7 @@ public final class CacheManager { String path = Text.readString(in); short replication = in.readShort(); String poolName = Text.readString(in); + long expiryTime = in.readLong(); // Get pool reference by looking it up in the map CachePool pool = cachePools.get(poolName); if (pool == null) { @@ -833,7 +871,7 @@ public final class CacheManager { ", which does not exist."); } CacheDirective directive = - new 
CacheDirective(directiveId, path, replication); + new CacheDirective(directiveId, path, replication, expiryTime); boolean addedDirective = pool.getDirectiveList().add(directive); assert addedDirective; if (directivesById.put(directive.getId(), directive) != null) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java index a4d6baba3b2..e7019f4a52b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java @@ -953,7 +953,11 @@ public class FSEditLog implements LogsPurgeable { .setSnapshotRoot(path); logEdit(op); } - + + /** + * Log a CacheDirectiveInfo returned from + * {@link CacheManager#addDirective(CacheDirectiveInfo, FSPermissionChecker)} + */ void logAddCacheDirectiveInfo(CacheDirectiveInfo directive, boolean toLogRpcIds) { AddCacheDirectiveInfoOp op = diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java index 4a332836fe0..d9b67aab5b2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java @@ -636,17 +636,17 @@ public class FSEditLogLoader { fsNamesys.setLastAllocatedBlockId(allocateBlockIdOp.blockId); break; } - case OP_ADD_PATH_BASED_CACHE_DIRECTIVE: { + case OP_ADD_CACHE_DIRECTIVE: { AddCacheDirectiveInfoOp addOp = (AddCacheDirectiveInfoOp) op; CacheDirectiveInfo result = fsNamesys. 
- getCacheManager().addDirective(addOp.directive, null); + getCacheManager().addDirectiveFromEditLog(addOp.directive); if (toAddRetryCache) { Long id = result.getId(); fsNamesys.addCacheEntryWithPayload(op.rpcClientId, op.rpcCallId, id); } break; } - case OP_MODIFY_PATH_BASED_CACHE_DIRECTIVE: { + case OP_MODIFY_CACHE_DIRECTIVE: { ModifyCacheDirectiveInfoOp modifyOp = (ModifyCacheDirectiveInfoOp) op; fsNamesys.getCacheManager().modifyDirective( @@ -656,7 +656,7 @@ public class FSEditLogLoader { } break; } - case OP_REMOVE_PATH_BASED_CACHE_DIRECTIVE: { + case OP_REMOVE_CACHE_DIRECTIVE: { RemoveCacheDirectiveInfoOp removeOp = (RemoveCacheDirectiveInfoOp) op; fsNamesys.getCacheManager().removeDirective(removeOp.id, null); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java index 27d8b9eca44..5b81d3a7f7b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java @@ -18,9 +18,8 @@ package org.apache.hadoop.hdfs.server.namenode; import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_ADD; +import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_ADD_CACHE_DIRECTIVE; import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_ADD_CACHE_POOL; -import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_ADD_PATH_BASED_CACHE_DIRECTIVE; -import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_MODIFY_PATH_BASED_CACHE_DIRECTIVE; import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_ALLOCATE_BLOCK_ID; import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_ALLOW_SNAPSHOT; import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_CANCEL_DELEGATION_TOKEN; @@ -35,10 +34,11 @@ import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_END_LOG import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_GET_DELEGATION_TOKEN; import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_INVALID; import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_MKDIR; +import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_MODIFY_CACHE_DIRECTIVE; import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_MODIFY_CACHE_POOL; import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_REASSIGN_LEASE; +import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_REMOVE_CACHE_DIRECTIVE; import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_REMOVE_CACHE_POOL; -import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_REMOVE_PATH_BASED_CACHE_DIRECTIVE; import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_RENAME; import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_RENAME_OLD; import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_RENAME_SNAPSHOT; @@ -64,6 +64,7 @@ import java.io.EOFException; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; +import java.util.Date; import java.util.EnumMap; import java.util.List; import java.util.zip.CheckedInputStream; @@ -81,12 +82,12 @@ import org.apache.hadoop.fs.permission.PermissionStatus; import 
org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DeprecatedUTF8; import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo; import org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LayoutVersion; import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature; -import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.hdfs.util.XMLUtils; import org.apache.hadoop.hdfs.util.XMLUtils.InvalidXmlException; @@ -109,7 +110,6 @@ import org.xml.sax.helpers.AttributesImpl; import com.google.common.base.Joiner; import com.google.common.base.Preconditions; -import com.google.common.base.Strings; /** * Helper classes for reading the ops from an InputStream. @@ -165,11 +165,11 @@ public abstract class FSEditLogOp { inst.put(OP_RENAME_SNAPSHOT, new RenameSnapshotOp()); inst.put(OP_SET_GENSTAMP_V2, new SetGenstampV2Op()); inst.put(OP_ALLOCATE_BLOCK_ID, new AllocateBlockIdOp()); - inst.put(OP_ADD_PATH_BASED_CACHE_DIRECTIVE, + inst.put(OP_ADD_CACHE_DIRECTIVE, new AddCacheDirectiveInfoOp()); - inst.put(OP_MODIFY_PATH_BASED_CACHE_DIRECTIVE, + inst.put(OP_MODIFY_CACHE_DIRECTIVE, new ModifyCacheDirectiveInfoOp()); - inst.put(OP_REMOVE_PATH_BASED_CACHE_DIRECTIVE, + inst.put(OP_REMOVE_CACHE_DIRECTIVE, new RemoveCacheDirectiveInfoOp()); inst.put(OP_ADD_CACHE_POOL, new AddCachePoolOp()); inst.put(OP_MODIFY_CACHE_POOL, new ModifyCachePoolOp()); @@ -2874,12 +2874,12 @@ public abstract class FSEditLogOp { CacheDirectiveInfo directive; public AddCacheDirectiveInfoOp() { - super(OP_ADD_PATH_BASED_CACHE_DIRECTIVE); + super(OP_ADD_CACHE_DIRECTIVE); } static AddCacheDirectiveInfoOp getInstance(OpInstanceCache cache) { return (AddCacheDirectiveInfoOp) cache - .get(OP_ADD_PATH_BASED_CACHE_DIRECTIVE); + .get(OP_ADD_CACHE_DIRECTIVE); } public AddCacheDirectiveInfoOp setDirective( @@ -2889,6 +2889,7 @@ public abstract class FSEditLogOp { assert(directive.getPath() != null); assert(directive.getReplication() != null); assert(directive.getPool() != null); + assert(directive.getExpiration() != null); return this; } @@ -2898,11 +2899,13 @@ public abstract class FSEditLogOp { String path = FSImageSerialization.readString(in); short replication = FSImageSerialization.readShort(in); String pool = FSImageSerialization.readString(in); + long expiryTime = FSImageSerialization.readLong(in); directive = new CacheDirectiveInfo.Builder(). setId(id). setPath(new Path(path)). setReplication(replication). setPool(pool). + setExpiration(CacheDirectiveInfo.Expiration.newAbsolute(expiryTime)). 
build(); readRpcIds(in, logVersion); } @@ -2913,6 +2916,8 @@ public abstract class FSEditLogOp { FSImageSerialization.writeString(directive.getPath().toUri().getPath(), out); FSImageSerialization.writeShort(directive.getReplication(), out); FSImageSerialization.writeString(directive.getPool(), out); + FSImageSerialization.writeLong( + directive.getExpiration().getMillis(), out); writeRpcIds(rpcClientId, rpcCallId, out); } @@ -2925,6 +2930,8 @@ public abstract class FSEditLogOp { XMLUtils.addSaxString(contentHandler, "REPLICATION", Short.toString(directive.getReplication())); XMLUtils.addSaxString(contentHandler, "POOL", directive.getPool()); + XMLUtils.addSaxString(contentHandler, "EXPIRATION", + "" + directive.getExpiration().getMillis()); appendRpcIdsToXml(contentHandler, rpcClientId, rpcCallId); } @@ -2935,6 +2942,8 @@ public abstract class FSEditLogOp { setPath(new Path(st.getValue("PATH"))). setReplication(Short.parseShort(st.getValue("REPLICATION"))). setPool(st.getValue("POOL")). + setExpiration(CacheDirectiveInfo.Expiration.newAbsolute( + Long.parseLong(st.getValue("EXPIRATION")))). build(); readRpcIdsFromXml(st); } @@ -2946,7 +2955,8 @@ public abstract class FSEditLogOp { builder.append("id=" + directive.getId() + ","); builder.append("path=" + directive.getPath().toUri().getPath() + ","); builder.append("replication=" + directive.getReplication() + ","); - builder.append("pool=" + directive.getPool()); + builder.append("pool=" + directive.getPool() + ","); + builder.append("expiration=" + directive.getExpiration().getMillis()); appendRpcIdsToString(builder, rpcClientId, rpcCallId); builder.append("]"); return builder.toString(); @@ -2961,12 +2971,12 @@ public abstract class FSEditLogOp { CacheDirectiveInfo directive; public ModifyCacheDirectiveInfoOp() { - super(OP_MODIFY_PATH_BASED_CACHE_DIRECTIVE); + super(OP_MODIFY_CACHE_DIRECTIVE); } static ModifyCacheDirectiveInfoOp getInstance(OpInstanceCache cache) { return (ModifyCacheDirectiveInfoOp) cache - .get(OP_MODIFY_PATH_BASED_CACHE_DIRECTIVE); + .get(OP_MODIFY_CACHE_DIRECTIVE); } public ModifyCacheDirectiveInfoOp setDirective( @@ -2991,7 +3001,12 @@ public abstract class FSEditLogOp { if ((flags & 0x4) != 0) { builder.setPool(FSImageSerialization.readString(in)); } - if ((flags & ~0x7) != 0) { + if ((flags & 0x8) != 0) { + builder.setExpiration( + CacheDirectiveInfo.Expiration.newAbsolute( + FSImageSerialization.readLong(in))); + } + if ((flags & ~0xF) != 0) { throw new IOException("unknown flags set in " + "ModifyCacheDirectiveInfoOp: " + flags); } @@ -3005,7 +3020,8 @@ public abstract class FSEditLogOp { byte flags = (byte)( ((directive.getPath() != null) ? 0x1 : 0) | ((directive.getReplication() != null) ? 0x2 : 0) | - ((directive.getPool() != null) ? 0x4 : 0) + ((directive.getPool() != null) ? 0x4 : 0) | + ((directive.getExpiration() != null) ? 
0x8 : 0) ); out.writeByte(flags); if (directive.getPath() != null) { @@ -3018,6 +3034,10 @@ public abstract class FSEditLogOp { if (directive.getPool() != null) { FSImageSerialization.writeString(directive.getPool(), out); } + if (directive.getExpiration() != null) { + FSImageSerialization.writeLong(directive.getExpiration().getMillis(), + out); + } writeRpcIds(rpcClientId, rpcCallId, out); } @@ -3036,6 +3056,10 @@ public abstract class FSEditLogOp { if (directive.getPool() != null) { XMLUtils.addSaxString(contentHandler, "POOL", directive.getPool()); } + if (directive.getExpiration() != null) { + XMLUtils.addSaxString(contentHandler, "EXPIRATION", + "" + directive.getExpiration().getMillis()); + } appendRpcIdsToXml(contentHandler, rpcClientId, rpcCallId); } @@ -3056,6 +3080,11 @@ public abstract class FSEditLogOp { if (pool != null) { builder.setPool(pool); } + String expiryTime = st.getValueOrNull("EXPIRATION"); + if (expiryTime != null) { + builder.setExpiration(CacheDirectiveInfo.Expiration.newAbsolute( + Long.parseLong(expiryTime))); + } this.directive = builder.build(); readRpcIdsFromXml(st); } @@ -3075,6 +3104,10 @@ public abstract class FSEditLogOp { if (directive.getPool() != null) { builder.append(",").append("pool=").append(directive.getPool()); } + if (directive.getExpiration() != null) { + builder.append(",").append("expiration="). + append(directive.getExpiration().getMillis()); + } appendRpcIdsToString(builder, rpcClientId, rpcCallId); builder.append("]"); return builder.toString(); @@ -3089,12 +3122,12 @@ public abstract class FSEditLogOp { long id; public RemoveCacheDirectiveInfoOp() { - super(OP_REMOVE_PATH_BASED_CACHE_DIRECTIVE); + super(OP_REMOVE_CACHE_DIRECTIVE); } static RemoveCacheDirectiveInfoOp getInstance(OpInstanceCache cache) { return (RemoveCacheDirectiveInfoOp) cache - .get(OP_REMOVE_PATH_BASED_CACHE_DIRECTIVE); + .get(OP_REMOVE_CACHE_DIRECTIVE); } public RemoveCacheDirectiveInfoOp setId(long id) { @@ -3162,7 +3195,7 @@ public abstract class FSEditLogOp { @Override public void writeFields(DataOutputStream out) throws IOException { - info .writeTo(out); + info.writeTo(out); writeRpcIds(rpcClientId, rpcCallId, out); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java index bf27709e4d2..26252c198c0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java @@ -64,12 +64,12 @@ public enum FSEditLogOpCodes { OP_DISALLOW_SNAPSHOT ((byte) 30), OP_SET_GENSTAMP_V2 ((byte) 31), OP_ALLOCATE_BLOCK_ID ((byte) 32), - OP_ADD_PATH_BASED_CACHE_DIRECTIVE ((byte) 33), - OP_REMOVE_PATH_BASED_CACHE_DIRECTIVE ((byte) 34), + OP_ADD_CACHE_DIRECTIVE ((byte) 33), + OP_REMOVE_CACHE_DIRECTIVE ((byte) 34), OP_ADD_CACHE_POOL ((byte) 35), OP_MODIFY_CACHE_POOL ((byte) 36), OP_REMOVE_CACHE_POOL ((byte) 37), - OP_MODIFY_PATH_BASED_CACHE_DIRECTIVE ((byte) 38); + OP_MODIFY_CACHE_DIRECTIVE ((byte) 38); private byte opCode; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java index d814fa560bd..7bfd90b9522 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java 
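The modify op records which optional fields follow using a one-byte bitmask; a standalone sketch of that encoding (the helper names are illustrative, the bit values match the patch):

import java.io.IOException;

final class ModifyOpFlags {
  static final byte PATH        = 0x1;
  static final byte REPLICATION = 0x2;
  static final byte POOL        = 0x4;
  static final byte EXPIRATION  = 0x8;

  /** Build the flags byte from which optional fields are present. */
  static byte encode(boolean hasPath, boolean hasReplication,
                     boolean hasPool, boolean hasExpiration) {
    return (byte) ((hasPath ? PATH : 0)
        | (hasReplication ? REPLICATION : 0)
        | (hasPool ? POOL : 0)
        | (hasExpiration ? EXPIRATION : 0));
  }

  /** Any bit above 0x8 is unknown to this layout version. */
  static void validate(byte flags) throws IOException {
    if ((flags & ~0xF) != 0) {
      throw new IOException(
          "unknown flags set in ModifyCacheDirectiveInfoOp: " + flags);
    }
  }
}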
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java @@ -29,12 +29,13 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry; +import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo; import org.apache.hadoop.hdfs.protocol.CacheDirectiveStats; import org.apache.hadoop.hdfs.protocol.CachePoolEntry; import org.apache.hadoop.hdfs.protocol.CachePoolInfo; -import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo; import org.apache.hadoop.hdfs.server.namenode.CachePool; import org.apache.hadoop.hdfs.tools.TableListing.Justification; import org.apache.hadoop.ipc.RemoteException; @@ -132,7 +133,8 @@ public class CacheAdmin extends Configured implements Tool { @Override public String getShortUsage() { return "[" + getName() + - " -path -replication -pool ]\n"; + " -path -pool " + + "[-replication ] [-ttl ]]\n"; } @Override @@ -140,11 +142,15 @@ public class CacheAdmin extends Configured implements Tool { TableListing listing = getOptionDescriptionListing(); listing.addRow("", "A path to cache. The path can be " + "a directory or a file."); - listing.addRow("", "The cache replication factor to use. " + - "Defaults to 1."); listing.addRow("", "The pool to which the directive will be " + "added. You must have write permission on the cache pool " + "in order to add new directives."); + listing.addRow("", "The cache replication factor to use. " + + "Defaults to 1."); + listing.addRow("", "How long the directive is " + + "valid. Can be specified in minutes, hours, and days via e.g. " + + "30m, 4h, 2d. Valid units are [smhd]." 
+ + " If unspecified, the directive never expires."); return getShortUsage() + "\n" + "Add a new cache directive.\n\n" + listing.toString(); @@ -152,33 +158,48 @@ public class CacheAdmin extends Configured implements Tool { @Override public int run(Configuration conf, List args) throws IOException { + CacheDirectiveInfo.Builder builder = new CacheDirectiveInfo.Builder(); + String path = StringUtils.popOptionWithArgument("-path", args); if (path == null) { System.err.println("You must specify a path with -path."); return 1; } - short replication = 1; - String replicationString = - StringUtils.popOptionWithArgument("-replication", args); - if (replicationString != null) { - replication = Short.parseShort(replicationString); - } + builder.setPath(new Path(path)); + String poolName = StringUtils.popOptionWithArgument("-pool", args); if (poolName == null) { System.err.println("You must specify a pool name with -pool."); return 1; } + builder.setPool(poolName); + + String replicationString = + StringUtils.popOptionWithArgument("-replication", args); + if (replicationString != null) { + Short replication = Short.parseShort(replicationString); + builder.setReplication(replication); + } + + String ttlString = StringUtils.popOptionWithArgument("-ttl", args); + if (ttlString != null) { + try { + long ttl = DFSUtil.parseRelativeTime(ttlString); + builder.setExpiration(CacheDirectiveInfo.Expiration.newRelative(ttl)); + } catch (IOException e) { + System.err.println( + "Error while parsing ttl value: " + e.getMessage()); + return 1; + } + } + if (!args.isEmpty()) { System.err.println("Can't understand argument: " + args.get(0)); return 1; } DistributedFileSystem dfs = getDFS(conf); - CacheDirectiveInfo directive = new CacheDirectiveInfo.Builder(). - setPath(new Path(path)). - setReplication(replication). - setPool(poolName). - build(); + CacheDirectiveInfo directive = builder.build(); try { long id = dfs.addCacheDirective(directive); System.out.println("Added cache directive " + id); @@ -261,7 +282,7 @@ public class CacheAdmin extends Configured implements Tool { public String getShortUsage() { return "[" + getName() + " -id [-path ] [-replication ] " + - "[-pool ] ]\n"; + "[-pool ] [-ttl ]]\n"; } @Override @@ -275,6 +296,10 @@ public class CacheAdmin extends Configured implements Tool { listing.addRow("", "The pool to which the directive will be " + "added. You must have write permission on the cache pool " + "in order to move a directive into it. (optional)"); + listing.addRow("", "How long the directive is " + + "valid. Can be specified in minutes, hours, and days via e.g. " + + "30m, 4h, 2d. Valid units are [smhd]." 
+ + " If unspecified, the directive never expires."); return getShortUsage() + "\n" + "Modify a cache directive.\n\n" + listing.toString(); @@ -308,6 +333,19 @@ public class CacheAdmin extends Configured implements Tool { builder.setPool(poolName); modified = true; } + String ttlString = StringUtils.popOptionWithArgument("-ttl", args); + if (ttlString != null) { + long ttl; + try { + ttl = DFSUtil.parseRelativeTime(ttlString); + } catch (IOException e) { + System.err.println( + "Error while parsing ttl value: " + e.getMessage()); + return 1; + } + builder.setExpiration(CacheDirectiveInfo.Expiration.newRelative(ttl)); + modified = true; + } if (!args.isEmpty()) { System.err.println("Can't understand argument: " + args.get(0)); System.err.println("Usage is " + getShortUsage()); @@ -435,7 +473,8 @@ public class CacheAdmin extends Configured implements Tool { TableListing.Builder tableBuilder = new TableListing.Builder(). addField("ID", Justification.RIGHT). addField("POOL", Justification.LEFT). - addField("REPLICATION", Justification.RIGHT). + addField("REPL", Justification.RIGHT). + addField("EXPIRY", Justification.LEFT). addField("PATH", Justification.LEFT); if (printStats) { tableBuilder.addField("NEEDED", Justification.RIGHT). @@ -456,6 +495,14 @@ public class CacheAdmin extends Configured implements Tool { row.add("" + directive.getId()); row.add(directive.getPool()); row.add("" + directive.getReplication()); + String expiry; + if (directive.getExpiration().getMillis() == + CacheDirectiveInfo.Expiration.EXPIRY_NEVER) { + expiry = "never"; + } else { + expiry = directive.getExpiration().toString(); + } + row.add(expiry); row.add(directive.getPath().toUri().getPath()); if (printStats) { row.add("" + stats.getBytesNeeded()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto index a2b1b735732..8fbd31571db 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto @@ -368,12 +368,19 @@ message CacheDirectiveInfoProto { optional string path = 2; optional uint32 replication = 3; optional string pool = 4; + optional CacheDirectiveInfoExpirationProto expiration = 5; +} + +message CacheDirectiveInfoExpirationProto { + required int64 millis = 1; + required bool isRelative = 2; } message CacheDirectiveStatsProto { required int64 bytesNeeded = 1; required int64 bytesCached = 2; required int64 filesAffected = 3; + required bool hasExpired = 4; } message AddCacheDirectiveRequestProto { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java index e3d0e0213f9..9ddf8314281 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java @@ -31,6 +31,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_A import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICES; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICE_ID; +import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains; import static org.hamcrest.CoreMatchers.not; import static 
org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; @@ -62,6 +63,7 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Shell; import org.junit.Assume; import org.junit.Before; @@ -724,4 +726,43 @@ public class TestDFSUtil { DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY, DFSUtil.getSpnegoKeytabKey(conf, defaultKey)); } + + @Test(timeout=1000) + public void testDurationToString() throws Exception { + assertEquals("000:00:00:00", DFSUtil.durationToString(0)); + try { + DFSUtil.durationToString(-199); + } catch (IllegalArgumentException e) { + GenericTestUtils.assertExceptionContains("Invalid negative duration", e); + } + assertEquals("001:01:01:01", + DFSUtil.durationToString(((24*60*60)+(60*60)+(60)+1)*1000)); + assertEquals("000:23:59:59", + DFSUtil.durationToString(((23*60*60)+(59*60)+(59))*1000)); + } + + @Test(timeout=5000) + public void testRelativeTimeConversion() throws Exception { + try { + DFSUtil.parseRelativeTime("1"); + } catch (IOException e) { + assertExceptionContains("too short", e); + } + try { + DFSUtil.parseRelativeTime("1z"); + } catch (IOException e) { + assertExceptionContains("unknown time unit", e); + } + try { + DFSUtil.parseRelativeTime("yyz"); + } catch (IOException e) { + assertExceptionContains("is not a number", e); + } + assertEquals(61*1000, DFSUtil.parseRelativeTime("61s")); + assertEquals(61*60*1000, DFSUtil.parseRelativeTime("61m")); + assertEquals(0, DFSUtil.parseRelativeTime("0s")); + assertEquals(25*60*60*1000, DFSUtil.parseRelativeTime("25h")); + assertEquals(4*24*60*60*1000, DFSUtil.parseRelativeTime("4d")); + assertEquals(999*24*60*60*1000, DFSUtil.parseRelativeTime("999d")); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java index 2bcfe1965d3..6fd7881a94b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java @@ -33,10 +33,12 @@ import static org.junit.Assert.fail; import java.io.IOException; import java.security.PrivilegedExceptionAction; import java.util.ArrayList; +import java.util.Date; import java.util.Iterator; import java.util.LinkedList; import java.util.List; +import org.apache.commons.lang.time.DateUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -54,13 +56,13 @@ import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry; +import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo; import org.apache.hadoop.hdfs.protocol.CacheDirectiveStats; import org.apache.hadoop.hdfs.protocol.CachePoolEntry; import org.apache.hadoop.hdfs.protocol.CachePoolInfo; -import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo; +import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo.Expiration; import 
org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList.Type; -import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream; import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; import org.apache.hadoop.io.nativeio.NativeIO; import org.apache.hadoop.io.nativeio.NativeIO.POSIX.CacheManipulator; @@ -521,10 +523,14 @@ public class TestCacheDirectives { int numEntries = 10; String entryPrefix = "/party-"; long prevId = -1; + final Date expiry = new Date(); for (int i=0; i dit = dfs.listCacheDirectives(null); @@ -558,6 +564,7 @@ public class TestCacheDirectives { assertEquals(i+1, cd.getId().longValue()); assertEquals(entryPrefix + i, cd.getPath().toUri().getPath()); assertEquals(pool, cd.getPool()); + assertEquals(expiry.getTime(), cd.getExpiration().getMillis()); } assertFalse("Unexpected # of cache directives found", dit.hasNext()); @@ -1001,4 +1008,58 @@ public class TestCacheDirectives { info.getMode().toShort()); assertEquals("Mismatched weight", 99, (int)info.getWeight()); } + + @Test(timeout=60000) + public void testExpiry() throws Exception { + HdfsConfiguration conf = createCachingConf(); + MiniDFSCluster cluster = + new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATANODES).build(); + try { + DistributedFileSystem dfs = cluster.getFileSystem(); + String pool = "pool1"; + dfs.addCachePool(new CachePoolInfo(pool)); + Path p = new Path("/mypath"); + DFSTestUtil.createFile(dfs, p, BLOCK_SIZE*2, (short)2, 0x999); + // Expire after test timeout + Date start = new Date(); + Date expiry = DateUtils.addSeconds(start, 120); + final long id = dfs.addCacheDirective(new CacheDirectiveInfo.Builder() + .setPath(p) + .setPool(pool) + .setExpiration(CacheDirectiveInfo.Expiration.newAbsolute(expiry)) + .setReplication((short)2) + .build()); + waitForCachedBlocks(cluster.getNameNode(), 2, 4, "testExpiry:1"); + // Change it to expire sooner + dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder().setId(id) + .setExpiration(Expiration.newRelative(0)).build()); + waitForCachedBlocks(cluster.getNameNode(), 0, 0, "testExpiry:2"); + RemoteIterator it = dfs.listCacheDirectives(null); + CacheDirectiveEntry ent = it.next(); + assertFalse(it.hasNext()); + Date entryExpiry = new Date(ent.getInfo().getExpiration().getMillis()); + assertTrue("Directive should have expired", + entryExpiry.before(new Date())); + // Change it back to expire later + dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder().setId(id) + .setExpiration(Expiration.newRelative(120000)).build()); + waitForCachedBlocks(cluster.getNameNode(), 2, 4, "testExpiry:3"); + it = dfs.listCacheDirectives(null); + ent = it.next(); + assertFalse(it.hasNext()); + entryExpiry = new Date(ent.getInfo().getExpiration().getMillis()); + assertTrue("Directive should not have expired", + entryExpiry.after(new Date())); + // Verify that setting a negative TTL throws an error + try { + dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder().setId(id) + .setExpiration(Expiration.newRelative(-1)).build()); + } catch (InvalidRequestException e) { + GenericTestUtils + .assertExceptionContains("Cannot set a negative expiration", e); + } + } finally { + cluster.shutdown(); + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored index c1300fee9fae991d9d589c3192628cdc8c5378f8..3f37c932bc0e7a88bb56bef94c11f60fd5648ea1 100644 GIT binary patch delta 
1476 zcmZ3k@21YPFS(ZhSQDyR6W=R7h0}E4QGh-w3ctZwGLnCuN10XbH5Kvpe zE+z59>D=17(oB`fPhx=Pvn8u+zA*7U#H`s!W+|`~1t}h%?9Heb4l;f*R^1%)-`>2* z1a&7n1Gj!!W=?8+Zhl#63e>1ISoLvUTUV(&`2wRF*bCdCYPV0ED9rly+^Uoplckwj zC9t@bFZ9;pTa$M%87pFuM!YS=3z!xUJ&k}vpfc4T2?Q~?IzWj>(O7#Xm-NW#hW=!MBytY8;i21Yz6 zN-v9&=OQUNn{CdUdD!w8L7{sY8WTX%+SxN$o?qWBJoz&_IN&d5K`qK6&m#4}mehvL z6F8zE7R`g3_MJ0><(#k^_vBF)(mWkc_~GyC*Ng+jv$=Rtz3_u;& zG&(h`O!_rhhEHAyRXT^oR<;b&B(~Ws-KitggwK!X`A`xC_$Av!n9yD-M!&bA!GIAIDRo~?)UhbeR=8R z9)3e`;NO4-{tfa{g(vHmy3>pm=c4yq`C?E( zPX=%i$&FM*PTnXeGr3QYk9W@Dh#pR;I#6_#W`7qsZ|1t zYX$6%rB9f=lgSvHq~M3b_!*N~m^H9T3hht#6q)S84EAU)(8-`c%Vi=j(1gwXFXc{t z4mFG$*)U!P`LFv~pdlhSnUPU^ax%+P6_8~?Kv6~ptl=T`#Oh7jyn`cCX?qn znOvU^na%ucd5oaY%?pE@*2SK|vOV9ZY4SIAaKPt5BPlP1Jc~3V?Az9Cp286Yv1lvA zBA{u%IWt%;o!xIgIh_k)(SE4Ze)23bo%Lw4*k)F4h(-CPaMN;lGFUbyw|gU3}K z=-GT{tikIbqO~V;vJI~VC_wTY1#_CohDmyNUh9y~#m*y4Y;=?sUGcKY2QKNuU0= zar~3-@TnwY^_*{=#Zpr>Xo_=y(m=-oo%{d)|9?@n54R~nMIB+XSPk;akTv)=xq%;R znE3ya*{?r&9ls$c2n&GbfbvfP19|c3HFN5Xr<214CQV)^z{PUQ>&Lgr-v#6u!DSa0 zqU@TypI>HjxFDb5gm;Rj@=&*XL1^YLhibQbi9rRu8F)Qh+=Ao%5_41i@>5b_Iz{Cw tHpp>875PAE2B540$Qw=H;NDm+sLc3f;zSX_-r}!c#Gwj()sC`R0|4YY$?E_B diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml index ed2f2b74aca..fd2ba09e368 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml @@ -13,8 +13,8 @@ 2 1 - 1381946377599 - 4f37c0db7342fb35 + 1386314182272 + 39c9f9e9c1e88712 @@ -24,8 +24,8 @@ 3 2 - 1381946377609 - 471d4ddd00402ba6 + 1386314182280 + 2b35e969a178a2f6 @@ -37,18 +37,18 @@ 16386 /file_create_u\0001;F431 1 - 1381255179312 - 1381255179312 + 1385622983286 + 1385622983286 512 - DFSClient_NONMAPREDUCE_-134124999_1 + DFSClient_NONMAPREDUCE_-1208536327_1 127.0.0.1 andrew supergroup 420 - 27ac79f0-d378-4933-824b-c2a188968d97 - 8 + 26a8071a-18f8-42ce-ad7e-75692493e45c + 6 @@ -59,8 +59,8 @@ 0 /file_create_u\0001;F431 1 - 1381255179355 - 1381255179312 + 1385622983323 + 1385622983286 512 @@ -78,9 +78,9 @@ 0 /file_create_u\0001;F431 /file_moved - 1381255179373 - 27ac79f0-d378-4933-824b-c2a188968d97 - 10 + 1385622983331 + 26a8071a-18f8-42ce-ad7e-75692493e45c + 8 @@ -89,9 +89,9 @@ 7 0 /file_moved - 1381255179397 - 27ac79f0-d378-4933-824b-c2a188968d97 - 11 + 1385622983340 + 26a8071a-18f8-42ce-ad7e-75692493e45c + 9 @@ -101,7 +101,7 @@ 0 16387 /directory_mkdir - 1381255179424 + 1385622983351 andrew supergroup @@ -136,8 +136,8 @@ 12 /directory_mkdir snapshot1 - 27ac79f0-d378-4933-824b-c2a188968d97 - 16 + 26a8071a-18f8-42ce-ad7e-75692493e45c + 14 @@ -147,8 +147,8 @@ /directory_mkdir snapshot1 snapshot2 - 27ac79f0-d378-4933-824b-c2a188968d97 - 17 + 26a8071a-18f8-42ce-ad7e-75692493e45c + 15 @@ -157,8 +157,8 @@ 14 /directory_mkdir snapshot2 - 27ac79f0-d378-4933-824b-c2a188968d97 - 18 + 26a8071a-18f8-42ce-ad7e-75692493e45c + 16 @@ -169,18 +169,18 @@ 16388 /file_create_u\0001;F431 1 - 1381255179522 - 1381255179522 + 1385622983397 + 1385622983397 512 - DFSClient_NONMAPREDUCE_-134124999_1 + DFSClient_NONMAPREDUCE_-1208536327_1 127.0.0.1 andrew supergroup 420 - 27ac79f0-d378-4933-824b-c2a188968d97 - 19 + 26a8071a-18f8-42ce-ad7e-75692493e45c + 17 @@ -191,8 +191,8 @@ 0 /file_create_u\0001;F431 1 - 1381255179531 - 1381255179522 + 1385622983402 + 1385622983397 512 @@ -253,10 +253,10 @@ 0 /file_create_u\0001;F431 /file_moved - 1381255179602 + 1385622983438 NONE - 27ac79f0-d378-4933-824b-c2a188968d97 - 26 + 
26a8071a-18f8-42ce-ad7e-75692493e45c + 24 @@ -267,18 +267,18 @@ 16389 /file_concat_target 1 - 1381255179619 - 1381255179619 + 1385622983445 + 1385622983445 512 - DFSClient_NONMAPREDUCE_-134124999_1 + DFSClient_NONMAPREDUCE_-1208536327_1 127.0.0.1 andrew supergroup 420 - 27ac79f0-d378-4933-824b-c2a188968d97 - 28 + 26a8071a-18f8-42ce-ad7e-75692493e45c + 26 @@ -388,8 +388,8 @@ 0 /file_concat_target 1 - 1381255179862 - 1381255179619 + 1385622983524 + 1385622983445 512 @@ -423,18 +423,18 @@ 16390 /file_concat_0 1 - 1381255179876 - 1381255179876 + 1385622983530 + 1385622983530 512 - DFSClient_NONMAPREDUCE_-134124999_1 + DFSClient_NONMAPREDUCE_-1208536327_1 127.0.0.1 andrew supergroup 420 - 27ac79f0-d378-4933-824b-c2a188968d97 - 41 + 26a8071a-18f8-42ce-ad7e-75692493e45c + 39 @@ -544,8 +544,8 @@ 0 /file_concat_0 1 - 1381255179957 - 1381255179876 + 1385622983582 + 1385622983530 512 @@ -579,18 +579,18 @@ 16391 /file_concat_1 1 - 1381255179967 - 1381255179967 + 1385622983593 + 1385622983593 512 - DFSClient_NONMAPREDUCE_-134124999_1 + DFSClient_NONMAPREDUCE_-1208536327_1 127.0.0.1 andrew supergroup 420 - 27ac79f0-d378-4933-824b-c2a188968d97 - 53 + 26a8071a-18f8-42ce-ad7e-75692493e45c + 51 @@ -700,8 +700,8 @@ 0 /file_concat_1 1 - 1381255180085 - 1381255179967 + 1385622983655 + 1385622983593 512 @@ -733,13 +733,13 @@ 56 0 /file_concat_target - 1381255180099 + 1385622983667 /file_concat_0 /file_concat_1 - 27ac79f0-d378-4933-824b-c2a188968d97 - 64 + 26a8071a-18f8-42ce-ad7e-75692493e45c + 62 @@ -750,15 +750,15 @@ 16392 /file_symlink /file_concat_target - 1381255180116 - 1381255180116 + 1385622983683 + 1385622983683 andrew supergroup 511 - 27ac79f0-d378-4933-824b-c2a188968d97 - 65 + 26a8071a-18f8-42ce-ad7e-75692493e45c + 63 @@ -771,11 +771,11 @@ andrew JobTracker - 1381255180128 - 1381859980128 + 1385622983698 + 1386227783698 2 - 1381341580128 + 1385709383698 @@ -788,11 +788,11 @@ andrew JobTracker - 1381255180128 - 1381859980128 + 1385622983698 + 1386227783698 2 - 1381341580177 + 1385709383746 @@ -805,8 +805,8 @@ andrew JobTracker - 1381255180128 - 1381859980128 + 1385622983698 + 1386227783698 2 @@ -822,8 +822,8 @@ 493 100 - 27ac79f0-d378-4933-824b-c2a188968d97 - 75 + 26a8071a-18f8-42ce-ad7e-75692493e45c + 67 @@ -835,39 +835,40 @@ party 448 1989 - 27ac79f0-d378-4933-824b-c2a188968d97 - 76 + 26a8071a-18f8-42ce-ad7e-75692493e45c + 68 - OP_ADD_PATH_BASED_CACHE_DIRECTIVE + OP_ADD_CACHE_DIRECTIVE 63 1 /bar 1 poolparty - 27ac79f0-d378-4933-824b-c2a188968d97 - 77 + -1 + 26a8071a-18f8-42ce-ad7e-75692493e45c + 69 - OP_MODIFY_PATH_BASED_CACHE_DIRECTIVE + OP_MODIFY_CACHE_DIRECTIVE 64 1 - 2 - - -2 + /bar2 + 26a8071a-18f8-42ce-ad7e-75692493e45c + 70 - OP_REMOVE_PATH_BASED_CACHE_DIRECTIVE + OP_REMOVE_CACHE_DIRECTIVE 65 1 - 27ac79f0-d378-4933-824b-c2a188968d97 - 78 + 26a8071a-18f8-42ce-ad7e-75692493e45c + 71 @@ -875,8 +876,8 @@ 66 poolparty - 27ac79f0-d378-4933-824b-c2a188968d97 - 79 + 26a8071a-18f8-42ce-ad7e-75692493e45c + 72 @@ -887,18 +888,18 @@ 16393 /hard-lease-recovery-test 1 - 1381255180288 - 1381255180288 + 1385622983896 + 1385622983896 512 - DFSClient_NONMAPREDUCE_-134124999_1 + DFSClient_NONMAPREDUCE_-1208536327_1 127.0.0.1 andrew supergroup 420 - 27ac79f0-d378-4933-824b-c2a188968d97 - 74 + 26a8071a-18f8-42ce-ad7e-75692493e45c + 73 @@ -936,7 +937,7 @@ /hard-lease-recovery-test 1073741834 - 0 + 11 1010 @@ -954,23 +955,7 @@ OP_REASSIGN_LEASE 73 - DFSClient_NONMAPREDUCE_-134124999_1 - /hard-lease-recovery-test - HDFS_NameNode - - - - OP_SET_GENSTAMP_V2 - - 74 - 1012 - - - - OP_REASSIGN_LEASE - - 75 - HDFS_NameNode 
+ DFSClient_NONMAPREDUCE_-1208536327_1 /hard-lease-recovery-test HDFS_NameNode @@ -978,20 +963,20 @@ OP_CLOSE - 76 + 74 0 0 /hard-lease-recovery-test 1 - 1381255185142 - 1381255180288 + 1385622986265 + 1385622983896 512 1073741834 11 - 1012 + 1011 andrew @@ -1003,7 +988,7 @@ OP_END_LOG_SEGMENT - 77 + 75 diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCacheAdminConf.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCacheAdminConf.xml index a6828b0bb35..20584927d52 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCacheAdminConf.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCacheAdminConf.xml @@ -165,9 +165,9 @@ Testing creating cache paths -addPool pool1 - -addDirective -path /foo -pool pool1 - -addDirective -path /bar -pool pool1 - -addDirective -path /baz -replication 2 -pool pool1 + -addDirective -path /foo -pool pool1 -ttl 2d + -addDirective -path /bar -pool pool1 -ttl 24h + -addDirective -path /baz -replication 2 -pool pool1 -ttl 60m -listDirectives -pool pool1 @@ -180,15 +180,15 @@ SubstringComparator - 1 pool1 1 /foo + 1 pool1 1 SubstringComparator - 2 pool1 1 /bar + 2 pool1 1 SubstringComparator - 3 pool1 2 /baz + 3 pool1 2 @@ -234,11 +234,11 @@ SubstringComparator - 8 pool2 1 /baz + 8 pool2 1 never /baz SubstringComparator - 9 pool2 1 /buz + 9 pool2 1 never /buz @@ -265,11 +265,11 @@ SubstringComparator - 10 pool1 1 /foo + 10 pool1 1 never /foo SubstringComparator - 12 pool2 1 /foo + 12 pool2 1 never /foo @@ -296,7 +296,7 @@ SubstringComparator - 16 pool2 1 /foo + 16 pool2 1 never /foo @@ -320,7 +320,7 @@ SubstringComparator - 19 pool1 1 /bar + 19 pool1 1 never /bar @@ -349,11 +349,11 @@ SubstringComparator - 22 pool1 1 /bar + 22 pool1 1 never /bar SubstringComparator - 24 pool2 1 /bar + 24 pool2 1 never /bar @@ -379,7 +379,7 @@ SubstringComparator - 25 pool1 1 /bar3 + 25 pool1 1 never /bar3 From 7545d8bf996e5d060d5d613bf769ec52a62d428b Mon Sep 17 00:00:00 2001 From: Sanford Ryza Date: Fri, 29 Nov 2013 19:02:41 +0000 Subject: [PATCH 22/27] YARN-1241. In Fair Scheduler, maxRunningApps does not work for non-leaf queues. (Sandy Ryza) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1546623 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 3 + .../scheduler/fair/AppSchedulable.java | 18 +- .../scheduler/fair/FSLeafQueue.java | 111 +++++++---- .../scheduler/fair/FSParentQueue.java | 14 ++ .../scheduler/fair/FSQueue.java | 10 + .../scheduler/fair/FSSchedulerApp.java | 8 +- .../scheduler/fair/FairScheduler.java | 84 +++----- .../scheduler/fair/QueueManager.java | 3 +- .../dao/FairSchedulerLeafQueueInfo.java | 3 +- .../capacity/TestCapacityScheduler.java | 15 +- .../scheduler/fair/TestFSSchedulerApp.java | 7 +- .../scheduler/fair/TestFairScheduler.java | 183 ++++++++++++++++-- .../scheduler/fifo/TestFifoScheduler.java | 3 +- 13 files changed, 323 insertions(+), 139 deletions(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index eab6fcf5eb7..8147df09a6a 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -126,6 +126,9 @@ Release 2.3.0 - UNRELEASED YARN-1239. Modified ResourceManager state-store implementations to start storing version numbers. (Jian He via vinodkv) + YARN-1241. In Fair Scheduler, maxRunningApps does not work for non-leaf + queues. 
(Sandy Ryza) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AppSchedulable.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AppSchedulable.java index e5991b28335..275061a5fa1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AppSchedulable.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AppSchedulable.java @@ -51,7 +51,6 @@ public class AppSchedulable extends Schedulable { private FairScheduler scheduler; private FSSchedulerApp app; private Resource demand = Resources.createResource(0); - private boolean runnable = false; // everyone starts as not runnable private long startTime; private static RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null); private static final Log LOG = LogFactory.getLog(AppSchedulable.class); @@ -61,7 +60,7 @@ public class AppSchedulable extends Schedulable { public AppSchedulable(FairScheduler scheduler, FSSchedulerApp app, FSLeafQueue queue) { this.scheduler = scheduler; this.app = app; - this.startTime = System.currentTimeMillis(); + this.startTime = scheduler.getClock().getTime(); this.queue = queue; this.containerTokenSecretManager = scheduler. getContainerTokenSecretManager(); @@ -138,18 +137,6 @@ public class AppSchedulable extends Schedulable { return p; } - /** - * Is this application runnable? Runnable means that the user and queue - * application counts are within configured quotas. - */ - public boolean getRunnable() { - return runnable; - } - - public void setRunnable(boolean runnable) { - this.runnable = runnable; - } - /** * Create and return a container object reflecting an allocation for the * given appliction on the given node with the given capability and @@ -281,9 +268,6 @@ public class AppSchedulable extends Schedulable { unreserve(priority, node); return Resources.none(); } - } else { - // If this app is over quota, don't schedule anything - if (!(getRunnable())) { return Resources.none(); } } Collection prioritiesToTry = (reserved) ? 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java index 3064903d426..a6fbedbc52d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java @@ -22,7 +22,6 @@ import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.Comparator; -import java.util.Iterator; import java.util.List; import org.apache.commons.logging.Log; @@ -42,7 +41,9 @@ public class FSLeafQueue extends FSQueue { private static final Log LOG = LogFactory.getLog( FSLeafQueue.class.getName()); - private final List appScheds = + private final List runnableAppScheds = // apps that are runnable + new ArrayList(); + private final List nonRunnableAppScheds = new ArrayList(); private final FairScheduler scheduler; @@ -62,29 +63,51 @@ public class FSLeafQueue extends FSQueue { this.lastTimeAtHalfFairShare = scheduler.getClock().getTime(); } - public void addApp(FSSchedulerApp app) { + public void addApp(FSSchedulerApp app, boolean runnable) { AppSchedulable appSchedulable = new AppSchedulable(scheduler, app, this); app.setAppSchedulable(appSchedulable); - appScheds.add(appSchedulable); + if (runnable) { + runnableAppScheds.add(appSchedulable); + } else { + nonRunnableAppScheds.add(appSchedulable); + } } // for testing void addAppSchedulable(AppSchedulable appSched) { - appScheds.add(appSched); + runnableAppScheds.add(appSched); } - public void removeApp(FSSchedulerApp app) { - for (Iterator it = appScheds.iterator(); it.hasNext();) { - AppSchedulable appSched = it.next(); - if (appSched.getApp() == app) { - it.remove(); - break; - } + /** + * Removes the given app from this queue. 
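The leaf queue now keeps two lists instead of a per-app runnable flag. The toy sketch below uses plain String "apps" rather than the real FSSchedulerApp/AppSchedulable types, but shows the same bookkeeping contract: an app lives on exactly one list, makeAppRunnable moves it from non-runnable to runnable, and removeApp reports which list it was on.

import java.util.ArrayList;
import java.util.List;

final class TwoListQueueSketch {
  private final List<String> runnable = new ArrayList<String>();
  private final List<String> nonRunnable = new ArrayList<String>();

  void addApp(String app, boolean isRunnable) {
    (isRunnable ? runnable : nonRunnable).add(app);
  }

  /** Mirrors FSLeafQueue#makeAppRunnable: the app must currently be non-runnable. */
  void makeAppRunnable(String app) {
    if (!nonRunnable.remove(app)) {
      throw new IllegalStateException("App is not waiting as non-runnable: " + app);
    }
    runnable.add(app);
  }

  /** Mirrors FSLeafQueue#removeApp: returns whether the app was runnable. */
  boolean removeApp(String app) {
    if (runnable.remove(app)) {
      return true;
    } else if (nonRunnable.remove(app)) {
      return false;
    }
    throw new IllegalStateException("App does not exist in this queue: " + app);
  }

  int getNumRunnableApps() {
    return runnable.size();
  }
}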
+ * @return whether or not the app was runnable + */ + public boolean removeApp(FSSchedulerApp app) { + if (runnableAppScheds.remove(app.getAppSchedulable())) { + return true; + } else if (nonRunnableAppScheds.remove(app.getAppSchedulable())) { + return false; + } else { + throw new IllegalStateException("Given app to remove " + app + + " does not exist in queue " + this); } } - public Collection getAppSchedulables() { - return appScheds; + public void makeAppRunnable(AppSchedulable appSched) { + if (!nonRunnableAppScheds.remove(appSched)) { + throw new IllegalStateException("Can't make app runnable that does not " + + "already exist in queue as non-runnable" + appSched); + } + + runnableAppScheds.add(appSched); + } + + public Collection getRunnableAppSchedulables() { + return runnableAppScheds; + } + + public List getNonRunnableAppSchedulables() { + return nonRunnableAppScheds; } @Override @@ -98,7 +121,7 @@ public class FSLeafQueue extends FSQueue { @Override public void recomputeShares() { - policy.computeShares(getAppSchedulables(), getFairShare()); + policy.computeShares(getRunnableAppSchedulables(), getFairShare()); } @Override @@ -109,7 +132,10 @@ public class FSLeafQueue extends FSQueue { @Override public Resource getResourceUsage() { Resource usage = Resources.createResource(0); - for (AppSchedulable app : appScheds) { + for (AppSchedulable app : runnableAppScheds) { + Resources.addTo(usage, app.getResourceUsage()); + } + for (AppSchedulable app : nonRunnableAppScheds) { Resources.addTo(usage, app.getResourceUsage()); } return usage; @@ -121,25 +147,35 @@ public class FSLeafQueue extends FSQueue { // Limit demand to maxResources Resource maxRes = queueMgr.getMaxResources(getName()); demand = Resources.createResource(0); - for (AppSchedulable sched : appScheds) { - sched.updateDemand(); - Resource toAdd = sched.getDemand(); - if (LOG.isDebugEnabled()) { - LOG.debug("Counting resource from " + sched.getName() + " " + toAdd - + "; Total resource consumption for " + getName() + " now " - + demand); - } - demand = Resources.add(demand, toAdd); - demand = Resources.componentwiseMin(demand, maxRes); + for (AppSchedulable sched : runnableAppScheds) { if (Resources.equals(demand, maxRes)) { break; } + updateDemandForApp(sched, maxRes); + } + for (AppSchedulable sched : nonRunnableAppScheds) { + if (Resources.equals(demand, maxRes)) { + break; + } + updateDemandForApp(sched, maxRes); } if (LOG.isDebugEnabled()) { LOG.debug("The updated demand for " + getName() + " is " + demand + "; the max is " + maxRes); } } + + private void updateDemandForApp(AppSchedulable sched, Resource maxRes) { + sched.updateDemand(); + Resource toAdd = sched.getDemand(); + if (LOG.isDebugEnabled()) { + LOG.debug("Counting resource from " + sched.getName() + " " + toAdd + + "; Total resource consumption for " + getName() + " now " + + demand); + } + demand = Resources.add(demand, toAdd); + demand = Resources.componentwiseMin(demand, maxRes); + } @Override public Resource assignContainer(FSSchedulerNode node) { @@ -153,17 +189,15 @@ public class FSLeafQueue extends FSQueue { } Comparator comparator = policy.getComparator(); - Collections.sort(appScheds, comparator); - for (AppSchedulable sched : appScheds) { - if (sched.getRunnable()) { - if (SchedulerAppUtils.isBlacklisted(sched.getApp(), node, LOG)) { - continue; - } + Collections.sort(runnableAppScheds, comparator); + for (AppSchedulable sched : runnableAppScheds) { + if (SchedulerAppUtils.isBlacklisted(sched.getApp(), node, LOG)) { + continue; + } - assigned = 
sched.assignContainer(node); - if (!assigned.equals(Resources.none())) { - break; - } + assigned = sched.assignContainer(node); + if (!assigned.equals(Resources.none())) { + break; } } return assigned; @@ -205,4 +239,9 @@ public class FSLeafQueue extends FSQueue { public void setLastTimeAtHalfFairShare(long lastTimeAtHalfFairShare) { this.lastTimeAtHalfFairShare = lastTimeAtHalfFairShare; } + + @Override + public int getNumRunnableApps() { + return runnableAppScheds.size(); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java index 1b51c02065b..45d2811919f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java @@ -43,6 +43,7 @@ public class FSParentQueue extends FSQueue { new ArrayList(); private final QueueManager queueMgr; private Resource demand = Resources.createResource(0); + private int runnableApps; public FSParentQueue(String name, QueueManager queueMgr, FairScheduler scheduler, FSParentQueue parent) { @@ -171,4 +172,17 @@ public class FSParentQueue extends FSQueue { } super.policy = policy; } + + public void incrementRunnableApps() { + runnableApps++; + } + + public void decrementRunnableApps() { + runnableApps--; + } + + @Override + public int getNumRunnableApps() { + return runnableApps; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java index 9f3c4c97c5d..94c8f70a9aa 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java @@ -72,6 +72,10 @@ public abstract class FSQueue extends Schedulable implements Queue { public SchedulingPolicy getPolicy() { return policy; } + + public FSParentQueue getParent() { + return parent; + } protected void throwPolicyDoesnotApplyException(SchedulingPolicy policy) throws AllocationConfigurationException { @@ -164,6 +168,12 @@ public abstract class FSQueue extends Schedulable implements Queue { */ public abstract Collection getChildQueues(); + /** + * Return the number of apps for which containers can be allocated. + * Includes apps in subqueues. 
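Taken together, the new getParent() and getNumRunnableApps() accessors give later patches in this series everything needed to test a queue's headroom by walking up the hierarchy. A minimal sketch of that walk, assuming the per-queue limits come from QueueManager.getQueueMaxApps() as elsewhere in this series; the helper name hasHeadroom is illustrative and not part of the patch:

    // Returns true only if this queue and every ancestor still have room
    // under their configured maxRunningApps.
    static boolean hasHeadroom(FSQueue queue, QueueManager queueMgr) {
      while (queue != null) {
        if (queue.getNumRunnableApps() >= queueMgr.getQueueMaxApps(queue.getName())) {
          return false;   // this level (or an ancestor) is already at its limit
        }
        queue = queue.getParent();
      }
      return true;
    }

MaxRunningAppsEnforcer.canAppBeRunnable(), added later in this series, combines this queue walk with a per-user running-app count.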
+ */ + public abstract int getNumRunnableApps(); + /** * Helper method to check if the queue should attempt assigning resources * diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSSchedulerApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSSchedulerApp.java index caf2a97d712..10913b17ea9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSSchedulerApp.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSSchedulerApp.java @@ -44,7 +44,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerFini import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerImpl; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ActiveUsersManager; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Queue; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplication; import org.apache.hadoop.yarn.util.resource.Resources; @@ -62,7 +61,7 @@ public class FSSchedulerApp extends SchedulerApplication { final Map preemptionMap = new HashMap(); public FSSchedulerApp(ApplicationAttemptId applicationAttemptId, - String user, Queue queue, ActiveUsersManager activeUsersManager, + String user, FSLeafQueue queue, ActiveUsersManager activeUsersManager, RMContext rmContext) { super(applicationAttemptId, user, queue, activeUsersManager, rmContext); } @@ -327,4 +326,9 @@ public class FSSchedulerApp extends SchedulerApplication { public Set getPreemptionContainers() { return preemptionMap.keySet(); } + + @Override + public FSLeafQueue getQueue() { + return (FSLeafQueue)super.getQueue(); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java index 7d6c6a50523..a882113c004 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java @@ -190,9 +190,13 @@ public class FairScheduler implements ResourceScheduler { // heartbeat protected int maxAssign; // Max containers to assign per heartbeat + @VisibleForTesting + final MaxRunningAppsEnforcer maxRunningEnforcer; + public FairScheduler() { clock = new SystemClock(); queueMgr = new QueueManager(this); + maxRunningEnforcer = new MaxRunningAppsEnforcer(queueMgr); } private void validateConf(Configuration conf) { @@ -272,7 +276,6 @@ public class FairScheduler implements ResourceScheduler { */ protected synchronized void update() { queueMgr.reloadAllocsIfNecessary(); // Relaod alloc file - updateRunnability(); // 
Set job runnability based on user/queue limits updatePreemptionVariables(); // Determine if any queues merit preemption FSQueue rootQueue = queueMgr.getRootQueue(); @@ -377,7 +380,7 @@ public class FairScheduler implements ResourceScheduler { for (FSLeafQueue sched : scheds) { if (Resources.greaterThan(RESOURCE_CALCULATOR, clusterCapacity, sched.getResourceUsage(), sched.getFairShare())) { - for (AppSchedulable as : sched.getAppSchedulables()) { + for (AppSchedulable as : sched.getRunnableAppSchedulables()) { for (RMContainer c : as.getApp().getLiveContainers()) { runningContainers.add(c); apps.put(c, as.getApp()); @@ -505,63 +508,23 @@ public class FairScheduler implements ResourceScheduler { return resToPreempt; } - /** - * This updates the runnability of all apps based on whether or not any - * users/queues have exceeded their capacity. - */ - private void updateRunnability() { - List apps = new ArrayList(); - - // Start by marking everything as not runnable - for (FSLeafQueue leafQueue : queueMgr.getLeafQueues()) { - for (AppSchedulable a : leafQueue.getAppSchedulables()) { - a.setRunnable(false); - apps.add(a); - } - } - // Create a list of sorted jobs in order of start time and priority - Collections.sort(apps, new FifoAppComparator()); - // Mark jobs as runnable in order of start time and priority, until - // user or queue limits have been reached. - Map userApps = new HashMap(); - Map queueApps = new HashMap(); - - for (AppSchedulable app : apps) { - String user = app.getApp().getUser(); - String queue = app.getApp().getQueueName(); - int userCount = userApps.containsKey(user) ? userApps.get(user) : 0; - int queueCount = queueApps.containsKey(queue) ? queueApps.get(queue) : 0; - if (userCount < queueMgr.getUserMaxApps(user) && - queueCount < queueMgr.getQueueMaxApps(queue)) { - userApps.put(user, userCount + 1); - queueApps.put(queue, queueCount + 1); - app.setRunnable(true); - } - } - } - public RMContainerTokenSecretManager getContainerTokenSecretManager() { return rmContext.getContainerTokenSecretManager(); } // synchronized for sizeBasedWeight public synchronized ResourceWeights getAppWeight(AppSchedulable app) { - if (!app.getRunnable()) { - // Job won't launch tasks, but don't return 0 to avoid division errors - return ResourceWeights.NEUTRAL; - } else { - double weight = 1.0; - if (sizeBasedWeight) { - // Set weight based on current memory demand - weight = Math.log1p(app.getDemand().getMemory()) / Math.log(2); - } - weight *= app.getPriority().getPriority(); - if (weightAdjuster != null) { - // Run weight through the user-supplied weightAdjuster - weight = weightAdjuster.adjustWeight(app, weight); - } - return new ResourceWeights((float)weight); + double weight = 1.0; + if (sizeBasedWeight) { + // Set weight based on current memory demand + weight = Math.log1p(app.getDemand().getMemory()) / Math.log(2); } + weight *= app.getPriority().getPriority(); + if (weightAdjuster != null) { + // Run weight through the user-supplied weightAdjuster + weight = weightAdjuster.adjustWeight(app, weight); + } + return new ResourceWeights((float)weight); } @Override @@ -662,7 +625,14 @@ public class FairScheduler implements ResourceScheduler { return; } - queue.addApp(schedulerApp); + boolean runnable = maxRunningEnforcer.canAppBeRunnable(queue, user); + queue.addApp(schedulerApp, runnable); + if (runnable) { + maxRunningEnforcer.trackRunnableApp(schedulerApp); + } else { + maxRunningEnforcer.trackNonRunnableApp(schedulerApp); + } + queue.getMetrics().submitApp(user, 
applicationAttemptId.getAttemptId()); applications.put(applicationAttemptId, schedulerApp); @@ -736,8 +706,14 @@ public class FairScheduler implements ResourceScheduler { // Inform the queue FSLeafQueue queue = queueMgr.getLeafQueue(application.getQueue() .getQueueName(), false); - queue.removeApp(application); + boolean wasRunnable = queue.removeApp(application); + if (wasRunnable) { + maxRunningEnforcer.updateRunnabilityOnAppRemoval(application); + } else { + maxRunningEnforcer.untrackNonRunnableApp(application); + } + // Remove from our data-structure applications.remove(applicationAttemptId); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueueManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueueManager.java index 4dd9c461001..95dfa4aff6e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueueManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueueManager.java @@ -89,7 +89,8 @@ public class QueueManager { private final Map queues = new HashMap(); private FSParentQueue rootQueue; - private volatile QueueManagerInfo info = new QueueManagerInfo(); + @VisibleForTesting + volatile QueueManagerInfo info = new QueueManagerInfo(); @VisibleForTesting volatile QueuePlacementPolicy placementPolicy; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerLeafQueueInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerLeafQueueInfo.java index c7ae9181f43..5cdfb2abdf7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerLeafQueueInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerLeafQueueInfo.java @@ -39,7 +39,7 @@ public class FairSchedulerLeafQueueInfo extends FairSchedulerQueueInfo { public FairSchedulerLeafQueueInfo(FSLeafQueue queue, FairScheduler scheduler) { super(queue, scheduler); - Collection apps = queue.getAppSchedulables(); + Collection apps = queue.getRunnableAppSchedulables(); for (AppSchedulable app : apps) { if (app.getApp().isPending()) { numPendingApps++; @@ -47,6 +47,7 @@ public class FairSchedulerLeafQueueInfo extends FairSchedulerQueueInfo { numActiveApps++; } } + numPendingApps += queue.getNonRunnableAppSchedulables().size(); } public int getNumActiveApplications() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java index 
627fae8522c..a5628febee8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java @@ -596,23 +596,24 @@ public class TestCapacityScheduler { public void testConcurrentAccessOnApplications() throws Exception { CapacityScheduler cs = new CapacityScheduler(); verifyConcurrentAccessOnApplications( - cs.applications, FiCaSchedulerApp.class); + cs.applications, FiCaSchedulerApp.class, Queue.class); } - public static + public static void verifyConcurrentAccessOnApplications( - final Map applications, Class clazz) + final Map applications, Class appClazz, + final Class queueClazz) throws Exception { final int size = 10000; final ApplicationId appId = ApplicationId.newInstance(0, 0); - final Constructor ctor = clazz.getDeclaredConstructor( - ApplicationAttemptId.class, String.class, Queue.class, + final Constructor ctor = appClazz.getDeclaredConstructor( + ApplicationAttemptId.class, String.class, queueClazz, ActiveUsersManager.class, RMContext.class); ApplicationAttemptId appAttemptId0 = ApplicationAttemptId.newInstance(appId, 0); applications.put(appAttemptId0, ctor.newInstance( - appAttemptId0, null, mock(Queue.class), null, null)); + appAttemptId0, null, mock(queueClazz), null, null)); assertNotNull(applications.get(appAttemptId0)); // Imitating the thread of scheduler that will add and remove apps @@ -627,7 +628,7 @@ public class TestCapacityScheduler { = ApplicationAttemptId.newInstance(appId, i); try { applications.put(appAttemptId, ctor.newInstance( - appAttemptId, null, mock(Queue.class), null, null)); + appAttemptId, null, mock(queueClazz), null, null)); } catch (Exception e) { failed.set(true); finished.set(true); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSSchedulerApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSSchedulerApp.java index 491235e7605..c651cb66bd8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSSchedulerApp.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSSchedulerApp.java @@ -24,7 +24,6 @@ import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Queue; import org.apache.hadoop.yarn.util.Clock; import org.junit.Test; import org.mockito.Mockito; @@ -53,7 +52,7 @@ public class TestFSSchedulerApp { @Test public void testDelayScheduling() { - Queue queue = Mockito.mock(Queue.class); + FSLeafQueue queue = Mockito.mock(FSLeafQueue.class); Priority prio = Mockito.mock(Priority.class); Mockito.when(prio.getPriority()).thenReturn(1); double nodeLocalityThreshold = .5; @@ -110,7 
+109,7 @@ public class TestFSSchedulerApp { @Test public void testDelaySchedulingForContinuousScheduling() throws InterruptedException { - Queue queue = Mockito.mock(Queue.class); + FSLeafQueue queue = Mockito.mock(FSLeafQueue.class); Priority prio = Mockito.mock(Priority.class); Mockito.when(prio.getPriority()).thenReturn(1); @@ -170,7 +169,7 @@ public class TestFSSchedulerApp { * no tin use), the least restrictive locality level is returned. */ public void testLocalityLevelWithoutDelays() { - Queue queue = Mockito.mock(Queue.class); + FSLeafQueue queue = Mockito.mock(FSLeafQueue.class); Priority prio = Mockito.mock(Priority.class); Mockito.when(prio.getPriority()).thenReturn(1); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java index 14daf33aaf6..e739a1452f4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java @@ -100,7 +100,7 @@ import com.google.common.collect.Sets; public class TestFairScheduler { - private class MockClock implements Clock { + static class MockClock implements Clock { private long time = 0; @Override public long getTime() { @@ -613,9 +613,9 @@ public class TestFairScheduler { appAttemptId, "default", "user1"); scheduler.handle(appAddedEvent); assertEquals(1, scheduler.getQueueManager().getLeafQueue("user1", true) - .getAppSchedulables().size()); + .getRunnableAppSchedulables().size()); assertEquals(0, scheduler.getQueueManager().getLeafQueue("default", true) - .getAppSchedulables().size()); + .getRunnableAppSchedulables().size()); assertEquals("root.user1", rmApp.getQueue()); conf.set(FairSchedulerConfiguration.USER_AS_DEFAULT_QUEUE, "false"); @@ -625,11 +625,11 @@ public class TestFairScheduler { createAppAttemptId(2, 1), "default", "user2"); scheduler.handle(appAddedEvent2); assertEquals(1, scheduler.getQueueManager().getLeafQueue("user1", true) - .getAppSchedulables().size()); + .getRunnableAppSchedulables().size()); assertEquals(1, scheduler.getQueueManager().getLeafQueue("default", true) - .getAppSchedulables().size()); + .getRunnableAppSchedulables().size()); assertEquals(0, scheduler.getQueueManager().getLeafQueue("user2", true) - .getAppSchedulables().size()); + .getRunnableAppSchedulables().size()); } @Test @@ -821,7 +821,7 @@ public class TestFairScheduler { // That queue should have one app assertEquals(1, scheduler.getQueueManager().getLeafQueue("user1", true) - .getAppSchedulables().size()); + .getRunnableAppSchedulables().size()); AppRemovedSchedulerEvent appRemovedEvent1 = new AppRemovedSchedulerEvent( createAppAttemptId(1, 1), RMAppAttemptState.FINISHED); @@ -831,7 +831,7 @@ public class TestFairScheduler { // Queue should have no apps assertEquals(0, scheduler.getQueueManager().getLeafQueue("user1", true) - .getAppSchedulables().size()); + .getRunnableAppSchedulables().size()); } @Test @@ -2400,7 +2400,158 @@ public class TestFairScheduler { public void testConcurrentAccessOnApplications() throws 
Exception { FairScheduler fs = new FairScheduler(); TestCapacityScheduler.verifyConcurrentAccessOnApplications( - fs.applications, FSSchedulerApp.class); + fs.applications, FSSchedulerApp.class, FSLeafQueue.class); + } + + + private void verifyAppRunnable(ApplicationAttemptId attId, boolean runnable) { + FSSchedulerApp app = scheduler.applications.get(attId); + FSLeafQueue queue = app.getQueue(); + Collection runnableApps = + queue.getRunnableAppSchedulables(); + Collection nonRunnableApps = + queue.getNonRunnableAppSchedulables(); + assertEquals(runnable, runnableApps.contains(app.getAppSchedulable())); + assertEquals(!runnable, nonRunnableApps.contains(app.getAppSchedulable())); + } + + private void verifyQueueNumRunnable(String queueName, int numRunnableInQueue, + int numNonRunnableInQueue) { + FSLeafQueue queue = scheduler.getQueueManager().getLeafQueue(queueName, false); + assertEquals(numRunnableInQueue, + queue.getRunnableAppSchedulables().size()); + assertEquals(numNonRunnableInQueue, + queue.getNonRunnableAppSchedulables().size()); + } + + @Test + public void testUserAndQueueMaxRunningApps() throws Exception { + Configuration conf = createConfiguration(); + conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE); + scheduler.reinitialize(conf, resourceManager.getRMContext()); + + PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE)); + out.println(""); + out.println(""); + out.println(""); + out.println("2"); + out.println(""); + out.println(""); + out.println("1"); + out.println(""); + out.println(""); + out.close(); + + QueueManager queueManager = scheduler.getQueueManager(); + queueManager.initialize(); + + // exceeds no limits + ApplicationAttemptId attId1 = createSchedulingRequest(1024, "queue1", "user1"); + verifyAppRunnable(attId1, true); + verifyQueueNumRunnable("queue1", 1, 0); + // exceeds user limit + ApplicationAttemptId attId2 = createSchedulingRequest(1024, "queue2", "user1"); + verifyAppRunnable(attId2, false); + verifyQueueNumRunnable("queue2", 0, 1); + // exceeds no limits + ApplicationAttemptId attId3 = createSchedulingRequest(1024, "queue1", "user2"); + verifyAppRunnable(attId3, true); + verifyQueueNumRunnable("queue1", 2, 0); + // exceeds queue limit + ApplicationAttemptId attId4 = createSchedulingRequest(1024, "queue1", "user2"); + verifyAppRunnable(attId4, false); + verifyQueueNumRunnable("queue1", 2, 1); + + // Remove app 1 and both app 2 and app 4 should becomes runnable in its place + AppRemovedSchedulerEvent appRemovedEvent1 = new AppRemovedSchedulerEvent( + attId1, RMAppAttemptState.FINISHED); + scheduler.handle(appRemovedEvent1); + verifyAppRunnable(attId2, true); + verifyQueueNumRunnable("queue2", 1, 0); + verifyAppRunnable(attId4, true); + verifyQueueNumRunnable("queue1", 2, 0); + + // A new app to queue1 should not be runnable + ApplicationAttemptId attId5 = createSchedulingRequest(1024, "queue1", "user2"); + verifyAppRunnable(attId5, false); + verifyQueueNumRunnable("queue1", 2, 1); + } + + @Test + public void testMaxRunningAppsHierarchicalQueues() throws Exception { + Configuration conf = createConfiguration(); + conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE); + scheduler.reinitialize(conf, resourceManager.getRMContext()); + MockClock clock = new MockClock(); + scheduler.setClock(clock); + + PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE)); + out.println(""); + out.println(""); + out.println(""); + out.println(" 3"); + out.println(" "); + out.println(" "); + out.println(" "); + out.println(" 1"); + 
out.println(" "); + out.println(""); + out.println(""); + out.close(); + + QueueManager queueManager = scheduler.getQueueManager(); + queueManager.initialize(); + + // exceeds no limits + ApplicationAttemptId attId1 = createSchedulingRequest(1024, "queue1.sub1", "user1"); + verifyAppRunnable(attId1, true); + verifyQueueNumRunnable("queue1.sub1", 1, 0); + clock.tick(10); + // exceeds no limits + ApplicationAttemptId attId2 = createSchedulingRequest(1024, "queue1.sub3", "user1"); + verifyAppRunnable(attId2, true); + verifyQueueNumRunnable("queue1.sub3", 1, 0); + clock.tick(10); + // exceeds no limits + ApplicationAttemptId attId3 = createSchedulingRequest(1024, "queue1.sub2", "user1"); + verifyAppRunnable(attId3, true); + verifyQueueNumRunnable("queue1.sub2", 1, 0); + clock.tick(10); + // exceeds queue1 limit + ApplicationAttemptId attId4 = createSchedulingRequest(1024, "queue1.sub2", "user1"); + verifyAppRunnable(attId4, false); + verifyQueueNumRunnable("queue1.sub2", 1, 1); + clock.tick(10); + // exceeds sub3 limit + ApplicationAttemptId attId5 = createSchedulingRequest(1024, "queue1.sub3", "user1"); + verifyAppRunnable(attId5, false); + verifyQueueNumRunnable("queue1.sub3", 1, 1); + clock.tick(10); + + // Even though the app was removed from sub3, the app from sub2 gets to go + // because it came in first + AppRemovedSchedulerEvent appRemovedEvent1 = new AppRemovedSchedulerEvent( + attId2, RMAppAttemptState.FINISHED); + scheduler.handle(appRemovedEvent1); + verifyAppRunnable(attId4, true); + verifyQueueNumRunnable("queue1.sub2", 2, 0); + verifyAppRunnable(attId5, false); + verifyQueueNumRunnable("queue1.sub3", 0, 1); + + // Now test removal of a non-runnable app + AppRemovedSchedulerEvent appRemovedEvent2 = new AppRemovedSchedulerEvent( + attId5, RMAppAttemptState.KILLED); + scheduler.handle(appRemovedEvent2); + assertEquals(0, scheduler.maxRunningEnforcer.usersNonRunnableApps + .get("user1").size()); + // verify app gone in queue accounting + verifyQueueNumRunnable("queue1.sub3", 0, 0); + // verify it doesn't become runnable when there would be space for it + AppRemovedSchedulerEvent appRemovedEvent3 = new AppRemovedSchedulerEvent( + attId4, RMAppAttemptState.FINISHED); + scheduler.handle(appRemovedEvent3); + verifyQueueNumRunnable("queue1.sub2", 1, 0); + verifyQueueNumRunnable("queue1.sub3", 0, 0); } @Test (timeout = 10000) @@ -2499,23 +2650,23 @@ public class TestFairScheduler { // Should get put into jerry createSchedulingRequest(1024, "jerry", "someuser"); - assertEquals(1, jerryQueue.getAppSchedulables().size()); + assertEquals(1, jerryQueue.getRunnableAppSchedulables().size()); // Should get forced into default createSchedulingRequest(1024, "newqueue", "someuser"); - assertEquals(1, jerryQueue.getAppSchedulables().size()); - assertEquals(1, defaultQueue.getAppSchedulables().size()); + assertEquals(1, jerryQueue.getRunnableAppSchedulables().size()); + assertEquals(1, defaultQueue.getRunnableAppSchedulables().size()); // Would get put into someuser because of user-as-default-queue, but should // be forced into default createSchedulingRequest(1024, "default", "someuser"); - assertEquals(1, jerryQueue.getAppSchedulables().size()); - assertEquals(2, defaultQueue.getAppSchedulables().size()); + assertEquals(1, jerryQueue.getRunnableAppSchedulables().size()); + assertEquals(2, defaultQueue.getRunnableAppSchedulables().size()); // Should get put into jerry because of user-as-default-queue createSchedulingRequest(1024, "default", "jerry"); - assertEquals(2, 
jerryQueue.getAppSchedulables().size()); - assertEquals(2, defaultQueue.getAppSchedulables().size()); + assertEquals(2, jerryQueue.getRunnableAppSchedulables().size()); + assertEquals(2, defaultQueue.getRunnableAppSchedulables().size()); } @SuppressWarnings("resource") diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java index 00cb71ebe49..525fbefde72 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java @@ -56,6 +56,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; import org.apache.hadoop.yarn.server.resourcemanager.Task; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Queue; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerAppReport; @@ -518,7 +519,7 @@ public class TestFifoScheduler { public void testConcurrentAccessOnApplications() throws Exception { FifoScheduler fs = new FifoScheduler(); TestCapacityScheduler.verifyConcurrentAccessOnApplications( - fs.applications, FiCaSchedulerApp.class); + fs.applications, FiCaSchedulerApp.class, Queue.class); } @SuppressWarnings("resource") From 61b6ed73f8a5a75951c7b355c673459874eef7c3 Mon Sep 17 00:00:00 2001 From: Sanford Ryza Date: Fri, 29 Nov 2013 19:08:48 +0000 Subject: [PATCH 23/27] YARN-1241: Include missing files git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1546625 13f79535-47bb-0310-9956-ffa450edef68 --- .../fair/MaxRunningAppsEnforcer.java | 302 ++++++++++++++++++ .../fair/TestMaxRunningAppsEnforcer.java | 152 +++++++++ 2 files changed, 454 insertions(+) create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/MaxRunningAppsEnforcer.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestMaxRunningAppsEnforcer.java diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/MaxRunningAppsEnforcer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/MaxRunningAppsEnforcer.java new file mode 100644 index 00000000000..e601086b8c4 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/MaxRunningAppsEnforcer.java @@ -0,0 
+1,302 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.PriorityQueue; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.collect.ArrayListMultimap; +import com.google.common.collect.ListMultimap; + +/** + * Handles tracking and enforcement for user and queue maxRunningApps + * constraints + */ +public class MaxRunningAppsEnforcer { + private final QueueManager queueMgr; + + // Tracks the number of running applications by user. + private final Map usersNumRunnableApps; + @VisibleForTesting + final ListMultimap usersNonRunnableApps; + + public MaxRunningAppsEnforcer(QueueManager queueMgr) { + this.queueMgr = queueMgr; + this.usersNumRunnableApps = new HashMap(); + this.usersNonRunnableApps = ArrayListMultimap.create(); + } + + /** + * Checks whether making the application runnable would exceed any + * maxRunningApps limits. + */ + public boolean canAppBeRunnable(FSQueue queue, String user) { + Integer userNumRunnable = usersNumRunnableApps.get(user); + if (userNumRunnable == null) { + userNumRunnable = 0; + } + if (userNumRunnable >= queueMgr.getUserMaxApps(user)) { + return false; + } + // Check queue and all parent queues + while (queue != null) { + int queueMaxApps = queueMgr.getQueueMaxApps(queue.getName()); + if (queue.getNumRunnableApps() >= queueMaxApps) { + return false; + } + queue = queue.getParent(); + } + + return true; + } + + /** + * Tracks the given new runnable app for purposes of maintaining max running + * app limits. + */ + public void trackRunnableApp(FSSchedulerApp app) { + String user = app.getUser(); + FSLeafQueue queue = app.getQueue(); + // Increment running counts for all parent queues + FSParentQueue parent = queue.getParent(); + while (parent != null) { + parent.incrementRunnableApps(); + parent = parent.getParent(); + } + + Integer userNumRunnable = usersNumRunnableApps.get(user); + usersNumRunnableApps.put(user, (userNumRunnable == null ? 0 + : userNumRunnable) + 1); + } + + /** + * Tracks the given new non runnable app so that it can be made runnable when + * it would not violate max running app limits. + */ + public void trackNonRunnableApp(FSSchedulerApp app) { + String user = app.getUser(); + usersNonRunnableApps.put(user, app.getAppSchedulable()); + } + + /** + * Updates the relevant tracking variables after a runnable app with the given + * queue and user has been removed. Checks to see whether any other applications + * are now runnable and makes them so. 
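In practice the scheduler drives these methods in two places, at application admission and at application removal. Condensed from the FairScheduler hunks earlier in this series (exception handling and metrics omitted):

    // Admission: runnability is decided once, and the app is registered
    // with both the leaf queue and the enforcer accordingly.
    boolean runnable = maxRunningEnforcer.canAppBeRunnable(queue, user);
    queue.addApp(schedulerApp, runnable);
    if (runnable) {
      maxRunningEnforcer.trackRunnableApp(schedulerApp);
    } else {
      maxRunningEnforcer.trackNonRunnableApp(schedulerApp);
    }

    // Removal: only the departure of a runnable app can open up slots,
    // so a non-runnable app is simply untracked.
    boolean wasRunnable = queue.removeApp(application);
    if (wasRunnable) {
      maxRunningEnforcer.updateRunnabilityOnAppRemoval(application);
    } else {
      maxRunningEnforcer.untrackNonRunnableApp(application);
    }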
+ * + * Runs in O(n log(n)) where n is the number of queues that are under the + * highest queue that went from having no slack to having slack. + */ + public void updateRunnabilityOnAppRemoval(FSSchedulerApp app) { + // Update usersRunnableApps + String user = app.getUser(); + int newUserNumRunning = usersNumRunnableApps.get(user) - 1; + if (newUserNumRunning == 0) { + usersNumRunnableApps.remove(user); + } else { + usersNumRunnableApps.put(user, newUserNumRunning); + } + + // Update runnable app bookkeeping for queues: + // childqueueX might have no pending apps itself, but if a queue higher up + // in the hierarchy parentqueueY has a maxRunningApps set, an app completion + // in childqueueX could allow an app in some other distant child of + // parentqueueY to become runnable. + // An app removal will only possibly allow another app to become runnable if + // the queue was already at its max before the removal. + // Thus we find the ancestor queue highest in the tree for which the app + // that was at its maxRunningApps before the removal. + FSLeafQueue queue = app.getQueue(); + FSQueue highestQueueWithAppsNowRunnable = (queue.getNumRunnableApps() == + queueMgr.getQueueMaxApps(queue.getName()) - 1) ? queue : null; + FSParentQueue parent = queue.getParent(); + while (parent != null) { + if (parent.getNumRunnableApps() == queueMgr.getQueueMaxApps(parent + .getName())) { + highestQueueWithAppsNowRunnable = parent; + } + parent.decrementRunnableApps(); + parent = parent.getParent(); + } + + List> appsNowMaybeRunnable = + new ArrayList>(); + + // Compile lists of apps which may now be runnable + // We gather lists instead of building a set of all non-runnable apps so + // that this whole operation can be O(number of queues) instead of + // O(number of apps) + if (highestQueueWithAppsNowRunnable != null) { + gatherPossiblyRunnableAppLists(highestQueueWithAppsNowRunnable, + appsNowMaybeRunnable); + } + if (newUserNumRunning == queueMgr.getUserMaxApps(user) - 1) { + List userWaitingApps = usersNonRunnableApps.get(user); + if (userWaitingApps != null) { + appsNowMaybeRunnable.add(userWaitingApps); + } + } + + // Scan through and check whether this means that any apps are now runnable + Iterator iter = new MultiListStartTimeIterator( + appsNowMaybeRunnable); + FSSchedulerApp prev = null; + int numNowRunnable = 0; + while (iter.hasNext()) { + FSSchedulerApp next = iter.next(); + if (next == prev) { + continue; + } + + if (canAppBeRunnable(next.getQueue(), next.getUser())) { + trackRunnableApp(next); + AppSchedulable appSched = next.getAppSchedulable(); + next.getQueue().makeAppRunnable(appSched); + if (!usersNonRunnableApps.remove(next.getUser(), appSched)) { + throw new IllegalStateException("Waiting app " + next + + " expected to be in usersNonRunnableApps"); + } + + // No more than one app per list will be able to be made runnable, so + // we can stop looking after we've found that many + if (numNowRunnable >= appsNowMaybeRunnable.size()) { + break; + } + } + + prev = next; + } + } + + /** + * Stops tracking the given non-runnable app + */ + public void untrackNonRunnableApp(FSSchedulerApp app) { + usersNonRunnableApps.remove(app.getUser(), app.getAppSchedulable()); + } + + /** + * Traverses the queue hierarchy under the given queue to gather all lists + * of non-runnable applications. 
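The MultiListStartTimeIterator defined further down is what makes the scan over these gathered lists cheap: it merges several per-queue lists, each already ordered by start time, through a heap keyed on each list's current head. A self-contained illustration of the same k-way-merge idea, reduced to plain long start times (class and method names here are illustrative only, not Hadoop APIs):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Comparator;
    import java.util.List;
    import java.util.PriorityQueue;

    public class StartTimeMergeDemo {
      // Merges k lists that are each sorted ascending, always emitting the
      // globally smallest remaining value. The heap holds one cursor per list
      // ({listIndex, position}), so each step costs O(log k) rather than O(k).
      static List<Long> merge(final List<List<Long>> lists) {
        PriorityQueue<int[]> cursors = new PriorityQueue<int[]>(
            lists.size() + 1, new Comparator<int[]>() {
              @Override
              public int compare(int[] a, int[] b) {
                return Long.compare(lists.get(a[0]).get(a[1]),
                                    lists.get(b[0]).get(b[1]));
              }
            });
        for (int i = 0; i < lists.size(); i++) {
          if (!lists.get(i).isEmpty()) {
            cursors.add(new int[] { i, 0 });
          }
        }
        List<Long> merged = new ArrayList<Long>();
        while (!cursors.isEmpty()) {
          int[] cur = cursors.remove();
          List<Long> list = lists.get(cur[0]);
          merged.add(list.get(cur[1]));
          if (cur[1] + 1 < list.size()) {
            cursors.add(new int[] { cur[0], cur[1] + 1 }); // advance this list
          }
        }
        return merged;
      }

      public static void main(String[] args) {
        System.out.println(merge(Arrays.asList(
            Arrays.asList(10L, 30L), Arrays.asList(20L), Arrays.asList(5L, 40L))));
        // prints [5, 10, 20, 30, 40]
      }
    }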
+ */ + private void gatherPossiblyRunnableAppLists(FSQueue queue, + List> appLists) { + if (queue.getNumRunnableApps() < queueMgr.getQueueMaxApps(queue.getName())) { + if (queue instanceof FSLeafQueue) { + appLists.add(((FSLeafQueue)queue).getNonRunnableAppSchedulables()); + } else { + for (FSQueue child : queue.getChildQueues()) { + gatherPossiblyRunnableAppLists(child, appLists); + } + } + } + } + + /** + * Takes a list of lists, each of which is ordered by start time, and returns + * their elements in order of start time. + * + * We maintain positions in each of the lists. Each next() call advances + * the position in one of the lists. We maintain a heap that orders lists + * by the start time of the app in the current position in that list. + * This allows us to pick which list to advance in O(log(num lists)) instead + * of O(num lists) time. + */ + private static class MultiListStartTimeIterator implements + Iterator { + + private List[] appLists; + private int[] curPositionsInAppLists; + private PriorityQueue appListsByCurStartTime; + + @SuppressWarnings("unchecked") + public MultiListStartTimeIterator(List> appListList) { + appLists = appListList.toArray(new List[appListList.size()]); + curPositionsInAppLists = new int[appLists.length]; + appListsByCurStartTime = new PriorityQueue(); + for (int i = 0; i < appLists.length; i++) { + long time = appLists[i].isEmpty() ? Long.MAX_VALUE : appLists[i].get(0) + .getStartTime(); + appListsByCurStartTime.add(new IndexAndTime(i, time)); + } + } + + @Override + public boolean hasNext() { + return !appListsByCurStartTime.isEmpty() + && appListsByCurStartTime.peek().time != Long.MAX_VALUE; + } + + @Override + public FSSchedulerApp next() { + IndexAndTime indexAndTime = appListsByCurStartTime.remove(); + int nextListIndex = indexAndTime.index; + AppSchedulable next = appLists[nextListIndex] + .get(curPositionsInAppLists[nextListIndex]); + curPositionsInAppLists[nextListIndex]++; + + if (curPositionsInAppLists[nextListIndex] < appLists[nextListIndex].size()) { + indexAndTime.time = appLists[nextListIndex] + .get(curPositionsInAppLists[nextListIndex]).getStartTime(); + } else { + indexAndTime.time = Long.MAX_VALUE; + } + appListsByCurStartTime.add(indexAndTime); + + return next.getApp(); + } + + @Override + public void remove() { + throw new UnsupportedOperationException("Remove not supported"); + } + + private static class IndexAndTime implements Comparable { + public int index; + public long time; + + public IndexAndTime(int index, long time) { + this.index = index; + this.time = time; + } + + @Override + public int compareTo(IndexAndTime o) { + return time < o.time ? -1 : (time > o.time ? 
1 : 0); + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof IndexAndTime)) { + return false; + } + IndexAndTime other = (IndexAndTime)o; + return other.time == time; + } + + @Override + public int hashCode() { + return (int)time; + } + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestMaxRunningAppsEnforcer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestMaxRunningAppsEnforcer.java new file mode 100644 index 00000000000..20f6e3d7757 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestMaxRunningAppsEnforcer.java @@ -0,0 +1,152 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair; + +import static org.junit.Assert.assertEquals; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import java.util.Map; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.junit.Before; +import org.junit.Test; + +public class TestMaxRunningAppsEnforcer { + private QueueManager queueManager; + private Map queueMaxApps; + private Map userMaxApps; + private MaxRunningAppsEnforcer maxAppsEnforcer; + private int appNum; + private TestFairScheduler.MockClock clock; + + @Before + public void setup() throws Exception { + clock = new TestFairScheduler.MockClock(); + FairScheduler scheduler = mock(FairScheduler.class); + when(scheduler.getConf()).thenReturn( + new FairSchedulerConfiguration(new Configuration())); + when(scheduler.getClock()).thenReturn(clock); + + queueManager = new QueueManager(scheduler); + queueManager.initialize(); + + queueMaxApps = queueManager.info.queueMaxApps; + userMaxApps = queueManager.info.userMaxApps; + maxAppsEnforcer = new MaxRunningAppsEnforcer(queueManager); + appNum = 0; + } + + private FSSchedulerApp addApp(FSLeafQueue queue, String user) { + ApplicationId appId = ApplicationId.newInstance(0l, appNum++); + ApplicationAttemptId attId = ApplicationAttemptId.newInstance(appId, 0); + boolean runnable = maxAppsEnforcer.canAppBeRunnable(queue, user); + FSSchedulerApp app = new FSSchedulerApp(attId, user, queue, null, null); + queue.addApp(app, runnable); + if (runnable) { + maxAppsEnforcer.trackRunnableApp(app); + } else { + maxAppsEnforcer.trackNonRunnableApp(app); + } + return app; + } + + private 
void removeApp(FSSchedulerApp app) { + app.getQueue().removeApp(app); + maxAppsEnforcer.updateRunnabilityOnAppRemoval(app); + } + + @Test + public void testRemoveDoesNotEnableAnyApp() { + FSLeafQueue leaf1 = queueManager.getLeafQueue("root.queue1", true); + FSLeafQueue leaf2 = queueManager.getLeafQueue("root.queue2", true); + queueMaxApps.put("root", 2); + queueMaxApps.put("root.queue1", 1); + queueMaxApps.put("root.queue2", 1); + FSSchedulerApp app1 = addApp(leaf1, "user"); + addApp(leaf2, "user"); + addApp(leaf2, "user"); + assertEquals(1, leaf1.getRunnableAppSchedulables().size()); + assertEquals(1, leaf2.getRunnableAppSchedulables().size()); + assertEquals(1, leaf2.getNonRunnableAppSchedulables().size()); + removeApp(app1); + assertEquals(0, leaf1.getRunnableAppSchedulables().size()); + assertEquals(1, leaf2.getRunnableAppSchedulables().size()); + assertEquals(1, leaf2.getNonRunnableAppSchedulables().size()); + } + + @Test + public void testRemoveEnablesAppOnCousinQueue() { + FSLeafQueue leaf1 = queueManager.getLeafQueue("root.queue1.subqueue1.leaf1", true); + FSLeafQueue leaf2 = queueManager.getLeafQueue("root.queue1.subqueue2.leaf2", true); + queueMaxApps.put("root.queue1", 2); + FSSchedulerApp app1 = addApp(leaf1, "user"); + addApp(leaf2, "user"); + addApp(leaf2, "user"); + assertEquals(1, leaf1.getRunnableAppSchedulables().size()); + assertEquals(1, leaf2.getRunnableAppSchedulables().size()); + assertEquals(1, leaf2.getNonRunnableAppSchedulables().size()); + removeApp(app1); + assertEquals(0, leaf1.getRunnableAppSchedulables().size()); + assertEquals(2, leaf2.getRunnableAppSchedulables().size()); + assertEquals(0, leaf2.getNonRunnableAppSchedulables().size()); + } + + @Test + public void testRemoveEnablesOneByQueueOneByUser() { + FSLeafQueue leaf1 = queueManager.getLeafQueue("root.queue1.leaf1", true); + FSLeafQueue leaf2 = queueManager.getLeafQueue("root.queue1.leaf2", true); + queueMaxApps.put("root.queue1.leaf1", 2); + userMaxApps.put("user1", 1); + FSSchedulerApp app1 = addApp(leaf1, "user1"); + addApp(leaf1, "user2"); + addApp(leaf1, "user3"); + addApp(leaf2, "user1"); + assertEquals(2, leaf1.getRunnableAppSchedulables().size()); + assertEquals(1, leaf1.getNonRunnableAppSchedulables().size()); + assertEquals(1, leaf2.getNonRunnableAppSchedulables().size()); + removeApp(app1); + assertEquals(2, leaf1.getRunnableAppSchedulables().size()); + assertEquals(1, leaf2.getRunnableAppSchedulables().size()); + assertEquals(0, leaf1.getNonRunnableAppSchedulables().size()); + assertEquals(0, leaf2.getNonRunnableAppSchedulables().size()); + } + + @Test + public void testRemoveEnablingOrderedByStartTime() { + FSLeafQueue leaf1 = queueManager.getLeafQueue("root.queue1.subqueue1.leaf1", true); + FSLeafQueue leaf2 = queueManager.getLeafQueue("root.queue1.subqueue2.leaf2", true); + queueMaxApps.put("root.queue1", 2); + FSSchedulerApp app1 = addApp(leaf1, "user"); + addApp(leaf2, "user"); + addApp(leaf2, "user"); + clock.tick(20); + addApp(leaf1, "user"); + assertEquals(1, leaf1.getRunnableAppSchedulables().size()); + assertEquals(1, leaf2.getRunnableAppSchedulables().size()); + assertEquals(1, leaf1.getNonRunnableAppSchedulables().size()); + assertEquals(1, leaf2.getNonRunnableAppSchedulables().size()); + removeApp(app1); + assertEquals(0, leaf1.getRunnableAppSchedulables().size()); + assertEquals(2, leaf2.getRunnableAppSchedulables().size()); + assertEquals(0, leaf2.getNonRunnableAppSchedulables().size()); + } + +} From 7b60e94c098ac74eebdb6371c0e211345f17f00c Mon Sep 17 00:00:00 2001 From: 
Steve Loughran Date: Mon, 2 Dec 2013 11:03:00 +0000 Subject: [PATCH 24/27] HADOOP-10135 writes to swift fs over partition size leave temp files and empty output file git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1546959 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++ .../apache/hadoop/fs/swift/util/SwiftObjectPath.java | 6 +++++- .../apache/hadoop/fs/swift/TestSwiftObjectPath.java | 12 ++++++++++++ 3 files changed, 20 insertions(+), 1 deletion(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 8dff925d861..dbb5b725530 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -452,6 +452,9 @@ Release 2.3.0 - UNRELEASED HADOOP-10107. Server.getNumOpenConnections may throw NPE. (Kihwal Lee via jing9) + HADOOP-10135 writes to swift fs over partition size leave temp files and + empty output file (David Dobbins via stevel) + Release 2.2.1 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/util/SwiftObjectPath.java b/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/util/SwiftObjectPath.java index 264f6d9cc18..791509a9e03 100644 --- a/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/util/SwiftObjectPath.java +++ b/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/util/SwiftObjectPath.java @@ -51,8 +51,12 @@ public final class SwiftObjectPath { */ public SwiftObjectPath(String container, String object) { + if (object == null) { + throw new IllegalArgumentException("object name can't be null"); + } + this.container = container; - this.object = object; + this.object = URI.create(object).getPath(); uriPath = buildUriPath(); } diff --git a/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestSwiftObjectPath.java b/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestSwiftObjectPath.java index cb6ba09ef44..338763cfe84 100644 --- a/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestSwiftObjectPath.java +++ b/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestSwiftObjectPath.java @@ -71,6 +71,18 @@ public class TestSwiftObjectPath implements SwiftTestConstants { assertEquals(expected, actual); } + @Test(timeout = SWIFT_TEST_TIMEOUT) + public void testHandleUrlAsPath() throws Exception { + final String hostPart = "swift://container.service1"; + final String pathPart = "/home/user/files/file1"; + final String uriString = hostPart + pathPart; + + final SwiftObjectPath expected = new SwiftObjectPath(uriString, pathPart); + final SwiftObjectPath actual = new SwiftObjectPath(uriString, uriString); + + assertEquals(expected, actual); + } + @Test(timeout = SWIFT_TEST_TIMEOUT) public void testParseAuthenticatedUrl() throws Exception { final String pathString = "swift://container.service1/v2/AUTH_00345h34l93459y4/home/tom/documents/finance.docx"; From 13331a6863184d862f1252cb1084e4b1e12f10a0 Mon Sep 17 00:00:00 2001 From: Colin McCabe Date: Mon, 2 Dec 2013 16:31:03 +0000 Subject: [PATCH 25/27] HDFS-5581. 
NameNodeFsck should use only one instance of BlockPlacementPolicy (vinay via cmccabe) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1547088 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ .../apache/hadoop/hdfs/server/namenode/NamenodeFsck.java | 9 ++++++--- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 10027631333..07f7d72f369 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -664,6 +664,9 @@ Release 2.2.1 - UNRELEASED HDFS-5568. Support includeSnapshots option with Fsck command. (Vinayakumar B via umamahesh) + HDFS-5581. NameNodeFsck should use only one instance of + BlockPlacementPolicy. (vinay via cmccabe) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java index 7ed77585853..dfd01b5c39b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java @@ -144,6 +144,8 @@ public class NamenodeFsck { private final PrintWriter out; private List snapshottableDirs = null; + private BlockPlacementPolicy bpPolicy; + /** * Filesystem checker. * @param conf configuration (namenode config) @@ -166,6 +168,8 @@ public class NamenodeFsck { this.totalDatanodes = totalDatanodes; this.minReplication = minReplication; this.remoteAddress = remoteAddress; + this.bpPolicy = BlockPlacementPolicy.getInstance(conf, null, + networktopology); for (Iterator it = pmap.keySet().iterator(); it.hasNext();) { String key = it.next(); @@ -399,9 +403,8 @@ public class NamenodeFsck { locs.length + " replica(s)."); } // verify block placement policy - BlockPlacementStatus blockPlacementStatus = - BlockPlacementPolicy.getInstance(conf, null, networktopology). - verifyBlockPlacement(path, lBlk, targetFileReplication); + BlockPlacementStatus blockPlacementStatus = bpPolicy + .verifyBlockPlacement(path, lBlk, targetFileReplication); if (!blockPlacementStatus.isPlacementPolicySatisfied()) { res.numMisReplicatedBlocks++; misReplicatedPerFile++; From 58f73acdc17bb87ee50940b69331e8b6b7149e42 Mon Sep 17 00:00:00 2001 From: Colin McCabe Date: Mon, 2 Dec 2013 16:40:52 +0000 Subject: [PATCH 26/27] move HDFS-5581 to 2.3 git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1547094 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 07f7d72f369..a6ba0c13acc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -561,6 +561,9 @@ Release 2.3.0 - UNRELEASED HDFS-5561. FSNameSystem#getNameJournalStatus() in JMX should return plain text instead of HTML. (Haohui Mai via jing9) + HDFS-5581. NameNodeFsck should use only one instance of + BlockPlacementPolicy. (vinay via cmccabe) + OPTIMIZATIONS HDFS-5239. Allow FSNamesystem lock fairness to be configurable (daryn) @@ -664,9 +667,6 @@ Release 2.2.1 - UNRELEASED HDFS-5568. Support includeSnapshots option with Fsck command. 
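The fsck change above follows a simple rule of thumb: an object that is constant for the lifetime of the checker and non-trivial to construct (getInstance typically has to consult the configuration to pick and instantiate the policy class) should be built once, not once per block, since fsck walks every block of every file it checks. Schematically, using the names from the diff:

    // Field initialized once in the NamenodeFsck constructor ...
    this.bpPolicy = BlockPlacementPolicy.getInstance(conf, null, networktopology);

    // ... and reused for each block instead of calling getInstance() again:
    BlockPlacementStatus blockPlacementStatus =
        bpPolicy.verifyBlockPlacement(path, lBlk, targetFileReplication);
    if (!blockPlacementStatus.isPlacementPolicySatisfied()) {
      res.numMisReplicatedBlocks++;
    }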
(Vinayakumar B via umamahesh) - HDFS-5581. NameNodeFsck should use only one instance of - BlockPlacementPolicy. (vinay via cmccabe) - OPTIMIZATIONS BUG FIXES From 08d6213083891eb7e1661d1b5f56121161868a9e Mon Sep 17 00:00:00 2001 From: Colin McCabe Date: Mon, 2 Dec 2013 17:28:53 +0000 Subject: [PATCH 27/27] HADOOP-10130. RawLocalFS pread does not track FileSystem Statistics (Binglin Chang via Colin Patrick McCabe) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1547117 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 3 ++ .../apache/hadoop/fs/RawLocalFileSystem.java | 43 ++++--------------- .../hadoop/fs/FCStatisticsBaseTest.java | 1 + .../hadoop/fs/TestLocalFsFCStatistics.java | 3 +- 4 files changed, 14 insertions(+), 36 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index dbb5b725530..d3186bfc370 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -516,6 +516,9 @@ Release 2.2.1 - UNRELEASED HADOOP-9114. After defined the dfs.checksum.type as the NULL, write file and hflush will through java.lang.ArrayIndexOutOfBoundsException (Sathish via umamahesh) + HADOOP-10130. RawLocalFS::LocalFSFileInputStream.pread does not track + FS::Statistics (Binglin Chang via Colin Patrick McCabe) + Release 2.2.0 - 2013-10-13 INCOMPATIBLE CHANGES diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java index c2e2458fe0c..7d70ada73b4 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java @@ -83,39 +83,6 @@ public class RawLocalFileSystem extends FileSystem { setConf(conf); } - class TrackingFileInputStream extends FileInputStream { - public TrackingFileInputStream(File f) throws IOException { - super(f); - } - - @Override - public int read() throws IOException { - int result = super.read(); - if (result != -1) { - statistics.incrementBytesRead(1); - } - return result; - } - - @Override - public int read(byte[] data) throws IOException { - int result = super.read(data); - if (result != -1) { - statistics.incrementBytesRead(result); - } - return result; - } - - @Override - public int read(byte[] data, int offset, int length) throws IOException { - int result = super.read(data, offset, length); - if (result != -1) { - statistics.incrementBytesRead(result); - } - return result; - } - } - /******************************************************* * For open()'s FSInputStream. 
*******************************************************/ @@ -124,7 +91,7 @@ public class RawLocalFileSystem extends FileSystem { private long position; public LocalFSFileInputStream(Path f) throws IOException { - this.fis = new TrackingFileInputStream(pathToFile(f)); + fis = new FileInputStream(pathToFile(f)); } @Override @@ -159,6 +126,7 @@ public class RawLocalFileSystem extends FileSystem { int value = fis.read(); if (value >= 0) { this.position++; + statistics.incrementBytesRead(1); } return value; } catch (IOException e) { // unexpected exception @@ -172,6 +140,7 @@ public class RawLocalFileSystem extends FileSystem { int value = fis.read(b, off, len); if (value > 0) { this.position += value; + statistics.incrementBytesRead(value); } return value; } catch (IOException e) { // unexpected exception @@ -184,7 +153,11 @@ public class RawLocalFileSystem extends FileSystem { throws IOException { ByteBuffer bb = ByteBuffer.wrap(b, off, len); try { - return fis.getChannel().read(bb, position); + int value = fis.getChannel().read(bb, position); + if (value > 0) { + statistics.incrementBytesRead(value); + } + return value; } catch (IOException e) { throw new FSError(e); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FCStatisticsBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FCStatisticsBaseTest.java index cc80e7ced87..90337a64339 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FCStatisticsBaseTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FCStatisticsBaseTest.java @@ -91,6 +91,7 @@ public abstract class FCStatisticsBaseTest { FSDataInputStream fstr = fc.open(filePath); byte[] buf = new byte[blockSize]; int bytesRead = fstr.read(buf, 0, blockSize); + fstr.read(0, buf, 0, blockSize); Assert.assertEquals(blockSize, bytesRead); verifyReadBytes(stats); verifyWrittenBytes(stats); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFsFCStatistics.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFsFCStatistics.java index 8a4552299bc..fe26f73a2e8 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFsFCStatistics.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFsFCStatistics.java @@ -47,7 +47,8 @@ public class TestLocalFsFCStatistics extends FCStatisticsBaseTest { @Override protected void verifyReadBytes(Statistics stats) { - Assert.assertEquals(blockSize, stats.getBytesRead()); + // one blockSize for read, one for pread + Assert.assertEquals(2*blockSize, stats.getBytesRead()); } @Override
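
The SwiftObjectPath portion of HADOOP-10135 above rejects null object names and keeps only the URI path component, so a full swift:// URI passed as the object name compares equal to the bare path (that is what the new testHandleUrlAsPath case asserts). Below is a minimal, self-contained sketch of that normalization, not the Hadoop class itself; the class name ObjectPathSketch and its main method are illustrative only.

import java.net.URI;
import java.util.Objects;

/**
 * Sketch (not the Hadoop class) of the normalization the patch applies in
 * SwiftObjectPath: reject null object names and keep only the path component,
 * so "swift://container.service1/home/user/files/file1" and
 * "/home/user/files/file1" describe the same object.
 */
public final class ObjectPathSketch {
  private final String container;
  private final String object;

  public ObjectPathSketch(String container, String object) {
    if (object == null) {
      throw new IllegalArgumentException("object name can't be null");
    }
    this.container = container;
    // URI.create(...).getPath() strips any scheme/host prefix and leaves a
    // plain path untouched.
    this.object = URI.create(object).getPath();
  }

  @Override
  public boolean equals(Object o) {
    if (!(o instanceof ObjectPathSketch)) {
      return false;
    }
    ObjectPathSketch other = (ObjectPathSketch) o;
    return Objects.equals(container, other.container)
        && Objects.equals(object, other.object);
  }

  @Override
  public int hashCode() {
    return Objects.hash(container, object);
  }

  public static void main(String[] args) {
    String hostPart = "swift://container.service1";
    String pathPart = "/home/user/files/file1";
    ObjectPathSketch fromUri = new ObjectPathSketch(hostPart, hostPart + pathPart);
    ObjectPathSketch fromPath = new ObjectPathSketch(hostPart, pathPart);
    // Both normalize to the same object path.
    System.out.println(fromUri.equals(fromPath)); // true
  }
}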
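
HDFS-5581 above replaces a per-block call to BlockPlacementPolicy.getInstance(conf, null, networktopology) with a single bpPolicy field built in the NamenodeFsck constructor and reused in verifyBlockPlacement. A hedged sketch of the same hoisting pattern follows, with hypothetical stand-ins (FsckSketch, PlacementPolicy, createPolicy) for the real HDFS types.

import java.util.Arrays;
import java.util.List;

/**
 * Sketch of the HDFS-5581 pattern: build the (relatively costly,
 * configuration-driven) policy object once per checker and reuse it for
 * every block, instead of calling the factory inside the per-block loop.
 */
public class FsckSketch {

  /** Stand-in for BlockPlacementPolicy; verifies placement for one block. */
  interface PlacementPolicy {
    boolean isPlacementSatisfied(String path, String block, int replication);
  }

  /** Stand-in for the static factory that resolves the policy from config. */
  static PlacementPolicy createPolicy() {
    return (path, block, replication) -> replication > 0;
  }

  // Created once per fsck run, like the new bpPolicy field in NamenodeFsck.
  private final PlacementPolicy policy = createPolicy();

  public int countMisplacedBlocks(String path, List<String> blocks, int replication) {
    int misplaced = 0;
    for (String block : blocks) {
      // Before the patch, the factory was invoked here for every block.
      if (!policy.isPlacementSatisfied(path, block, replication)) {
        misplaced++;
      }
    }
    return misplaced;
  }

  public static void main(String[] args) {
    FsckSketch fsck = new FsckSketch();
    System.out.println(fsck.countMisplacedBlocks("/data/file",
        Arrays.asList("blk_1", "blk_2"), 3)); // prints 0
  }
}

The check itself is unchanged; only the construction of the policy object moves from the per-block path into one-time setup.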
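
HADOOP-10130 above drops the TrackingFileInputStream wrapper and increments FileSystem.Statistics directly in LocalFSFileInputStream, including the positioned-read path that goes through fis.getChannel().read(bb, position) and therefore never passed through the wrapper. A rough sketch of that counting follows, assuming a plain AtomicLong in place of FileSystem.Statistics; CountingLocalReaderSketch and its methods are hypothetical, not Hadoop APIs.

import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.util.concurrent.atomic.AtomicLong;

/**
 * Sketch of the HADOOP-10130 idea: count bytes at every read path,
 * including the positioned read that uses the FileChannel and so bypasses
 * any counting wrapper placed around the sequential stream.
 */
public class CountingLocalReaderSketch implements AutoCloseable {
  private final RandomAccessFile file;
  private final AtomicLong bytesRead = new AtomicLong();

  public CountingLocalReaderSketch(String path) throws IOException {
    this.file = new RandomAccessFile(path, "r");
  }

  /** Sequential read: count whatever was actually read. */
  public int read(byte[] buf, int off, int len) throws IOException {
    int n = file.read(buf, off, len);
    if (n > 0) {
      bytesRead.addAndGet(n);
    }
    return n;
  }

  /** Positioned read (pread): the path the old wrapper never counted. */
  public int read(long position, byte[] buf, int off, int len) throws IOException {
    ByteBuffer bb = ByteBuffer.wrap(buf, off, len);
    int n = file.getChannel().read(bb, position);
    if (n > 0) {
      bytesRead.addAndGet(n);
    }
    return n;
  }

  public long getBytesRead() {
    return bytesRead.get();
  }

  @Override
  public void close() throws IOException {
    file.close();
  }
}

With both paths counted, one sequential read of blockSize bytes plus one pread of blockSize bytes accounts for 2*blockSize, which is why FCStatisticsBaseTest adds the fstr.read(0, buf, 0, blockSize) call and TestLocalFsFCStatistics now expects 2*blockSize in verifyReadBytes.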