From 950dea86f4e945fbf376ef3843c0101a2ca569b8 Mon Sep 17 00:00:00 2001 From: Anu Engineer Date: Thu, 31 May 2018 11:28:42 -0700 Subject: [PATCH 001/113] HDDS-112. OzoneShell should support commands with url without scheme. Contributed by Lokesh Jain. --- .../acceptance/ozone-shell.robot | 21 ++++++++ .../hadoop/ozone/web/ozShell/Handler.java | 49 ++++++++++++++++++- 2 files changed, 68 insertions(+), 2 deletions(-) diff --git a/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone-shell.robot b/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone-shell.robot index 0f01b8d8f0e..7ff491057f0 100644 --- a/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone-shell.robot +++ b/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone-shell.robot @@ -189,6 +189,27 @@ Test ozone shell (RpcClient without hostname) Execute on datanode ozone oz -deleteBucket o3:///hive/bb1 Execute on datanode ozone oz -deleteVolume o3:///hive -user bilbo +Test ozone shell (no scheme - RpcClient used by default) + Execute on datanode ozone oz -createVolume /hive -user bilbo -quota 100TB -root + ${result} = Execute on datanode ozone oz -listVolume / -user bilbo | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.volumeName=="hive")' + Should contain ${result} createdOn + Execute on datanode ozone oz -updateVolume /hive -user bill -quota 10TB + ${result} = Execute on datanode ozone oz -infoVolume /hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="hive") | .owner | .name' + Should Be Equal ${result} bill + ${result} = Execute on datanode ozone oz -infoVolume /hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="hive") | .quota | .size' + Should Be Equal ${result} 10 + Execute on datanode ozone oz -createBucket /hive/bb1 + ${result} = Execute on datanode ozone oz -infoBucket /hive/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .storageType' + Should Be Equal ${result} DISK + ${result} = Execute on datanode ozone oz -updateBucket /hive/bb1 -addAcl user:frodo:rw,group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .acls | .[] | select(.name=="samwise") | .type' + Should Be Equal ${result} GROUP + ${result} = Execute on datanode ozone oz -updateBucket /hive/bb1 -removeAcl group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. 
| select(.bucketName=="bb1") | .acls | .[] | select(.name=="frodo") | .type' + Should Be Equal ${result} USER + ${result} = Execute on datanode ozone oz -listBucket /hive/ | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.bucketName=="bb1") | .volumeName' + Should Be Equal ${result} hive + Execute on datanode ozone oz -deleteBucket /hive/bb1 + Execute on datanode ozone oz -deleteVolume /hive -user bilbo + *** Keywords *** Startup Ozone Cluster diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/Handler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/Handler.java index 7fe6bb86c14..a66e227d5cc 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/Handler.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/Handler.java @@ -69,13 +69,16 @@ public abstract class Handler { throw new OzoneClientException( "Ozone URI is needed to execute this command."); } - URIBuilder ozoneURI = new URIBuilder(uri); + URIBuilder ozoneURI = new URIBuilder(stringToUri(uri)); if (ozoneURI.getPort() == 0) { ozoneURI.setPort(Shell.DEFAULT_OZONE_PORT); } Configuration conf = new OzoneConfiguration(); String scheme = ozoneURI.getScheme(); + if (ozoneURI.getScheme() == null || scheme.isEmpty()) { + scheme = OZONE_URI_SCHEME; + } if (scheme.equals(OZONE_HTTP_SCHEME)) { if (ozoneURI.getHost() != null) { if (ozoneURI.getPort() == -1) { @@ -87,7 +90,7 @@ public abstract class Handler { } else { client = OzoneClientFactory.getRestClient(conf); } - } else if (scheme.equals(OZONE_URI_SCHEME) || scheme.isEmpty()) { + } else if (scheme.equals(OZONE_URI_SCHEME)) { if (ozoneURI.getHost() != null) { if (ozoneURI.getPort() == -1) { client = OzoneClientFactory.getRpcClient(ozoneURI.getHost()); @@ -103,4 +106,46 @@ public abstract class Handler { } return ozoneURI.build(); } + + /** Construct a URI from a String with unescaped special characters + * that have non-standard semantics. e.g. /, ?, #. A custom parsing + * is needed to prevent misbehavior. + * @param pathString The input path in string form + * @return URI + */ + private static URI stringToUri(String pathString) throws IOException { + // parse uri components + String scheme = null; + String authority = null; + int start = 0; + + // parse uri scheme, if any + int colon = pathString.indexOf(':'); + int slash = pathString.indexOf('/'); + if (colon > 0 && (slash == colon +1)) { + // has a non zero-length scheme + scheme = pathString.substring(0, colon); + start = colon + 1; + } + + // parse uri authority, if any + if (pathString.startsWith("//", start) && + (pathString.length()-start > 2)) { + start += 2; + int nextSlash = pathString.indexOf('/', start); + int authEnd = nextSlash > 0 ? nextSlash : pathString.length(); + authority = pathString.substring(start, authEnd); + start = authEnd; + } + // uri path is the rest of the string. ? or # are not interpreted, + // but any occurrence of them will be quoted by the URI ctor. + String path = pathString.substring(start, pathString.length()); + + // Construct the URI + try { + return new URI(scheme, authority, path, null, null); + } catch (URISyntaxException e) { + throw new IllegalArgumentException(e); + } + } } From ebe5853a458150b7e42fe7434851bfcbe25e354d Mon Sep 17 00:00:00 2001 From: Konstantin V Shvachko Date: Thu, 31 May 2018 14:56:32 -0700 Subject: [PATCH 002/113] HDFS-12978. Fine-grained locking while consuming journal stream. 
Contributed by Konstantin Shvachko. --- .../hdfs/server/namenode/FSEditLogLoader.java | 23 +++++++++++++--- .../hadoop/hdfs/server/namenode/FSImage.java | 16 ++++++----- .../server/namenode/ha/EditLogTailer.java | 27 +++++++++++++++++-- .../server/namenode/ha/TestEditLogTailer.java | 13 ++++++--- 4 files changed, 64 insertions(+), 15 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java index b0fe60a77bb..82e35bd353e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java @@ -138,7 +138,7 @@ public class FSEditLogLoader { long loadFSEdits(EditLogInputStream edits, long expectedStartingTxId) throws IOException { - return loadFSEdits(edits, expectedStartingTxId, null, null); + return loadFSEdits(edits, expectedStartingTxId, Long.MAX_VALUE, null, null); } /** @@ -147,6 +147,7 @@ public class FSEditLogLoader { * along. */ long loadFSEdits(EditLogInputStream edits, long expectedStartingTxId, + long maxTxnsToRead, StartupOption startOpt, MetaRecoveryContext recovery) throws IOException { StartupProgress prog = NameNode.getStartupProgress(); Step step = createStartupProgressStep(edits); @@ -154,9 +155,10 @@ public class FSEditLogLoader { fsNamesys.writeLock(); try { long startTime = monotonicNow(); - FSImage.LOG.info("Start loading edits file " + edits.getName()); + FSImage.LOG.info("Start loading edits file " + edits.getName() + + " maxTxnsToRead = " + maxTxnsToRead); long numEdits = loadEditRecords(edits, false, expectedStartingTxId, - startOpt, recovery); + maxTxnsToRead, startOpt, recovery); FSImage.LOG.info("Edits file " + edits.getName() + " of size " + edits.length() + " edits # " + numEdits + " loaded in " + (monotonicNow()-startTime)/1000 + " seconds"); @@ -171,8 +173,13 @@ public class FSEditLogLoader { long loadEditRecords(EditLogInputStream in, boolean closeOnExit, long expectedStartingTxId, StartupOption startOpt, MetaRecoveryContext recovery) throws IOException { - FSDirectory fsDir = fsNamesys.dir; + return loadEditRecords(in, closeOnExit, expectedStartingTxId, + Long.MAX_VALUE, startOpt, recovery); + } + long loadEditRecords(EditLogInputStream in, boolean closeOnExit, + long expectedStartingTxId, long maxTxnsToRead, StartupOption startOpt, + MetaRecoveryContext recovery) throws IOException { EnumMap> opCounts = new EnumMap>(FSEditLogOpCodes.class); @@ -181,6 +188,7 @@ public class FSEditLogLoader { } fsNamesys.writeLock(); + FSDirectory fsDir = fsNamesys.dir; fsDir.writeLock(); long recentOpcodeOffsets[] = new long[4]; @@ -285,6 +293,9 @@ public class FSEditLogLoader { } numEdits++; totalEdits++; + if(numEdits >= maxTxnsToRead) { + break; + } } catch (RollingUpgradeOp.RollbackException e) { LOG.info("Stopped at OP_START_ROLLING_UPGRADE for rollback."); break; @@ -308,7 +319,11 @@ public class FSEditLogLoader { if (FSImage.LOG.isDebugEnabled()) { dumpOpCounts(opCounts); + FSImage.LOG.debug("maxTxnsToRead = " + maxTxnsToRead + + " actual edits read = " + numEdits); } + assert numEdits <= maxTxnsToRead || numEdits == 1 : + "should read at least one txn, but not more than the configured max"; } return numEdits; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java index dd7df5ad6b4..5cfc0176f1d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java @@ -742,7 +742,8 @@ public class FSImage implements Closeable { prog.endPhase(Phase.LOADING_FSIMAGE); if (!rollingRollback) { - long txnsAdvanced = loadEdits(editStreams, target, startOpt, recovery); + long txnsAdvanced = loadEdits(editStreams, target, Long.MAX_VALUE, + startOpt, recovery); needToSave |= needsResaveBasedOnStaleCheckpoint(imageFile.getFile(), txnsAdvanced); } else { @@ -866,11 +867,12 @@ public class FSImage implements Closeable { */ public long loadEdits(Iterable editStreams, FSNamesystem target) throws IOException { - return loadEdits(editStreams, target, null, null); + return loadEdits(editStreams, target, Long.MAX_VALUE, null, null); } - private long loadEdits(Iterable editStreams, - FSNamesystem target, StartupOption startOpt, MetaRecoveryContext recovery) + public long loadEdits(Iterable editStreams, + FSNamesystem target, long maxTxnsToRead, + StartupOption startOpt, MetaRecoveryContext recovery) throws IOException { LOG.debug("About to load edits:\n " + Joiner.on("\n ").join(editStreams)); StartupProgress prog = NameNode.getStartupProgress(); @@ -885,14 +887,16 @@ public class FSImage implements Closeable { LOG.info("Reading " + editIn + " expecting start txid #" + (lastAppliedTxId + 1)); try { - loader.loadFSEdits(editIn, lastAppliedTxId + 1, startOpt, recovery); + loader.loadFSEdits(editIn, lastAppliedTxId + 1, maxTxnsToRead, + startOpt, recovery); } finally { // Update lastAppliedTxId even in case of error, since some ops may // have been successfully applied before the error. lastAppliedTxId = loader.getLastAppliedTxId(); } // If we are in recovery mode, we may have skipped over some txids. - if (editIn.getLastTxId() != HdfsServerConstants.INVALID_TXID) { + if (editIn.getLastTxId() != HdfsServerConstants.INVALID_TXID + && recovery != null) { lastAppliedTxId = editIn.getLastTxId(); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java index f57cb4bd939..73a111ea6c5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java @@ -73,7 +73,19 @@ import com.google.common.base.Preconditions; @InterfaceStability.Evolving public class EditLogTailer { public static final Log LOG = LogFactory.getLog(EditLogTailer.class); - + + /** + * StandbyNode will hold namesystem lock to apply at most this many journal + * transactions. + * It will then release the lock and re-acquire it to load more transactions. + * By default the write lock is held for the entire journal segment. + * Fine-grained locking allows read requests to get through. 
+ */ + public static final String DFS_HA_TAILEDITS_MAX_TXNS_PER_LOCK_KEY = + "dfs.ha.tail-edits.max-txns-per-lock"; + public static final long DFS_HA_TAILEDITS_MAX_TXNS_PER_LOCK_DEFAULT = + Long.MAX_VALUE; + private final EditLogTailerThread tailerThread; private final Configuration conf; @@ -138,6 +150,12 @@ public class EditLogTailer { */ private final boolean inProgressOk; + /** + * Release the namesystem lock after loading this many transactions. + * Then re-acquire the lock to load more edits. + */ + private final long maxTxnsPerLock; + public EditLogTailer(FSNamesystem namesystem, Configuration conf) { this.tailerThread = new EditLogTailerThread(); this.conf = conf; @@ -198,6 +216,10 @@ public class EditLogTailer { DFSConfigKeys.DFS_HA_TAILEDITS_INPROGRESS_KEY, DFSConfigKeys.DFS_HA_TAILEDITS_INPROGRESS_DEFAULT); + this.maxTxnsPerLock = conf.getLong( + DFS_HA_TAILEDITS_MAX_TXNS_PER_LOCK_KEY, + DFS_HA_TAILEDITS_MAX_TXNS_PER_LOCK_DEFAULT); + nnCount = nns.size(); // setup the iterator to endlessly loop the nns this.nnLookup = Iterators.cycle(nns); @@ -290,7 +312,8 @@ public class EditLogTailer { // disk are ignored. long editsLoaded = 0; try { - editsLoaded = image.loadEdits(streams, namesystem); + editsLoaded = image.loadEdits( + streams, namesystem, maxTxnsPerLock, null, null); } catch (EditLogInputException elie) { editsLoaded = elie.getNumEditsLoaded(); throw elie; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java index 61f890c549b..c88ac57c27f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java @@ -98,8 +98,9 @@ public class TestEditLogTailer { public void testTailer() throws IOException, InterruptedException, ServiceFailedException { Configuration conf = getConf(); - conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1); + conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 0); conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_ALL_NAMESNODES_RETRY_KEY, 100); + conf.setLong(EditLogTailer.DFS_HA_TAILEDITS_MAX_TXNS_PER_LOCK_KEY, 3); HAUtil.setAllowStandbyReads(conf, true); @@ -121,7 +122,10 @@ public class TestEditLogTailer { } HATestUtil.waitForStandbyToCatchUp(nn1, nn2); - + assertEquals("Inconsistent number of applied txns on Standby", + nn1.getNamesystem().getEditLog().getLastWrittenTxId(), + nn2.getNamesystem().getFSImage().getLastAppliedTxId() + 1); + for (int i = 0; i < DIRS_TO_MAKE / 2; i++) { assertTrue(NameNodeAdapter.getFileInfo(nn2, getDirPath(i), false, false, false).isDirectory()); @@ -134,7 +138,10 @@ public class TestEditLogTailer { } HATestUtil.waitForStandbyToCatchUp(nn1, nn2); - + assertEquals("Inconsistent number of applied txns on Standby", + nn1.getNamesystem().getEditLog().getLastWrittenTxId(), + nn2.getNamesystem().getFSImage().getLastAppliedTxId() + 1); + for (int i = DIRS_TO_MAKE / 2; i < DIRS_TO_MAKE; i++) { assertTrue(NameNodeAdapter.getFileInfo(nn2, getDirPath(i), false, false, false).isDirectory()); From c95b9b5c077c5b95649d195bd7385a76604863c1 Mon Sep 17 00:00:00 2001 From: Wei-Chiu Chuang Date: Thu, 31 May 2018 15:20:59 -0700 Subject: [PATCH 003/113] HDFS-13440. Log HDFS file name when client fails to connect. Contributed by Gabor Bota. 
--- .../src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java index b38e6299030..e2508735891 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java @@ -593,8 +593,8 @@ public class DFSInputStream extends FSInputStream fetchBlockAt(target); } else { connectFailedOnce = true; - DFSClient.LOG.warn("Failed to connect to {} for block {}, " + - "add to deadNodes and continue. ", targetAddr, + DFSClient.LOG.warn("Failed to connect to {} for file {} for block " + + "{}, add to deadNodes and continue. ", targetAddr, src, targetBlock.getBlock(), ex); // Put chosen node into dead list, continue addToDeadNodes(chosenNode); From 3f4a29813beccd85191886f4d7421c4f33180594 Mon Sep 17 00:00:00 2001 From: Wei-Chiu Chuang Date: Thu, 31 May 2018 15:26:08 -0700 Subject: [PATCH 004/113] HDFS-13646. DFSAdmin doesn't display specialized help for triggerBlockReport. Contributed by Takanobu Asanuma. --- .../src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java index f7935572182..99a8e3e7886 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java @@ -1333,6 +1333,8 @@ public class DFSAdmin extends FsShell { System.out.println(evictWriters); } else if ("getDatanodeInfo".equalsIgnoreCase(cmd)) { System.out.println(getDatanodeInfo); + } else if ("triggerBlockReport".equalsIgnoreCase(cmd)) { + System.out.println(triggerBlockReport); } else if ("listOpenFiles".equalsIgnoreCase(cmd)) { System.out.println(listOpenFiles); } else if ("help".equals(cmd)) { From 32671d87135f22707ea03c3f17e99d41d82c0a39 Mon Sep 17 00:00:00 2001 From: Wei-Chiu Chuang Date: Thu, 31 May 2018 16:09:33 -0700 Subject: [PATCH 005/113] HADOOP-14783. [KMS] Add missing configuration properties into kms-default.xml. Contributed by Chetna Chaudhari. --- .../src/main/resources/kms-default.xml | 35 +++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/hadoop-common-project/hadoop-kms/src/main/resources/kms-default.xml b/hadoop-common-project/hadoop-kms/src/main/resources/kms-default.xml index 7055f2df615..9f4171bf793 100644 --- a/hadoop-common-project/hadoop-kms/src/main/resources/kms-default.xml +++ b/hadoop-common-project/hadoop-kms/src/main/resources/kms-default.xml @@ -259,4 +259,39 @@ + + hadoop.kms.key.authorization.enable + true + Boolean property to Enable/Disable per Key authorization + + + + hadoop.security.kms.encrypted.key.cache.size + 100 + The size of the cache. This is the maximum number of EEKs that + can be cached under each key name. + + + + hadoop.security.kms.encrypted.key.cache.low.watermark + 0.3 + A low watermark on the cache. For each key name, if after a get call, + the number of cached EEKs are less than (size * low watermark), + then the cache under this key name will be filled asynchronously. 
+ For each key name, only 1 thread could be running for the asynchronous filling. + + + + hadoop.security.kms.encrypted.key.cache.num.fill.threads + 2 + The maximum number of asynchronous threads overall, across key names, + allowed to fill the queue in a cache. + + + + hadoop.security.kms.encrypted.key.cache.expiry + 43200000 + The cache expiry time, in milliseconds. Internally Guava cache is used as the cache implementation. + The expiry approach is expireAfterAccess + From 6b74f5d7fc509c55c331249256eec78b7e53b6ce Mon Sep 17 00:00:00 2001 From: "Vinod Kumar Vavilapalli (I am also known as @tshooter.)" Date: Thu, 31 May 2018 16:48:33 -0700 Subject: [PATCH 006/113] YARN-8197. Fixed AM IP Filter and Webapp proxy to redirect app tracking-URLs correctly when UI is secure. Contributed by Sunil Govindan. --- .../hadoop-yarn-server-web-proxy/pom.xml | 13 ++ .../server/webproxy/amfilter/AmIpFilter.java | 19 ++- .../webproxy/amfilter/TestSecureAmFilter.java | 159 ++++++++++++++++++ .../src/test/resources/krb5.conf | 33 ++++ 4 files changed, 220 insertions(+), 4 deletions(-) create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/amfilter/TestSecureAmFilter.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/resources/krb5.conf diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/pom.xml index 61e0429f9f2..0d1b92b0894 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/pom.xml @@ -51,6 +51,19 @@ test + + org.apache.hadoop + hadoop-auth + test-jar + test + + + + org.apache.hadoop + hadoop-minikdc + provided + + org.mockito mockito-all diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmIpFilter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmIpFilter.java index bd425a72902..c965283d6d5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmIpFilter.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmIpFilter.java @@ -20,6 +20,7 @@ package org.apache.hadoop.yarn.server.webproxy.amfilter; import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.Time; import org.apache.hadoop.yarn.server.webproxy.ProxyUtils; import org.apache.hadoop.yarn.server.webproxy.WebAppProxyServlet; @@ -216,15 +217,25 @@ public class AmIpFilter implements Filter { return addr; } - private boolean isValidUrl(String url) { + @VisibleForTesting + public boolean isValidUrl(String url) { boolean isValid = false; try { - HttpURLConnection conn = - (HttpURLConnection) new URL(url).openConnection(); + HttpURLConnection conn = (HttpURLConnection) new URL(url) + .openConnection(); conn.connect(); isValid = conn.getResponseCode() == HttpURLConnection.HTTP_OK; + // If 
security is enabled, any valid RM which can give 401 Unauthorized is + // good enough to access. Since AM doesn't have enough credential, auth + // cannot be completed and hence 401 is fine in such case. + if (!isValid && UserGroupInformation.isSecurityEnabled()) { + isValid = (conn + .getResponseCode() == HttpURLConnection.HTTP_UNAUTHORIZED) + || (conn.getResponseCode() == HttpURLConnection.HTTP_FORBIDDEN); + return isValid; + } } catch (Exception e) { - LOG.debug("Failed to connect to " + url + ": " + e.toString()); + LOG.warn("Failed to connect to " + url + ": " + e.toString()); } return isValid; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/amfilter/TestSecureAmFilter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/amfilter/TestSecureAmFilter.java new file mode 100644 index 00000000000..e87b76541e5 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/amfilter/TestSecureAmFilter.java @@ -0,0 +1,159 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.webproxy.amfilter; + +import java.io.File; +import java.net.URI; +import java.net.URL; +import java.util.Set; +import java.util.HashSet; +import java.util.HashMap; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; +import org.apache.hadoop.http.HttpServer2; +import org.apache.hadoop.minikdc.MiniKdc; +import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.authentication.KerberosTestUtils; +import org.apache.hadoop.security.authorize.AccessControlList; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.server.security.http.RMAuthenticationFilterInitializer; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.mockito.Mockito; + +/** + * Test AmIpFilter. Requests to a no declared hosts should has way through + * proxy. Another requests can be filtered with (without) user name. 
+ * + */ +public class TestSecureAmFilter { + + private String proxyHost = "localhost"; + private static final File TEST_ROOT_DIR = new File("target", + TestSecureAmFilter.class.getName() + "-root"); + private static File httpSpnegoKeytabFile = new File( + KerberosTestUtils.getKeytabFile()); + private static Configuration rmconf = new Configuration(); + private static String httpSpnegoPrincipal = KerberosTestUtils + .getServerPrincipal(); + private static boolean miniKDCStarted = false; + private static MiniKdc testMiniKDC; + + @BeforeClass + public static void setUp() { + rmconf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true); + rmconf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, + "kerberos"); + rmconf.setBoolean(YarnConfiguration.RM_WEBAPP_DELEGATION_TOKEN_AUTH_FILTER, + true); + rmconf.set("hadoop.http.filter.initializers", + RMAuthenticationFilterInitializer.class.getName()); + rmconf.set(YarnConfiguration.RM_WEBAPP_SPNEGO_USER_NAME_KEY, + httpSpnegoPrincipal); + rmconf.set(YarnConfiguration.RM_KEYTAB, + httpSpnegoKeytabFile.getAbsolutePath()); + rmconf.set(YarnConfiguration.RM_WEBAPP_SPNEGO_KEYTAB_FILE_KEY, + httpSpnegoKeytabFile.getAbsolutePath()); + UserGroupInformation.setConfiguration(rmconf); + try { + testMiniKDC = new MiniKdc(MiniKdc.createConf(), TEST_ROOT_DIR); + setupKDC(); + } catch (Exception e) { + assertTrue("Couldn't create MiniKDC", false); + } + } + + @AfterClass + public static void tearDown() { + if (testMiniKDC != null) { + testMiniKDC.stop(); + } + } + + private static void setupKDC() throws Exception { + if (!miniKDCStarted) { + testMiniKDC.start(); + getKdc().createPrincipal(httpSpnegoKeytabFile, "HTTP/localhost"); + miniKDCStarted = true; + } + } + + private static MiniKdc getKdc() { + return testMiniKDC; + } + + private class TestAmIpFilter extends AmIpFilter { + + private Set proxyAddresses = null; + + protected Set getProxyAddresses() { + if (proxyAddresses == null) { + proxyAddresses = new HashSet(); + } + proxyAddresses.add(proxyHost); + return proxyAddresses; + } + } + + @Test + public void testFindRedirectUrl() throws Exception { + final String rm1 = "rm1"; + final String rm2 = "rm2"; + // generate a valid URL + final String rm1Url = startSecureHttpServer(); + // invalid url + final String rm2Url = "host2:8088"; + + TestAmIpFilter filter = new TestAmIpFilter(); + TestAmIpFilter spy = Mockito.spy(filter); + // make sure findRedirectUrl() go to HA branch + spy.proxyUriBases = new HashMap<>(); + spy.proxyUriBases.put(rm1, rm1Url); + spy.proxyUriBases.put(rm2, rm2Url); + spy.rmUrls = new String[] {rm1, rm2}; + + assertTrue(spy.isValidUrl(rm1Url)); + assertFalse(spy.isValidUrl(rm2Url)); + assertEquals(spy.findRedirectUrl(), rm1Url); + } + + private String startSecureHttpServer() throws Exception { + HttpServer2.Builder builder = new HttpServer2.Builder() + .setName("test").setConf(rmconf) + .addEndpoint(new URI("http://localhost")).setACL( + new AccessControlList(rmconf.get(YarnConfiguration.YARN_ADMIN_ACL, + YarnConfiguration.DEFAULT_YARN_ADMIN_ACL))); + + builder.setUsernameConfKey(YarnConfiguration.RM_WEBAPP_SPNEGO_USER_NAME_KEY) + .setKeytabConfKey(YarnConfiguration.RM_WEBAPP_SPNEGO_KEYTAB_FILE_KEY) + .setSecurityEnabled(UserGroupInformation.isSecurityEnabled()); + HttpServer2 server = builder.build(); + server.start(); + URL baseUrl = new URL( + "http://" + NetUtils.getHostPortString(server.getConnectorAddress(0))); + return baseUrl.toString(); + } +} diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/resources/krb5.conf b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/resources/krb5.conf new file mode 100644 index 00000000000..6cdd3d6923f --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/resources/krb5.conf @@ -0,0 +1,33 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +[libdefaults] + default_realm = APACHE.ORG + extra_addresses = 127.0.0.1 + kdc_realm = _REALM_ + udp_preference_limit = _UDP_LIMIT_ + #_KDC_TCP_PORT_ + #_KDC_UDP_PORT_ + +[realms] + _REALM_ = { + admin_server = localhost:_KDC_PORT_ + kdc = localhost:_KDC_PORT_ + } +[domain_realm] + localhost = _REALM_ From ff013d2c952272f3176dcf624251b05d610503b5 Mon Sep 17 00:00:00 2001 From: Chao Sun Date: Thu, 31 May 2018 15:24:16 -0700 Subject: [PATCH 007/113] HDFS-13602. Add checkOperation(WRITE) checks in FSNamesystem. Contributed by Chao Sun. --- .../hadoop/hdfs/server/namenode/FSNamesystem.java | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 752c830f958..c02eb84d5f4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -1952,6 +1952,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean, if (!isInSafeMode() && res.updateAccessTime()) { String src = srcArg; + checkOperation(OperationCategory.WRITE); writeLock(); final long now = now(); try { @@ -2034,6 +2035,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean, FileStatus stat = null; boolean success = false; final FSPermissionChecker pc = getPermissionChecker(); + checkOperation(OperationCategory.WRITE); writeLock(); try { checkOperation(OperationCategory.WRITE); @@ -2137,6 +2139,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean, throw new UnsupportedOperationException("Symlinks not supported"); } FileStatus auditStat = null; + checkOperation(OperationCategory.WRITE); writeLock(); try { checkOperation(OperationCategory.WRITE); @@ -5306,6 +5309,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean, String clientName, ExtendedBlock oldBlock, ExtendedBlock newBlock, DatanodeID[] newNodes, String[] newStorageIDs, boolean logRetryCache) throws IOException { + checkOperation(OperationCategory.WRITE); LOG.info("updatePipeline(" + oldBlock.getLocalBlock() + ", newGS=" + 
newBlock.getGenerationStamp() + ", newLength=" + newBlock.getNumBytes() @@ -6605,6 +6609,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean, String rootPath = null; BlocksMapUpdateInfo blocksToBeDeleted = null; final FSPermissionChecker pc = getPermissionChecker(); + checkOperation(OperationCategory.WRITE); writeLock(); try { checkOperation(OperationCategory.WRITE); @@ -6645,6 +6650,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean, checkOperation(OperationCategory.READ); readLock(); try { + checkOperation(OperationCategory.READ); if (!isRollingUpgrade()) { return null; } @@ -6897,6 +6903,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean, if (!flags.contains(CacheFlag.FORCE)) { cacheManager.waitForRescanIfNeeded(); } + checkOperation(OperationCategory.WRITE); writeLock(); try { checkOperation(OperationCategory.WRITE); @@ -6928,6 +6935,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean, if (!flags.contains(CacheFlag.FORCE)) { cacheManager.waitForRescanIfNeeded(); } + checkOperation(OperationCategory.WRITE); writeLock(); try { checkOperation(OperationCategory.WRITE); @@ -6953,6 +6961,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean, final String operationName = "removeCacheDirective"; boolean success = false; String idStr = "{id: " + Long.toString(id) + "}"; + checkOperation(OperationCategory.WRITE); writeLock(); try { checkOperation(OperationCategory.WRITE); @@ -6997,6 +7006,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean, void addCachePool(CachePoolInfo req, boolean logRetryCache) throws IOException { final String operationName = "addCachePool"; + checkOperation(OperationCategory.WRITE); writeLock(); boolean success = false; String poolInfoStr = null; @@ -7021,6 +7031,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean, void modifyCachePool(CachePoolInfo req, boolean logRetryCache) throws IOException { final String operationName = "modifyCachePool"; + checkOperation(OperationCategory.WRITE); writeLock(); boolean success = false; String poolNameStr = "{poolName: " + @@ -7047,6 +7058,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean, void removeCachePool(String cachePoolName, boolean logRetryCache) throws IOException { final String operationName = "removeCachePool"; + checkOperation(OperationCategory.WRITE); writeLock(); boolean success = false; String poolNameStr = "{poolName: " + cachePoolName + "}"; From 7dd26d5378483cdbce1dc594e8650a96c2976281 Mon Sep 17 00:00:00 2001 From: Eric Yang Date: Thu, 31 May 2018 21:06:25 -0400 Subject: [PATCH 008/113] YARN-8308. Fixed YARN Service AM failure with HDFS token renewal. 
Contributed by Gour Saha --- .../hadoop/yarn/service/ServiceMaster.java | 56 +++++++++++++------ .../yarn/service/client/ServiceClient.java | 11 ++++ 2 files changed, 50 insertions(+), 17 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceMaster.java index 28881aca3f4..0caa119cf80 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceMaster.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceMaster.java @@ -71,8 +71,14 @@ public class ServiceMaster extends CompositeService { LoggerFactory.getLogger(ServiceMaster.class); public static final String YARNFILE_OPTION = "yarnfile"; + public static final String SERVICE_NAME_OPTION = "service_name"; + public static final String KEYTAB_OPTION = "keytab"; + public static final String PRINCIPAL_NAME_OPTION = "principal_name"; - private static String serviceDefPath; + private String serviceDefPath; + private String serviceName; + private String serviceKeytab; + private String servicePrincipalName; protected ServiceContext context; public ServiceMaster(String name) { @@ -85,15 +91,24 @@ public class ServiceMaster extends CompositeService { context = new ServiceContext(); Path appDir = getAppDir(); context.serviceHdfsDir = appDir.toString(); - SliderFileSystem fs = new SliderFileSystem(conf); - context.fs = fs; - fs.setAppDir(appDir); - loadApplicationJson(context, fs); - context.tokens = recordTokensForContainers(); + Credentials credentials = null; if (UserGroupInformation.isSecurityEnabled()) { + credentials = UserGroupInformation.getCurrentUser().getCredentials(); doSecureLogin(); } + SliderFileSystem fs = new SliderFileSystem(conf); + fs.setAppDir(appDir); + context.fs = fs; + loadApplicationJson(context, fs); + if (UserGroupInformation.isSecurityEnabled()) { + // add back the credentials + if (credentials != null) { + UserGroupInformation.getCurrentUser().addCredentials(credentials); + } + removeHdfsDelegationToken(UserGroupInformation.getLoginUser()); + } + // Take yarn config from YarnFile and merge them into YarnConfiguration for (Map.Entry entry : context.service .getConfiguration().getProperties().entrySet()) { @@ -157,13 +172,12 @@ public class ServiceMaster extends CompositeService { private void doSecureLogin() throws IOException, URISyntaxException { // read the localized keytab specified by user - File keytab = new File(String.format(KEYTAB_LOCATION, - context.service.getName())); + File keytab = new File(String.format(KEYTAB_LOCATION, getServiceName())); if (!keytab.exists()) { LOG.info("No keytab localized at " + keytab); // Check if there exists a pre-installed keytab at host - String preInstalledKeytab = context.service.getKerberosPrincipal() - .getKeytab(); + String preInstalledKeytab = context.service == null ? 
this.serviceKeytab + : context.service.getKerberosPrincipal().getKeytab(); if (!StringUtils.isEmpty(preInstalledKeytab)) { URI uri = new URI(preInstalledKeytab); if (uri.getScheme().equals("file")) { @@ -177,29 +191,24 @@ public class ServiceMaster extends CompositeService { LOG.info("No keytab exists: " + keytab); return; } - String principal = context.service.getKerberosPrincipal() - .getPrincipalName(); + String principal = context.service == null ? this.servicePrincipalName + : context.service.getKerberosPrincipal().getPrincipalName(); if (StringUtils.isEmpty((principal))) { principal = UserGroupInformation.getLoginUser().getShortUserName(); LOG.info("No principal name specified. Will use AM " + "login identity {} to attempt keytab-based login", principal); } - Credentials credentials = UserGroupInformation.getCurrentUser() - .getCredentials(); LOG.info("User before logged in is: " + UserGroupInformation .getCurrentUser()); String principalName = SecurityUtil.getServerPrincipal(principal, ServiceUtils.getLocalHostName(getConfig())); UserGroupInformation.loginUserFromKeytab(principalName, keytab.getAbsolutePath()); - // add back the credentials - UserGroupInformation.getCurrentUser().addCredentials(credentials); LOG.info("User after logged in is: " + UserGroupInformation .getCurrentUser()); context.principal = principalName; context.keytab = keytab.getAbsolutePath(); - removeHdfsDelegationToken(UserGroupInformation.getLoginUser()); } // Remove HDFS delegation token from login user and ensure AM to use keytab @@ -231,6 +240,10 @@ public class ServiceMaster extends CompositeService { return new Path(serviceDefPath).getParent(); } + protected String getServiceName() { + return serviceName; + } + protected ServiceScheduler createServiceScheduler(ServiceContext context) throws IOException, YarnException { return new ServiceScheduler(context); @@ -310,9 +323,18 @@ public class ServiceMaster extends CompositeService { opts.addOption(YARNFILE_OPTION, true, "HDFS path to JSON service " + "specification"); opts.getOption(YARNFILE_OPTION).setRequired(true); + opts.addOption(SERVICE_NAME_OPTION, true, "Service name"); + opts.getOption(SERVICE_NAME_OPTION).setRequired(true); + opts.addOption(KEYTAB_OPTION, true, "Service AM keytab"); + opts.addOption(PRINCIPAL_NAME_OPTION, true, + "Service AM keytab principal"); GenericOptionsParser parser = new GenericOptionsParser(conf, opts, args); CommandLine cmdLine = parser.getCommandLine(); serviceMaster.serviceDefPath = cmdLine.getOptionValue(YARNFILE_OPTION); + serviceMaster.serviceName = cmdLine.getOptionValue(SERVICE_NAME_OPTION); + serviceMaster.serviceKeytab = cmdLine.getOptionValue(KEYTAB_OPTION); + serviceMaster.servicePrincipalName = cmdLine + .getOptionValue(PRINCIPAL_NAME_OPTION); serviceMaster.init(conf); serviceMaster.start(); } catch (Throwable t) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java index e86ecbc61aa..c86f5de5947 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java @@ -871,6 +871,17 @@ public class ServiceClient extends AppAdminClient implements SliderExitCodes, //TODO debugAM CLI.add(Arguments.ARG_DEBUG) CLI.add("-" + ServiceMaster.YARNFILE_OPTION, new Path(appRootDir, app.getName() + ".json")); + CLI.add("-" + ServiceMaster.SERVICE_NAME_OPTION, app.getName()); + if (app.getKerberosPrincipal() != null) { + if (!StringUtils.isEmpty(app.getKerberosPrincipal().getKeytab())) { + CLI.add("-" + ServiceMaster.KEYTAB_OPTION, + app.getKerberosPrincipal().getKeytab()); + } + if (!StringUtils.isEmpty(app.getKerberosPrincipal().getPrincipalName())) { + CLI.add("-" + ServiceMaster.PRINCIPAL_NAME_OPTION, + app.getKerberosPrincipal().getPrincipalName()); + } + } // pass the registry binding CLI.addConfOptionToCLI(conf, RegistryConstants.KEY_REGISTRY_ZK_ROOT, RegistryConstants.DEFAULT_ZK_REGISTRY_ROOT); From 6b21a599fbd53e3d6113fcd9a984f6ac59ccd302 Mon Sep 17 00:00:00 2001 From: Yufei Gu Date: Thu, 31 May 2018 22:28:49 -0700 Subject: [PATCH 009/113] YARN-7340. Fix the missing time stamp in exception message in Class NoOverCommitPolicy. Contributed by Dinesh Chitlangia. --- .../resourcemanager/reservation/NoOverCommitPolicy.java | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/NoOverCommitPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/NoOverCommitPolicy.java index 98ef5828760..7792e54ed9c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/NoOverCommitPolicy.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/NoOverCommitPolicy.java @@ -55,8 +55,9 @@ public class NoOverCommitPolicy implements SharingPolicy { reservation.getStartTime(), reservation.getEndTime()); } catch (PlanningException p) { throw new ResourceOverCommitException( - "Resources at time " + " would be overcommitted by " - + "accepting reservation: " + reservation.getReservationId()); + "Resources at time " + reservation.getStartTime() + + " would be overcommitted by accepting reservation: " + + reservation.getReservationId(), p); } } From 19560bb70421fda094f3f2af09e67501099b7ab7 Mon Sep 17 00:00:00 2001 From: Anu Engineer Date: Fri, 1 Jun 2018 12:54:47 -0700 Subject: [PATCH 010/113] HDDS-137. Cleanup Hdds-ozone CheckStyle Issues. Contributed by Anu Engineer. 
--- .../common/helpers/ContainerData.java | 3 ++- .../common/impl/ContainerManagerImpl.java | 8 ++++--- .../container/common/impl/KeyManagerImpl.java | 12 ++++++---- .../server/ratis/ContainerStateMachine.java | 3 ++- .../common/utils/ContainerCache.java | 6 +++-- .../ozone/container/common/package-info.java | 22 +++++++++++++++++++ 6 files changed, 43 insertions(+), 11 deletions(-) create mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/package-info.java diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java index d1746f2bde6..020f45d5895 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java @@ -285,7 +285,8 @@ public class ContainerData { * * @return String Name. */ - // TODO: check the ContainerCache class to see if we are using the ContainerID instead. + // TODO: check the ContainerCache class to see if + // we are using the ContainerID instead. /* public String getName() { return getContainerID(); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java index af470153244..b09d324e2f8 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java @@ -314,7 +314,8 @@ public class ContainerManagerImpl implements ContainerManager { writeLock(); try { if (containerMap.containsKey(containerData.getContainerID())) { - LOG.debug("container already exists. {}", containerData.getContainerID()); + LOG.debug("container already exists. {}", + containerData.getContainerID()); throw new StorageContainerException("container already exists.", CONTAINER_EXISTS); } @@ -595,7 +596,8 @@ public class ContainerManagerImpl implements ContainerManager { @Override public void updateContainer(long containerID, ContainerData data, boolean forceUpdate) throws StorageContainerException { - Preconditions.checkState(containerID >= 0, "Container ID cannot be negative."); + Preconditions.checkState(containerID >= 0, + "Container ID cannot be negative."); Preconditions.checkNotNull(data, "Container data cannot be null"); FileOutputStream containerStream = null; DigestOutputStream dos = null; @@ -711,7 +713,7 @@ public class ContainerManagerImpl implements ContainerManager { } /** - * Returns LifeCycle State of the container + * Returns LifeCycle State of the container. 
* @param containerID - Id of the container * @return LifeCycle State of the container * @throws StorageContainerException diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/KeyManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/KeyManagerImpl.java index 0ca73545f6f..40ae1c70a7d 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/KeyManagerImpl.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/KeyManagerImpl.java @@ -72,8 +72,10 @@ public class KeyManagerImpl implements KeyManager { */ @Override public void putKey(KeyData data) throws IOException { - Preconditions.checkNotNull(data, "KeyData cannot be null for put operation."); - Preconditions.checkState(data.getContainerID() >= 0, "Container ID cannot be negative"); + Preconditions.checkNotNull(data, + "KeyData cannot be null for put operation."); + Preconditions.checkState(data.getContainerID() >= 0, + "Container ID cannot be negative"); containerManager.readLock(); try { // We are not locking the key manager since LevelDb serializes all actions @@ -169,8 +171,10 @@ public class KeyManagerImpl implements KeyManager { public List listKey( long containerID, long startLocalID, int count) throws IOException { - Preconditions.checkState(containerID >= 0, "Container ID cannot be negative"); - Preconditions.checkState(startLocalID >= 0, "startLocal ID cannot be negative"); + Preconditions.checkState(containerID >= 0, + "Container ID cannot be negative"); + Preconditions.checkState(startLocalID >= 0, + "startLocal ID cannot be negative"); Preconditions.checkArgument(count > 0, "Count must be a positive number."); ContainerData cData = containerManager.readContainer(containerID); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java index 56c52bb80e1..176407d66d2 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java @@ -265,7 +265,8 @@ public class ContainerStateMachine extends BaseStateMachine { Message message = runCommand(requestProto); if (cmdType == ContainerProtos.Type.CreateContainer) { long containerID = - requestProto.getCreateContainer().getContainerData().getContainerID(); + requestProto.getCreateContainer() + .getContainerData().getContainerID(); createContainerFutureMap.remove(containerID).complete(message); } return CompletableFuture.completedFuture(message); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java index 4d9c6903111..7f611b92bcd 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java @@ -125,7 +125,8 @@ public final class ContainerCache extends LRUMap { */ public MetadataStore getDB(long 
containerID, String containerDBPath) throws IOException { - Preconditions.checkState(containerID >= 0, "Container ID cannot be negative."); + Preconditions.checkState(containerID >= 0, + "Container ID cannot be negative."); lock.lock(); try { MetadataStore db = (MetadataStore) this.get(containerID); @@ -153,7 +154,8 @@ public final class ContainerCache extends LRUMap { * @param containerID - ID of the container. */ public void removeDB(long containerID) { - Preconditions.checkState(containerID >= 0, "Container ID cannot be negative."); + Preconditions.checkState(containerID >= 0, + "Container ID cannot be negative."); lock.lock(); try { MetadataStore db = (MetadataStore)this.get(containerID); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/package-info.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/package-info.java new file mode 100644 index 00000000000..ca3d29dada1 --- /dev/null +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/package-info.java @@ -0,0 +1,22 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +/** + * SCM Testing and Mocking Utils. 
+ */ +package org.apache.hadoop.ozone.container.common; \ No newline at end of file From 1be05a3623da22ed053ed9898df23c85981772e7 Mon Sep 17 00:00:00 2001 From: Anu Engineer Date: Fri, 1 Jun 2018 14:21:35 -0700 Subject: [PATCH 011/113] =?UTF-8?q?HDDS-142.=20TestMetadataStore=20fails?= =?UTF-8?q?=20on=20Windows.=20Contributed=20by=20=20=C3=8D=C3=B1igo=20Goir?= =?UTF-8?q?i.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../apache/hadoop/ozone/TestMetadataStore.java | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/TestMetadataStore.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/TestMetadataStore.java index 6b26b60350e..a946c097a78 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/TestMetadataStore.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/TestMetadataStore.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.ozone; +import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows; + import com.google.common.collect.Lists; import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.tuple.ImmutablePair; @@ -81,6 +83,11 @@ public class TestMetadataStore { @Before public void init() throws IOException { + if (OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB.equals(storeImpl)) { + // The initialization of RocksDB fails on Windows + assumeNotWindows(); + } + testDir = GenericTestUtils.getTestDir(getClass().getSimpleName() + "-" + storeImpl.toLowerCase()); @@ -104,9 +111,13 @@ public class TestMetadataStore { @After public void cleanup() throws IOException { - store.close(); - store.destroy(); - FileUtils.deleteDirectory(testDir); + if (store != null) { + store.close(); + store.destroy(); + } + if (testDir != null) { + FileUtils.deleteDirectory(testDir); + } } private byte[] getBytes(String str) { From cba319499822a2475c60c43ea71f8e78237e139f Mon Sep 17 00:00:00 2001 From: Daniel Templeton Date: Fri, 1 Jun 2018 14:42:39 -0700 Subject: [PATCH 012/113] HDFS-13636. Cross-Site Scripting vulnerability in HttpServer2 (Contributed by Haibo Yan via Daniel Templeton) Change-Id: I28edde8125dd20d8d270f0e609d1c04d8173c8b7 --- .../src/main/java/org/apache/hadoop/http/HttpServer2.java | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java index c273c7852b4..2435671a31a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java @@ -1420,8 +1420,11 @@ public final class HttpServer2 implements FilterContainer { if (servletContext.getAttribute(ADMINS_ACL) != null && !userHasAdministratorAccess(servletContext, remoteUser)) { - response.sendError(HttpServletResponse.SC_FORBIDDEN, "User " - + remoteUser + " is unauthorized to access this page."); + response.sendError(HttpServletResponse.SC_FORBIDDEN, + "Unauthenticated users are not " + + "authorized to access this page."); + LOG.warn("User " + remoteUser + " is unauthorized to access the page " + + request.getRequestURI() + "."); return false; } From 3a6bd775500343632bad5f9c1a2bfacd408f7760 Mon Sep 17 00:00:00 2001 From: Wangda Tan Date: Fri, 1 Jun 2018 13:58:46 -0700 Subject: [PATCH 013/113] YARN-8384. 
stdout.txt, stderr.txt logs of a launched docker container is coming with primary group of submit user instead of hadoop. (Eric Yang via wangda) Change-Id: Idbb2cd250f4f62dc32993e0d3ca3ec5684616baa --- .../main/native/container-executor/impl/container-executor.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c index 7b622235bcd..1b8842a01c9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c @@ -1140,8 +1140,8 @@ char *init_log_path(const char *container_log_dir, const char *logfile) { int fd = open(tmp_buffer, O_CREAT | O_WRONLY, permissions); if (fd >= 0) { close(fd); - if (change_owner(tmp_buffer, user_detail->pw_uid, user_detail->pw_gid) != 0) { - fprintf(ERRORFILE, "Failed to chown %s to %d:%d: %s\n", tmp_buffer, user_detail->pw_uid, user_detail->pw_gid, + if (change_owner(tmp_buffer, user_detail->pw_uid, nm_gid) != 0) { + fprintf(ERRORFILE, "Failed to chown %s to %d:%d: %s\n", tmp_buffer, user_detail->pw_uid, nm_gid, strerror(errno)); free(tmp_buffer); tmp_buffer = NULL; From 931f78718f3a09775bfa1f9a952c069c416d0914 Mon Sep 17 00:00:00 2001 From: Wangda Tan Date: Fri, 1 Jun 2018 14:00:18 -0700 Subject: [PATCH 014/113] YARN-7962. Race Condition When Stopping DelegationTokenRenewer causes RM crash during failover. 
(BELUGA BEHR via wangda) Change-Id: I617e2645f60a57080058ad5f06af860fb3f682c8 --- .../security/DelegationTokenRenewer.java | 10 +++++++- .../security/TestDelegationTokenRenewer.java | 24 +++++++++++++++++++ 2 files changed, 33 insertions(+), 1 deletion(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java index 220787c1604..a9f8cd16bee 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java @@ -209,7 +209,15 @@ public class DelegationTokenRenewer extends AbstractService { } appTokens.clear(); allTokens.clear(); - this.renewerService.shutdown(); + + serviceStateLock.writeLock().lock(); + try { + isServiceStarted = false; + this.renewerService.shutdown(); + } finally { + serviceStateLock.writeLock().unlock(); + } + dtCancelThread.interrupt(); try { dtCancelThread.join(1000); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java index 8351860df2b..9b2c0b327f1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java @@ -1420,4 +1420,28 @@ public class TestDelegationTokenRenewer { delegationTokenRenewer.setTimerForTokenRenewal(mockDttr); assertNull(mockDttr.timerTask); } + + /** + * Test that the DelegationTokenRenewer class can gracefully handle + * interactions that occur when it has been stopped. + */ + @Test + public void testShutDown() { + DelegationTokenRenewer dtr = createNewDelegationTokenRenewer(conf, counter); + RMContext mockContext = mock(RMContext.class); + when(mockContext.getSystemCredentialsForApps()).thenReturn( + new ConcurrentHashMap()); + ClientRMService mockClientRMService = mock(ClientRMService.class); + when(mockContext.getClientRMService()).thenReturn(mockClientRMService); + InetSocketAddress sockAddr = + InetSocketAddress.createUnresolved("localhost", 1234); + when(mockClientRMService.getBindAddress()).thenReturn(sockAddr); + dtr.setRMContext(mockContext); + when(mockContext.getDelegationTokenRenewer()).thenReturn(dtr); + dtr.init(conf); + dtr.start(); + delegationTokenRenewer.stop(); + delegationTokenRenewer.applicationFinished( + BuilderUtils.newApplicationId(0, 1)); + } } From 8956e5b8db3059e0872e49f59adc6affc76e2274 Mon Sep 17 00:00:00 2001 From: Wangda Tan Date: Fri, 1 Jun 2018 14:05:02 -0700 Subject: [PATCH 015/113] YARN-8372. 
Distributed shell app master should not release containers when shutdown if keep-container is true. (Suma Shivaprasad via wangda) Change-Id: Ief04d1ca865621f348fba4ac85fa78bc47465904 --- .../distributedshell/ApplicationMaster.java | 24 ++++++++++++++++--- .../applications/distributedshell/Client.java | 14 +++++++---- 2 files changed, 31 insertions(+), 7 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java index bc018b1d964..76fa38f922a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java @@ -269,6 +269,8 @@ public class ApplicationMaster { private String containerResourceProfile = ""; Map resourceProfiles; + private boolean keepContainersAcrossAttempts = false; + // Counter for completed containers ( complete denotes successful or failed ) private AtomicInteger numCompletedContainers = new AtomicInteger(); // Allocated container count so that we know how many containers has the RM @@ -483,6 +485,13 @@ public class ApplicationMaster { + " the number of container retry attempts"); opts.addOption("placement_spec", true, "Placement specification"); opts.addOption("debug", false, "Dump out debug information"); + opts.addOption("keep_containers_across_application_attempts", false, + "Flag to indicate whether to keep containers across application " + + "attempts." + + " If the flag is true, running containers will not be killed when" + + " application attempt fails and these containers will be " + + "retrieved by" + + " the new application attempt "); opts.addOption("help", false, "Print usage"); CommandLine cliParser = new GnuParser().parse(opts, args); @@ -646,6 +655,9 @@ public class ApplicationMaster { containerResourceProfile = cliParser.getOptionValue("container_resource_profile", ""); + keepContainersAcrossAttempts = cliParser.hasOption( + "keep_containers_across_application_attempts"); + if (this.placementSpecs == null) { numTotalContainers = Integer.parseInt(cliParser.getOptionValue( "num_containers", "1")); @@ -1152,9 +1164,15 @@ public class ApplicationMaster { } } - @Override - public void onShutdownRequest() { - done = true; + @Override public void onShutdownRequest() { + if (keepContainersAcrossAttempts) { + LOG.info("Shutdown request received. Ignoring since " + + "keep_containers_across_application_attempts is enabled"); + } else{ + LOG.info("Shutdown request received. 
Processing since " + + "keep_containers_across_application_attempts is disabled"); + done = true; + } } @Override diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java index 61879d0374c..976e6a33362 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java @@ -332,10 +332,12 @@ public class Client { + " containers to guaranteed."); opts.addOption("log_properties", true, "log4j.properties file"); opts.addOption("keep_containers_across_application_attempts", false, - "Flag to indicate whether to keep containers across application attempts." + - " If the flag is true, running containers will not be killed when" + - " application attempt fails and these containers will be retrieved by" + - " the new application attempt "); + "Flag to indicate whether to keep containers across application " + + "attempts." + + " If the flag is true, running containers will not be killed when" + + " application attempt fails and these containers will be " + + "retrieved by" + + " the new application attempt "); opts.addOption("attempt_failures_validity_interval", true, "when attempt_failures_validity_interval in milliseconds is set to > 0," + "the failure number will not take failures which happen out of " + @@ -891,6 +893,10 @@ public class Client { } vargs.add("--priority " + String.valueOf(shellCmdPriority)); + if (keepContainers) { + vargs.add("--keep_containers_across_application_attempts"); + } + for (Map.Entry entry : shellEnv.entrySet()) { vargs.add("--shell_env " + entry.getKey() + "=" + entry.getValue()); } From ff583d3fa3325029bc691ec22d817aee37e5e85d Mon Sep 17 00:00:00 2001 From: Wangda Tan Date: Fri, 1 Jun 2018 14:07:23 -0700 Subject: [PATCH 016/113] YARN-8349. Remove YARN registry entries when a service is killed by the RM. 
(Billie Rinaldi via wangda) Change-Id: Ia58db3637789a8921482f564aa9bdf99c45cc36c --- hadoop-project/pom.xml | 7 + .../hadoop-yarn-services-api/pom.xml | 16 +++ .../yarn/service/client/ApiServiceClient.java | 11 ++ .../yarn/service/TestCleanupAfterKill.java | 94 ++++++++++++ .../src/test/resources/yarn-site.xml | 19 +++ .../yarn/service/client/ServiceClient.java | 24 ++++ .../hadoop/yarn/service/ServiceTestUtils.java | 135 ++++++++++++++++++ .../yarn/service/TestYarnNativeServices.java | 129 ----------------- .../yarn/client/api/AppAdminClient.java | 12 ++ .../client/binding/RegistryUtils.java | 10 ++ .../resourcemanager/rmapp/RMAppImpl.java | 30 ++++ 11 files changed, 358 insertions(+), 129 deletions(-) create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/TestCleanupAfterKill.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/resources/yarn-site.xml rename hadoop-yarn-project/hadoop-yarn/{hadoop-yarn-client => hadoop-yarn-common}/src/main/java/org/apache/hadoop/yarn/client/api/AppAdminClient.java (96%) diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index 59a9bd2b891..12897a7f3e1 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -450,6 +450,13 @@ ${hadoop.version} + + org.apache.hadoop + hadoop-yarn-services-core + ${hadoop.version} + test-jar + + org.apache.hadoop hadoop-mapreduce-client-jobclient diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/pom.xml index 45168a9fbc4..ab762187830 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/pom.xml @@ -139,6 +139,22 @@ junit test + + org.apache.hadoop + hadoop-yarn-services-core + test-jar + test + + + org.apache.hadoop + hadoop-minicluster + test + + + org.apache.curator + curator-test + test + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/ApiServiceClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/ApiServiceClient.java index a8e2f511f8e..18d45fae781 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/ApiServiceClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/ApiServiceClient.java @@ -588,6 +588,17 @@ public class ApiServiceClient extends AppAdminClient { return result; } + @Override + public int actionCleanUp(String appName, String userName) throws + IOException, YarnException { + ServiceClient sc = new ServiceClient(); + sc.init(getConfig()); + sc.start(); + int result = sc.actionCleanUp(appName, userName); + sc.close(); + return result; + } + private static final JsonSerDeser CONTAINER_JSON_SERDE = new JsonSerDeser<>(Container[].class, 
PropertyNamingStrategy.CAMEL_CASE_TO_LOWER_CASE_WITH_UNDERSCORES); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/TestCleanupAfterKill.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/TestCleanupAfterKill.java new file mode 100644 index 00000000000..51e834a34d9 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/TestCleanupAfterKill.java @@ -0,0 +1,94 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.service; + +import org.apache.commons.io.FileUtils; +import org.apache.hadoop.registry.client.binding.RegistryUtils; +import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ApplicationReport; +import org.apache.hadoop.yarn.api.records.YarnApplicationState; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.service.api.records.Service; +import org.apache.hadoop.yarn.service.client.ServiceClient; +import org.apache.hadoop.yarn.service.conf.YarnServiceConstants; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.io.IOException; + +/** + * Minicluster test that verifies registry cleanup when app lifetime is + * exceeded. + */ +public class TestCleanupAfterKill extends ServiceTestUtils { + private static final Logger LOG = + LoggerFactory.getLogger(TestCleanupAfterKill.class); + + @Rule + public TemporaryFolder tmpFolder = new TemporaryFolder(); + + @Before + public void setup() throws Exception { + File tmpYarnDir = new File("target", "tmp"); + FileUtils.deleteQuietly(tmpYarnDir); + } + + @After + public void tearDown() throws IOException { + shutdown(); + } + + @Test(timeout = 200000) + public void testRegistryCleanedOnLifetimeExceeded() throws Exception { + setupInternal(NUM_NMS); + ServiceClient client = createClient(getConf()); + Service exampleApp = createExampleApplication(); + exampleApp.setLifetime(30L); + client.actionCreate(exampleApp); + waitForServiceToBeStable(client, exampleApp); + String serviceZKPath = RegistryUtils.servicePath(RegistryUtils + .currentUser(), YarnServiceConstants.APP_TYPE, exampleApp.getName()); + Assert.assertTrue("Registry ZK service path doesn't exist", + getCuratorService().zkPathExists(serviceZKPath)); + + // wait for app to be killed by RM + ApplicationId exampleAppId = ApplicationId.fromString(exampleApp.getId()); + GenericTestUtils.waitFor(() -> { + try { + ApplicationReport ar = client.getYarnClient() + .getApplicationReport(exampleAppId); + return ar.getYarnApplicationState() == YarnApplicationState.KILLED; + } catch (YarnException | IOException e) { + throw new RuntimeException("while waiting", e); + } + }, 2000, 200000); + Assert.assertFalse("Registry ZK service path still exists after killed", + getCuratorService().zkPathExists(serviceZKPath)); + + LOG.info("Destroy the service"); + Assert.assertEquals(0, client.actionDestroy(exampleApp.getName())); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/resources/yarn-site.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/resources/yarn-site.xml new file mode 100644 index 00000000000..daac23adcd4 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/resources/yarn-site.xml @@ -0,0 +1,19 
@@ + + + + + + + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java index c86f5de5947..3f6e8966e4f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java @@ -308,6 +308,16 @@ public class ServiceClient extends AppAdminClient implements SliderExitCodes, return actionUpgrade(persistedService, containersToUpgrade); } + @Override + public int actionCleanUp(String appName, String userName) throws + IOException, YarnException { + if (cleanUpRegistry(appName, userName)) { + return EXIT_SUCCESS; + } else { + return EXIT_FALSE; + } + } + public int actionUpgrade(Service service, List compInstances) throws IOException, YarnException { ApplicationReport appReport = @@ -639,9 +649,23 @@ public class ServiceClient extends AppAdminClient implements SliderExitCodes, } } + private boolean cleanUpRegistry(String serviceName, String user) throws + SliderException { + String encodedName = RegistryUtils.registryUser(user); + + String registryPath = RegistryUtils.servicePath(encodedName, + YarnServiceConstants.APP_TYPE, serviceName); + return cleanUpRegistryPath(registryPath, serviceName); + } + private boolean cleanUpRegistry(String serviceName) throws SliderException { String registryPath = ServiceRegistryUtils.registryPathForInstance(serviceName); + return cleanUpRegistryPath(registryPath, serviceName); + } + + private boolean cleanUpRegistryPath(String registryPath, String + serviceName) throws SliderException { try { if (getRegistryClient().exists(registryPath)) { getRegistryClient().delete(registryPath, true); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/ServiceTestUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/ServiceTestUtils.java index 86b4cea649c..3d1412dfe71 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/ServiceTestUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/ServiceTestUtils.java @@ -19,6 +19,8 @@ package org.apache.hadoop.yarn.service; import com.google.common.base.Throwables; +import com.google.common.collect.HashMultimap; +import com.google.common.collect.Multimap; import org.apache.commons.io.FileUtils; import org.apache.curator.test.TestingCluster; import org.apache.hadoop.conf.Configuration; @@ -29,13 +31,17 @@ import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.http.HttpServer2; import org.apache.hadoop.registry.client.impl.zk.CuratorService; import org.apache.hadoop.service.ServiceOperations; +import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.yarn.api.records.LocalResource; 
import org.apache.hadoop.yarn.client.api.YarnClient; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.MiniYARNCluster; import org.apache.hadoop.yarn.service.api.records.Component; +import org.apache.hadoop.yarn.service.api.records.Container; +import org.apache.hadoop.yarn.service.api.records.ContainerState; import org.apache.hadoop.yarn.service.api.records.Resource; import org.apache.hadoop.yarn.service.api.records.Service; +import org.apache.hadoop.yarn.service.api.records.ServiceState; import org.apache.hadoop.yarn.service.client.ServiceClient; import org.apache.hadoop.yarn.service.conf.YarnServiceConf; import org.apache.hadoop.yarn.service.exceptions.SliderException; @@ -60,6 +66,7 @@ import java.nio.file.Paths; import java.util.ArrayList; import java.util.List; import java.util.Map; +import java.util.concurrent.TimeoutException; import static org.apache.hadoop.registry.client.api.RegistryConstants.KEY_REGISTRY_ZK_QUORUM; import static org.apache.hadoop.yarn.conf.YarnConfiguration.DEBUG_NM_DELETE_DELAY_SEC; @@ -418,4 +425,132 @@ public class ServiceTestUtils { return serviceBasePath; } } + + /** + * Wait until all the containers for all components become ready state. + * + * @param client + * @param exampleApp + * @return all ready containers of a service. + * @throws TimeoutException + * @throws InterruptedException + */ + protected Multimap waitForAllCompToBeReady(ServiceClient + client, Service exampleApp) throws TimeoutException, + InterruptedException { + int expectedTotalContainers = countTotalContainers(exampleApp); + + Multimap allContainers = HashMultimap.create(); + + GenericTestUtils.waitFor(() -> { + try { + Service retrievedApp = client.getStatus(exampleApp.getName()); + int totalReadyContainers = 0; + allContainers.clear(); + LOG.info("Num Components " + retrievedApp.getComponents().size()); + for (Component component : retrievedApp.getComponents()) { + LOG.info("looking for " + component.getName()); + LOG.info(component.toString()); + if (component.getContainers() != null) { + if (component.getContainers().size() == exampleApp + .getComponent(component.getName()).getNumberOfContainers()) { + for (Container container : component.getContainers()) { + LOG.info( + "Container state " + container.getState() + ", component " + + component.getName()); + if (container.getState() == ContainerState.READY) { + totalReadyContainers++; + allContainers.put(component.getName(), container.getId()); + LOG.info("Found 1 ready container " + container.getId()); + } + } + } else { + LOG.info(component.getName() + " Expected number of containers " + + exampleApp.getComponent(component.getName()) + .getNumberOfContainers() + ", current = " + component + .getContainers()); + } + } + } + LOG.info("Exit loop, totalReadyContainers= " + totalReadyContainers + + " expected = " + expectedTotalContainers); + return totalReadyContainers == expectedTotalContainers; + } catch (Exception e) { + e.printStackTrace(); + return false; + } + }, 2000, 200000); + return allContainers; + } + + /** + * Wait until service state becomes stable. A service is stable when all + * requested containers of all components are running and in ready state. 
+ * + * @param client + * @param exampleApp + * @throws TimeoutException + * @throws InterruptedException + */ + protected void waitForServiceToBeStable(ServiceClient client, + Service exampleApp) throws TimeoutException, InterruptedException { + waitForServiceToBeStable(client, exampleApp, 200000); + } + + protected void waitForServiceToBeStable(ServiceClient client, + Service exampleApp, int waitForMillis) + throws TimeoutException, InterruptedException { + waitForServiceToBeInState(client, exampleApp, ServiceState.STABLE, + waitForMillis); + } + + /** + * Wait until service is started. It does not have to reach a stable state. + * + * @param client + * @param exampleApp + * @throws TimeoutException + * @throws InterruptedException + */ + protected void waitForServiceToBeStarted(ServiceClient client, + Service exampleApp) throws TimeoutException, InterruptedException { + waitForServiceToBeInState(client, exampleApp, ServiceState.STARTED); + } + + protected void waitForServiceToBeInState(ServiceClient client, + Service exampleApp, ServiceState desiredState) throws TimeoutException, + InterruptedException { + waitForServiceToBeInState(client, exampleApp, desiredState, 200000); + } + + /** + * Wait until service is started. It does not have to reach a stable state. + * + * @param client + * @param exampleApp + * @throws TimeoutException + * @throws InterruptedException + */ + protected void waitForServiceToBeInState(ServiceClient client, + Service exampleApp, ServiceState desiredState, int waitForMillis) throws + TimeoutException, InterruptedException { + GenericTestUtils.waitFor(() -> { + try { + Service retrievedApp = client.getStatus(exampleApp.getName()); + System.out.println(retrievedApp); + return retrievedApp.getState() == desiredState; + } catch (Exception e) { + e.printStackTrace(); + return false; + } + }, 2000, waitForMillis); + } + + private int countTotalContainers(Service service) { + int totalContainers = 0; + for (Component component : service.getComponents()) { + totalContainers += component.getNumberOfContainers(); + } + return totalContainers; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java index ae209b929ed..8b13b2495b8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java @@ -18,7 +18,6 @@ package org.apache.hadoop.yarn.service; -import com.google.common.collect.HashMultimap; import com.google.common.collect.Multimap; import org.apache.commons.io.FileUtils; import org.apache.hadoop.fs.Path; @@ -36,7 +35,6 @@ import org.apache.hadoop.yarn.service.api.records.Component; import org.apache.hadoop.yarn.service.api.records.ComponentState; import org.apache.hadoop.yarn.service.api.records.Configuration; import org.apache.hadoop.yarn.service.api.records.Container; -import org.apache.hadoop.yarn.service.api.records.ContainerState; import org.apache.hadoop.yarn.service.api.records.PlacementConstraint; import 
org.apache.hadoop.yarn.service.api.records.PlacementPolicy; import org.apache.hadoop.yarn.service.api.records.PlacementScope; @@ -806,131 +804,4 @@ public class TestYarnNativeServices extends ServiceTestUtils { i++; } } - - /** - * Wait until all the containers for all components become ready state. - * - * @param client - * @param exampleApp - * @return all ready containers of a service. - * @throws TimeoutException - * @throws InterruptedException - */ - private Multimap waitForAllCompToBeReady(ServiceClient client, - Service exampleApp) throws TimeoutException, InterruptedException { - int expectedTotalContainers = countTotalContainers(exampleApp); - - Multimap allContainers = HashMultimap.create(); - - GenericTestUtils.waitFor(() -> { - try { - Service retrievedApp = client.getStatus(exampleApp.getName()); - int totalReadyContainers = 0; - allContainers.clear(); - LOG.info("Num Components " + retrievedApp.getComponents().size()); - for (Component component : retrievedApp.getComponents()) { - LOG.info("looking for " + component.getName()); - LOG.info(component.toString()); - if (component.getContainers() != null) { - if (component.getContainers().size() == exampleApp - .getComponent(component.getName()).getNumberOfContainers()) { - for (Container container : component.getContainers()) { - LOG.info( - "Container state " + container.getState() + ", component " - + component.getName()); - if (container.getState() == ContainerState.READY) { - totalReadyContainers++; - allContainers.put(component.getName(), container.getId()); - LOG.info("Found 1 ready container " + container.getId()); - } - } - } else { - LOG.info(component.getName() + " Expected number of containers " - + exampleApp.getComponent(component.getName()) - .getNumberOfContainers() + ", current = " + component - .getContainers()); - } - } - } - LOG.info("Exit loop, totalReadyContainers= " + totalReadyContainers - + " expected = " + expectedTotalContainers); - return totalReadyContainers == expectedTotalContainers; - } catch (Exception e) { - e.printStackTrace(); - return false; - } - }, 2000, 200000); - return allContainers; - } - - /** - * Wait until service state becomes stable. A service is stable when all - * requested containers of all components are running and in ready state. - * - * @param client - * @param exampleApp - * @throws TimeoutException - * @throws InterruptedException - */ - private void waitForServiceToBeStable(ServiceClient client, - Service exampleApp) throws TimeoutException, InterruptedException { - waitForServiceToBeStable(client, exampleApp, 200000); - } - - private void waitForServiceToBeStable(ServiceClient client, - Service exampleApp, int waitForMillis) - throws TimeoutException, InterruptedException { - waitForServiceToBeInState(client, exampleApp, ServiceState.STABLE, - waitForMillis); - } - - /** - * Wait until service is started. It does not have to reach a stable state. - * - * @param client - * @param exampleApp - * @throws TimeoutException - * @throws InterruptedException - */ - private void waitForServiceToBeStarted(ServiceClient client, - Service exampleApp) throws TimeoutException, InterruptedException { - waitForServiceToBeInState(client, exampleApp, ServiceState.STARTED); - } - - private void waitForServiceToBeInState(ServiceClient client, - Service exampleApp, ServiceState desiredState) throws TimeoutException, - InterruptedException { - waitForServiceToBeInState(client, exampleApp, desiredState, 200000); - } - - /** - * Wait until service is started. 
It does not have to reach a stable state. - * - * @param client - * @param exampleApp - * @throws TimeoutException - * @throws InterruptedException - */ - private void waitForServiceToBeInState(ServiceClient client, - Service exampleApp, ServiceState desiredState, int waitForMillis) throws - TimeoutException, InterruptedException { - GenericTestUtils.waitFor(() -> { - try { - Service retrievedApp = client.getStatus(exampleApp.getName()); - System.out.println(retrievedApp); - return retrievedApp.getState() == desiredState; - } catch (Exception e) { - e.printStackTrace(); - return false; - } - }, 2000, waitForMillis); - } - - private int countTotalContainers(Service service) { - int totalContainers = 0; - for (Component component : service.getComponents()) { - totalContainers += component.getNumberOfContainers(); - } - return totalContainers; - } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AppAdminClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/AppAdminClient.java similarity index 96% rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AppAdminClient.java rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/AppAdminClient.java index 91f899c82af..3cd1a787103 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AppAdminClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/AppAdminClient.java @@ -270,4 +270,16 @@ public abstract class AppAdminClient extends CompositeService { public abstract int actionUpgradeComponents(String appName, List components) throws IOException, YarnException; + /** + * Operation to be performed by the RM after an application has completed. + * + * @param appName the name of the application. + * @param userName the name of the user. + * @return exit code + */ + @Public + @Unstable + public abstract int actionCleanUp(String appName, String userName) throws + IOException, YarnException; + } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryUtils.java index 4ef7b8d404b..fcfc5bf570f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryUtils.java @@ -296,6 +296,16 @@ public class RegistryUtils { */ public static String currentUser() { String shortUserName = currentUsernameUnencoded(); + return registryUser(shortUserName); + } + + /** + * Convert the given user name formatted for the registry. 
+ * + * @param shortUserName + * @return converted user name + */ + public static String registryUser(String shortUserName) { String encodedName = encodeForRegistry(shortUserName); // DNS name doesn't allow "_", replace it with "-" encodedName = RegistryUtils.convertUsername(encodedName); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java index 6aee8132962..73191562c97 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java @@ -65,6 +65,7 @@ import org.apache.hadoop.yarn.api.records.ReservationId; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceRequest; import org.apache.hadoop.yarn.api.records.YarnApplicationState; +import org.apache.hadoop.yarn.client.api.AppAdminClient; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.event.Dispatcher; import org.apache.hadoop.yarn.event.EventHandler; @@ -1470,6 +1471,33 @@ public class RMAppImpl implements RMApp, Recoverable { }; } + /** + * Attempt to perform a type-specific cleanup after application has completed. + * + * @param app application to clean up + */ + static void appAdminClientCleanUp(RMAppImpl app) { + try { + AppAdminClient client = AppAdminClient.createAppAdminClient(app + .applicationType, app.conf); + int result = client.actionCleanUp(app.name, app.user); + if (result == 0) { + LOG.info("Type-specific cleanup of application " + app.applicationId + + " of type " + app.applicationType + " succeeded"); + } else { + LOG.warn("Type-specific cleanup of application " + app.applicationId + + " of type " + app.applicationType + " did not succeed with exit" + + " code " + result); + } + } catch (IllegalArgumentException e) { + // no AppAdminClient class has been specified for the application type, + // so this does not need to be logged + } catch (Exception e) { + LOG.warn("Could not run type-specific cleanup on application " + + app.applicationId + " of type " + app.applicationType, e); + } + } + private static class FinalTransition extends RMAppTransition { private final RMAppState finalState; @@ -1504,6 +1532,8 @@ public class RMAppImpl implements RMApp, Recoverable { .appFinished(app, finalState, app.finishTime); // set the memory free app.clearUnusedFields(); + + appAdminClientCleanUp(app); }; } From 9e50dce46c22d78f28d3ea91dcdb6e9b3bcd6047 Mon Sep 17 00:00:00 2001 From: Anu Engineer Date: Fri, 1 Jun 2018 14:42:03 -0700 Subject: [PATCH 017/113] HDDS-144. Fix TestEndPoint#testHeartbeat and TestEndPoint#testRegister. Contributed by Shashikant Banerjee. 
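
Note for reviewers: the change below updates ScmTestMock so that heartbeat responses echo the datanode UUID and register calls record the submitted container reports, which the two failing tests appear to exercise. A hedged sketch of re-running just those tests locally (the module path is inferred from the diffstat, not stated in this patch; adjust it if the tests live elsewhere):

```
# Run only the endpoint tests against the updated mock SCM;
# add -am if upstream modules have not been built yet.
mvn -pl hadoop-hdds/container-service test -Dtest=TestEndPoint
```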
--- .../ozone/container/common/ScmTestMock.java | 32 +++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java index 0ee6321c992..14da9601e45 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java @@ -16,6 +16,7 @@ */ package org.apache.hadoop.ozone.container.common; +import com.google.common.base.Preconditions; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; import org.apache.hadoop.hdds.scm.VersionInfo; import org.apache.hadoop.hdds.protocol.DatanodeDetails; @@ -179,6 +180,7 @@ public class ScmTestMock implements StorageContainerDatanodeProtocol { List cmdResponses = new LinkedList<>(); return SCMHeartbeatResponseProto.newBuilder().addAllCommands(cmdResponses) + .setDatanodeUUID(heartbeat.getDatanodeDetails().getUuid()) .build(); } @@ -197,6 +199,7 @@ public class ScmTestMock implements StorageContainerDatanodeProtocol { throws IOException { rpcCount.incrementAndGet(); updateNodeReport(datanodeDetailsProto, nodeReport); + updateContainerReport(containerReportsRequestProto, datanodeDetailsProto); sleepIfNeeded(); return StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto .newBuilder().setClusterID(UUID.randomUUID().toString()) @@ -227,6 +230,35 @@ public class ScmTestMock implements StorageContainerDatanodeProtocol { } + /** + * Update the cotainerReport. + * + * @param reports Container report + * @param datanodeDetails DataNode Info + * @throws IOException + */ + public void updateContainerReport( + StorageContainerDatanodeProtocolProtos.ContainerReportsProto reports, + DatanodeDetailsProto datanodeDetails) throws IOException { + Preconditions.checkNotNull(reports); + containerReportsCount.incrementAndGet(); + DatanodeDetails datanode = DatanodeDetails.getFromProtoBuf( + datanodeDetails); + if (reports.getReportsCount() > 0) { + Map containers = nodeContainers.get(datanode); + if (containers == null) { + containers = new LinkedHashMap(); + nodeContainers.put(datanode, containers); + } + + for (StorageContainerDatanodeProtocolProtos.ContainerInfo report : reports + .getReportsList()) { + containers.put(report.getContainerID(), report); + } + } + } + + /** * Return the number of StorageReports of a datanode. * @param datanodeDetails From 34710c66d61bb09ffcd5c4cb3b9c92506b272720 Mon Sep 17 00:00:00 2001 From: Anu Engineer Date: Fri, 1 Jun 2018 15:07:25 -0700 Subject: [PATCH 018/113] HDDS-143. Provide docker compose files to measure performance in a pseudo cluster. Contributed by Elek, Marton. 
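
As a quick orientation, the intended workflow with these compose files is roughly the following. The commands are taken from the README added below; the working directory is an assumption (wherever the ozoneperf compose files land in the dist output):

```
# Download the Prometheus JMX exporter agent used by every daemon (see init.sh).
./init.sh
# Start namenode, scm, ksm and a datanode plus Consul and Prometheus.
docker-compose up -d
# Optionally scale out datanodes and add the Freon load generator.
docker-compose scale datanode=3
./compose-all.sh up -d
# Metrics can then be queried at http://localhost:9090/graph, e.g.
#   rate(Hadoop_Ozone_BYTES_WRITTEN[10m])
```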
--- hadoop-dist/src/main/compose/ozoneperf/.env | 17 ++++ .../src/main/compose/ozoneperf/README.md | 73 ++++++++++++++++++ .../src/main/compose/ozoneperf/compose-all.sh | 18 +++++ .../ozoneperf/docker-compose-freon.yaml | 26 +++++++ .../compose/ozoneperf/docker-compose.yaml | 77 +++++++++++++++++++ .../src/main/compose/ozoneperf/docker-config | 37 +++++++++ .../src/main/compose/ozoneperf/init.sh | 21 +++++ .../src/main/compose/ozoneperf/prometheus.yml | 24 ++++++ 8 files changed, 293 insertions(+) create mode 100644 hadoop-dist/src/main/compose/ozoneperf/.env create mode 100644 hadoop-dist/src/main/compose/ozoneperf/README.md create mode 100755 hadoop-dist/src/main/compose/ozoneperf/compose-all.sh create mode 100644 hadoop-dist/src/main/compose/ozoneperf/docker-compose-freon.yaml create mode 100644 hadoop-dist/src/main/compose/ozoneperf/docker-compose.yaml create mode 100644 hadoop-dist/src/main/compose/ozoneperf/docker-config create mode 100755 hadoop-dist/src/main/compose/ozoneperf/init.sh create mode 100644 hadoop-dist/src/main/compose/ozoneperf/prometheus.yml diff --git a/hadoop-dist/src/main/compose/ozoneperf/.env b/hadoop-dist/src/main/compose/ozoneperf/.env new file mode 100644 index 00000000000..cac418ae59e --- /dev/null +++ b/hadoop-dist/src/main/compose/ozoneperf/.env @@ -0,0 +1,17 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +HDDS_VERSION=${hdds.version} diff --git a/hadoop-dist/src/main/compose/ozoneperf/README.md b/hadoop-dist/src/main/compose/ozoneperf/README.md new file mode 100644 index 00000000000..a78f2087328 --- /dev/null +++ b/hadoop-dist/src/main/compose/ozoneperf/README.md @@ -0,0 +1,73 @@ + + +# Compose files for local performance tests + +This directory contains docker-compose definition for an ozone cluster where +all the metrics are saved to a prometheus instance. + + Prometheus follows a pull based approach where the metrics are published + on a HTTP endpoint. + + Our current approach: + + 1. A Java agent activates a prometheus metrics endpoint in every JVM instance + (use `init.sh` to download the agent) + + 2. The Java agent publishes all the jmx parameters in prometheus format AND + register the endpoint address to the consul. + + 3. Prometheus polls all the endpoints which are registered to consul. + + + +## How to use + +First of all download the required Java agent with running `./init.sh` + +After that you can start the cluster with docker-compose: + +``` +docker-compose up -d +``` + +After a while the cluster will be started. 
You can check the ozone web ui-s: + +https://localhost:9874 +https://localhost:9876 + +You can also scale up the datanodes: + +``` +docker-compose scale datanode=3 +``` + +Freon (Ozone test generator tool) is not part of docker-compose by default, +you can activate it using `compose-all.sh` instead of `docker-compose`: + +``` +compose-all.sh up -d +``` + +Now Freon is running. Let's try to check the metrics from the local Prometheus: + +http://localhost:9090/graph + +Example queries: + +``` +Hadoop_KeySpaceManager_NumKeyCommits +rate(Hadoop_KeySpaceManager_NumKeyCommits[10m]) +rate(Hadoop_Ozone_BYTES_WRITTEN[10m]) +``` diff --git a/hadoop-dist/src/main/compose/ozoneperf/compose-all.sh b/hadoop-dist/src/main/compose/ozoneperf/compose-all.sh new file mode 100755 index 00000000000..82ab8b3101d --- /dev/null +++ b/hadoop-dist/src/main/compose/ozoneperf/compose-all.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +docker-compose -f docker-compose.yaml -f docker-compose-freon.yaml "$@" diff --git a/hadoop-dist/src/main/compose/ozoneperf/docker-compose-freon.yaml b/hadoop-dist/src/main/compose/ozoneperf/docker-compose-freon.yaml new file mode 100644 index 00000000000..60bdc4a503f --- /dev/null +++ b/hadoop-dist/src/main/compose/ozoneperf/docker-compose-freon.yaml @@ -0,0 +1,26 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +version: "3" +services: + freon: + image: apache/hadoop-runner + volumes: + - ../../ozone:/opt/hadoop + - ./jmxpromo.jar:/opt/jmxpromo.jar + env_file: + - ./docker-config + command: ["/opt/hadoop/bin/ozone","freon"] diff --git a/hadoop-dist/src/main/compose/ozoneperf/docker-compose.yaml b/hadoop-dist/src/main/compose/ozoneperf/docker-compose.yaml new file mode 100644 index 00000000000..fb7873bf880 --- /dev/null +++ b/hadoop-dist/src/main/compose/ozoneperf/docker-compose.yaml @@ -0,0 +1,77 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +version: "3" +services: + namenode: + image: apache/hadoop-runner + hostname: namenode + volumes: + - ../../ozone:/opt/hadoop + - ./jmxpromo.jar:/opt/jmxpromo.jar + ports: + - 9870:9870 + environment: + ENSURE_NAMENODE_DIR: /data/namenode + env_file: + - ./docker-config + command: ["/opt/hadoop/bin/hdfs","namenode"] + datanode: + image: apache/hadoop-runner + volumes: + - ../../ozone:/opt/hadoop + - ./jmxpromo.jar:/opt/jmxpromo.jar + ports: + - 9864 + command: ["/opt/hadoop/bin/ozone","datanode"] + env_file: + - ./docker-config + ksm: + image: apache/hadoop-runner + volumes: + - ../../ozone:/opt/hadoop + - ./jmxpromo.jar:/opt/jmxpromo.jar + ports: + - 9874:9874 + environment: + ENSURE_KSM_INITIALIZED: /data/metadata/ksm/current/VERSION + env_file: + - ./docker-config + command: ["/opt/hadoop/bin/ozone","ksm"] + scm: + image: apache/hadoop-runner + volumes: + - ../../ozone:/opt/hadoop + - ./jmxpromo.jar:/opt/jmxpromo.jar + ports: + - 9876:9876 + env_file: + - ./docker-config + environment: + ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION + command: ["/opt/hadoop/bin/ozone","scm"] + consul: + image: consul + command: ["agent", "-dev", "-ui", "-client", "0.0.0.0"] + ports: + - 8500:8500 + prometheus: + image: prom/prometheus + volumes: + - "./prometheus.yml:/etc/prometheus.yml" + command: ["--config.file","/etc/prometheus.yml"] + ports: + - 9090:9090 diff --git a/hadoop-dist/src/main/compose/ozoneperf/docker-config b/hadoop-dist/src/main/compose/ozoneperf/docker-config new file mode 100644 index 00000000000..e4f5485ac57 --- /dev/null +++ b/hadoop-dist/src/main/compose/ozoneperf/docker-config @@ -0,0 +1,37 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +CORE-SITE.XML_fs.defaultFS=hdfs://namenode:9000 +OZONE-SITE.XML_ozone.ksm.address=ksm +OZONE-SITE.XML_ozone.scm.names=scm +OZONE-SITE.XML_ozone.enabled=True +OZONE-SITE.XML_ozone.scm.datanode.id=/data/datanode.id +OZONE-SITE.XML_ozone.scm.block.client.address=scm +OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata +OZONE-SITE.XML_ozone.handler.type=distributed +OZONE-SITE.XML_ozone.scm.client.address=scm +OZONE-SITE.XML_hdds.datanode.plugins=org.apache.hadoop.ozone.web.OzoneHddsDatanodeService +HDFS-SITE.XML_dfs.namenode.rpc-address=namenode:9000 +HDFS-SITE.XML_dfs.namenode.name.dir=/data/namenode +HDFS-SITE.XML_rpc.metrics.quantile.enable=true +HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300 +HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.HddsDatanodeService +LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout +LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender +LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout +LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n +HADOOP_OPTS=-javaagent:/opt/jmxpromo.jar=port=0:consulHost=consul:consulMode=node + diff --git a/hadoop-dist/src/main/compose/ozoneperf/init.sh b/hadoop-dist/src/main/compose/ozoneperf/init.sh new file mode 100755 index 00000000000..cf25398bc20 --- /dev/null +++ b/hadoop-dist/src/main/compose/ozoneperf/init.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +EXPORTER_FILE="$DIR/jmxpromo.jar" +if [ ! -f "$EXPORTER_FILE" ]; then + wget https://github.com/flokkr/jmxpromo/releases/download/0.11/jmx_prometheus_javaagent-0.11.jar -O $EXPORTER_FILE +fi diff --git a/hadoop-dist/src/main/compose/ozoneperf/prometheus.yml b/hadoop-dist/src/main/compose/ozoneperf/prometheus.yml new file mode 100644 index 00000000000..80aa5203a20 --- /dev/null +++ b/hadoop-dist/src/main/compose/ozoneperf/prometheus.yml @@ -0,0 +1,24 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+global: + scrape_interval: 15s # By default, scrape targets every 15 seconds. + +scrape_configs: + - job_name: jmxexporter + consul_sd_configs: + - server: consul:8500 + services: + - jmxexporter From 4880d890ee4716dafc5ff464c92a3c6d83b1db9b Mon Sep 17 00:00:00 2001 From: Haibo Chen Date: Fri, 1 Jun 2018 16:28:13 -0700 Subject: [PATCH 019/113] YARN-8375. TestCGroupElasticMemoryController fails surefire build. (Miklos Szegedi via Haibo Chen) --- .../hadoop/test/PlatformAssumptions.java | 8 +++ .../TestCGroupElasticMemoryController.java | 65 +++++++++++-------- 2 files changed, 45 insertions(+), 28 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/PlatformAssumptions.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/PlatformAssumptions.java index 4e831625023..fdbb71f5f1d 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/PlatformAssumptions.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/PlatformAssumptions.java @@ -25,6 +25,7 @@ import org.junit.internal.AssumptionViolatedException; public final class PlatformAssumptions { public static final String OS_NAME = System.getProperty("os.name"); public static final boolean WINDOWS = OS_NAME.startsWith("Windows"); + public static final boolean MAC_OS = OS_NAME.startsWith("Mac OS X"); private PlatformAssumptions() { } @@ -44,4 +45,11 @@ public final class PlatformAssumptions { "Expected Windows platform but got " + OS_NAME); } } + + public static void assumeMacOS() { + if (!MAC_OS) { + throw new AssumptionViolatedException( + "Expected MacOS platform but got " + OS_NAME); + } + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupElasticMemoryController.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupElasticMemoryController.java index 118d1723a1c..c263c79a65b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupElasticMemoryController.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupElasticMemoryController.java @@ -18,6 +18,8 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources; import org.apache.commons.io.FileUtils; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; @@ -36,11 +38,14 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; +import static org.apache.hadoop.test.PlatformAssumptions.assumeMacOS; /** * Test for elastic non-strict memory controller based on cgroups. 
*/ public class TestCGroupElasticMemoryController { + protected static final Log LOG = LogFactory + .getLog(TestCGroupElasticMemoryController.class); private YarnConfiguration conf = new YarnConfiguration(); private File script = new File("target/" + TestCGroupElasticMemoryController.class.getName()); @@ -52,15 +57,14 @@ public class TestCGroupElasticMemoryController { @Test(expected = YarnException.class) public void testConstructorOff() throws YarnException { - CGroupElasticMemoryController controller = - new CGroupElasticMemoryController( - conf, - null, - null, - false, - false, - 10000 - ); + new CGroupElasticMemoryController( + conf, + null, + null, + false, + false, + 10000 + ); } /** @@ -74,22 +78,21 @@ public class TestCGroupElasticMemoryController { DummyRunnableWithContext.class, Runnable.class); CGroupsHandler handler = mock(CGroupsHandler.class); when(handler.getPathForCGroup(any(), any())).thenReturn(""); - CGroupElasticMemoryController controller = - new CGroupElasticMemoryController( - conf, - null, - handler, - true, - false, - 10000 - ); + new CGroupElasticMemoryController( + conf, + null, + handler, + true, + false, + 10000 + ); } /** * Test that the handler is notified about multiple OOM events. * @throws Exception on exception */ - @Test + @Test(timeout = 20000) public void testMultipleOOMEvents() throws Exception { conf.set(YarnConfiguration.NM_ELASTIC_MEMORY_CONTROL_OOM_LISTENER_PATH, script.getAbsolutePath()); @@ -131,7 +134,7 @@ public class TestCGroupElasticMemoryController { * the child process starts * @throws Exception one exception */ - @Test + @Test(timeout = 20000) public void testStopBeforeStart() throws Exception { conf.set(YarnConfiguration.NM_ELASTIC_MEMORY_CONTROL_OOM_LISTENER_PATH, script.getAbsolutePath()); @@ -173,7 +176,7 @@ public class TestCGroupElasticMemoryController { * Test the edge case that OOM is never resolved. * @throws Exception on exception */ - @Test(expected = YarnRuntimeException.class) + @Test(timeout = 20000, expected = YarnRuntimeException.class) public void testInfiniteOOM() throws Exception { conf.set(YarnConfiguration.NM_ELASTIC_MEMORY_CONTROL_OOM_LISTENER_PATH, script.getAbsolutePath()); @@ -215,7 +218,7 @@ public class TestCGroupElasticMemoryController { * containers. * @throws Exception on exception */ - @Test(expected = YarnRuntimeException.class) + @Test(timeout = 20000, expected = YarnRuntimeException.class) public void testNothingToKill() throws Exception { conf.set(YarnConfiguration.NM_ELASTIC_MEMORY_CONTROL_OOM_LISTENER_PATH, script.getAbsolutePath()); @@ -258,13 +261,16 @@ public class TestCGroupElasticMemoryController { * Then we wait for 2 seconds and stop listening. 
* @throws Exception exception occurred */ - @Test + @Test(timeout = 20000) public void testNormalExit() throws Exception { + // TODO This may hang on Linux + assumeMacOS(); conf.set(YarnConfiguration.NM_ELASTIC_MEMORY_CONTROL_OOM_LISTENER_PATH, script.getAbsolutePath()); + ExecutorService service = Executors.newFixedThreadPool(1); try { FileUtils.writeStringToFile(script, - "#!/bin/bash\nsleep 10000;\n", + "#!/bin/bash\nsleep 10000;", Charset.defaultCharset(), false); assertTrue("Could not set executable", script.setExecutable(true)); @@ -287,17 +293,21 @@ public class TestCGroupElasticMemoryController { 10000, handler ); - ExecutorService service = Executors.newFixedThreadPool(1); + long start = System.currentTimeMillis(); service.submit(() -> { try { Thread.sleep(2000); } catch (InterruptedException ex) { assertTrue("Wait interrupted.", false); } + LOG.info(String.format("Calling process destroy in %d ms", + System.currentTimeMillis() - start)); controller.stopListening(); + LOG.info("Called process destroy."); }); controller.run(); } finally { + service.shutdown(); assertTrue(String.format("Could not clean up script %s", script.getAbsolutePath()), script.delete()); } @@ -312,8 +322,7 @@ public class TestCGroupElasticMemoryController { public void testDefaultConstructor() throws YarnException{ CGroupsHandler handler = mock(CGroupsHandler.class); when(handler.getPathForCGroup(any(), any())).thenReturn(""); - CGroupElasticMemoryController controller = - new CGroupElasticMemoryController( - conf, null, handler, true, false, 10); + new CGroupElasticMemoryController( + conf, null, handler, true, false, 10); } } From e11d67404945d80db2eb8a99453606419dbdc938 Mon Sep 17 00:00:00 2001 From: Inigo Goiri Date: Fri, 1 Jun 2018 16:59:04 -0700 Subject: [PATCH 020/113] HDFS-13637. RBF: Router fails when threadIndex (in ConnectionPool) wraps around Integer.MIN_VALUE. Contributed by CR Hota. --- .../server/federation/router/ConnectionPool.java | 12 +++++++++++- .../federation/router/TestConnectionManager.java | 13 +++++++++++++ 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java index 6b416dd25e5..5fcde5b2e85 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java @@ -29,6 +29,7 @@ import java.util.concurrent.atomic.AtomicInteger; import javax.net.SocketFactory; +import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; @@ -148,6 +149,14 @@ public class ConnectionPool { return this.connectionPoolId; } + /** + * Get the clientIndex used to calculate index for lookup. + */ + @VisibleForTesting + public AtomicInteger getClientIndex() { + return this.clientIndex; + } + /** * Return the next connection round-robin. 
* @@ -161,7 +170,8 @@ public class ConnectionPool { ConnectionContext conn = null; List tmpConnections = this.connections; int size = tmpConnections.size(); - int threadIndex = this.clientIndex.getAndIncrement(); + // Inc and mask off sign bit, lookup index should be non-negative int + int threadIndex = this.clientIndex.getAndIncrement() & 0x7FFFFFFF; for (int i=0; i poolMap = connManager.getPools(); From d5e69d89942fd5dcfafd56bf653565557ff85501 Mon Sep 17 00:00:00 2001 From: Rohith Sharma K S Date: Sat, 2 Jun 2018 09:27:03 +0530 Subject: [PATCH 021/113] MapReduce JHS should honor yarn.webapp.filter-entity-list-by-user(addendum). Contributed by Sunil Govindan. --- .../mapreduce/v2/hs/webapp/HsJobBlock.java | 18 +-------- .../mapreduce/v2/hs/webapp/HsJobsBlock.java | 40 ++++++++++++++++++- .../mapreduce/v2/hs/webapp/TestBlocks.java | 17 ++++++-- .../v2/hs/webapp/TestHsJobBlock.java | 20 +--------- 4 files changed, 56 insertions(+), 39 deletions(-) diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java index 9b845cd99c3..18040f00440 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java @@ -27,8 +27,6 @@ import static org.apache.hadoop.yarn.webapp.view.JQueryUI._TH; import java.util.Date; import java.util.List; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.mapreduce.JobACL; import org.apache.hadoop.mapreduce.TaskID; import org.apache.hadoop.mapreduce.v2.api.records.AMInfo; import org.apache.hadoop.mapreduce.v2.api.records.JobId; @@ -41,10 +39,8 @@ import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.JobInfo; import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig; import org.apache.hadoop.mapreduce.v2.util.MRApps; import org.apache.hadoop.mapreduce.v2.util.MRApps.TaskAttemptStateUI; -import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.mapreduce.v2.util.MRWebAppUtil; import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.util.Times; import org.apache.hadoop.yarn.webapp.ResponseInfo; import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet; @@ -60,14 +56,9 @@ import com.google.inject.Inject; */ public class HsJobBlock extends HtmlBlock { final AppContext appContext; - private UserGroupInformation ugi; - private boolean isFilterAppListByUserEnabled; - @Inject HsJobBlock(Configuration conf, AppContext appctx, ViewContext ctx) { - super(ctx); + @Inject HsJobBlock(AppContext appctx) { appContext = appctx; - isFilterAppListByUserEnabled = conf - .getBoolean(YarnConfiguration.FILTER_ENTITY_LIST_BY_USER, false); } /* @@ -87,13 +78,6 @@ public class HsJobBlock extends HtmlBlock { html.p().__("Sorry, ", jid, " not found.").__(); return; } - ugi = getCallerUGI(); - if (isFilterAppListByUserEnabled && ugi != null - && !j.checkAccess(ugi, JobACL.VIEW_JOB)) { - html.p().__("Sorry, ", jid, " could not be viewed for '", - ugi.getUserName(), "'.").__(); - return; - } if(j instanceof UnparsedJob) { final int taskCount = j.getTotalMaps() + j.getTotalReduces(); UnparsedJob 
oversizedJob = (UnparsedJob) j; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobsBlock.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobsBlock.java index 1a2b2fe7a8a..216bdcee983 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobsBlock.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobsBlock.java @@ -22,10 +22,15 @@ import java.text.SimpleDateFormat; import java.util.Date; import org.apache.commons.lang3.StringEscapeUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.mapreduce.MRConfig; import org.apache.hadoop.mapreduce.v2.app.AppContext; import org.apache.hadoop.mapreduce.v2.app.job.Job; import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.JobInfo; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.authorize.AccessControlList; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.util.Times; import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet; import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TABLE; @@ -42,9 +47,19 @@ public class HsJobsBlock extends HtmlBlock { final AppContext appContext; final SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy.MM.dd HH:mm:ss z"); + private UserGroupInformation ugi; + private boolean isFilterAppListByUserEnabled; + private boolean areAclsEnabled; + private AccessControlList adminAclList; - @Inject HsJobsBlock(AppContext appCtx) { + @Inject + HsJobsBlock(Configuration conf, AppContext appCtx, ViewContext ctx) { + super(ctx); appContext = appCtx; + isFilterAppListByUserEnabled = conf + .getBoolean(YarnConfiguration.FILTER_ENTITY_LIST_BY_USER, false); + areAclsEnabled = conf.getBoolean(MRConfig.MR_ACLS_ENABLED, false); + adminAclList = new AccessControlList(conf.get(MRConfig.MR_ADMINS, " ")); } /* @@ -77,6 +92,12 @@ public class HsJobsBlock extends HtmlBlock { StringBuilder jobsTableData = new StringBuilder("[\n"); for (Job j : appContext.getAllJobs().values()) { JobInfo job = new JobInfo(j); + ugi = getCallerUGI(); + // Allow to list only per-user apps if incoming ugi has permission. + if (isFilterAppListByUserEnabled && ugi != null + && !checkAccess(job.getUserName())) { + continue; + } jobsTableData.append("[\"") .append(dateFormat.format(new Date(job.getSubmitTime()))).append("\",\"") .append(job.getFormattedStartTimeStr(dateFormat)).append("\",\"") @@ -139,4 +160,21 @@ public class HsJobsBlock extends HtmlBlock { __(). __(); } + + private boolean checkAccess(String userName) { + if(!areAclsEnabled) { + return true; + } + + // User could see its own job. 
+ if (ugi.getShortUserName().equals(userName)) { + return true; + } + + // Admin could also see all jobs + if (adminAclList != null && adminAclList.isUserAllowed(ugi)) { + return true; + } + return false; + } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestBlocks.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestBlocks.java index d1a704f5f0f..27c412b8a80 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestBlocks.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestBlocks.java @@ -59,6 +59,8 @@ import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationAttemptIdPBImpl; import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl; import org.apache.hadoop.yarn.api.records.impl.pb.ContainerIdPBImpl; import org.apache.hadoop.yarn.webapp.Controller.RequestContext; +import org.apache.hadoop.yarn.webapp.View.ViewContext; +import org.apache.hadoop.yarn.webapp.Controller; import org.apache.hadoop.yarn.webapp.Params; import org.apache.hadoop.yarn.webapp.View; import org.apache.hadoop.yarn.webapp.log.AggregatedLogsPage; @@ -223,7 +225,14 @@ public class TestBlocks { jobs.put(job.getID(), job); when(ctx.getAllJobs()).thenReturn(jobs); - HsJobsBlock block = new HsJobsBlockForTest(ctx); + Controller.RequestContext rc = mock(Controller.RequestContext.class); + ViewContext view = mock(ViewContext.class); + HttpServletRequest req =mock(HttpServletRequest.class); + when(rc.getRequest()).thenReturn(req); + when(view.requestContext()).thenReturn(rc); + + Configuration conf = new Configuration(); + HsJobsBlock block = new HsJobsBlockForTest(conf, ctx, view); PrintWriter pWriter = new PrintWriter(data); Block html = new BlockForTest(new HtmlBlockForTest(), pWriter, 0, false); block.render(html); @@ -400,8 +409,10 @@ public class TestBlocks { } private class HsJobsBlockForTest extends HsJobsBlock { - HsJobsBlockForTest(AppContext appCtx) { - super(appCtx); + + HsJobsBlockForTest(Configuration conf, AppContext appCtx, + ViewContext view) { + super(conf, appCtx, view); } @Override diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsJobBlock.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsJobBlock.java index 48e3d3b231a..7fa238e1cef 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsJobBlock.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsJobBlock.java @@ -33,10 +33,8 @@ import org.apache.hadoop.mapreduce.v2.hs.UnparsedJob; import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.util.StringHelper; -import org.apache.hadoop.yarn.webapp.Controller; import org.apache.hadoop.yarn.webapp.ResponseInfo; import org.apache.hadoop.yarn.webapp.SubView; -import org.apache.hadoop.yarn.webapp.View.ViewContext; import 
org.apache.hadoop.yarn.webapp.view.BlockForTest; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; import org.apache.hadoop.yarn.webapp.view.HtmlBlockForTest; @@ -51,8 +49,6 @@ import java.util.ArrayList; import java.util.HashMap; import java.util.Map; -import javax.servlet.http.HttpServletRequest; - import static org.mockito.Matchers.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -73,13 +69,7 @@ public class TestHsJobBlock { new JobHistoryStubWithAllOversizeJobs(maxAllowedTaskNum); jobHistory.init(config); - Controller.RequestContext rc = mock(Controller.RequestContext.class); - ViewContext view = mock(ViewContext.class); - HttpServletRequest req =mock(HttpServletRequest.class); - when(rc.getRequest()).thenReturn(req); - when(view.requestContext()).thenReturn(rc); - - HsJobBlock jobBlock = new HsJobBlock(config, jobHistory, view) { + HsJobBlock jobBlock = new HsJobBlock(jobHistory) { // override this so that job block can fetch a job id. @Override public Map moreParams() { @@ -111,13 +101,7 @@ public class TestHsJobBlock { JobHistory jobHistory = new JobHitoryStubWithAllNormalSizeJobs(); jobHistory.init(config); - Controller.RequestContext rc = mock(Controller.RequestContext.class); - ViewContext view = mock(ViewContext.class); - HttpServletRequest req =mock(HttpServletRequest.class); - when(rc.getRequest()).thenReturn(req); - when(view.requestContext()).thenReturn(rc); - - HsJobBlock jobBlock = new HsJobBlock(config, jobHistory, view) { + HsJobBlock jobBlock = new HsJobBlock(jobHistory) { // override this so that the job block can fetch a job id. @Override public Map moreParams() { From 8261f9e5710038ccbc475dbfcea3b9ae79b6f482 Mon Sep 17 00:00:00 2001 From: Rohith Sharma K S Date: Sat, 2 Jun 2018 09:29:06 +0530 Subject: [PATCH 022/113] More YARN pages need to honor yarn.resourcemanager.display.per-user-apps(addendum). Contributed by Sunil G. 
--- .../reader/TimelineReaderManager.java | 21 +++++++++++++++++++ .../reader/TimelineReaderWebServices.java | 18 ++++++++++------ 2 files changed, 33 insertions(+), 6 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderManager.java index 67e5849ed6a..8c7c974b5b3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderManager.java @@ -24,12 +24,14 @@ import java.util.Set; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.yarn.api.records.timelineservice.FlowActivityEntity; import org.apache.hadoop.yarn.api.records.timelineservice.FlowRunEntity; import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity; import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntityType; import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.security.AdminACLsManager; import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader; /** @@ -42,12 +44,19 @@ import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader; public class TimelineReaderManager extends AbstractService { private TimelineReader reader; + private AdminACLsManager adminACLsManager; public TimelineReaderManager(TimelineReader timelineReader) { super(TimelineReaderManager.class.getName()); this.reader = timelineReader; } + @Override + protected void serviceInit(Configuration conf) throws Exception { + // TODO Once ACLS story is played, this need to be removed or modified. + this.adminACLsManager = new AdminACLsManager(conf); + } + /** * Gets cluster ID from config yarn.resourcemanager.cluster-id * if not supplied by client. @@ -198,4 +207,16 @@ public class TimelineReaderManager extends AbstractService { context.setClusterId(getClusterID(context.getClusterId(), getConfig())); return reader.getEntityTypes(new TimelineReaderContext(context)); } + + /** + * The API to confirm is a User is allowed to read this data. 
+ * @param callerUGI UserGroupInformation of the user + */ + public boolean checkAccess(UserGroupInformation callerUGI) { + // TODO to be removed or modified once ACL story is played + if (!adminACLsManager.areACLsEnabled()) { + return true; + } + return callerUGI != null && adminACLsManager.isAdmin(callerUGI); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java index a671f33cbdd..7bf66b0bd76 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java @@ -1435,6 +1435,7 @@ public class TimelineReaderWebServices { long startTime = Time.monotonicNow(); init(res); TimelineReaderManager timelineReaderManager = getTimelineReaderManager(); + Configuration config = timelineReaderManager.getConfig(); Set entities = null; try { DateRange range = parseDateRange(dateRange); @@ -1454,15 +1455,15 @@ public class TimelineReaderWebServices { long endTime = Time.monotonicNow(); if (entities == null) { entities = Collections.emptySet(); - } else if (isDisplayEntityPerUserFilterEnabled( - timelineReaderManager.getConfig())) { + } else if (isDisplayEntityPerUserFilterEnabled(config)) { Set userEntities = new LinkedHashSet<>(); userEntities.addAll(entities); for (TimelineEntity entity : userEntities) { if (entity.getInfo() != null) { String userId = (String) entity.getInfo().get(FlowActivityEntity.USER_INFO_KEY); - if (!validateAuthUserWithEntityUser(callerUGI, userId)) { + if (!validateAuthUserWithEntityUser(timelineReaderManager, callerUGI, + userId)) { entities.remove(entity); } } @@ -3422,11 +3423,16 @@ public class TimelineReaderWebServices { } private boolean isDisplayEntityPerUserFilterEnabled(Configuration config) { - return config + return !config + .getBoolean(YarnConfiguration.TIMELINE_SERVICE_READ_AUTH_ENABLED, + YarnConfiguration.DEFAULT_TIMELINE_SERVICE_READ_AUTH_ENABLED) + && config .getBoolean(YarnConfiguration.FILTER_ENTITY_LIST_BY_USER, false); } - private boolean validateAuthUserWithEntityUser(UserGroupInformation ugi, + // TODO to be removed/modified once ACL story has played + private boolean validateAuthUserWithEntityUser( + TimelineReaderManager readerManager, UserGroupInformation ugi, String entityUser) { String authUser = TimelineReaderWebServicesUtils.getUserName(ugi); String requestedUser = TimelineReaderWebServicesUtils.parseStr(entityUser); @@ -3434,6 +3440,6 @@ public class TimelineReaderWebServices { LOG.debug( "Authenticated User: " + authUser + " Requested User:" + entityUser); } - return authUser.equals(requestedUser); + return (readerManager.checkAccess(ugi) || authUser.equals(requestedUser)); } } From 31998643a51f1e08f723f18dc5476ac1512d5b81 Mon Sep 17 00:00:00 2001 From: Billie Rinaldi Date: Sat, 2 Jun 2018 14:46:32 -0700 Subject: [PATCH 023/113] YARN-8342. Enable untrusted docker image to run with launch command. 
Contributed by Eric Yang --- .../hadoop-yarn/conf/yarn-env.sh | 1 + .../docker/DockerProviderService.java | 24 ++++++++-- .../runtime/DockerLinuxContainerRuntime.java | 23 ++++++++- .../impl/utils/docker-util.c | 7 +-- .../test/utils/test_docker_util.cc | 48 +++++++++---------- .../src/site/markdown/DockerContainers.md | 33 +++++++++++-- .../site/markdown/yarn-service/Examples.md | 37 +++++++++++++- .../markdown/yarn-service/YarnServiceAPI.md | 2 +- 8 files changed, 135 insertions(+), 40 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/conf/yarn-env.sh b/hadoop-yarn-project/hadoop-yarn/conf/yarn-env.sh index d8650236b71..76d1d6b4f0b 100644 --- a/hadoop-yarn-project/hadoop-yarn/conf/yarn-env.sh +++ b/hadoop-yarn-project/hadoop-yarn/conf/yarn-env.sh @@ -166,3 +166,4 @@ ### # Directory containing service examples # export YARN_SERVICE_EXAMPLES_DIR = $HADOOP_YARN_HOME/share/hadoop/yarn/yarn-service-examples +# export YARN_CONTAINER_RUNTIME_DOCKER_RUN_OVERRIDE_DISABLE=true diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/docker/DockerProviderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/docker/DockerProviderService.java index 821682dcd53..071b30a29f1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/docker/DockerProviderService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/docker/DockerProviderService.java @@ -50,6 +50,26 @@ public class DockerProviderService extends AbstractProviderService compInstance.getCompSpec().getRunPrivilegedContainer()); } + /** + * Check if system is default to disable docker override or + * user requested a Docker container with ENTRY_POINT support. + * + * @param component - YARN Service component + * @return true if Docker launch command override is disabled + */ + private boolean checkUseEntryPoint(Component component) { + boolean overrideDisable = false; + String overrideDisableKey = Environment. + YARN_CONTAINER_RUNTIME_DOCKER_RUN_OVERRIDE_DISABLE. + name(); + String overrideDisableValue = (component + .getConfiguration().getEnv(overrideDisableKey) != null) ? 
+ component.getConfiguration().getEnv(overrideDisableKey) : + System.getenv(overrideDisableKey); + overrideDisable = Boolean.parseBoolean(overrideDisableValue); + return overrideDisable; + } + @Override public void buildContainerLaunchCommand(AbstractLauncher launcher, Service service, ComponentInstance instance, @@ -58,9 +78,7 @@ public class DockerProviderService extends AbstractProviderService Map tokensForSubstitution) throws IOException, SliderException { Component component = instance.getComponent().getComponentSpec(); - boolean useEntryPoint = Boolean.parseBoolean(component - .getConfiguration().getEnv(Environment - .YARN_CONTAINER_RUNTIME_DOCKER_RUN_OVERRIDE_DISABLE.name())); + boolean useEntryPoint = checkUseEntryPoint(component); if (useEntryPoint) { String launchCommand = component.getLaunchCommand(); if (!StringUtils.isEmpty(launchCommand)) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java index fc095d55819..e19379f3bfe 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java @@ -22,6 +22,7 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.security.Credentials; +import org.apache.hadoop.yarn.api.ApplicationConstants.Environment; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.server.nodemanager.Context; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker.DockerCommand; @@ -724,6 +725,25 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime { return id; } + /** + * Check if system is default to disable docker override or + * user requested a Docker container with ENTRY_POINT support. + * + * @param environment - Docker container environment variables + * @return true if Docker launch command override is disabled + */ + private boolean checkUseEntryPoint(Map environment) { + boolean overrideDisable = false; + String overrideDisableKey = Environment. + YARN_CONTAINER_RUNTIME_DOCKER_RUN_OVERRIDE_DISABLE. + name(); + String overrideDisableValue = (environment.get(overrideDisableKey) != null) + ? 
environment.get(overrideDisableKey) : + System.getenv(overrideDisableKey); + overrideDisable = Boolean.parseBoolean(overrideDisableValue); + return overrideDisable; + } + @Override public void launchContainer(ContainerRuntimeContext ctx) throws ContainerExecutionException { @@ -734,8 +754,7 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime { String imageName = environment.get(ENV_DOCKER_CONTAINER_IMAGE); String network = environment.get(ENV_DOCKER_CONTAINER_NETWORK); String hostname = environment.get(ENV_DOCKER_CONTAINER_HOSTNAME); - boolean useEntryPoint = Boolean.parseBoolean(environment - .get(ENV_DOCKER_CONTAINER_RUN_OVERRIDE_DISABLE)); + boolean useEntryPoint = checkUseEntryPoint(environment); if(network == null || network.isEmpty()) { network = defaultNetwork; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c index d34a5b21f6e..ffc349a10fd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c @@ -114,7 +114,7 @@ int check_trusted_image(const struct configuration *command_config, const struct int i = 0; int ret = 0; char *image_name = get_configuration_value("image", DOCKER_COMMAND_FILE_SECTION, command_config); - char **privileged_registry = get_configuration_values_delimiter("docker.privileged-containers.registries", CONTAINER_EXECUTOR_CFG_DOCKER_SECTION, conf, ","); + char **privileged_registry = get_configuration_values_delimiter("docker.trusted.registries", CONTAINER_EXECUTOR_CFG_DOCKER_SECTION, conf, ","); char *registry_ptr = NULL; if (image_name == NULL) { ret = INVALID_DOCKER_IMAGE_NAME; @@ -1097,7 +1097,6 @@ static int add_mounts(const struct configuration *command_config, const struct c if (ro != 0) { ro_suffix = ":ro"; } - if (values != NULL) { // Disable mount volumes if image is not trusted. 
if (check_trusted_image(command_config, conf) != 0) { @@ -1480,10 +1479,6 @@ int get_docker_run_command(const char *command_file, const struct configuration launch_command = get_configuration_values_delimiter("launch-command", DOCKER_COMMAND_FILE_SECTION, &command_config, ","); - if (check_trusted_image(&command_config, conf) != 0) { - launch_command = NULL; - } - if (launch_command != NULL) { for (i = 0; launch_command[i] != NULL; ++i) { ret = add_to_args(args, launch_command[i]); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc index 613755cec6b..cd671ceff39 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc @@ -639,9 +639,9 @@ namespace ContainerExecutor { struct configuration container_cfg, cmd_cfg; struct args buff = ARGS_INITIAL_VALUE; int ret = 0; - std::string container_executor_cfg_contents[] = {"[docker]\n docker.privileged-containers.enabled=1\n docker.privileged-containers.registries=hadoop", - "[docker]\n docker.privileged-containers.enabled=true\n docker.privileged-containers.registries=hadoop", - "[docker]\n docker.privileged-containers.enabled=True\n docker.privileged-containers.registries=hadoop", + std::string container_executor_cfg_contents[] = {"[docker]\n docker.privileged-containers.enabled=1\n docker.trusted.registries=hadoop", + "[docker]\n docker.privileged-containers.enabled=true\n docker.trusted.registries=hadoop", + "[docker]\n docker.privileged-containers.enabled=True\n docker.trusted.registries=hadoop", "[docker]\n docker.privileged-containers.enabled=0", "[docker]\n docker.privileged-containers.enabled=false", "[docker]\n"}; @@ -727,7 +727,7 @@ namespace ContainerExecutor { int ret = 0; std::string container_executor_cfg_contents = "[docker]\n" " docker.allowed.capabilities=CHROOT,MKNOD\n" - " docker.privileged-containers.registries=hadoop\n"; + " docker.trusted.registries=hadoop\n"; std::vector > file_cmd_vec; file_cmd_vec.push_back(std::make_pair( "[docker-command-execution]\n docker-command=run\n image=hadoop/docker-image\n cap-add=CHROOT,MKNOD", @@ -773,7 +773,7 @@ namespace ContainerExecutor { ret = set_capabilities(&cmd_cfg, &container_cfg, &buff); ASSERT_EQ(INVALID_DOCKER_CAPABILITY, ret); - container_executor_cfg_contents = "[docker]\n docker.privileged-containers.registries=hadoop\n"; + container_executor_cfg_contents = "[docker]\n docker.trusted.registries=hadoop\n"; write_container_executor_cfg(container_executor_cfg_contents); ret = read_config(container_executor_cfg_file.c_str(), &container_cfg); if (ret != 0) { @@ -790,7 +790,7 @@ namespace ContainerExecutor { reset_args(&buff); int ret = 0; std::string container_executor_cfg_contents = "[docker]\n" - " docker.privileged-containers.registries=hadoop\n" + " docker.trusted.registries=hadoop\n" " docker.allowed.devices=/dev/test-device,/dev/device2,regex:/dev/nvidia.*,regex:/dev/gpu-uvm.*"; std::vector > file_cmd_vec; file_cmd_vec.push_back(std::make_pair( @@ -910,7 +910,7 @@ namespace ContainerExecutor { struct configuration container_cfg, cmd_cfg; struct args buff = 
ARGS_INITIAL_VALUE; int ret = 0; - std::string container_executor_cfg_contents = "[docker]\n docker.privileged-containers.registries=hadoop\n " + std::string container_executor_cfg_contents = "[docker]\n docker.trusted.registries=hadoop\n " "docker.allowed.rw-mounts=/opt,/var,/usr/bin/cut\n " "docker.allowed.ro-mounts=/etc/passwd"; std::vector > file_cmd_vec; @@ -1037,7 +1037,7 @@ namespace ContainerExecutor { struct args buff = ARGS_INITIAL_VALUE; int ret = 0; - std::string container_executor_cfg_contents = "[docker]\n docker.privileged-containers.registries=hadoop\n " + std::string container_executor_cfg_contents = "[docker]\n docker.trusted.registries=hadoop\n " "docker.allowed.rw-mounts=/home/,/var,/usr/bin/cut\n " "docker.allowed.ro-mounts=/etc/passwd,/etc/group"; std::vector > file_cmd_vec; @@ -1118,7 +1118,7 @@ namespace ContainerExecutor { free(actual); } - container_executor_cfg_contents = "[docker]\n docker.privileged-containers.registries=hadoop\n"; + container_executor_cfg_contents = "[docker]\n docker.trusted.registries=hadoop\n"; write_container_executor_cfg(container_executor_cfg_contents); ret = read_config(container_executor_cfg_file.c_str(), &container_cfg); if (ret != 0) { @@ -1136,7 +1136,7 @@ namespace ContainerExecutor { std::string container_executor_contents = "[docker]\n docker.allowed.ro-mounts=/var,/etc,/usr/bin/cut\n" " docker.allowed.rw-mounts=/tmp\n docker.allowed.networks=bridge\n " " docker.privileged-containers.enabled=1\n docker.allowed.capabilities=CHOWN,SETUID\n" - " docker.allowed.devices=/dev/test\n docker.privileged-containers.registries=hadoop\n"; + " docker.allowed.devices=/dev/test\n docker.trusted.registries=hadoop\n"; write_file(container_executor_cfg_file, container_executor_contents); int ret = read_config(container_executor_cfg_file.c_str(), &container_executor_cfg); if (ret != 0) { @@ -1180,7 +1180,7 @@ namespace ContainerExecutor { " cap-add=CHOWN,SETUID\n cgroup-parent=ctr-cgroup\n detach=true\n rm=true\n" " launch-command=bash,test_script.sh,arg1,arg2", "run --name=container_e1_12312_11111_02_000001 --user=nobody -d --rm" - " --cgroup-parent=ctr-cgroup --cap-drop=ALL --hostname=host-id nothadoop/docker-image")); + " --cgroup-parent=ctr-cgroup --cap-drop=ALL --hostname=host-id nothadoop/docker-image bash test_script.sh arg1 arg2")); // Test non-privileged container and drop all privileges file_cmd_vec.push_back(std::make_pair( @@ -1202,7 +1202,7 @@ namespace ContainerExecutor { " cap-add=CHOWN,SETUID\n cgroup-parent=ctr-cgroup\n detach=true\n rm=true\n" " launch-command=bash,test_script.sh,arg1,arg2", "run --name=container_e1_12312_11111_02_000001 --user=nobody -d --rm --net=bridge" - " --cgroup-parent=ctr-cgroup --cap-drop=ALL --hostname=host-id nothadoop/docker-image")); + " --cgroup-parent=ctr-cgroup --cap-drop=ALL --hostname=host-id nothadoop/docker-image bash test_script.sh arg1 arg2")); // Test privileged container file_cmd_vec.push_back(std::make_pair( @@ -1237,7 +1237,7 @@ namespace ContainerExecutor { " launch-command=bash,test_script.sh,arg1,arg2", "run --name=container_e1_12312_11111_02_000001 --user=nobody -d --rm --net=bridge --cap-drop=ALL " "--hostname=host-id --group-add 1000 --group-add 1001 " - "docker-image")); + "docker-image bash test_script.sh arg1 arg2")); std::vector > bad_file_cmd_vec; @@ -1318,7 +1318,7 @@ namespace ContainerExecutor { " docker.allowed.ro-mounts=/var,/etc,/usr/bin/cut\n" " docker.allowed.rw-mounts=/tmp\n docker.allowed.networks=bridge\n " " docker.privileged-containers.enabled=1\n 
docker.allowed.capabilities=CHOWN,SETUID\n" - " docker.allowed.devices=/dev/test\n docker.privileged-containers.registries=hadoop\n"; + " docker.allowed.devices=/dev/test\n docker.trusted.registries=hadoop\n"; write_file(container_executor_cfg_file, container_executor_contents); int ret = read_config(container_executor_cfg_file.c_str(), &container_executor_cfg); if (ret != 0) { @@ -1357,12 +1357,12 @@ namespace ContainerExecutor { TEST_F(TestDockerUtil, test_docker_run_no_privileged) { std::string container_executor_contents[] = {"[docker]\n docker.allowed.ro-mounts=/var,/etc,/usr/bin/cut\n" - " docker.privileged-containers.registries=hadoop\n" + " docker.trusted.registries=hadoop\n" " docker.allowed.rw-mounts=/tmp\n docker.allowed.networks=bridge\n" " docker.allowed.capabilities=CHOWN,SETUID\n" " docker.allowed.devices=/dev/test", "[docker]\n docker.allowed.ro-mounts=/var,/etc,/usr/bin/cut\n" - " docker.privileged-containers.registries=hadoop\n" + " docker.trusted.registries=hadoop\n" " docker.allowed.rw-mounts=/tmp\n docker.allowed.networks=bridge\n" " docker.allowed.capabilities=CHOWN,SETUID\n" " privileged=0\n" @@ -1386,7 +1386,7 @@ namespace ContainerExecutor { file_cmd_vec.push_back(std::make_pair( "[docker-command-execution]\n docker-command=run\n name=container_e1_12312_11111_02_000001\n image=docker-image\n" " user=nobody\n launch-command=bash,test_script.sh,arg1,arg2", - "run --name=container_e1_12312_11111_02_000001 --user=nobody --cap-drop=ALL docker-image")); + "run --name=container_e1_12312_11111_02_000001 --user=nobody --cap-drop=ALL docker-image bash test_script.sh arg1 arg2")); file_cmd_vec.push_back(std::make_pair( "[docker-command-execution]\n" @@ -1407,7 +1407,7 @@ namespace ContainerExecutor { " cap-add=CHOWN,SETUID\n cgroup-parent=ctr-cgroup\n detach=true\n rm=true\n" " launch-command=bash,test_script.sh,arg1,arg2", "run --name=container_e1_12312_11111_02_000001 --user=nobody -d --rm" - " --cgroup-parent=ctr-cgroup --cap-drop=ALL --hostname=host-id nothadoop/docker-image")); + " --cgroup-parent=ctr-cgroup --cap-drop=ALL --hostname=host-id nothadoop/docker-image bash test_script.sh arg1 arg2")); file_cmd_vec.push_back(std::make_pair( "[docker-command-execution]\n" @@ -1428,7 +1428,7 @@ namespace ContainerExecutor { " cap-add=CHOWN,SETUID\n cgroup-parent=ctr-cgroup\n detach=true\n rm=true\n" " launch-command=bash,test_script.sh,arg1,arg2", "run --name=container_e1_12312_11111_02_000001 --user=nobody -d --rm --net=bridge" - " --cgroup-parent=ctr-cgroup --cap-drop=ALL --hostname=host-id nothadoop/docker-image")); + " --cgroup-parent=ctr-cgroup --cap-drop=ALL --hostname=host-id nothadoop/docker-image bash test_script.sh arg1 arg2")); std::vector > bad_file_cmd_vec; bad_file_cmd_vec.push_back(std::make_pair( @@ -1549,23 +1549,23 @@ namespace ContainerExecutor { TEST_F(TestDockerUtil, test_docker_no_new_privileges) { std::string container_executor_contents[] = {"[docker]\n" - " docker.privileged-containers.registries=hadoop\n" + " docker.trusted.registries=hadoop\n" " docker.privileged-containers.enabled=false\n" " docker.no-new-privileges.enabled=true", "[docker]\n" - " docker.privileged-containers.registries=hadoop\n" + " docker.trusted.registries=hadoop\n" " docker.privileged-containers.enabled=true\n" " docker.no-new-privileges.enabled=true", "[docker]\n" - " docker.privileged-containers.registries=hadoop\n" + " docker.trusted.registries=hadoop\n" " docker.privileged-containers.enabled=true\n" " docker.no-new-privileges.enabled=true", "[docker]\n" - " 
docker.privileged-containers.registries=hadoop\n" + " docker.trusted.registries=hadoop\n" " docker.privileged-containers.enabled=false\n" " docker.no-new-privileges.enabled=false", "[docker]\n" - " docker.privileged-containers.registries=hadoop\n" + " docker.trusted.registries=hadoop\n" " docker.privileged-containers.enabled=true\n" " docker.no-new-privileges.enabled=false"}; for (int i = 0; i < 2; ++i) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md index 0f49a06a6dc..c6f965a4106 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md @@ -206,7 +206,7 @@ are allowed. It contains the following properties: | `docker.allowed.rw-mounts` | Comma separated directories that containers are allowed to mount in read-write mode. By default, no directories are allowed to mounted. | | `docker.host-pid-namespace.enabled` | Set to "true" or "false" to enable or disable using the host's PID namespace. Default value is "false". | | `docker.privileged-containers.enabled` | Set to "true" or "false" to enable or disable launching privileged containers. Default value is "false". | -| `docker.privileged-containers.registries` | Comma separated list of trusted docker registries for running trusted privileged docker containers. By default, no registries are defined. | +| `docker.trusted.registries` | Comma separated list of trusted docker registries for running trusted privileged docker containers. By default, no registries are defined. | | `docker.inspect.max.retries` | Integer value to check docker container readiness. Each inspection is set with 3 seconds delay. Default value of 10 will wait 30 seconds for docker container to become ready before marked as container failed. | | `docker.no-new-privileges.enabled` | Enable/disable the no-new-privileges flag for docker run. Set to "true" to enable, disabled by default. | @@ -230,7 +230,7 @@ yarn.nodemanager.linux-container-executor.group=yarn [docker] module.enabled=true docker.privileged-containers.enabled=true - docker.privileged-containers.registries=centos + docker.trusted.registries=centos docker.allowed.capabilities=SYS_CHROOT,MKNOD,SETFCAP,SETPCAP,FSETID,CHOWN,AUDIT_WRITE,SETGID,NET_RAW,FOWNER,SETUID,DAC_OVERRIDE,KILL,NET_BIND_SERVICE docker.allowed.networks=bridge,host,none docker.allowed.ro-mounts=/sys/fs/cgroup @@ -372,7 +372,7 @@ Privileged docker container can interact with host system devices. This can cau The default behavior is disallow any privileged docker containers. When `docker.privileged-containers.enabled` is set to enabled, docker image can run with root privileges in the docker container, but access to host level devices are disabled. This allows developer and tester to run docker images from internet without causing harm to host operating system. -When docker images have been certified by developers and testers to be trustworthy. The trusted image can be promoted to trusted docker registry. System administrator can define `docker.privileged-containers.registries`, and setup private docker registry server to promote trusted images. +When docker images have been certified by developers and testers to be trustworthy. The trusted image can be promoted to trusted docker registry. 
System administrator can define `docker.trusted.registries`, and setup private docker registry server to promote trusted images. Trusted images are allowed to mount external devices such as HDFS via NFS gateway, or host level Hadoop configuration. If system administrators allow writing to external volumes using `docker.allow.rw-mounts directive`, privileged docker container can have full control of host level files in the predefined volumes. @@ -436,3 +436,30 @@ To run a Spark shell in Docker containers, run the following command: Note that the application master and executors are configured independently. In this example, we are using the hadoop-docker image for both. + +Docker Container ENTRYPOINT Support +------------------------------------ + +When Docker support was introduced to Hadoop 2.x, the platform was designed to +run existing Hadoop programs inside Docker container. Log redirection and +environment setup are integrated with Node Manager. In Hadoop 3.x, Hadoop +Docker support extends beyond running Hadoop workload, and support Docker container +in Docker native form using ENTRYPOINT from dockerfile. Application can decide to +support YARN mode as default or Docker mode as default by defining +YARN_CONTAINER_RUNTIME_DOCKER_RUN_OVERRIDE_DISABLE environment variable. +System administrator can also set as default setting for the cluster to make +ENTRY_POINT as default mode of operation. + +In yarn-site.xml, add YARN_CONTAINER_RUNTIME_DOCKER_RUN_OVERRIDE_DISABLE to +node manager environment white list: +``` + + yarn.nodemanager.env-whitelist + JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,HADOOP_YARN_HOME,HADOOP_MAPRED_HOME,YARN_CONTAINER_RUNTIME_DOCKER_RUN_OVERRIDE_DISABLE + +``` + +In yarn-env.sh, define: +``` +export YARN_CONTAINER_RUNTIME_DOCKER_RUN_OVERRIDE_DISABLE=true +``` diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Examples.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Examples.md index 4163635576b..03fec79e8ab 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Examples.md +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Examples.md @@ -20,7 +20,7 @@ This document describes some example service definitions (`Yarnfile`). ## Apache web server - httpd (with registry DNS) -For this example to work, centos/httpd-24-centos7 image must be included in `docker.privileged-containers.registries`. +For this example to work, centos/httpd-24-centos7 image must be included in `docker.trusted.registries`. For server side configuration, please refer to [Running Applications in Docker Containers](../DockerContainers.html) document. Below is the `Yarnfile` for a service called `httpd-service` with two `httpd` instances. @@ -163,3 +163,38 @@ where `service-name` is optional. If omitted, it uses the name defined in the `Y Look up your IPs at the RM REST endpoint `http://:8088/app/v1/services/httpd-service`. Then visit port 8080 for each IP to view the pages. + +## Docker image ENTRYPOINT support + +Docker images may have built with ENTRYPOINT to enable start up of docker image without any parameters. +When passing parameters to ENTRYPOINT enabled image, `launch_command` is delimited by comma (,). 
+ +{ + "name": "sleeper-service", + "version": "1", + "components" : + [ + { + "name": "sleeper", + "number_of_containers": 2, + "artifact": { + "id": "hadoop/centos:latest", + "type": "DOCKER" + }, + "launch_command": "sleep,90000", + "resource": { + "cpus": 1, + "memory": "256" + }, + "restart_policy": "ON_FAILURE", + "configuration": { + "env": { + "YARN_CONTAINER_RUNTIME_DOCKER_RUN_OVERRIDE_DISABLE":"true" + }, + "properties": { + "docker.network": "host" + } + } + } + ] +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md index f1dc81b5de1..4bfa742c454 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md @@ -225,7 +225,7 @@ One or more components of the service. If the service is HBase say, then the com |dependencies|An array of service components which should be in READY state (as defined by readiness check), before this component can be started. The dependencies across all components of a service should be represented as a DAG.|false|string array|| |readiness_check|Readiness check for this component.|false|ReadinessCheck|| |artifact|Artifact of the component (optional). If not specified, the service level global artifact takes effect.|false|Artifact|| -|launch_command|The custom launch command of this component (optional for DOCKER component, required otherwise). When specified at the component level, it overrides the value specified at the global level (if any).|false|string|| +|launch_command|The custom launch command of this component (optional for DOCKER component, required otherwise). When specified at the component level, it overrides the value specified at the global level (if any). If docker image supports ENTRYPOINT, launch_command is delimited by comma(,) instead of space.|false|string|| |resource|Resource of this component (optional). If not specified, the service level global resource takes effect.|false|Resource|| |number_of_containers|Number of containers for this component (optional). If not specified, the service level global number_of_containers takes effect.|false|integer (int64)|| |containers|Containers of a started component. Specifying a value for this attribute for the POST payload raises a validation error. This blob is available only in the GET response of a started service.|false|Container array|| From e308ac88d399e9bed3581ff4b1450aa2070dcedf Mon Sep 17 00:00:00 2001 From: Inigo Goiri Date: Sat, 2 Jun 2018 20:12:35 -0700 Subject: [PATCH 024/113] HDFS-13651. TestReencryptionHandler fails on Windows. Contributed by Anbang Hu. 
--- .../hdfs/server/namenode/TestReencryptionHandler.java | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryptionHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryptionHandler.java index a148edc724a..d4f79b56a47 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryptionHandler.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryptionHandler.java @@ -36,6 +36,7 @@ import org.mockito.Mockito; import org.slf4j.LoggerFactory; import org.slf4j.event.Level; +import java.io.File; import java.io.IOException; import java.util.HashMap; import java.util.Map; @@ -67,9 +68,11 @@ public class TestReencryptionHandler { private ReencryptionHandler mockReencryptionhandler(final Configuration conf) throws IOException { // mock stuff to create a mocked ReencryptionHandler + FileSystemTestHelper helper = new FileSystemTestHelper(); + Path targetFile = new Path(new File(helper.getTestRootDir()) + .getAbsolutePath(), "test.jks"); conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH, - JavaKeyStoreProvider.SCHEME_NAME + "://file" + new Path( - new FileSystemTestHelper().getTestRootDir(), "test.jks").toUri()); + JavaKeyStoreProvider.SCHEME_NAME + "://file" + targetFile.toUri()); final EncryptionZoneManager ezm = Mockito.mock(EncryptionZoneManager.class); final KeyProvider kp = KMSUtil.createKeyProvider(conf, CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH); From 8f83b9abf9a7462c50a1db18d4d99e80c535b09c Mon Sep 17 00:00:00 2001 From: Inigo Goiri Date: Sat, 2 Jun 2018 20:19:59 -0700 Subject: [PATCH 025/113] HDFS-13648. Fix TestGetConf#testGetJournalNodes on Windows due to a mismatch line separator. Contributed by Giovanni Matteo Fumarola. --- .../test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java index 15c20b4cb64..13fee1f3938 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java @@ -388,7 +388,7 @@ public class TestGetConf { } buffer.append(val); } - buffer.append("\n"); + buffer.append(System.lineSeparator()); expected1 = buffer.toString(); Set actual = DFSUtil.getJournalNodeAddresses(conf); @@ -462,7 +462,7 @@ public class TestGetConf { actual = DFSUtil.getJournalNodeAddresses(conf); assertEquals(expected.toString(), actual.toString()); - actual1 = "\n"; + actual1 = System.lineSeparator(); expected1 = getAddressListFromTool(TestType.JOURNALNODE, conf, true); assertEquals(expected1, actual1); @@ -479,7 +479,7 @@ public class TestGetConf { expected1 = getAddressListFromTool(TestType.JOURNALNODE, conf, true); - actual1 = "\n"; + actual1 = System.lineSeparator(); assertEquals(expected1, actual1); conf.clear(); } From a804b7c9d2986556c91c1741c1ae7ac2b9c579f3 Mon Sep 17 00:00:00 2001 From: Inigo Goiri Date: Sat, 2 Jun 2018 20:25:42 -0700 Subject: [PATCH 026/113] MAPREDUCE-7102. Fix TestJavaSerialization for Windows due a mismatch line separator. Contributed by Giovanni Matteo Fumarola. 
--- .../hadoop/mapred/TestJavaSerialization.java | 23 +++++++++++-------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestJavaSerialization.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestJavaSerialization.java index a787e68c124..e5ef4d26873 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestJavaSerialization.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestJavaSerialization.java @@ -91,6 +91,8 @@ public class TestJavaSerialization { wr.write("b a\n"); wr.close(); } + + @SuppressWarnings("deprecation") @Test public void testMapReduceJob() throws Exception { @@ -131,16 +133,17 @@ public class TestJavaSerialization { FileUtil.stat2Paths(fs.listStatus(OUTPUT_DIR, new Utils.OutputFileUtils.OutputFilesFilter())); assertEquals(1, outputFiles.length); - InputStream is = fs.open(outputFiles[0]); - String reduceOutput = org.apache.commons.io.IOUtils.toString(is); - String[] lines = reduceOutput.split(System.getProperty("line.separator")); - assertEquals("Unexpected output; received output '" + reduceOutput + "'", - "a\t1", lines[0]); - assertEquals("Unexpected output; received output '" + reduceOutput + "'", - "b\t1", lines[1]); - assertEquals("Reduce output has extra lines; output is '" + reduceOutput - + "'", 2, lines.length); - is.close(); + try (InputStream is = fs.open(outputFiles[0])) { + String reduceOutput = org.apache.commons.io.IOUtils.toString(is); + String[] lines = reduceOutput.split("\n"); + assertEquals("Unexpected output; received output '" + reduceOutput + "'", + "a\t1", lines[0]); + assertEquals("Unexpected output; received output '" + reduceOutput + "'", + "b\t1", lines[1]); + assertEquals( + "Reduce output has extra lines; output is '" + reduceOutput + "'", 2, + lines.length); + } } /** From 9c4cbed8d19ec0f486af454de6b117d77a0a0b84 Mon Sep 17 00:00:00 2001 From: Sunil G Date: Mon, 4 Jun 2018 00:06:56 +0530 Subject: [PATCH 027/113] =?UTF-8?q?YARN-8276.=20[UI2]=20After=20version=20?= =?UTF-8?q?field=20became=20mandatory,=20form-based=20submission=20of=20ne?= =?UTF-8?q?w=20YARN=20service=20doesn't=20work.=20Contributed=20by=20Gerge?= =?UTF-8?q?ly=20Nov=C3=A1k.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../src/main/webapp/app/components/deploy-service.js | 3 ++- .../src/main/webapp/app/models/yarn-servicedef.js | 8 +++++++- .../app/templates/components/deploy-service.hbs | 11 +++++++++++ .../src/main/webapp/app/utils/info-seeder.js | 1 + 4 files changed, 21 insertions(+), 2 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/deploy-service.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/deploy-service.js index 18e4d36b81b..ff939855e24 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/deploy-service.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/deploy-service.js @@ -152,7 +152,8 @@ export default Ember.Component.extend({ isUserNameGiven: Ember.computed.empty('userName'), - isValidServiceDef: Ember.computed('serviceDef.name', 'serviceDef.queue', 
'serviceDef.serviceComponents.[]', function () { + isValidServiceDef: Ember.computed('serviceDef.name', 'serviceDef.queue', + 'serviceDef.version', 'serviceDef.serviceComponents.[]', function () { return this.get('serviceDef').isValidServiceDef(); }), diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-servicedef.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-servicedef.js index 2a9953dc979..c0153a5a2b9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-servicedef.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-servicedef.js @@ -22,6 +22,7 @@ import Ember from 'ember'; export default DS.Model.extend({ name: DS.attr('string', {defaultValue: ''}), queue: DS.attr('string', {defaultValue: ''}), + version: DS.attr('string', {defaultValue: ''}), lifetime: DS.attr('string', {defaultValue: ''}), isCached: DS.attr('boolean', {defaultValue: false}), @@ -44,6 +45,7 @@ export default DS.Model.extend({ clear() { this.set('name', ''); this.set('queue', ''); + this.set('version', ''); this.set('lifetime', ''); this.get('serviceComponents').clear(); this.get('serviceConfigs').clear(); @@ -52,7 +54,8 @@ export default DS.Model.extend({ }, isValidServiceDef() { - return this.get('name') !== '' && this.get('queue') !== '' && this.get('serviceComponents.length') > 0; + return this.get('name') !== '' && this.get('queue') !== '' && + this.get('version') !== '' && this.get('serviceComponents.length') > 0; }, createNewServiceComponent() { @@ -115,6 +118,7 @@ export default DS.Model.extend({ var json = { name: "", queue: "", + version: "", lifetime: "-1", components: [], configuration: { @@ -131,6 +135,7 @@ export default DS.Model.extend({ json['name'] = this.get('name'); json['queue'] = this.get('queue'); + json['version'] = this.get('version'); if (this.get('lifetime')) { json['lifetime'] = this.get('lifetime'); @@ -266,6 +271,7 @@ export default DS.Model.extend({ var clone = this.createNewServiceDef(); clone.set('name', this.get('name')); clone.set('queue', this.get('queue')); + clone.set('version', this.get('version')); clone.set('lifetime', this.get('lifetime')); clone.get('serviceComponents', this.get('serviceComponents')); clone.get('serviceConfigs', this.get('serviceConfigs')); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/deploy-service.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/deploy-service.hbs index 2d186106b45..d348200920d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/deploy-service.hbs +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/deploy-service.hbs @@ -99,6 +99,17 @@ +

+
+
+ + + {{input type="text" class="form-control" placeholder="1.0.0" value=serviceDef.version}} +
+
+
+
+
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/info-seeder.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/info-seeder.js index 3d013917493..cfff9d0ebd6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/info-seeder.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/info-seeder.js @@ -19,6 +19,7 @@ export default { serviceName: "A unique application name", queueName: "The YARN queue that this application should be submitted to", + version: "Version of the service", lifetime: "Life time (in seconds) of the application from the time it reaches the STARTED state (after which it is automatically destroyed by YARN). For unlimited lifetime do not set a lifetime value.", components: "One or more components of the application. If the application is HBase say, then the component can be a simple role like master or regionserver. If the application is a complex business webapp then a component can be other applications say Kafka or Storm. Thereby it opens up the support for complex and nested applications.", configurations: "Set of configuration properties that can be injected into the application components via envs, files and custom pluggable helper docker containers. Files of several standard formats like xml, properties, json, yaml and templates will be supported.", From 9efb4b7db00d79aded52997ec89a1be94ecdd268 Mon Sep 17 00:00:00 2001 From: Xiao Chen Date: Sun, 3 Jun 2018 22:29:52 -0700 Subject: [PATCH 028/113] HDFS-13339. Volume reference can't be released and may lead to deadlock when DataXceiver does a check volume. Contributed by liaoyuxiangqin and Zsolt Venczel. --- .../datanode/checker/DatasetVolumeChecker.java | 13 ++++++++++++- .../datanode/checker/TestDatasetVolumeChecker.java | 3 +++ 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java index 6ab6425ad79..3889e2317c2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java @@ -46,6 +46,7 @@ import java.util.HashSet; import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; @@ -103,6 +104,8 @@ public class DatasetVolumeChecker { private static final VolumeCheckContext IGNORED_CONTEXT = new VolumeCheckContext(); + private final ExecutorService checkVolumeResultHandlerExecutorService; + /** * @param conf Configuration object. * @param timer {@link Timer} object used for throttling checks. 
@@ -163,6 +166,12 @@ public class DatasetVolumeChecker { .setNameFormat("DataNode DiskChecker thread %d") .setDaemon(true) .build())); + + checkVolumeResultHandlerExecutorService = Executors.newCachedThreadPool( + new ThreadFactoryBuilder() + .setNameFormat("VolumeCheck ResultHandler thread %d") + .setDaemon(true) + .build()); } /** @@ -292,7 +301,9 @@ public class DatasetVolumeChecker { numVolumeChecks.incrementAndGet(); Futures.addCallback(olf.get(), new ResultHandler(volumeReference, new HashSet<>(), new HashSet<>(), - new AtomicLong(1), callback)); + new AtomicLong(1), callback), + checkVolumeResultHandlerExecutorService + ); return true; } else { IOUtils.cleanup(null, volumeReference); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestDatasetVolumeChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestDatasetVolumeChecker.java index b37cc75e6e5..b0314f9317b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestDatasetVolumeChecker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestDatasetVolumeChecker.java @@ -25,6 +25,7 @@ import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.server.datanode.StorageLocation; import org.apache.hadoop.hdfs.server.datanode.fsdataset.*; import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi.VolumeCheckContext; +import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.DiskChecker.DiskErrorException; import org.apache.hadoop.util.FakeTimer; import org.junit.Rule; @@ -122,6 +123,8 @@ public class TestDatasetVolumeChecker { } }); + GenericTestUtils.waitFor(() -> numCallbackInvocations.get() > 0, 5, 10000); + // Ensure that the check was invoked at least once. verify(volume, times(1)).check(anyObject()); if (result) { From bccdfeee0aaef9cb98d09ee39909b63fdcbeeafc Mon Sep 17 00:00:00 2001 From: Wei-Chiu Chuang Date: Mon, 4 Jun 2018 07:02:05 -0700 Subject: [PATCH 029/113] HDFS-13155. BlockPlacementPolicyDefault.chooseTargetInOrder Not Checking Return Value for NULL. Contributed by Zsolt Venczel. --- .../blockmanagement/BlockPlacementPolicyDefault.java | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java index 518e62c6b2e..c94232fbcb1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java @@ -485,9 +485,13 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy { throws NotEnoughReplicasException { final int numOfResults = results.size(); if (numOfResults == 0) { - writer = chooseLocalStorage(writer, excludedNodes, blocksize, - maxNodesPerRack, results, avoidStaleNodes, storageTypes, true) - .getDatanodeDescriptor(); + DatanodeStorageInfo storageInfo = chooseLocalStorage(writer, + excludedNodes, blocksize, maxNodesPerRack, results, avoidStaleNodes, + storageTypes, true); + + writer = (storageInfo != null) ? 
storageInfo.getDatanodeDescriptor() + : null; + if (--numOfReplicas == 0) { return writer; } From e2289c8d1496a5eff88e6bcb8776a11d45371ffc Mon Sep 17 00:00:00 2001 From: Rushabh Shah Date: Mon, 4 Jun 2018 09:19:03 -0500 Subject: [PATCH 030/113] HDFS-13281 Namenode#createFile should be /.reserved/raw/ aware.. Contributed by Rushabh S Shah --- .../hdfs/server/namenode/FSNamesystem.java | 2 +- .../hadoop/hdfs/TestEncryptionZones.java | 29 +++++++++++++++++++ 2 files changed, 30 insertions(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index c02eb84d5f4..19ff08d4165 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -2416,7 +2416,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean, } FileEncryptionInfo feInfo = null; - if (provider != null) { + if (!iip.isRaw() && provider != null) { EncryptionKeyInfo ezInfo = FSDirEncryptionZoneOp.getEncryptionKeyInfo( this, iip, supportedVersions); // if the path has an encryption zone, the lock was released while diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java index 6f9ef290a76..d8524aeea01 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java @@ -21,6 +21,7 @@ import java.io.ByteArrayOutputStream; import java.io.File; import java.io.IOException; import java.io.InputStream; +import java.io.OutputStream; import java.io.PrintStream; import java.io.RandomAccessFile; import java.io.StringReader; @@ -77,6 +78,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.namenode.EncryptionFaultInjector; import org.apache.hadoop.hdfs.server.namenode.EncryptionZoneManager; import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; @@ -2302,4 +2304,31 @@ public class TestEncryptionZones { Assert.assertEquals((data[i] & 0XFF), in.read()); } } + + /** + * Tests that namenode doesn't generate edek if we are writing to + * /.reserved/raw directory. + * @throws Exception + */ + @Test + public void testWriteToEZReservedRaw() throws Exception { + String unEncryptedBytes = "hello world"; + // Create an Encryption Zone. + final Path zonePath = new Path("/zone"); + fsWrapper.mkdir(zonePath, FsPermission.getDirDefault(), false); + dfsAdmin.createEncryptionZone(zonePath, TEST_KEY, NO_TRASH); + Path p1 = new Path(zonePath, "p1"); + Path reservedRawPath = new Path("/.reserved/raw/" + p1.toString()); + // Create an empty file with /.reserved/raw/ path. 
+ OutputStream os = fs.create(reservedRawPath); + os.close(); + try { + fs.getXAttr(reservedRawPath, HdfsServerConstants + .CRYPTO_XATTR_FILE_ENCRYPTION_INFO); + fail("getXAttr should have thrown an exception"); + } catch (IOException ioe) { + assertExceptionContains("At least one of the attributes provided was " + + "not found.", ioe); + } + } } From ab3885f2c8cfd63ff94e548c40db3b4ea52c12e8 Mon Sep 17 00:00:00 2001 From: Xiaoyu Yao Date: Mon, 4 Jun 2018 08:55:01 -0700 Subject: [PATCH 031/113] HDDS-126. Fix findbugs warning in MetadataKeyFilters.java. Contributed by Akira Ajisaka. --- .../main/java/org/apache/hadoop/utils/MetadataKeyFilters.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataKeyFilters.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataKeyFilters.java index d3a29435de3..153e2f79441 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataKeyFilters.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataKeyFilters.java @@ -19,6 +19,7 @@ package org.apache.hadoop.utils; import com.google.common.base.Preconditions; import com.google.common.base.Strings; +import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.ozone.OzoneConsts; /** @@ -94,7 +95,7 @@ public final class MetadataKeyFilters { if (Strings.isNullOrEmpty(keyPrefix)) { accept = true; } else { - byte [] prefixBytes = keyPrefix.getBytes(); + byte [] prefixBytes = DFSUtil.string2Bytes(keyPrefix); if (currentKey != null && prefixMatch(prefixBytes, currentKey)) { keysHinted++; accept = true; From ba12f87dcb0e406da57cdd1ad17677ac2367f425 Mon Sep 17 00:00:00 2001 From: Haibo Chen Date: Mon, 4 Jun 2018 09:14:53 -0700 Subject: [PATCH 032/113] YARN-8390. Fix API incompatible changes in FairScheduler's AllocationFileLoaderService. 
(Gergo Repas via Haibo Chen) --- .../fair/AllocationFileLoaderService.java | 16 ++-- .../scheduler/fair/FairScheduler.java | 4 +- .../fair/TestAllocationFileLoaderService.java | 96 +++++++++---------- 3 files changed, 58 insertions(+), 58 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java index 7a40b6a2022..e541ab7b0dd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java @@ -87,7 +87,7 @@ public class AllocationFileLoaderService extends AbstractService { private Path allocFile; private FileSystem fs; - private final Listener reloadListener; + private Listener reloadListener; @VisibleForTesting long reloadIntervalMs = ALLOC_RELOAD_INTERVAL_MS; @@ -95,16 +95,15 @@ public class AllocationFileLoaderService extends AbstractService { private Thread reloadThread; private volatile boolean running = true; - public AllocationFileLoaderService(Listener reloadListener) { - this(reloadListener, SystemClock.getInstance()); + public AllocationFileLoaderService() { + this(SystemClock.getInstance()); } private List defaultPermissions; - public AllocationFileLoaderService(Listener reloadListener, Clock clock) { + public AllocationFileLoaderService(Clock clock) { super(AllocationFileLoaderService.class.getName()); this.clock = clock; - this.reloadListener = reloadListener; } @Override @@ -209,6 +208,10 @@ public class AllocationFileLoaderService extends AbstractService { return allocPath; } + public synchronized void setReloadListener(Listener reloadListener) { + this.reloadListener = reloadListener; + } + /** * Updates the allocation list from the allocation config file. This file is * expected to be in the XML format specified in the design doc. 
@@ -350,6 +353,7 @@ public class AllocationFileLoaderService extends AbstractService { public interface Listener { void onReload(AllocationConfiguration info) throws IOException; - void onCheck(); + default void onCheck() { + } } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java index 4c84aa91ae2..123f7110cfe 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java @@ -208,8 +208,7 @@ public class FairScheduler extends public FairScheduler() { super(FairScheduler.class.getName()); context = new FSContext(this); - allocsLoader = - new AllocationFileLoaderService(new AllocationReloadListener()); + allocsLoader = new AllocationFileLoaderService(); queueMgr = new QueueManager(this); maxRunningEnforcer = new MaxRunningAppsEnforcer(this); } @@ -1438,6 +1437,7 @@ public class FairScheduler extends } allocsLoader.init(conf); + allocsLoader.setReloadListener(new AllocationReloadListener()); // If we fail to load allocations file on initialize, we want to fail // immediately. After a successful load, exceptions on future reloads // will just result in leaving things as they are. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestAllocationFileLoaderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestAllocationFileLoaderService.java index 30b8a917841..50a003ecd11 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestAllocationFileLoaderService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestAllocationFileLoaderService.java @@ -25,7 +25,6 @@ import org.apache.hadoop.fs.UnsupportedFileSystemException; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationSchedulerConfiguration; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.AllocationFileLoaderService.Listener; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.QueuePlacementRule.NestedUserQueue; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.allocationfile.AllocationFileWriter; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.DominantResourceFairnessPolicy; @@ -33,8 +32,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.Fai import org.apache.hadoop.yarn.util.ControlledClock; import org.apache.hadoop.yarn.util.resource.Resources; import org.junit.Test; -import org.mockito.Mockito; - 
import java.io.File; import java.io.FileOutputStream; import java.io.FileWriter; @@ -82,8 +79,7 @@ public class TestAllocationFileLoaderService { fs.copyFromLocalFile(new Path(fschedURL.toURI()), new Path(fsAllocPath)); conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, fsAllocPath); - AllocationFileLoaderService allocLoader = - new AllocationFileLoaderService(Mockito.mock(Listener.class)); + AllocationFileLoaderService allocLoader = new AllocationFileLoaderService(); Path allocationFile = allocLoader.getAllocationFile(conf); assertEquals(fsAllocPath, allocationFile.toString()); assertTrue(fs.exists(allocationFile)); @@ -96,8 +92,7 @@ public class TestAllocationFileLoaderService { throws UnsupportedFileSystemException { Configuration conf = new YarnConfiguration(); conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, "badfs:///badfile"); - AllocationFileLoaderService allocLoader = - new AllocationFileLoaderService(Mockito.mock(Listener.class)); + AllocationFileLoaderService allocLoader = new AllocationFileLoaderService(); allocLoader.getAllocationFile(conf); } @@ -110,7 +105,7 @@ public class TestAllocationFileLoaderService { conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, TEST_FAIRSCHED_XML); AllocationFileLoaderService allocLoader = - new AllocationFileLoaderService(Mockito.mock(Listener.class)); + new AllocationFileLoaderService(); Path allocationFile = allocLoader.getAllocationFile(conf); assertEquals(TEST_FAIRSCHED_XML, allocationFile.getName()); assertTrue(fs.exists(allocationFile)); @@ -139,11 +134,12 @@ public class TestAllocationFileLoaderService { Configuration conf = new Configuration(); conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE); - ReloadListener confHolder = new ReloadListener(); - AllocationFileLoaderService allocLoader = - new AllocationFileLoaderService(confHolder, clock); + AllocationFileLoaderService allocLoader = new AllocationFileLoaderService( + clock); allocLoader.reloadIntervalMs = 5; allocLoader.init(conf); + ReloadListener confHolder = new ReloadListener(); + allocLoader.setReloadListener(confHolder); allocLoader.reloadAllocations(); AllocationConfiguration allocConf = confHolder.allocConf; @@ -209,9 +205,7 @@ public class TestAllocationFileLoaderService { public void testAllocationFileParsing() throws Exception { Configuration conf = new Configuration(); conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE); - ReloadListener confHolder = new ReloadListener(); - AllocationFileLoaderService allocLoader = - new AllocationFileLoaderService(confHolder); + AllocationFileLoaderService allocLoader = new AllocationFileLoaderService(); AllocationFileWriter .create() @@ -284,6 +278,8 @@ public class TestAllocationFileLoaderService { .writeToFile(ALLOC_FILE); allocLoader.init(conf); + ReloadListener confHolder = new ReloadListener(); + allocLoader.setReloadListener(confHolder); allocLoader.reloadAllocations(); AllocationConfiguration queueConf = confHolder.allocConf; @@ -431,9 +427,7 @@ public class TestAllocationFileLoaderService { public void testBackwardsCompatibleAllocationFileParsing() throws Exception { Configuration conf = new Configuration(); conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE); - ReloadListener confHolder = new ReloadListener(); - AllocationFileLoaderService allocLoader = - new AllocationFileLoaderService(confHolder); + AllocationFileLoaderService allocLoader = new AllocationFileLoaderService(); PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE)); out.println(""); @@ -479,6 +473,8 @@ 
public class TestAllocationFileLoaderService { out.close(); allocLoader.init(conf); + ReloadListener confHolder = new ReloadListener(); + allocLoader.setReloadListener(confHolder); allocLoader.reloadAllocations(); AllocationConfiguration queueConf = confHolder.allocConf; @@ -554,10 +550,10 @@ public class TestAllocationFileLoaderService { out.println(""); out.close(); - ReloadListener confHolder = new ReloadListener(); - AllocationFileLoaderService allocLoader = - new AllocationFileLoaderService(confHolder); + AllocationFileLoaderService allocLoader = new AllocationFileLoaderService(); allocLoader.init(conf); + ReloadListener confHolder = new ReloadListener(); + allocLoader.setReloadListener(confHolder); allocLoader.reloadAllocations(); AllocationConfiguration allocConf = confHolder.allocConf; @@ -588,10 +584,10 @@ public class TestAllocationFileLoaderService { out.println(""); out.close(); - ReloadListener confHolder = new ReloadListener(); - AllocationFileLoaderService allocLoader = - new AllocationFileLoaderService(confHolder); + AllocationFileLoaderService allocLoader = new AllocationFileLoaderService(); allocLoader.init(conf); + ReloadListener confHolder = new ReloadListener(); + allocLoader.setReloadListener(confHolder); allocLoader.reloadAllocations(); } @@ -612,10 +608,10 @@ public class TestAllocationFileLoaderService { out.println(""); out.close(); - ReloadListener confHolder = new ReloadListener(); - AllocationFileLoaderService allocLoader = - new AllocationFileLoaderService(confHolder); + AllocationFileLoaderService allocLoader = new AllocationFileLoaderService(); allocLoader.init(conf); + ReloadListener confHolder = new ReloadListener(); + allocLoader.setReloadListener(confHolder); allocLoader.reloadAllocations(); } @@ -636,10 +632,10 @@ public class TestAllocationFileLoaderService { out.println(""); out.close(); - ReloadListener confHolder = new ReloadListener(); - AllocationFileLoaderService allocLoader = - new AllocationFileLoaderService(confHolder); + AllocationFileLoaderService allocLoader = new AllocationFileLoaderService(); allocLoader.init(conf); + ReloadListener confHolder = new ReloadListener(); + allocLoader.setReloadListener(confHolder); allocLoader.reloadAllocations(); } @@ -658,10 +654,10 @@ public class TestAllocationFileLoaderService { out.println(""); out.close(); - ReloadListener confHolder = new ReloadListener(); - AllocationFileLoaderService allocLoader = - new AllocationFileLoaderService(confHolder); + AllocationFileLoaderService allocLoader = new AllocationFileLoaderService(); allocLoader.init(conf); + ReloadListener confHolder = new ReloadListener(); + allocLoader.setReloadListener(confHolder); try { allocLoader.reloadAllocations(); } catch (AllocationConfigurationException ex) { @@ -689,10 +685,10 @@ public class TestAllocationFileLoaderService { out.println(""); out.close(); - ReloadListener confHolder = new ReloadListener(); - AllocationFileLoaderService allocLoader = - new AllocationFileLoaderService(confHolder); + AllocationFileLoaderService allocLoader = new AllocationFileLoaderService(); allocLoader.init(conf); + ReloadListener confHolder = new ReloadListener(); + allocLoader.setReloadListener(confHolder); try { allocLoader.reloadAllocations(); } catch (AllocationConfigurationException ex) { @@ -718,10 +714,10 @@ public class TestAllocationFileLoaderService { out.println(""); out.close(); - ReloadListener confHolder = new ReloadListener(); - AllocationFileLoaderService allocLoader = - new AllocationFileLoaderService(confHolder); + 
AllocationFileLoaderService allocLoader = new AllocationFileLoaderService(); allocLoader.init(conf); + ReloadListener confHolder = new ReloadListener(); + allocLoader.setReloadListener(confHolder); allocLoader.reloadAllocations(); AllocationConfiguration queueConf = confHolder.allocConf; // Check whether queue 'parent' and 'child' are loaded successfully @@ -749,10 +745,10 @@ public class TestAllocationFileLoaderService { out.println(""); out.close(); - ReloadListener confHolder = new ReloadListener(); - AllocationFileLoaderService allocLoader = - new AllocationFileLoaderService(confHolder); + AllocationFileLoaderService allocLoader = new AllocationFileLoaderService(); allocLoader.init(conf); + ReloadListener confHolder = new ReloadListener(); + allocLoader.setReloadListener(confHolder); allocLoader.reloadAllocations(); } @@ -771,10 +767,10 @@ public class TestAllocationFileLoaderService { out.println(""); out.close(); - ReloadListener confHolder = new ReloadListener(); - AllocationFileLoaderService allocLoader = - new AllocationFileLoaderService(confHolder); + AllocationFileLoaderService allocLoader = new AllocationFileLoaderService(); allocLoader.init(conf); + ReloadListener confHolder = new ReloadListener(); + allocLoader.setReloadListener(confHolder); allocLoader.reloadAllocations(); } @@ -797,10 +793,10 @@ public class TestAllocationFileLoaderService { out.println(""); out.close(); - ReloadListener confHolder = new ReloadListener(); - AllocationFileLoaderService allocLoader = - new AllocationFileLoaderService(confHolder); + AllocationFileLoaderService allocLoader = new AllocationFileLoaderService(); allocLoader.init(conf); + ReloadListener confHolder = new ReloadListener(); + allocLoader.setReloadListener(confHolder); allocLoader.reloadAllocations(); AllocationConfiguration allocConf = confHolder.allocConf; @@ -857,10 +853,10 @@ public class TestAllocationFileLoaderService { out.println(""); out.close(); - ReloadListener confHolder = new ReloadListener(); - AllocationFileLoaderService allocLoader = - new AllocationFileLoaderService(confHolder); + AllocationFileLoaderService allocLoader = new AllocationFileLoaderService(); allocLoader.init(conf); + ReloadListener confHolder = new ReloadListener(); + allocLoader.setReloadListener(confHolder); allocLoader.reloadAllocations(); } From 61fc7f73f21b0949e27ef3893efda757d91a03f9 Mon Sep 17 00:00:00 2001 From: Inigo Goiri Date: Mon, 4 Jun 2018 09:28:09 -0700 Subject: [PATCH 033/113] YARN-8389. Improve the description of machine-list property in Federation docs. Contributed by Takanobu Asanuma. --- .../hadoop-yarn-site/src/site/markdown/Federation.md | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/Federation.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/Federation.md index a1d3ab6acf6..087a5b01268 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/Federation.md +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/Federation.md @@ -218,7 +218,7 @@ SQL-Server scripts are located in **sbin/FederationStateStore/SQLServer/**. |`yarn.federation.policy-manager` | `org.apache.hadoop.yarn.server.federation.policies.manager.WeightedLocalityPolicyManager` | The choice of policy manager determines how Applications and ResourceRequests are routed through the system. | |`yarn.federation.policy-manager-params` | `` | The payload that configures the policy. 
In our example a set of weights for router and amrmproxy policies. This is typically generated by serializing a policymanager that has been configured programmatically, or by populating the state-store with the .json serialized form of it. | |`yarn.federation.subcluster-resolver.class` | `org.apache.hadoop.yarn.server.federation.resolver.DefaultSubClusterResolverImpl` | The class used to resolve which subcluster a node belongs to, and which subcluster(s) a rack belongs to. | -| `yarn.federation.machine-list` | `node1,subcluster1,rack1\n node2 , subcluster2, RACK1\n node3,subcluster3, rack2\n node4, subcluster3, rack2\n` | a list of Nodes, Sub-clusters, Rack, used by the `DefaultSubClusterResolverImpl` | +|`yarn.federation.machine-list` | `` | Path of machine-list file used by `SubClusterResolver`. Each line of the file is a node with sub-cluster and rack information. Below is the example:

node1, subcluster1, rack1
node2, subcluster2, rack1
node3, subcluster3, rack2
node4, subcluster3, rack2 | ###ON RMs: @@ -226,8 +226,7 @@ These are extra configurations that should appear in the **conf/yarn-site.xml** | Property | Example | Description | |:---- |:---- | -|`yarn.resourcemanager.epoch` | `` | The seed value for the epoch. This is used to guarantee uniqueness of container-IDs generate by different RMs. It must therefore be unique among sub-clusters and `well-spaced` to allow for failures which increment epoch. Increments of 1000 allow for a large number of sub-clusters and - practically ensure near-zero chance of collisions (a clash will only happen if a container is still alive for 1000 restarts of one RM, while the next RM never restarted, and an app requests more containers). | +|`yarn.resourcemanager.epoch` | `` | The seed value for the epoch. This is used to guarantee uniqueness of container-IDs generate by different RMs. It must therefore be unique among sub-clusters and `well-spaced` to allow for failures which increment epoch. Increments of 1000 allow for a large number of sub-clusters and practically ensure near-zero chance of collisions (a clash will only happen if a container is still alive for 1000 restarts of one RM, while the next RM never restarted, and an app requests more containers). | Optional: @@ -285,7 +284,7 @@ In order to submit jobs to a Federation cluster one must create a seperate set o | Property | Example | Description | |:--- |:--- | | `yarn.resourcemanager.address` | `:8050` | Redirects jobs launched at the client to the router's client RM port. | -| `yarn.resourcemanger.scheduler.address` | `localhost:8049` | Redirects jobs to the federation AMRMProxy port.| +| `yarn.resourcemanager.scheduler.address` | `localhost:8049` | Redirects jobs to the federation AMRMProxy port.| Any YARN jobs for the cluster can be submitted from the client configurations described above. In order to launch a job through federation, first start up all the clusters involved in the federation as described [here](../../hadoop-project-dist/hadoop-common/ClusterSetup.html). Next, start up the router on the router machine with the following command: From ea7b53fb44496c35d58346f2222610a30d7041f6 Mon Sep 17 00:00:00 2001 From: Xiaoyu Yao Date: Mon, 4 Jun 2018 09:51:04 -0700 Subject: [PATCH 034/113] HDDS-145. Freon times out because of because of wrong ratis port number in datanode details. Contributed by Mukul Kumar Singh. --- .../common/src/main/java/org/apache/ratis/RatisHelper.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/ratis/RatisHelper.java b/hadoop-hdds/common/src/main/java/org/apache/ratis/RatisHelper.java index 20356b39805..df831161ca4 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/ratis/RatisHelper.java +++ b/hadoop-hdds/common/src/main/java/org/apache/ratis/RatisHelper.java @@ -49,12 +49,12 @@ public interface RatisHelper { static String toRaftPeerIdString(DatanodeDetails id) { return id.getUuidString() + "_" + - id.getPort(DatanodeDetails.Port.Name.RATIS); + id.getPort(DatanodeDetails.Port.Name.RATIS).getValue(); } static String toRaftPeerAddressString(DatanodeDetails id) { return id.getIpAddress() + ":" + - id.getPort(DatanodeDetails.Port.Name.RATIS); + id.getPort(DatanodeDetails.Port.Name.RATIS).getValue(); } static RaftPeerId toRaftPeerId(DatanodeDetails id) { From e2c172dc9faeb5472a32d7052e54d79d499c0a55 Mon Sep 17 00:00:00 2001 From: Miklos Szegedi Date: Sun, 3 Jun 2018 22:13:31 -0700 Subject: [PATCH 035/113] YARN-8382. cgroup file leak in NM. 
Contributed by Hu Ziqian. --- .../containermanager/linux/resources/CGroupsHandlerImpl.java | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java index 6ed94e1c41d..c3800b6e4d6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java @@ -96,7 +96,9 @@ class CGroupsHandlerImpl implements CGroupsHandler { NM_LINUX_CONTAINER_CGROUPS_MOUNT_PATH, null); this.deleteCGroupTimeout = conf.getLong( YarnConfiguration.NM_LINUX_CONTAINER_CGROUPS_DELETE_TIMEOUT, - YarnConfiguration.DEFAULT_NM_LINUX_CONTAINER_CGROUPS_DELETE_TIMEOUT); + YarnConfiguration.DEFAULT_NM_LINUX_CONTAINER_CGROUPS_DELETE_TIMEOUT) + + conf.getLong(YarnConfiguration.NM_SLEEP_DELAY_BEFORE_SIGKILL_MS, + YarnConfiguration.DEFAULT_NM_SLEEP_DELAY_BEFORE_SIGKILL_MS) + 1000; this.deleteCGroupDelay = conf.getLong(YarnConfiguration.NM_LINUX_CONTAINER_CGROUPS_DELETE_DELAY, YarnConfiguration.DEFAULT_NM_LINUX_CONTAINER_CGROUPS_DELETE_DELAY); From dad1bb868f7a28a486fd500f102c8c174e6dd272 Mon Sep 17 00:00:00 2001 From: Inigo Goiri Date: Mon, 4 Jun 2018 12:55:54 -0700 Subject: [PATCH 036/113] MAPREDUCE-7105. Fix TestNativeCollectorOnlyHandler.testOnCall on Windows because of the path format. Contributed by Giovanni Matteo Fumarola. 
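This is the same class of Windows-only failure as the line-separator fixes above, but for path separators: the test assembled its expected paths with a hard-coded '/', while the path the handler returns follows the platform's separator (a backslash on Windows). A hedged sketch of the idea, not part of the patch, with a placeholder variable for the test's local directory:

    // Joining segments on File.separator keeps the expectation platform-independent;
    // "localDir" here stands in for whatever local directory the test configures.
    String expectedOutputPath = String.join(File.separator, localDir, "output", "file.out");
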
--- .../handlers/TestNativeCollectorOnlyHandler.java | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/handlers/TestNativeCollectorOnlyHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/handlers/TestNativeCollectorOnlyHandler.java index 1c8bf7a3664..314963b1fd1 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/handlers/TestNativeCollectorOnlyHandler.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/handlers/TestNativeCollectorOnlyHandler.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.mapred.nativetask.handlers; +import java.io.File; import java.io.IOException; import org.apache.hadoop.conf.Configuration; @@ -33,6 +34,7 @@ import org.apache.hadoop.mapred.nativetask.buffer.InputBuffer; import org.apache.hadoop.mapred.nativetask.testutil.TestConstants; import org.apache.hadoop.mapred.nativetask.util.OutputUtil; import org.apache.hadoop.mapred.nativetask.util.ReadWriteBuffer; +import org.apache.hadoop.util.StringUtils; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -110,9 +112,12 @@ public class TestNativeCollectorOnlyHandler { } Assert.assertTrue("exception thrown", thrown); - final String expectedOutputPath = LOCAL_DIR + "/output/file.out"; - final String expectedOutputIndexPath = LOCAL_DIR + "/output/file.out.index"; - final String expectedSpillPath = LOCAL_DIR + "/output/spill0.out"; + final String expectedOutputPath = StringUtils.join(File.separator, + new String[] {LOCAL_DIR, "output", "file.out"}); + final String expectedOutputIndexPath = StringUtils.join(File.separator, + new String[] {LOCAL_DIR, "output", "file.out.index"}); + final String expectedSpillPath = StringUtils.join(File.separator, + new String[] {LOCAL_DIR, "output", "spill0.out"}); final String outputPath = handler.onCall( NativeCollectorOnlyHandler.GET_OUTPUT_PATH, null).readString(); From 04cf699dd54aab3595eb80295652dcde9a2f4dd5 Mon Sep 17 00:00:00 2001 From: Haibo Chen Date: Mon, 4 Jun 2018 11:13:40 -0700 Subject: [PATCH 037/113] YARN-8388. TestCGroupElasticMemoryController.testNormalExit() hangs on Linux. 
(Miklos Szegedi via Haibo Chen) --- .../hadoop/test/PlatformAssumptions.java | 8 -------- .../TestCGroupElasticMemoryController.java | 19 +++++-------------- 2 files changed, 5 insertions(+), 22 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/PlatformAssumptions.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/PlatformAssumptions.java index fdbb71f5f1d..4e831625023 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/PlatformAssumptions.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/PlatformAssumptions.java @@ -25,7 +25,6 @@ import org.junit.internal.AssumptionViolatedException; public final class PlatformAssumptions { public static final String OS_NAME = System.getProperty("os.name"); public static final boolean WINDOWS = OS_NAME.startsWith("Windows"); - public static final boolean MAC_OS = OS_NAME.startsWith("Mac OS X"); private PlatformAssumptions() { } @@ -45,11 +44,4 @@ public final class PlatformAssumptions { "Expected Windows platform but got " + OS_NAME); } } - - public static void assumeMacOS() { - if (!MAC_OS) { - throw new AssumptionViolatedException( - "Expected MacOS platform but got " + OS_NAME); - } - } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupElasticMemoryController.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupElasticMemoryController.java index c263c79a65b..e93bc5da1a7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupElasticMemoryController.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupElasticMemoryController.java @@ -38,7 +38,6 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; -import static org.apache.hadoop.test.PlatformAssumptions.assumeMacOS; /** * Test for elastic non-strict memory controller based on cgroups. @@ -257,26 +256,20 @@ public class TestCGroupElasticMemoryController { /** * Test that node manager can exit listening. - * This is done by running a long running listener for 10 seconds. + * This is done by running a long running listener for 10000 seconds. * Then we wait for 2 seconds and stop listening. + * We do not use a script this time to avoid leaking the child process. 
* @throws Exception exception occurred */ @Test(timeout = 20000) public void testNormalExit() throws Exception { - // TODO This may hang on Linux - assumeMacOS(); conf.set(YarnConfiguration.NM_ELASTIC_MEMORY_CONTROL_OOM_LISTENER_PATH, - script.getAbsolutePath()); + "sleep"); ExecutorService service = Executors.newFixedThreadPool(1); try { - FileUtils.writeStringToFile(script, - "#!/bin/bash\nsleep 10000;", - Charset.defaultCharset(), false); - assertTrue("Could not set executable", - script.setExecutable(true)); - CGroupsHandler cgroups = mock(CGroupsHandler.class); - when(cgroups.getPathForCGroup(any(), any())).thenReturn(""); + // This will be passed to sleep as an argument + when(cgroups.getPathForCGroup(any(), any())).thenReturn("10000"); when(cgroups.getCGroupParam(any(), any(), any())) .thenReturn("under_oom 0"); @@ -308,8 +301,6 @@ public class TestCGroupElasticMemoryController { controller.run(); } finally { service.shutdown(); - assertTrue(String.format("Could not clean up script %s", - script.getAbsolutePath()), script.delete()); } } From 0cd145a44390bc1a01113dce4be4e629637c3e8a Mon Sep 17 00:00:00 2001 From: Robert Kanter Date: Mon, 4 Jun 2018 15:32:03 -0700 Subject: [PATCH 038/113] YARN-4677. RMNodeResourceUpdateEvent update from scheduler can lead to race condition (wilfreds and gphillips via rkanter) --- .../scheduler/AbstractYarnScheduler.java | 19 +++++--- .../scheduler/fair/FairScheduler.java | 2 +- .../scheduler/fifo/FifoScheduler.java | 6 ++- .../capacity/TestCapacityScheduler.java | 44 ++++++++++++++----- .../scheduler/fair/TestFairScheduler.java | 43 +++++++++++++----- .../scheduler/fifo/TestFifoScheduler.java | 44 ++++++++++++++----- 6 files changed, 115 insertions(+), 43 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java index 18c7b4eb7d9..d2e81a50d94 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java @@ -1106,12 +1106,16 @@ public abstract class AbstractYarnScheduler } // Process new container information + // NOTICE: it is possible to not find the NodeID as a node can be + // decommissioned at the same time. Skip updates if node is null. SchedulerNode schedulerNode = getNode(nm.getNodeID()); List completedContainers = updateNewContainerInfo(nm, schedulerNode); // Notify Scheduler Node updated. - schedulerNode.notifyNodeUpdate(); + if (schedulerNode != null) { + schedulerNode.notifyNodeUpdate(); + } // Process completed containers Resource releasedResources = Resource.newInstance(0, 0); @@ -1121,9 +1125,7 @@ public abstract class AbstractYarnScheduler // If the node is decommissioning, send an update to have the total // resource equal to the used resource, so no available resource to // schedule. 
- // TODO YARN-5128: Fix possible race-condition when request comes in before - // update is propagated - if (nm.getState() == NodeState.DECOMMISSIONING) { + if (nm.getState() == NodeState.DECOMMISSIONING && schedulerNode != null) { this.rmContext .getDispatcher() .getEventHandler() @@ -1133,13 +1135,16 @@ public abstract class AbstractYarnScheduler } updateSchedulerHealthInformation(releasedResources, releasedContainers); - updateNodeResourceUtilization(nm, schedulerNode); + if (schedulerNode != null) { + updateNodeResourceUtilization(nm, schedulerNode); + } // Now node data structures are up-to-date and ready for scheduling. if(LOG.isDebugEnabled()) { LOG.debug( - "Node being looked for scheduling " + nm + " availableResource: " - + schedulerNode.getUnallocatedResource()); + "Node being looked for scheduling " + nm + " availableResource: " + + (schedulerNode == null ? "unknown (decommissioned)" : + schedulerNode.getUnallocatedResource())); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java index 123f7110cfe..557e684b673 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java @@ -1096,7 +1096,7 @@ public class FairScheduler extends return; } - final NodeId nodeID = node.getNodeID(); + final NodeId nodeID = (node != null ? 
node.getNodeID() : null); if (!nodeTracker.exists(nodeID)) { // The node might have just been removed while this thread was waiting // on the synchronized lock before it entered this synchronized method diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java index 7ac9027a78a..8396db54ad8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java @@ -966,8 +966,10 @@ public class FifoScheduler extends return; } - if (Resources.greaterThanOrEqual(resourceCalculator, getClusterResource(), - node.getUnallocatedResource(), minimumAllocation)) { + // A decommissioned node might be removed before we get here + if (node != null && + Resources.greaterThanOrEqual(resourceCalculator, getClusterResource(), + node.getUnallocatedResource(), minimumAllocation)) { LOG.debug("Node heartbeat " + nm.getNodeID() + " available resource = " + node.getUnallocatedResource()); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java index 1d2aadcf2a8..0b54010c276 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java @@ -258,14 +258,12 @@ public class TestCapacityScheduler extends CapacitySchedulerTestBase { } } - private NodeManager - registerNode(String hostName, int containerManagerPort, int httpPort, - String rackName, Resource capability) + private NodeManager registerNode(String hostName, int containerManagerPort, + int httpPort, String rackName, + Resource capability) throws IOException, YarnException { - NodeManager nm = - new NodeManager( - hostName, containerManagerPort, httpPort, rackName, capability, - resourceManager); + NodeManager nm = new NodeManager(hostName, containerManagerPort, httpPort, + rackName, capability, resourceManager); NodeAddedSchedulerEvent nodeAddEvent1 = new NodeAddedSchedulerEvent(resourceManager.getRMContext() .getRMNodes().get(nm.getNodeId())); @@ -280,13 +278,13 @@ public class TestCapacityScheduler extends CapacitySchedulerTestBase { // Register node1 String host_0 = "host_0"; - org.apache.hadoop.yarn.server.resourcemanager.NodeManager nm_0 = + NodeManager nm_0 = registerNode(host_0, 1234, 2345, NetworkTopology.DEFAULT_RACK, Resources.createResource(4 * GB, 1)); // Register node2 String host_1 = "host_1"; - org.apache.hadoop.yarn.server.resourcemanager.NodeManager nm_1 = 
+ NodeManager nm_1 = registerNode(host_1, 1234, 2345, NetworkTopology.DEFAULT_RACK, Resources.createResource(2 * GB, 1)); @@ -4038,6 +4036,29 @@ public class TestCapacityScheduler extends CapacitySchedulerTestBase { Assert.fail("Cannot find RMContainer"); } } + @Test + public void testRemovedNodeDecomissioningNode() throws Exception { + // Register nodemanager + NodeManager nm = registerNode("host_decom", 1234, 2345, + NetworkTopology.DEFAULT_RACK, Resources.createResource(8 * GB, 4)); + + RMNode node = + resourceManager.getRMContext().getRMNodes().get(nm.getNodeId()); + // Send a heartbeat to kick the tires on the Scheduler + NodeUpdateSchedulerEvent nodeUpdate = new NodeUpdateSchedulerEvent(node); + resourceManager.getResourceScheduler().handle(nodeUpdate); + + // force remove the node to simulate race condition + ((CapacityScheduler) resourceManager.getResourceScheduler()).getNodeTracker(). + removeNode(nm.getNodeId()); + // Kick off another heartbeat with the node state mocked to decommissioning + RMNode spyNode = + Mockito.spy(resourceManager.getRMContext().getRMNodes() + .get(nm.getNodeId())); + when(spyNode.getState()).thenReturn(NodeState.DECOMMISSIONING); + resourceManager.getResourceScheduler().handle( + new NodeUpdateSchedulerEvent(spyNode)); + } @Test public void testResourceUpdateDecommissioningNode() throws Exception { @@ -4064,9 +4085,8 @@ public class TestCapacityScheduler extends CapacitySchedulerTestBase { ((AsyncDispatcher) mockDispatcher).start(); // Register node String host_0 = "host_0"; - org.apache.hadoop.yarn.server.resourcemanager.NodeManager nm_0 = - registerNode(host_0, 1234, 2345, NetworkTopology.DEFAULT_RACK, - Resources.createResource(8 * GB, 4)); + NodeManager nm_0 = registerNode(host_0, 1234, 2345, + NetworkTopology.DEFAULT_RACK, Resources.createResource(8 * GB, 4)); // ResourceRequest priorities Priority priority_0 = Priority.newInstance(0); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java index 2f6c2cf2595..9120d3a6cc1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java @@ -82,6 +82,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.MockAM; import org.apache.hadoop.yarn.server.resourcemanager.MockNM; import org.apache.hadoop.yarn.server.resourcemanager.MockNodes; import org.apache.hadoop.yarn.server.resourcemanager.MockRM; +import org.apache.hadoop.yarn.server.resourcemanager.NodeManager; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl; import org.apache.hadoop.yarn.server.resourcemanager.recovery.MemoryRMStateStore; @@ -4973,6 +4974,30 @@ public class TestFairScheduler extends FairSchedulerTestBase { .get(attId3.getApplicationId()).getQueue()); } + @Test + public void testRemovedNodeDecomissioningNode() throws Exception { + // Register nodemanager + NodeManager nm = registerNode("host_decom", 1234, 2345, + 
NetworkTopology.DEFAULT_RACK, Resources.createResource(8 * GB, 4)); + + RMNode node = + resourceManager.getRMContext().getRMNodes().get(nm.getNodeId()); + // Send a heartbeat to kick the tires on the Scheduler + NodeUpdateSchedulerEvent nodeUpdate = new NodeUpdateSchedulerEvent(node); + resourceManager.getResourceScheduler().handle(nodeUpdate); + + // Force remove the node to simulate race condition + ((FairScheduler) resourceManager.getResourceScheduler()) + .getNodeTracker().removeNode(nm.getNodeId()); + // Kick off another heartbeat with the node state mocked to decommissioning + RMNode spyNode = + Mockito.spy(resourceManager.getRMContext().getRMNodes() + .get(nm.getNodeId())); + when(spyNode.getState()).thenReturn(NodeState.DECOMMISSIONING); + resourceManager.getResourceScheduler().handle( + new NodeUpdateSchedulerEvent(spyNode)); + } + @Test public void testResourceUpdateDecommissioningNode() throws Exception { // Mock the RMNodeResourceUpdate event handler to update SchedulerNode @@ -4998,9 +5023,8 @@ public class TestFairScheduler extends FairSchedulerTestBase { ((AsyncDispatcher) mockDispatcher).start(); // Register node String host_0 = "host_0"; - org.apache.hadoop.yarn.server.resourcemanager.NodeManager nm_0 = - registerNode(host_0, 1234, 2345, NetworkTopology.DEFAULT_RACK, - Resources.createResource(8 * GB, 4)); + NodeManager nm_0 = registerNode(host_0, 1234, 2345, + NetworkTopology.DEFAULT_RACK, Resources.createResource(8 * GB, 4)); RMNode node = resourceManager.getRMContext().getRMNodes().get(nm_0.getNodeId()); @@ -5038,13 +5062,12 @@ public class TestFairScheduler extends FairSchedulerTestBase { Assert.assertEquals(availableResource.getVirtualCores(), 0); } - private org.apache.hadoop.yarn.server.resourcemanager.NodeManager registerNode( - String hostName, int containerManagerPort, int httpPort, String rackName, - Resource capability) throws IOException, YarnException { - org.apache.hadoop.yarn.server.resourcemanager.NodeManager nm = - new org.apache.hadoop.yarn.server.resourcemanager.NodeManager(hostName, - containerManagerPort, httpPort, rackName, capability, - resourceManager); + private NodeManager registerNode(String hostName, int containerManagerPort, + int httpPort, String rackName, + Resource capability) + throws IOException, YarnException { + NodeManager nm = new NodeManager(hostName, containerManagerPort, httpPort, + rackName, capability, resourceManager); // after YARN-5375, scheduler event is processed in rm main dispatcher, // wait it processed, or may lead dead lock diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java index 8814c0e542d..ee66a49032f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java @@ -66,6 +66,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.MockAM; import org.apache.hadoop.yarn.server.resourcemanager.MockNM; import org.apache.hadoop.yarn.server.resourcemanager.MockNodes; import 
org.apache.hadoop.yarn.server.resourcemanager.MockRM; +import org.apache.hadoop.yarn.server.resourcemanager.NodeManager; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; @@ -138,14 +139,12 @@ public class TestFifoScheduler { resourceManager.stop(); } - private org.apache.hadoop.yarn.server.resourcemanager.NodeManager - registerNode(String hostName, int containerManagerPort, int nmHttpPort, - String rackName, Resource capability) throws IOException, - YarnException { - org.apache.hadoop.yarn.server.resourcemanager.NodeManager nm = - new org.apache.hadoop.yarn.server.resourcemanager.NodeManager(hostName, - containerManagerPort, nmHttpPort, rackName, capability, - resourceManager); + private NodeManager registerNode(String hostName, int containerManagerPort, + int nmHttpPort, String rackName, + Resource capability) + throws IOException, YarnException { + NodeManager nm = new NodeManager(hostName, containerManagerPort, + nmHttpPort, rackName, capability, resourceManager); NodeAddedSchedulerEvent nodeAddEvent1 = new NodeAddedSchedulerEvent(resourceManager.getRMContext().getRMNodes() .get(nm.getNodeId())); @@ -1195,6 +1194,30 @@ public class TestFifoScheduler { rm.stop(); } + @Test + public void testRemovedNodeDecomissioningNode() throws Exception { + // Register nodemanager + NodeManager nm = registerNode("host_decom", 1234, 2345, + NetworkTopology.DEFAULT_RACK, Resources.createResource(8 * GB, 4)); + + RMNode node = + resourceManager.getRMContext().getRMNodes().get(nm.getNodeId()); + // Send a heartbeat to kick the tires on the Scheduler + NodeUpdateSchedulerEvent nodeUpdate = new NodeUpdateSchedulerEvent(node); + resourceManager.getResourceScheduler().handle(nodeUpdate); + + // Force remove the node to simulate race condition + ((FifoScheduler) resourceManager.getResourceScheduler()) + .getNodeTracker().removeNode(nm.getNodeId()); + // Kick off another heartbeat with the node state mocked to decommissioning + RMNode spyNode = + Mockito.spy(resourceManager.getRMContext().getRMNodes() + .get(nm.getNodeId())); + when(spyNode.getState()).thenReturn(NodeState.DECOMMISSIONING); + resourceManager.getResourceScheduler().handle( + new NodeUpdateSchedulerEvent(spyNode)); + } + @Test public void testResourceUpdateDecommissioningNode() throws Exception { // Mock the RMNodeResourceUpdate event handler to update SchedulerNode @@ -1220,9 +1243,8 @@ public class TestFifoScheduler { ((AsyncDispatcher) mockDispatcher).start(); // Register node String host_0 = "host_0"; - org.apache.hadoop.yarn.server.resourcemanager.NodeManager nm_0 = - registerNode(host_0, 1234, 2345, NetworkTopology.DEFAULT_RACK, - Resources.createResource(8 * GB, 4)); + NodeManager nm_0 = registerNode(host_0, 1234, 2345, + NetworkTopology.DEFAULT_RACK, Resources.createResource(8 * GB, 4)); // ResourceRequest priorities Priority priority_0 = Priority.newInstance(0); From 5cf37418bdc6ff09c0c1ae3ac8ac4b0867de0de4 Mon Sep 17 00:00:00 2001 From: Eric Yang Date: Mon, 4 Jun 2018 19:01:17 -0400 Subject: [PATCH 039/113] YARN-8365. Set DNS query type according to client request. 
Contributed by Shane Kumpf --- .../apache/hadoop/registry/server/dns/RegistryDNS.java | 9 +++++---- .../hadoop/registry/server/dns/TestRegistryDNS.java | 9 ++------- 2 files changed, 7 insertions(+), 11 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNS.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNS.java index c322023682e..5e994fb776e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNS.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNS.java @@ -1106,7 +1106,7 @@ public class RegistryDNS extends AbstractService implements DNSOperations, LOG.debug("calling addAnswer"); byte rcode = addAnswer(response, name, type, dclass, 0, flags); if (rcode != Rcode.NOERROR) { - rcode = remoteLookup(response, name, 0); + rcode = remoteLookup(response, name, type, 0); response.getHeader().setRcode(rcode); } addAdditional(response, flags); @@ -1124,9 +1124,10 @@ public class RegistryDNS extends AbstractService implements DNSOperations, /** * Lookup record from upstream DNS servers. */ - private byte remoteLookup(Message response, Name name, int iterations) { + private byte remoteLookup(Message response, Name name, int type, + int iterations) { // Forward lookup to primary DNS servers - Record[] answers = getRecords(name, Type.ANY); + Record[] answers = getRecords(name, type); try { for (Record r : answers) { if (r.getType() == Type.SOA) { @@ -1137,7 +1138,7 @@ public class RegistryDNS extends AbstractService implements DNSOperations, if (r.getType() == Type.CNAME) { Name cname = ((CNAMERecord) r).getAlias(); if (iterations < 6) { - remoteLookup(response, cname, iterations + 1); + remoteLookup(response, cname, Type.CNAME, iterations + 1); } } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/server/dns/TestRegistryDNS.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/server/dns/TestRegistryDNS.java index 01adc45f72d..6ba58dd99d5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/server/dns/TestRegistryDNS.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/server/dns/TestRegistryDNS.java @@ -424,13 +424,8 @@ public class TestRegistryDNS extends Assert { assertEquals("Questions do not match", query.getQuestion(), response.getQuestion()); Record[] recs = response.getSectionArray(Section.ANSWER); - boolean found = false; - for (Record r : recs) { - if (r.getType()==Type.A) { - found = true; - } - } - assertTrue("No A records in answer", found); + assertEquals(1, recs.length); + assertEquals(recs[0].getType(), type); return recs; } From f30f2dc4085c51b6b6850430213f47b97a763b7f Mon Sep 17 00:00:00 2001 From: Bharat Viswanadham Date: Mon, 4 Jun 2018 15:42:47 -0700 Subject: [PATCH 040/113] HADOOP-15137. ClassNotFoundException: org.apache.hadoop.yarn.server.api.DistributedSchedulingAMProtocol when using hadoop-client-minicluster. 
Contributed by Bharat Viswanadham --- .../hadoop-client-minicluster/pom.xml | 141 +++++++++++++++++- 1 file changed, 137 insertions(+), 4 deletions(-) diff --git a/hadoop-client-modules/hadoop-client-minicluster/pom.xml b/hadoop-client-modules/hadoop-client-minicluster/pom.xml index da519a3998f..ee3d050ff0e 100644 --- a/hadoop-client-modules/hadoop-client-minicluster/pom.xml +++ b/hadoop-client-modules/hadoop-client-minicluster/pom.xml @@ -149,10 +149,6 @@ org.apache.hadoop hadoop-yarn-common - - org.apache.hadoop - hadoop-yarn-server-common - org.apache.zookeeper zookeeper @@ -225,6 +221,110 @@ javax.servlet javax.servlet-api + + + + org.apache.hadoop + hadoop-auth + + + org.apache.httpcomponents + httpclient + + + org.apache.httpcomponents + httpcore + + + com.nimbusds + nimbus-jose-jwt + + + net.minidev + json-smart + + + net.minidev + accessors-smart + + + org.apache.kerby + kerb-simplekdc + + + org.apache.kerby + kerb-util + + + org.apache.kerby + token-provider + + + org.apache.kerby + kerb-common + + + org.apache.kerby + kerb-crypto + + + org.apache.kerby + kerby-util + + + org.apache.kerby + kerb-common + + + org.apache.kerby + kerby-pkix + + + org.apache.kerby + kerby-asn1 + + + org.apache.kerby + kerb-core + + + org.apache.kerby + kerby-config + + + org.apache.kerby + kerby-xdr + + + org.apache.kerby + kerb-identity + + + org.apache.kerby + kerb-server + + + org.apache.kerby + kerb-identity + + + org.apache.kerby + kerb-admin + + + org.apache.curator + curator-framework + + + org.apache.curator + curator-recipes + + + commons-net + commons-net + capacity-scheduler.xml krb5.conf + .keep + + + + org.ehcache + + ehcache-107ext.xsd + ehcache-core.xsd + + + + + + dnsjava:dnsjava + + dig* + jnamed* + lookup* + update* + + + + + + + microsoft/ + ${shaded.dependency.prefix}.microsoft. + + **/pom.xml + + + org/ ${shaded.dependency.prefix}.org. From a28848104f2a067669f9b1706cd0da05ba71bcb7 Mon Sep 17 00:00:00 2001 From: Inigo Goiri Date: Mon, 4 Jun 2018 18:12:43 -0700 Subject: [PATCH 041/113] HDFS-13652. Randomize baseDir for MiniDFSCluster in TestBlockScanner. Contributed by Anbang Hu. --- .../apache/hadoop/hdfs/server/datanode/TestBlockScanner.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockScanner.java index 1e92a5db4fa..a7d325e7eb8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockScanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockScanner.java @@ -93,7 +93,8 @@ public class TestBlockScanner { TestContext(Configuration conf, int numNameServices) throws Exception { this.numNameServices = numNameServices; - MiniDFSCluster.Builder bld = new MiniDFSCluster.Builder(conf). + File basedir = new File(GenericTestUtils.getRandomizedTempPath()); + MiniDFSCluster.Builder bld = new MiniDFSCluster.Builder(conf, basedir). numDataNodes(1). storagesPerDatanode(1); if (numNameServices > 1) { From 16316b60112842cf8a328b79c89297f6a8b30e7b Mon Sep 17 00:00:00 2001 From: Inigo Goiri Date: Mon, 4 Jun 2018 18:21:38 -0700 Subject: [PATCH 042/113] HDFS-13649. Randomize baseDir for MiniDFSCluster in TestReconstructStripedFile and TestReconstructStripedFileWithRandomECPolicy. Contributed by Anbang Hu. 
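The MiniDFSCluster patches in this stretch (HDFS-13652, HDFS-13649 and the related randomization changes) all apply the same fix: give each test its own randomized base directory so parallel test runs do not trample each other's data directories. A minimal sketch of the pattern, assuming a hypothetical test class; the builder and configuration calls are the ones used in the hunks below:

import java.io.File;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.test.GenericTestUtils;

// Hypothetical test illustrating the baseDir randomization pattern;
// not part of any single patch in this series.
public class TestWithRandomizedBaseDir {
  private MiniDFSCluster cluster;

  public void startCluster() throws Exception {
    Configuration conf = new Configuration();
    // Either pass a randomized basedir to the builder...
    File basedir = new File(GenericTestUtils.getRandomizedTempPath());
    cluster = new MiniDFSCluster.Builder(conf, basedir)
        .numDataNodes(1)
        .build();
    // ...or set it through the configuration key instead:
    // conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR,
    //     GenericTestUtils.getRandomizedTempPath());
    cluster.waitActive();
  }

  public void stopCluster() {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}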
--- .../org/apache/hadoop/hdfs/TestReconstructStripedFile.java | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java index 1e93a2df704..2adddb6156c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java @@ -122,7 +122,9 @@ public class TestReconstructStripedFile { CodecUtil.IO_ERASURECODE_CODEC_RS_RAWCODERS_KEY, NativeRSRawErasureCoderFactory.CODER_NAME); } - cluster = new MiniDFSCluster.Builder(conf).numDataNodes(dnNum).build(); + File basedir = new File(GenericTestUtils.getRandomizedTempPath()); + cluster = new MiniDFSCluster.Builder(conf, basedir).numDataNodes(dnNum) + .build(); cluster.waitActive(); fs = cluster.getFileSystem(); From 8d31ddcfeb5c2e97692dd386e5bd62f433b44f8e Mon Sep 17 00:00:00 2001 From: Inigo Goiri Date: Mon, 4 Jun 2018 18:28:11 -0700 Subject: [PATCH 043/113] HDFS-13650. Randomize baseDir for MiniDFSCluster in TestDFSStripedInputStream and TestDFSStripedInputStreamWithRandomECPolicy. Contributed by Anbang Hu. --- .../org/apache/hadoop/hdfs/TestDFSStripedInputStream.java | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java index 422746e33c4..48ecf9ae5e8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java @@ -105,7 +105,13 @@ public class TestDFSStripedInputStream { CodecUtil.IO_ERASURECODE_CODEC_RS_RAWCODERS_KEY, NativeRSRawErasureCoderFactory.CODER_NAME); } + conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, + GenericTestUtils.getRandomizedTempPath()); SimulatedFSDataset.setFactory(conf); + startUp(); + } + + private void startUp() throws IOException { cluster = new MiniDFSCluster.Builder(conf).numDataNodes( dataBlocks + parityBlocks).build(); cluster.waitActive(); @@ -326,7 +332,7 @@ public class TestDFSStripedInputStream { if (cellMisalignPacket) { conf.setInt(IO_FILE_BUFFER_SIZE_KEY, IO_FILE_BUFFER_SIZE_DEFAULT + 1); tearDown(); - setup(); + startUp(); } DFSTestUtil.createStripedFile(cluster, filePath, null, numBlocks, stripesPerBlock, false, ecPolicy); From 6d5e87aec2f615ed265dc495873bf53ee7d2ace2 Mon Sep 17 00:00:00 2001 From: Xiao Chen Date: Mon, 4 Jun 2018 21:13:17 -0700 Subject: [PATCH 044/113] HADOOP-15507. Add MapReduce counters about EC bytes read. 
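HADOOP-15507 threads a new erasure-coded read statistic from the HDFS client (DFSInputStream/ReaderStrategy) into FileSystem.Statistics and exposes it to MapReduce as a BYTES_READ_EC file-system counter. A rough sketch of how the new value could be read from client code once the patch is applied; the helper class is hypothetical, and only getBytesReadErasureCoded() and the counter come from the patch:

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Hypothetical helper: reads the statistic that this patch adds to
// FileSystem.Statistics for the hdfs scheme.
public final class EcReadStats {
  private EcReadStats() {
  }

  public static long currentEcBytesRead() {
    FileSystem.Statistics stats =
        FileSystem.getStatistics("hdfs", DistributedFileSystem.class);
    return stats == null ? 0L : stats.getBytesReadErasureCoded();
  }
}

Inside a MapReduce task the same value surfaces as the HDFS BYTES_READ_EC counter, which Task's FileSystemStatisticUpdater populates from Statistics.getBytesReadErasureCoded(), as the hunks below show.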
--- .../java/org/apache/hadoop/fs/FileSystem.java | 34 +++++++++++++++++++ .../fs/FileSystemStorageStatistics.java | 5 ++- .../fs/TestFileSystemStorageStatistics.java | 6 +++- .../org/apache/hadoop/hdfs/DFSClient.java | 6 ++++ .../apache/hadoop/hdfs/DFSInputStream.java | 4 +++ .../apache/hadoop/hdfs/ReaderStrategy.java | 8 +++++ .../java/org/apache/hadoop/mapred/Task.java | 14 +++++++- .../hadoop/mapreduce/FileSystemCounter.java | 1 + .../mapreduce/FileSystemCounter.properties | 1 + 9 files changed, 76 insertions(+), 3 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java index 707b921bf9f..c309941e757 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java @@ -3605,6 +3605,7 @@ public abstract class FileSystem extends Configured implements Closeable { private volatile long bytesReadDistanceOfOneOrTwo; private volatile long bytesReadDistanceOfThreeOrFour; private volatile long bytesReadDistanceOfFiveOrLarger; + private volatile long bytesReadErasureCoded; /** * Add another StatisticsData object to this one. @@ -3621,6 +3622,7 @@ public abstract class FileSystem extends Configured implements Closeable { other.bytesReadDistanceOfThreeOrFour; this.bytesReadDistanceOfFiveOrLarger += other.bytesReadDistanceOfFiveOrLarger; + this.bytesReadErasureCoded += other.bytesReadErasureCoded; } /** @@ -3638,6 +3640,7 @@ public abstract class FileSystem extends Configured implements Closeable { -this.bytesReadDistanceOfThreeOrFour; this.bytesReadDistanceOfFiveOrLarger = -this.bytesReadDistanceOfFiveOrLarger; + this.bytesReadErasureCoded = -this.bytesReadErasureCoded; } @Override @@ -3682,6 +3685,10 @@ public abstract class FileSystem extends Configured implements Closeable { public long getBytesReadDistanceOfFiveOrLarger() { return bytesReadDistanceOfFiveOrLarger; } + + public long getBytesReadErasureCoded() { + return bytesReadErasureCoded; + } } private interface StatisticsAggregator { @@ -3873,6 +3880,14 @@ public abstract class FileSystem extends Configured implements Closeable { getThreadStatistics().writeOps += count; } + /** + * Increment the bytes read on erasure-coded files in the statistics. + * @param newBytes the additional bytes read + */ + public void incrementBytesReadErasureCoded(long newBytes) { + getThreadStatistics().bytesReadErasureCoded += newBytes; + } + /** * Increment the bytes read by the network distance in the statistics * In the common network topology setup, distance value should be an even @@ -4067,6 +4082,25 @@ public abstract class FileSystem extends Configured implements Closeable { }); } + /** + * Get the total number of bytes read on erasure-coded files. 
+ * @return the number of bytes + */ + public long getBytesReadErasureCoded() { + return visitAll(new StatisticsAggregator() { + private long bytesReadErasureCoded = 0; + + @Override + public void accept(StatisticsData data) { + bytesReadErasureCoded += data.bytesReadErasureCoded; + } + + public Long aggregate() { + return bytesReadErasureCoded; + } + }); + } + @Override public String toString() { return visitAll(new StatisticsAggregator() { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemStorageStatistics.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemStorageStatistics.java index 8c633f6f359..43c23abadea 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemStorageStatistics.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemStorageStatistics.java @@ -46,7 +46,8 @@ public class FileSystemStorageStatistics extends StorageStatistics { "bytesReadLocalHost", "bytesReadDistanceOfOneOrTwo", "bytesReadDistanceOfThreeOrFour", - "bytesReadDistanceOfFiveOrLarger" + "bytesReadDistanceOfFiveOrLarger", + "bytesReadErasureCoded" }; private static class LongStatisticIterator @@ -104,6 +105,8 @@ public class FileSystemStorageStatistics extends StorageStatistics { return data.getBytesReadDistanceOfThreeOrFour(); case "bytesReadDistanceOfFiveOrLarger": return data.getBytesReadDistanceOfFiveOrLarger(); + case "bytesReadErasureCoded": + return data.getBytesReadErasureCoded(); default: return null; } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemStorageStatistics.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemStorageStatistics.java index 8debb697171..597eb93b58e 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemStorageStatistics.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemStorageStatistics.java @@ -51,7 +51,8 @@ public class TestFileSystemStorageStatistics { "bytesReadLocalHost", "bytesReadDistanceOfOneOrTwo", "bytesReadDistanceOfThreeOrFour", - "bytesReadDistanceOfFiveOrLarger" + "bytesReadDistanceOfFiveOrLarger", + "bytesReadErasureCoded" }; private FileSystem.Statistics statistics = @@ -74,6 +75,7 @@ public class TestFileSystemStorageStatistics { statistics.incrementBytesReadByDistance(0, RandomUtils.nextInt(100)); statistics.incrementBytesReadByDistance(1, RandomUtils.nextInt(100)); statistics.incrementBytesReadByDistance(3, RandomUtils.nextInt(100)); + statistics.incrementBytesReadErasureCoded(RandomUtils.nextInt(100)); } @Test @@ -126,6 +128,8 @@ public class TestFileSystemStorageStatistics { return statistics.getBytesReadByDistance(3); case "bytesReadDistanceOfFiveOrLarger": return statistics.getBytesReadByDistance(5); + case "bytesReadErasureCoded": + return statistics.getBytesReadErasureCoded(); default: return 0; } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java index 5f1b2bb1721..96c45053fc4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java @@ -2942,6 +2942,12 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory, } } + void 
updateFileSystemECReadStats(int nRead) { + if (stats != null) { + stats.incrementBytesReadErasureCoded(nRead); + } + } + /** * Create hedged reads thread pool, HEDGED_READ_THREAD_POOL, if * it does not already exist. diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java index e2508735891..4d70fee0357 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java @@ -61,6 +61,7 @@ import org.apache.hadoop.fs.StreamCapabilities; import org.apache.hadoop.hdfs.DFSUtilClient.CorruptedBlocks; import org.apache.hadoop.hdfs.client.impl.BlockReaderFactory; import org.apache.hadoop.hdfs.client.impl.DfsClientConf; +import org.apache.hadoop.hdfs.protocol.BlockType; import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; @@ -1082,6 +1083,9 @@ public class DFSInputStream extends FSInputStream IOUtilsClient.updateReadStatistics(readStatistics, nread, reader); dfsClient.updateFileSystemReadStats( reader.getNetworkDistance(), nread); + if (readStatistics.getBlockType() == BlockType.STRIPED) { + dfsClient.updateFileSystemECReadStats(nread); + } if (nread != len) { throw new IOException("truncated return from reader.read(): " + "excpected " + len + ", got " + nread); diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ReaderStrategy.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ReaderStrategy.java index c984c3b7322..39ad2ff4a9e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ReaderStrategy.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ReaderStrategy.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hdfs; +import org.apache.hadoop.hdfs.protocol.BlockType; + import java.io.IOException; import java.nio.ByteBuffer; import static org.apache.hadoop.hdfs.util.IOUtilsClient.updateReadStatistics; @@ -121,6 +123,9 @@ class ByteArrayStrategy implements ReaderStrategy { updateReadStatistics(readStatistics, nRead, blockReader); dfsClient.updateFileSystemReadStats(blockReader.getNetworkDistance(), nRead); + if (readStatistics.getBlockType() == BlockType.STRIPED) { + dfsClient.updateFileSystemECReadStats(nRead); + } offset += nRead; } return nRead; @@ -188,6 +193,9 @@ class ByteBufferStrategy implements ReaderStrategy { updateReadStatistics(readStatistics, nRead, blockReader); dfsClient.updateFileSystemReadStats(blockReader.getNetworkDistance(), nRead); + if (readStatistics.getBlockType() == BlockType.STRIPED) { + dfsClient.updateFileSystemECReadStats(nRead); + } } return nRead; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java index d83a6b0e945..9b62afc3416 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java @@ -86,6 +86,7 @@ 
abstract public class Task implements Writable, Configurable { public static String MERGED_OUTPUT_PREFIX = ".merged"; public static final long DEFAULT_COMBINE_RECORDS_BEFORE_PROGRESS = 10000; + private static final String HDFS_URI_SCHEME = "hdfs"; /** * @deprecated Provided for compatibility. Use {@link TaskCounter} instead. @@ -1125,7 +1126,8 @@ abstract public class Task implements Writable, Configurable { class FileSystemStatisticUpdater { private List stats; private Counters.Counter readBytesCounter, writeBytesCounter, - readOpsCounter, largeReadOpsCounter, writeOpsCounter; + readOpsCounter, largeReadOpsCounter, writeOpsCounter, + readBytesEcCounter; private String scheme; FileSystemStatisticUpdater(List stats, String scheme) { this.stats = stats; @@ -1153,23 +1155,33 @@ abstract public class Task implements Writable, Configurable { writeOpsCounter = counters.findCounter(scheme, FileSystemCounter.WRITE_OPS); } + if (readBytesEcCounter == null && scheme.equals(HDFS_URI_SCHEME)) { + // EC bytes only applies to hdfs + readBytesEcCounter = + counters.findCounter(scheme, FileSystemCounter.BYTES_READ_EC); + } long readBytes = 0; long writeBytes = 0; long readOps = 0; long largeReadOps = 0; long writeOps = 0; + long readBytesEC = 0; for (FileSystem.Statistics stat: stats) { readBytes = readBytes + stat.getBytesRead(); writeBytes = writeBytes + stat.getBytesWritten(); readOps = readOps + stat.getReadOps(); largeReadOps = largeReadOps + stat.getLargeReadOps(); writeOps = writeOps + stat.getWriteOps(); + readBytesEC = readBytesEC + stat.getBytesReadErasureCoded(); } readBytesCounter.setValue(readBytes); writeBytesCounter.setValue(writeBytes); readOpsCounter.setValue(readOps); largeReadOpsCounter.setValue(largeReadOps); writeOpsCounter.setValue(writeOps); + if (readBytesEcCounter != null) { + readBytesEcCounter.setValue(readBytesEC); + } } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/FileSystemCounter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/FileSystemCounter.java index 3624b1a99c7..e27d1dc4732 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/FileSystemCounter.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/FileSystemCounter.java @@ -27,4 +27,5 @@ public enum FileSystemCounter { READ_OPS, LARGE_READ_OPS, WRITE_OPS, + BYTES_READ_EC, } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/org/apache/hadoop/mapreduce/FileSystemCounter.properties b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/org/apache/hadoop/mapreduce/FileSystemCounter.properties index 58089af8039..bc405c88bca 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/org/apache/hadoop/mapreduce/FileSystemCounter.properties +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/org/apache/hadoop/mapreduce/FileSystemCounter.properties @@ -19,3 +19,4 @@ BYTES_WRITTEN.name= Number of bytes written READ_OPS.name= Number of read operations LARGE_READ_OPS.name= Number of large read operations WRITE_OPS.name= Number of write operations +BYTES_READ_EC.name= Number of bytes read erasure-coded From 
0e3c31579d1c733decfb9b00721bdc7b86be60f2 Mon Sep 17 00:00:00 2001 From: Akira Ajisaka Date: Tue, 5 Jun 2018 17:53:24 +0900 Subject: [PATCH 045/113] HDFS-13545. "guarded" is misspelled as "gaurded" in FSPermissionChecker.java. Contributed by Jianchao Jia. --- .../apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java index 0b284b9a7d7..354b4e364aa 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java @@ -44,7 +44,7 @@ import org.apache.hadoop.security.UserGroupInformation; * The state of this class need not be synchronized as it has data structures that * are read-only. * - * Some of the helper methods are gaurded by {@link FSNamesystem#readLock()}. + * Some of the helper methods are guarded by {@link FSNamesystem#readLock()}. */ public class FSPermissionChecker implements AccessControlEnforcer { static final Log LOG = LogFactory.getLog(UserGroupInformation.class); From 745f3a244166c1e6a1aee81eb1d6c2c692053d04 Mon Sep 17 00:00:00 2001 From: Rohith Sharma K S Date: Tue, 5 Jun 2018 19:28:24 +0530 Subject: [PATCH 046/113] YARN-8396. Click on an individual container continuously spins and doesn't load the page. Contributed by Sunil Govindan. --- .../src/main/webapp/app/routes/yarn-node-container.js | 2 +- .../src/main/webapp/app/templates/yarn-container-log.hbs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-node-container.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-node-container.js index 0f1e4cd0498..388918ed529 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-node-container.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-node-container.js @@ -26,7 +26,7 @@ export default AbstractRoute.extend({ return Ember.RSVP.hash({ nodeContainer: this.store.queryRecord('yarn-node-container', { nodeHttpAddr: param.node_addr, containerId: param.container_id }), - nmGpuInfo: this.store.findRecord('yarn-nm-gpu', address, {reload:true}), + nmGpuInfo: this.store.findRecord('yarn-nm-gpu', param.node_addr, {reload:true}), nodeInfo: { id: param.node_id, addr: param.node_addr, containerId: param.container_id } }); }, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-container-log.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-container-log.hbs index 67629d2817c..6376caa8a8e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-container-log.hbs +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-container-log.hbs @@ -19,7 +19,7 @@ {{breadcrumb-bar breadcrumbs=breadcrumbs}}
- {{node-menu path="yarn-container-log" nodeAddr=model.nodeInfo.addr nodeId=model.nodeInfo.id}} + {{node-menu-panel path="yarn-container-log" nodeAddr=model.nodeInfo.addr nodeId=model.nodeInfo.id}}
From 920d154997f0ad6000d8f76029d6d415e7b8980c Mon Sep 17 00:00:00 2001 From: Xiaoyu Yao Date: Tue, 5 Jun 2018 08:34:44 -0700 Subject: [PATCH 047/113] HDDS-148. Remove ContainerReportManager and ContainerReportManagerImpl. Contributed by Nanda kumar. --- .../common/impl/ContainerManagerImpl.java | 8 --- .../impl/ContainerReportManagerImpl.java | 67 ------------------- .../interfaces/ContainerReportManager.java | 30 --------- 3 files changed, 105 deletions(-) delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerReportManagerImpl.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerReportManager.java diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java index b09d324e2f8..eb437afc02b 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java @@ -53,8 +53,6 @@ import org.apache.hadoop.ozone.container.common.interfaces import org.apache.hadoop.ozone.container.common.interfaces .ContainerLocationManager; import org.apache.hadoop.ozone.container.common.interfaces.ContainerManager; -import org.apache.hadoop.ozone.container.common.interfaces - .ContainerReportManager; import org.apache.hadoop.ozone.container.common.interfaces.KeyManager; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.utils.MetadataKeyFilters; @@ -127,10 +125,8 @@ public class ContainerManagerImpl implements ContainerManager { private ChunkManager chunkManager; private KeyManager keyManager; private Configuration conf; - private DatanodeDetails datanodeDetails; private ContainerDeletionChoosingPolicy containerDeletionChooser; - private ContainerReportManager containerReportManager; /** * Init call that sets up a container Manager. @@ -154,7 +150,6 @@ public class ContainerManagerImpl implements ContainerManager { " directories must be greater than zero."); this.conf = config; - this.datanodeDetails = dnDetails; readLock(); try { @@ -203,9 +198,6 @@ public class ContainerManagerImpl implements ContainerManager { } this.locationManager = new ContainerLocationManagerImpl(containerDirs, dataDirs, config); - - this.containerReportManager = - new ContainerReportManagerImpl(config); } finally { readUnlock(); } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerReportManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerReportManagerImpl.java deleted file mode 100644 index f1d3f7f1691..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerReportManagerImpl.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.common.impl; - -import org.apache.commons.lang3.RandomUtils; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.container.common.interfaces - .ContainerReportManager; -import org.apache.hadoop.util.Time; - -import java.util.concurrent.TimeUnit; - -import static org.apache.hadoop.hdds.scm.HddsServerUtil.getScmHeartbeatInterval; - -/** - * Class wraps the container report operations on datanode. - * // TODO: support incremental/delta container report - */ -public class ContainerReportManagerImpl implements ContainerReportManager { - // Last non-empty container report time - private long lastContainerReportTime; - private final long containerReportInterval; - private final long heartbeatInterval; - - public ContainerReportManagerImpl(Configuration config) { - this.lastContainerReportTime = -1; - this.containerReportInterval = config.getTimeDuration( - OzoneConfigKeys.OZONE_CONTAINER_REPORT_INTERVAL, - OzoneConfigKeys.OZONE_CONTAINER_REPORT_INTERVAL_DEFAULT, - TimeUnit.MILLISECONDS); - this.heartbeatInterval = getScmHeartbeatInterval(config); - } - - public boolean shouldSendContainerReport() { - if (lastContainerReportTime < 0) { - return true; - } - // Add a random delay (0~30s) on top of the container report - // interval (60s) so tha the SCM is overwhelmed by the container reports - // sent in sync. - if (Time.monotonicNow() - lastContainerReportTime > - (containerReportInterval + getRandomReportDelay())) { - return true; - } - return false; - } - - private long getRandomReportDelay() { - return RandomUtils.nextLong(0, heartbeatInterval); - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerReportManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerReportManager.java deleted file mode 100644 index 6d7557b94aa..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerReportManager.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.common.interfaces; - -/** - * Interface for container report manager operations. 
- */ -public interface ContainerReportManager { - - /** - * Check if we have to send container report. - * @return true if container report has to be sent. - */ - boolean shouldSendContainerReport(); -} From baebe4d52bc0e1ee3be062b61efa1de1d19a3bca Mon Sep 17 00:00:00 2001 From: Anu Engineer Date: Tue, 5 Jun 2018 10:31:42 -0700 Subject: [PATCH 048/113] HDDS-129. Support for ReportManager in Datanode. Contributed by Nanda Kumar. --- .../report/ContainerReportPublisher.java | 70 +++++++++ .../common/report/NodeReportPublisher.java | 40 +++++ .../common/report/ReportManager.java | 147 ++++++++++++++++++ .../common/report/ReportPublisher.java | 96 ++++++++++++ .../common/report/ReportPublisherFactory.java | 71 +++++++++ .../container/common/report/package-info.java | 80 ++++++++++ .../statemachine/DatanodeStateMachine.java | 18 ++- .../common/statemachine/StateContext.java | 59 +++++-- .../endpoint/HeartbeatEndpointTask.java | 24 ++- .../common/report/TestReportManager.java | 52 +++++++ .../common/report/TestReportPublisher.java | 106 +++++++++++++ .../report/TestReportPublisherFactory.java | 68 ++++++++ .../container/common/report/package-info.java | 22 +++ 13 files changed, 834 insertions(+), 19 deletions(-) create mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ContainerReportPublisher.java create mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/NodeReportPublisher.java create mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportManager.java create mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportPublisher.java create mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportPublisherFactory.java create mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/package-info.java create mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportManager.java create mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java create mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisherFactory.java create mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/package-info.java diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ContainerReportPublisher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ContainerReportPublisher.java new file mode 100644 index 00000000000..ea2b987036a --- /dev/null +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ContainerReportPublisher.java @@ -0,0 +1,70 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.container.common.report; + +import org.apache.commons.lang3.RandomUtils; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; +import org.apache.hadoop.ozone.OzoneConfigKeys; + +import java.util.concurrent.TimeUnit; + + +/** + * Publishes ContainerReport which will be sent to SCM as part of heartbeat. + * ContainerReport consist of the following information about each containers: + * - containerID + * - size + * - used + * - keyCount + * - readCount + * - writeCount + * - readBytes + * - writeBytes + * - finalHash + * - LifeCycleState + * + */ +public class ContainerReportPublisher extends + ReportPublisher { + + private Long containerReportInterval = null; + + @Override + protected long getReportFrequency() { + if (containerReportInterval == null) { + containerReportInterval = getConf().getTimeDuration( + OzoneConfigKeys.OZONE_CONTAINER_REPORT_INTERVAL, + OzoneConfigKeys.OZONE_CONTAINER_REPORT_INTERVAL_DEFAULT, + TimeUnit.MILLISECONDS); + } + // Add a random delay (0~30s) on top of the container report + // interval (60s) so tha the SCM is overwhelmed by the container reports + // sent in sync. + return containerReportInterval + getRandomReportDelay(); + } + + private long getRandomReportDelay() { + return RandomUtils.nextLong(0, containerReportInterval); + } + + @Override + protected ContainerReportsProto getReport() { + return ContainerReportsProto.getDefaultInstance(); + } +} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/NodeReportPublisher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/NodeReportPublisher.java new file mode 100644 index 00000000000..704b1f5b19d --- /dev/null +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/NodeReportPublisher.java @@ -0,0 +1,40 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.container.common.report; + +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.NodeReportProto; + +/** + * Publishes NodeReport which will be sent to SCM as part of heartbeat. 
+ * NodeReport consist of: + * - NodeIOStats + * - VolumeReports + */ +public class NodeReportPublisher extends ReportPublisher { + + @Override + protected long getReportFrequency() { + return 90000L; + } + + @Override + protected NodeReportProto getReport() { + return NodeReportProto.getDefaultInstance(); + } +} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportManager.java new file mode 100644 index 00000000000..c09282e1bf0 --- /dev/null +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportManager.java @@ -0,0 +1,147 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.container.common.report; + +import com.google.common.base.Preconditions; +import com.google.common.util.concurrent.ThreadFactoryBuilder; +import com.google.protobuf.GeneratedMessage; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.ozone.container.common.statemachine.StateContext; +import org.apache.hadoop.util.concurrent.HadoopExecutors; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ScheduledExecutorService; + +/** + * ReportManager is responsible for managing all the {@link ReportPublisher} + * and also provides {@link ScheduledExecutorService} to ReportPublisher + * which should be used for scheduling the reports. + */ +public final class ReportManager { + + private final StateContext context; + private final List publishers; + private final ScheduledExecutorService executorService; + + /** + * Construction of {@link ReportManager} should be done via + * {@link ReportManager.Builder}. + * + * @param context StateContext which holds the report + * @param publishers List of publishers which generates report + */ + private ReportManager(StateContext context, + List publishers) { + this.context = context; + this.publishers = publishers; + this.executorService = HadoopExecutors.newScheduledThreadPool(1, + new ThreadFactoryBuilder().setDaemon(true) + .setNameFormat("Datanode ReportManager Thread - %d").build()); + } + + /** + * Initializes ReportManager, also initializes all the configured + * report publishers. + */ + public void init() { + for (ReportPublisher publisher : publishers) { + publisher.init(context, executorService); + } + } + + /** + * Shutdown the ReportManager. + */ + public void shutdown() { + executorService.shutdown(); + } + + /** + * Returns new {@link ReportManager.Builder} which can be used to construct. + * {@link ReportManager} + * @param conf - Conf + * @return builder - Builder. 
+ */ + public static Builder newBuilder(Configuration conf) { + return new Builder(conf); + } + + /** + * Builder to construct {@link ReportManager}. + */ + public static final class Builder { + + private StateContext stateContext; + private List reportPublishers; + private ReportPublisherFactory publisherFactory; + + + private Builder(Configuration conf) { + this.reportPublishers = new ArrayList<>(); + this.publisherFactory = new ReportPublisherFactory(conf); + } + + /** + * Sets the {@link StateContext}. + * + * @param context StateContext + + * @return ReportManager.Builder + */ + public Builder setStateContext(StateContext context) { + stateContext = context; + return this; + } + + /** + * Adds publisher for the corresponding report. + * + * @param report report for which publisher needs to be added + * + * @return ReportManager.Builder + */ + public Builder addPublisherFor(Class report) { + reportPublishers.add(publisherFactory.getPublisherFor(report)); + return this; + } + + /** + * Adds new ReportPublisher to the ReportManager. + * + * @param publisher ReportPublisher + * + * @return ReportManager.Builder + */ + public Builder addPublisher(ReportPublisher publisher) { + reportPublishers.add(publisher); + return this; + } + + /** + * Build and returns ReportManager. + * + * @return {@link ReportManager} + */ + public ReportManager build() { + Preconditions.checkNotNull(stateContext); + return new ReportManager(stateContext, reportPublishers); + } + + } +} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportPublisher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportPublisher.java new file mode 100644 index 00000000000..4ff47a05232 --- /dev/null +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportPublisher.java @@ -0,0 +1,96 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.container.common.report; + +import com.google.protobuf.GeneratedMessage; +import org.apache.hadoop.conf.Configurable; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.ozone.container.common.statemachine + .DatanodeStateMachine.DatanodeStates; +import org.apache.hadoop.ozone.container.common.statemachine.StateContext; + +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; + +/** + * Abstract class responsible for scheduling the reports based on the + * configured interval. All the ReportPublishers should extend this class. 
+ */ +public abstract class ReportPublisher + implements Configurable, Runnable { + + private Configuration config; + private StateContext context; + private ScheduledExecutorService executor; + + /** + * Initializes ReportPublisher with stateContext and executorService. + * + * @param stateContext Datanode state context + * @param executorService ScheduledExecutorService to schedule reports + */ + public void init(StateContext stateContext, + ScheduledExecutorService executorService) { + this.context = stateContext; + this.executor = executorService; + this.executor.schedule(this, + getReportFrequency(), TimeUnit.MILLISECONDS); + } + + @Override + public void setConf(Configuration conf) { + config = conf; + } + + @Override + public Configuration getConf() { + return config; + } + + @Override + public void run() { + publishReport(); + if (!executor.isShutdown() || + !(context.getState() == DatanodeStates.SHUTDOWN)) { + executor.schedule(this, + getReportFrequency(), TimeUnit.MILLISECONDS); + } + } + + /** + * Generates and publishes the report to datanode state context. + */ + private void publishReport() { + context.addReport(getReport()); + } + + /** + * Returns the frequency in which this particular report has to be scheduled. + * + * @return report interval in milliseconds + */ + protected abstract long getReportFrequency(); + + /** + * Generate and returns the report which has to be sent as part of heartbeat. + * + * @return datanode report + */ + protected abstract T getReport(); + +} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportPublisherFactory.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportPublisherFactory.java new file mode 100644 index 00000000000..dc246d9428c --- /dev/null +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportPublisherFactory.java @@ -0,0 +1,71 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.container.common.report; + +import com.google.protobuf.GeneratedMessage; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.NodeReportProto; +import org.apache.hadoop.util.ReflectionUtils; + +import java.util.HashMap; +import java.util.Map; + +/** + * Factory class to construct {@link ReportPublisher} for a report. + */ +public class ReportPublisherFactory { + + private final Configuration conf; + private final Map, + Class> report2publisher; + + /** + * Constructs {@link ReportPublisherFactory} instance. 
+ * + * @param conf Configuration to be passed to the {@link ReportPublisher} + */ + public ReportPublisherFactory(Configuration conf) { + this.conf = conf; + this.report2publisher = new HashMap<>(); + + report2publisher.put(NodeReportProto.class, NodeReportPublisher.class); + report2publisher.put(ContainerReportsProto.class, + ContainerReportPublisher.class); + } + + /** + * Returns the ReportPublisher for the corresponding report. + * + * @param report report + * + * @return report publisher + */ + public ReportPublisher getPublisherFor( + Class report) { + Class publisherClass = + report2publisher.get(report); + if (publisherClass == null) { + throw new RuntimeException("No publisher found for report " + report); + } + return ReflectionUtils.newInstance(publisherClass, conf); + } + +} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/package-info.java new file mode 100644 index 00000000000..404b37a7b08 --- /dev/null +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/package-info.java @@ -0,0 +1,80 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.container.common.report; +/** + * Datanode Reports: As part of heartbeat, datanode has to share its current + * state with SCM. The state of datanode is split into multiple reports which + * are sent along with heartbeat in a configured frequency. + * + * This package contains code which is responsible for sending reports from + * datanode to SCM. + * + * ReportPublisherFactory: Given a report this constructs corresponding + * {@link org.apache.hadoop.ozone.container.common.report.ReportPublisher}. + * + * ReportManager: Manages and initializes all the available ReportPublishers. + * + * ReportPublisher: Abstract class responsible for scheduling the reports + * based on the configured interval. All the ReportPublishers should extend + * {@link org.apache.hadoop.ozone.container.common.report.ReportPublisher} + * + * How to add new report: + * + * 1. Create a new ReportPublisher class which extends + * {@link org.apache.hadoop.ozone.container.common.report.ReportPublisher}. + * + * 2. Add a mapping Report to ReportPublisher entry in ReportPublisherFactory. + * + * 3. In DatanodeStateMachine add the report to ReportManager instance. 
+ * + * + * + * Datanode Reports State Diagram: + * + * DatanodeStateMachine ReportManager ReportPublisher SCM + * | | | | + * | | | | + * | construct | | | + * |----------------->| | | + * | | | | + * | init | | | + * |----------------->| | | + * | | init | | + * | |------------->| | + * | | | | + * +--------+------------------+--------------+--------------------+------+ + * |loop | | | | | + * | | | publish | | | + * | |<-----------------+--------------| | | + * | | | report | | | + * | | | | | | + * | | | | | | + * | | heartbeat(rpc) | | | | + * | |------------------+--------------+------------------->| | + * | | | | | | + * | | | | | | + * +--------+------------------+--------------+--------------------+------+ + * | | | | + * | | | | + * | | | | + * | shutdown | | | + * |----------------->| | | + * | | | | + * | | | | + * - - - - + */ \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java index d0a4217245e..cb4319dc945 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java @@ -21,7 +21,13 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.ozone.container.common.statemachine.commandhandler.CloseContainerCommandHandler; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.NodeReportProto; +import org.apache.hadoop.ozone.container.common.report.ReportManager; +import org.apache.hadoop.ozone.container.common.statemachine.commandhandler + .CloseContainerCommandHandler; import org.apache.hadoop.ozone.container.common.statemachine.commandhandler .CommandDispatcher; import org.apache.hadoop.ozone.container.common.statemachine.commandhandler @@ -56,6 +62,7 @@ public class DatanodeStateMachine implements Closeable { private final OzoneContainer container; private DatanodeDetails datanodeDetails; private final CommandDispatcher commandDispatcher; + private final ReportManager reportManager; private long commandsHandled; private AtomicLong nextHB; private Thread stateMachineThread = null; @@ -92,6 +99,12 @@ public class DatanodeStateMachine implements Closeable { .setContainer(container) .setContext(context) .build(); + + reportManager = ReportManager.newBuilder(conf) + .setStateContext(context) + .addPublisherFor(NodeReportProto.class) + .addPublisherFor(ContainerReportsProto.class) + .build(); } /** @@ -125,12 +138,12 @@ public class DatanodeStateMachine implements Closeable { long now = 0; container.start(); + reportManager.init(); initCommandHandlerThread(conf); while (context.getState() != DatanodeStates.SHUTDOWN) { try { LOG.debug("Executing cycle Number : {}", context.getExecutionCount()); nextHB.set(Time.monotonicNow() + heartbeatFrequency); - context.setNodeReport(container.getNodeReport()); context.execute(executorService, heartbeatFrequency, TimeUnit.MILLISECONDS); now = Time.monotonicNow(); @@ -307,6 +320,7 
@@ public class DatanodeStateMachine implements Closeable { public synchronized void stopDaemon() { try { context.setState(DatanodeStates.SHUTDOWN); + reportManager.shutdown(); this.close(); LOG.info("Ozone container server stopped."); } catch (IOException e) { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java index 4e3c610f778..98eb7a05f64 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java @@ -16,9 +16,8 @@ */ package org.apache.hadoop.ozone.container.common.statemachine; +import com.google.protobuf.GeneratedMessage; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.NodeReportProto; import org.apache.hadoop.ozone.container.common.states.DatanodeState; import org.apache.hadoop.ozone.container.common.states.datanode .InitDatanodeState; @@ -28,7 +27,9 @@ import org.apache.hadoop.ozone.protocol.commands.SCMCommand; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.util.ArrayList; import java.util.LinkedList; +import java.util.List; import java.util.Queue; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; @@ -51,8 +52,8 @@ public class StateContext { private final DatanodeStateMachine parent; private final AtomicLong stateExecutionCount; private final Configuration conf; + private final Queue reports; private DatanodeStateMachine.DatanodeStates state; - private NodeReportProto dnReport; /** * Constructs a StateContext. @@ -67,9 +68,9 @@ public class StateContext { this.state = state; this.parent = parent; commandQueue = new LinkedList<>(); + reports = new LinkedList<>(); lock = new ReentrantLock(); stateExecutionCount = new AtomicLong(0); - dnReport = NodeReportProto.getDefaultInstance(); } /** @@ -141,19 +142,53 @@ public class StateContext { } /** - * Returns the node report of the datanode state context. - * @return the node report. + * Adds the report to report queue. + * + * @param report report to be added */ - public NodeReportProto getNodeReport() { - return dnReport; + public void addReport(GeneratedMessage report) { + synchronized (reports) { + reports.add(report); + } } /** - * Sets the storage location report of the datanode state context. - * @param nodeReport node report + * Returns the next report, or null if the report queue is empty. + * + * @return report */ - public void setNodeReport(NodeReportProto nodeReport) { - this.dnReport = nodeReport; + public GeneratedMessage getNextReport() { + synchronized (reports) { + return reports.poll(); + } + } + + /** + * Returns all the available reports from the report queue, or empty list if + * the queue is empty. + * + * @return List + */ + public List getAllAvailableReports() { + return getReports(Integer.MAX_VALUE); + } + + /** + * Returns available reports from the report queue with a max limit on + * list size, or empty list if the queue is empty. 
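+   * @param maxLimit the maximum number of reports to return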
+ * + * @return List + */ + public List getReports(int maxLimit) { + List results = new ArrayList<>(); + synchronized (reports) { + GeneratedMessage report = reports.poll(); + while(results.size() < maxLimit && report != null) { + results.add(report); + report = reports.poll(); + } + } + return results; } /** diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java index 337cdfbcf8b..3986faf37ff 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java @@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.container.common.states.endpoint; import com.google.common.base.Preconditions; +import com.google.protobuf.GeneratedMessage; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto; @@ -99,13 +100,13 @@ public class HeartbeatEndpointTask try { Preconditions.checkState(this.datanodeDetailsProto != null); - SCMHeartbeatRequestProto request = SCMHeartbeatRequestProto.newBuilder() - .setDatanodeDetails(datanodeDetailsProto) - .setNodeReport(context.getNodeReport()) - .build(); + SCMHeartbeatRequestProto.Builder requestBuilder = + SCMHeartbeatRequestProto.newBuilder() + .setDatanodeDetails(datanodeDetailsProto); + addReports(requestBuilder); SCMHeartbeatResponseProto reponse = rpcEndpoint.getEndPoint() - .sendHeartbeat(request); + .sendHeartbeat(requestBuilder.build()); processResponse(reponse, datanodeDetailsProto); rpcEndpoint.setLastSuccessfulHeartbeat(ZonedDateTime.now()); rpcEndpoint.zeroMissedCount(); @@ -117,6 +118,19 @@ public class HeartbeatEndpointTask return rpcEndpoint.getState(); } + /** + * Adds all the available reports to heartbeat. + * + * @param requestBuilder builder to which the report has to be added. + */ + private void addReports(SCMHeartbeatRequestProto.Builder requestBuilder) { + for (GeneratedMessage report : context.getAllAvailableReports()) { + requestBuilder.setField( + SCMHeartbeatRequestProto.getDescriptor().findFieldByName( + report.getDescriptorForType().getName()), report); + } + } + /** * Returns a builder class for HeartbeatEndpointTask task. * @return Builder. diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportManager.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportManager.java new file mode 100644 index 00000000000..aae388dd5a1 --- /dev/null +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportManager.java @@ -0,0 +1,52 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.container.common.report; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.container.common.statemachine.StateContext; +import org.junit.Test; +import org.mockito.Mockito; + +import java.util.concurrent.ScheduledExecutorService; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; + +/** + * Test cases to test {@link ReportManager}. + */ +public class TestReportManager { + + @Test + public void testReportManagerInit() { + Configuration conf = new OzoneConfiguration(); + StateContext dummyContext = Mockito.mock(StateContext.class); + ReportPublisher dummyPublisher = Mockito.mock(ReportPublisher.class); + ReportManager.Builder builder = ReportManager.newBuilder(conf); + builder.setStateContext(dummyContext); + builder.addPublisher(dummyPublisher); + ReportManager reportManager = builder.build(); + reportManager.init(); + verify(dummyPublisher, times(1)).init(eq(dummyContext), + any(ScheduledExecutorService.class)); + + } +} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java new file mode 100644 index 00000000000..067c5624f63 --- /dev/null +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java @@ -0,0 +1,106 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.container.common.report; + +import com.google.common.util.concurrent.ThreadFactoryBuilder; +import com.google.protobuf.GeneratedMessage; +import org.apache.hadoop.ozone.container.common.statemachine.StateContext; +import org.apache.hadoop.util.concurrent.HadoopExecutors; +import org.junit.Assert; +import org.junit.Test; +import org.mockito.Mockito; + +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; + +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; + +/** + * Test cases to test {@link ReportPublisher}. 
+ */ +public class TestReportPublisher { + + /** + * Dummy report publisher for testing. + */ + private class DummyReportPublisher extends ReportPublisher { + + private final long frequency; + private int getReportCount = 0; + + DummyReportPublisher(long frequency) { + this.frequency = frequency; + } + + @Override + protected long getReportFrequency() { + return frequency; + } + + @Override + protected GeneratedMessage getReport() { + getReportCount++; + return null; + } + } + + @Test + public void testReportPublisherInit() { + ReportPublisher publisher = new DummyReportPublisher(0); + StateContext dummyContext = Mockito.mock(StateContext.class); + ScheduledExecutorService dummyExecutorService = Mockito.mock( + ScheduledExecutorService.class); + publisher.init(dummyContext, dummyExecutorService); + verify(dummyExecutorService, times(1)).schedule(publisher, + 0, TimeUnit.MILLISECONDS); + } + + @Test + public void testScheduledReport() throws InterruptedException { + ReportPublisher publisher = new DummyReportPublisher(100); + StateContext dummyContext = Mockito.mock(StateContext.class); + ScheduledExecutorService executorService = HadoopExecutors + .newScheduledThreadPool(1, + new ThreadFactoryBuilder().setDaemon(true) + .setNameFormat("Unit test ReportManager Thread - %d").build()); + publisher.init(dummyContext, executorService); + Thread.sleep(150); + Assert.assertEquals(1, ((DummyReportPublisher)publisher).getReportCount); + Thread.sleep(150); + Assert.assertEquals(2, ((DummyReportPublisher)publisher).getReportCount); + executorService.shutdown(); + } + + @Test + public void testPublishReport() throws InterruptedException { + ReportPublisher publisher = new DummyReportPublisher(100); + StateContext dummyContext = Mockito.mock(StateContext.class); + ScheduledExecutorService executorService = HadoopExecutors + .newScheduledThreadPool(1, + new ThreadFactoryBuilder().setDaemon(true) + .setNameFormat("Unit test ReportManager Thread - %d").build()); + publisher.init(dummyContext, executorService); + Thread.sleep(150); + executorService.shutdown(); + Assert.assertEquals(1, ((DummyReportPublisher)publisher).getReportCount); + verify(dummyContext, times(1)).addReport(null); + + } + +} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisherFactory.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisherFactory.java new file mode 100644 index 00000000000..f8c5fe5e275 --- /dev/null +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisherFactory.java @@ -0,0 +1,68 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package org.apache.hadoop.ozone.container.common.report; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.NodeReportProto; +import org.junit.Assert; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; + +/** + * Test cases to test ReportPublisherFactory. + */ +public class TestReportPublisherFactory { + + @Rule + public ExpectedException exception = ExpectedException.none(); + + @Test + public void testGetContainerReportPublisher() { + Configuration conf = new OzoneConfiguration(); + ReportPublisherFactory factory = new ReportPublisherFactory(conf); + ReportPublisher publisher = factory + .getPublisherFor(ContainerReportsProto.class); + Assert.assertEquals(ContainerReportPublisher.class, publisher.getClass()); + Assert.assertEquals(conf, publisher.getConf()); + } + + @Test + public void testGetNodeReportPublisher() { + Configuration conf = new OzoneConfiguration(); + ReportPublisherFactory factory = new ReportPublisherFactory(conf); + ReportPublisher publisher = factory + .getPublisherFor(NodeReportProto.class); + Assert.assertEquals(NodeReportPublisher.class, publisher.getClass()); + Assert.assertEquals(conf, publisher.getConf()); + } + + @Test + public void testInvalidReportPublisher() { + Configuration conf = new OzoneConfiguration(); + ReportPublisherFactory factory = new ReportPublisherFactory(conf); + exception.expect(RuntimeException.class); + exception.expectMessage("No publisher found for report"); + factory.getPublisherFor(HddsProtos.DatanodeDetailsProto.class); + } +} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/package-info.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/package-info.java new file mode 100644 index 00000000000..37615bc7536 --- /dev/null +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/package-info.java @@ -0,0 +1,22 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.container.common.report; +/** + * This package has test cases for all the report publishers which generates + * reports that are sent to SCM via heartbeat. + */ \ No newline at end of file From 1b0d4f4606adc78a5e43a924634d3d8506db26fa Mon Sep 17 00:00:00 2001 From: Chen Liang Date: Tue, 5 Jun 2018 11:51:29 -0700 Subject: [PATCH 049/113] HDFS-13547. Add ingress port based sasl resolver. 
Contributed by Chen Liang. --- .../security/IngressPortBasedResolver.java | 100 ++++++++++++++++++ .../security/SaslPropertiesResolver.java | 47 +++++++- .../security/WhitelistBasedResolver.java | 20 +--- .../TestIngressPortBasedResolver.java | 59 +++++++++++ 4 files changed, 207 insertions(+), 19 deletions(-) create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/IngressPortBasedResolver.java create mode 100644 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestIngressPortBasedResolver.java diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/IngressPortBasedResolver.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/IngressPortBasedResolver.java new file mode 100644 index 00000000000..a30e4a84dd8 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/IngressPortBasedResolver.java @@ -0,0 +1,100 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.security; + +import com.google.common.annotations.VisibleForTesting; +import java.net.InetAddress; +import java.util.Collection; +import java.util.HashMap; +import java.util.Map; +import org.apache.hadoop.conf.Configuration; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * An implementation of SaslPropertiesResolver. Used on server side, + * returns SASL properties based on the port the client is connecting + * to. This should be used along with server side enabling multiple ports + * TODO: when NN multiple listener is enabled, automatically use this + * resolver without having to set in config. + * + * For configuration, for example if server runs on two ports 9000 and 9001, + * and we want to specify 9000 to use auth-conf and 9001 to use auth. + * + * We need to set the following configuration properties: + * ingress.port.sasl.configured.ports=9000,9001 + * ingress.port.sasl.prop.9000=privacy + * ingress.port.sasl.prop.9001=authentication + * + * One note is that, if there is misconfiguration that a port, say, 9002 is + * given in ingress.port.sasl.configured.ports, but it's sasl prop is not + * set, a default of QOP of privacy (auth-conf) will be used. In addition, + * if a port is not given even in ingress.port.sasl.configured.ports, but + * is being checked in getServerProperties(), the default SASL prop will + * be returned. Both of these two cases are considered misconfiguration. 
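+ *
+ * As a usage sketch (an editorial illustration mirroring the behavior
+ * exercised in TestIngressPortBasedResolver; clientAddr stands for any
+ * client address and conf holds the properties above), resolution works
+ * as follows:
+ *
+ * <pre>
+ *   IngressPortBasedResolver resolver = new IngressPortBasedResolver();
+ *   resolver.setConf(conf);
+ *   resolver.getServerProperties(clientAddr, 9000).get(Sasl.QOP); // "auth-conf"
+ *   resolver.getServerProperties(clientAddr, 9001).get(Sasl.QOP); // "auth"
+ *   resolver.getServerProperties(clientAddr, 9002).get(Sasl.QOP); // default QOP
+ * </pre>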
+ */ +public class IngressPortBasedResolver extends SaslPropertiesResolver { + + public static final Logger LOG = + LoggerFactory.getLogger(IngressPortBasedResolver.class.getName()); + + static final String INGRESS_PORT_SASL_PROP_PREFIX = "ingress.port.sasl.prop"; + + static final String INGRESS_PORT_SASL_CONFIGURED_PORTS = + "ingress.port.sasl.configured.ports"; + + // no need to concurrent map, because after setConf() it never change, + // only for read. + private HashMap> portPropMapping; + + @Override + public void setConf(Configuration conf) { + super.setConf(conf); + portPropMapping = new HashMap<>(); + Collection portStrings = + conf.getTrimmedStringCollection(INGRESS_PORT_SASL_CONFIGURED_PORTS); + for (String portString : portStrings) { + int port = Integer.parseInt(portString); + String configKey = INGRESS_PORT_SASL_PROP_PREFIX + "." + portString; + Map props = getSaslProperties(conf, configKey, + SaslRpcServer.QualityOfProtection.PRIVACY); + portPropMapping.put(port, props); + } + LOG.debug("Configured with port to QOP mapping as:" + portPropMapping); + } + + /** + * Identify the Sasl Properties to be used for a connection with a client. + * @param clientAddress client's address + * @param ingressPort the port that the client is connecting + * @return the sasl properties to be used for the connection. + */ + @Override + @VisibleForTesting + public Map getServerProperties(InetAddress clientAddress, + int ingressPort) { + LOG.debug("Resolving SASL properties for " + clientAddress + " " + + ingressPort); + if (!portPropMapping.containsKey(ingressPort)) { + LOG.warn("An un-configured port is being requested " + ingressPort + + " using default"); + return getDefaultProperties(); + } + return portPropMapping.get(ingressPort); + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslPropertiesResolver.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslPropertiesResolver.java index 305443cea88..64b86e3f274 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslPropertiesResolver.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslPropertiesResolver.java @@ -18,7 +18,6 @@ package org.apache.hadoop.security; import java.net.InetAddress; -import java.util.Locale; import java.util.Map; import java.util.TreeMap; @@ -95,6 +94,17 @@ public class SaslPropertiesResolver implements Configurable{ return properties; } + /** + * Identify the Sasl Properties to be used for a connection with a client. + * @param clientAddress client's address + * @param ingressPort the port that the client is connecting + * @return the sasl properties to be used for the connection. + */ + public Map getServerProperties(InetAddress clientAddress, + int ingressPort){ + return properties; + } + /** * Identify the Sasl Properties to be used for a connection with a server. * @param serverAddress server's address @@ -103,4 +113,39 @@ public class SaslPropertiesResolver implements Configurable{ public Map getClientProperties(InetAddress serverAddress){ return properties; } + + /** + * Identify the Sasl Properties to be used for a connection with a server. + * @param serverAddress server's address + * @param ingressPort the port that is used to connect to server + * @return the sasl properties to be used for the connection. 
+ */ + public Map getClientProperties(InetAddress serverAddress, + int ingressPort) { + return properties; + } + + /** + * A util function to retrieve specific additional sasl property from config. + * Used by subclasses to read sasl properties used by themselves. + * @param conf the configuration + * @param configKey the config key to look for + * @param defaultQOP the default QOP if the key is missing + * @return sasl property associated with the given key + */ + static Map getSaslProperties(Configuration conf, + String configKey, QualityOfProtection defaultQOP) { + Map saslProps = new TreeMap<>(); + String[] qop = conf.getStrings(configKey, defaultQOP.toString()); + + for (int i=0; i < qop.length; i++) { + qop[i] = QualityOfProtection.valueOf( + StringUtils.toUpperCase(qop[i])).getSaslQop(); + } + + saslProps.put(Sasl.QOP, StringUtils.join(",", qop)); + saslProps.put(Sasl.SERVER_AUTH, "true"); + + return saslProps; + } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/WhitelistBasedResolver.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/WhitelistBasedResolver.java index a64c4de7b66..5964886b9a6 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/WhitelistBasedResolver.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/WhitelistBasedResolver.java @@ -20,15 +20,10 @@ package org.apache.hadoop.security; import java.net.InetAddress; import java.net.UnknownHostException; import java.util.Map; -import java.util.TreeMap; - -import javax.security.sasl.Sasl; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.security.SaslPropertiesResolver; import org.apache.hadoop.security.SaslRpcServer.QualityOfProtection; import org.apache.hadoop.util.CombinedIPWhiteList; -import org.apache.hadoop.util.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -134,18 +129,7 @@ public class WhitelistBasedResolver extends SaslPropertiesResolver { } static Map getSaslProperties(Configuration conf) { - Map saslProps =new TreeMap(); - String[] qop = conf.getStrings(HADOOP_RPC_PROTECTION_NON_WHITELIST, - QualityOfProtection.PRIVACY.toString()); - - for (int i=0; i < qop.length; i++) { - qop[i] = QualityOfProtection.valueOf( - StringUtils.toUpperCase(qop[i])).getSaslQop(); - } - - saslProps.put(Sasl.QOP, StringUtils.join(",", qop)); - saslProps.put(Sasl.SERVER_AUTH, "true"); - - return saslProps; + return getSaslProperties(conf, HADOOP_RPC_PROTECTION_NON_WHITELIST, + QualityOfProtection.PRIVACY); } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestIngressPortBasedResolver.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestIngressPortBasedResolver.java new file mode 100644 index 00000000000..96c80af15f3 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestIngressPortBasedResolver.java @@ -0,0 +1,59 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.security; + +import javax.security.sasl.Sasl; +import org.apache.hadoop.conf.Configuration; +import org.junit.Test; + +import static org.junit.Assert.*; + + +/** + * Test class for IngressPortBasedResolver. + */ +public class TestIngressPortBasedResolver { + + /** + * A simple test to test that for the configured ports, the resolver + * can return the current SASL properties. + */ + @Test + public void testResolver() { + Configuration conf = new Configuration(); + conf.set("ingress.port.sasl.configured.ports", "444,555,666,777"); + conf.set("ingress.port.sasl.prop.444", "authentication"); + conf.set("ingress.port.sasl.prop.555", "authentication,privacy"); + conf.set("ingress.port.sasl.prop.666", "privacy"); + + IngressPortBasedResolver resolver = new IngressPortBasedResolver(); + resolver.setConf(conf); + + // the client address does not matter, give it a null + assertEquals("auth", + resolver.getServerProperties(null, 444).get(Sasl.QOP)); + assertEquals("auth,auth-conf", + resolver.getServerProperties(null, 555).get(Sasl.QOP)); + assertEquals("auth-conf", + resolver.getServerProperties(null, 666).get(Sasl.QOP)); + assertEquals("auth-conf", + resolver.getServerProperties(null, 777).get(Sasl.QOP)); + assertEquals("auth", + resolver.getServerProperties(null, 888).get(Sasl.QOP)); + } +} From 0afc036deb35df7e86ede3dcebc430c8f05ed368 Mon Sep 17 00:00:00 2001 From: Inigo Goiri Date: Tue, 5 Jun 2018 12:24:57 -0700 Subject: [PATCH 050/113] MAPREDUCE-7103. Fix TestHistoryViewerPrinter on windows due to a mismatch line separator. Contributed by Giovanni Matteo Fumarola. 
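The change replaces hard-coded "\n" in the expected strings with the platform
line separator, because parts of the printer output use the platform line
separator and therefore become "\r\n" on Windows. A minimal, self-contained
sketch of the idea (illustrative only; the class and variable names below are
invented for this note, not taken from the patch):

    import org.junit.Assert;
    import org.junit.Test;

    public class LineSeparatorExample {
      // System.lineSeparator() keeps expected output platform-correct:
      // "\n" on Unix, "\r\n" on Windows.
      private static final String NL = System.lineSeparator();

      @Test
      public void expectedOutputUsesPlatformSeparator() {
        StringBuilder out = new StringBuilder();
        out.append("Task Summary").append(NL);   // stands in for printer output
        out.append("============================").append(NL);
        Assert.assertEquals(
            "Task Summary" + NL + "============================" + NL,
            out.toString());
      }
    }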
--- .../jobhistory/TestHistoryViewerPrinter.java | 378 ++++++++++++------ 1 file changed, 255 insertions(+), 123 deletions(-) diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestHistoryViewerPrinter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestHistoryViewerPrinter.java index 3601ea762a3..f0f713a9821 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestHistoryViewerPrinter.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestHistoryViewerPrinter.java @@ -41,6 +41,8 @@ public class TestHistoryViewerPrinter { private static final Logger LOG = LoggerFactory.getLogger(TestHistoryViewerPrinter.class); + private final String LINE_SEPARATOR = System.lineSeparator(); + @Test public void testHumanPrinter() throws Exception { JobHistoryParser.JobInfo job = createJobInfo(); @@ -61,12 +63,16 @@ public class TestHistoryViewerPrinter { "Counters: \n" + "\n" + "|Group Name |Counter name |Map Value |Reduce Value|Total Value|\n" + - "---------------------------------------------------------------------------------------\n" + - "|group1 |counter1 |5 |5 |5 \n" + - "|group1 |counter2 |10 |10 |10 \n" + - "|group2 |counter1 |15 |15 |15 \n" + - "\n" + - "=====================================\n" + + "---------------------------------------------------------------------------------------" + + LINE_SEPARATOR + + "|group1 |counter1 |5 |5 |5 " + + LINE_SEPARATOR + + "|group1 |counter2 |10 |10 |10 " + + LINE_SEPARATOR + + "|group2 |counter1 |15 |15 |15 " + + "\n\n" + + "=====================================" + + LINE_SEPARATOR + "\n" + "Task Summary\n" + "============================\n" + @@ -77,46 +83,66 @@ public class TestHistoryViewerPrinter { "Reduce\t1\t1\t\t0\t0\t6-Oct-2011 19:15:10\t6-Oct-2011 19:15:18 (8sec)\n" + "Cleanup\t1\t1\t\t0\t0\t6-Oct-2011 19:15:11\t6-Oct-2011 19:15:20 (9sec)\n" + "============================\n" + + LINE_SEPARATOR + "\n" + - "\n" + - "Analysis\n" + - "=========\n" + + "Analysis" + + LINE_SEPARATOR + + "=========" + + LINE_SEPARATOR + "\n" + "Time taken by best performing map task task_1317928501754_0001_m_000003: 3sec\n" + "Average time taken by map tasks: 5sec\n" + "Worse performing map tasks: \n" + - "TaskId\t\tTimetaken\n" + - "task_1317928501754_0001_m_000007 7sec\n" + - "task_1317928501754_0001_m_000006 6sec\n" + - "task_1317928501754_0001_m_000005 5sec\n" + - "task_1317928501754_0001_m_000004 4sec\n" + - "task_1317928501754_0001_m_000003 3sec\n" + - "The last map task task_1317928501754_0001_m_000007 finished at (relative to the Job launch time): 6-Oct-2011 19:15:16 (14sec)\n" + + "TaskId\t\tTimetaken" + + LINE_SEPARATOR + + "task_1317928501754_0001_m_000007 7sec" + + LINE_SEPARATOR + + "task_1317928501754_0001_m_000006 6sec" + + LINE_SEPARATOR + + "task_1317928501754_0001_m_000005 5sec" + + LINE_SEPARATOR + + "task_1317928501754_0001_m_000004 4sec" + + LINE_SEPARATOR + + "task_1317928501754_0001_m_000003 3sec" + + LINE_SEPARATOR + + "The last map task task_1317928501754_0001_m_000007 finished at (relative to the Job launch time): 6-Oct-2011 19:15:16 (14sec)" + + LINE_SEPARATOR + "\n" + "Time taken by best performing shuffle task task_1317928501754_0001_r_000008: 8sec\n" + "Average time taken by shuffle 
tasks: 8sec\n" + "Worse performing shuffle tasks: \n" + - "TaskId\t\tTimetaken\n" + - "task_1317928501754_0001_r_000008 8sec\n" + - "The last shuffle task task_1317928501754_0001_r_000008 finished at (relative to the Job launch time): 6-Oct-2011 19:15:18 (16sec)\n" + + "TaskId\t\tTimetaken" + + LINE_SEPARATOR + + "task_1317928501754_0001_r_000008 8sec" + + LINE_SEPARATOR + + "The last shuffle task task_1317928501754_0001_r_000008 finished at (relative to the Job launch time): 6-Oct-2011 19:15:18 (16sec)" + + LINE_SEPARATOR + "\n" + "Time taken by best performing reduce task task_1317928501754_0001_r_000008: 0sec\n" + "Average time taken by reduce tasks: 0sec\n" + "Worse performing reduce tasks: \n" + - "TaskId\t\tTimetaken\n" + - "task_1317928501754_0001_r_000008 0sec\n" + - "The last reduce task task_1317928501754_0001_r_000008 finished at (relative to the Job launch time): 6-Oct-2011 19:15:18 (16sec)\n" + - "=========\n" + + "TaskId\t\tTimetaken"+ + LINE_SEPARATOR + + "task_1317928501754_0001_r_000008 0sec" + + LINE_SEPARATOR + + "The last reduce task task_1317928501754_0001_r_000008 finished at (relative to the Job launch time): 6-Oct-2011 19:15:18 (16sec)" + + LINE_SEPARATOR + + "=========" + + LINE_SEPARATOR + "\n" + "FAILED MAP task list for job_1317928501754_0001\n" + "TaskId\t\tStartTime\tFinishTime\tError\tInputSplits\n" + - "====================================================\n" + - "task_1317928501754_0001_m_000002\t6-Oct-2011 19:15:04\t6-Oct-2011 19:15:06 (2sec)\t\t\n" + + "====================================================" + + LINE_SEPARATOR + + "task_1317928501754_0001_m_000002\t6-Oct-2011 19:15:04\t6-Oct-2011 19:15:06 (2sec)\t\t" + + LINE_SEPARATOR + "\n" + "FAILED task attempts by nodes\n" + "Hostname\tFailedTasks\n" + - "===============================\n" + - "localhost\ttask_1317928501754_0001_m_000002, \n", outStr); + "===============================" + + LINE_SEPARATOR + + "localhost\ttask_1317928501754_0001_m_000002, " + + LINE_SEPARATOR, outStr); } @Test @@ -140,12 +166,16 @@ public class TestHistoryViewerPrinter { "Counters: \n" + "\n" + "|Group Name |Counter name |Map Value |Reduce Value|Total Value|\n" + - "---------------------------------------------------------------------------------------\n" + - "|group1 |counter1 |5 |5 |5 \n" + - "|group1 |counter2 |10 |10 |10 \n" + + "---------------------------------------------------------------------------------------" + + LINE_SEPARATOR + + "|group1 |counter1 |5 |5 |5 " + + LINE_SEPARATOR + + "|group1 |counter2 |10 |10 |10 " + + LINE_SEPARATOR + "|group2 |counter1 |15 |15 |15 \n" + "\n" + - "=====================================\n" + + "=====================================" + + LINE_SEPARATOR + "\n" + "Task Summary\n" + "============================\n" + @@ -156,111 +186,160 @@ public class TestHistoryViewerPrinter { "Reduce\t1\t1\t\t0\t0\t6-Oct-2011 19:15:10\t6-Oct-2011 19:15:18 (8sec)\n" + "Cleanup\t1\t1\t\t0\t0\t6-Oct-2011 19:15:11\t6-Oct-2011 19:15:20 (9sec)\n" + "============================\n" + + LINE_SEPARATOR + "\n" + - "\n" + - "Analysis\n" + - "=========\n" + + "Analysis" + + LINE_SEPARATOR + + "=========" + + LINE_SEPARATOR + "\n" + "Time taken by best performing map task task_1317928501754_0001_m_000003: 3sec\n" + "Average time taken by map tasks: 5sec\n" + "Worse performing map tasks: \n" + - "TaskId\t\tTimetaken\n" + - "task_1317928501754_0001_m_000007 7sec\n" + - "task_1317928501754_0001_m_000006 6sec\n" + - "task_1317928501754_0001_m_000005 5sec\n" + - "task_1317928501754_0001_m_000004 4sec\n" + - 
"task_1317928501754_0001_m_000003 3sec\n" + - "The last map task task_1317928501754_0001_m_000007 finished at (relative to the Job launch time): 6-Oct-2011 19:15:16 (14sec)\n" + + "TaskId\t\tTimetaken" + + LINE_SEPARATOR + + "task_1317928501754_0001_m_000007 7sec" + + LINE_SEPARATOR + + "task_1317928501754_0001_m_000006 6sec" + + LINE_SEPARATOR + + "task_1317928501754_0001_m_000005 5sec" + + LINE_SEPARATOR + + "task_1317928501754_0001_m_000004 4sec" + + LINE_SEPARATOR + + "task_1317928501754_0001_m_000003 3sec" + + LINE_SEPARATOR + + "The last map task task_1317928501754_0001_m_000007 finished at (relative to the Job launch time): 6-Oct-2011 19:15:16 (14sec)" + + LINE_SEPARATOR + "\n" + "Time taken by best performing shuffle task task_1317928501754_0001_r_000008: 8sec\n" + "Average time taken by shuffle tasks: 8sec\n" + "Worse performing shuffle tasks: \n" + - "TaskId\t\tTimetaken\n" + - "task_1317928501754_0001_r_000008 8sec\n" + - "The last shuffle task task_1317928501754_0001_r_000008 finished at (relative to the Job launch time): 6-Oct-2011 19:15:18 (16sec)\n" + + "TaskId\t\tTimetaken" + + LINE_SEPARATOR + + "task_1317928501754_0001_r_000008 8sec" + + LINE_SEPARATOR + + "The last shuffle task task_1317928501754_0001_r_000008 finished at (relative to the Job launch time): 6-Oct-2011 19:15:18 (16sec)" + + LINE_SEPARATOR + "\n" + "Time taken by best performing reduce task task_1317928501754_0001_r_000008: 0sec\n" + "Average time taken by reduce tasks: 0sec\n" + "Worse performing reduce tasks: \n" + - "TaskId\t\tTimetaken\n" + - "task_1317928501754_0001_r_000008 0sec\n" + - "The last reduce task task_1317928501754_0001_r_000008 finished at (relative to the Job launch time): 6-Oct-2011 19:15:18 (16sec)\n" + - "=========\n" + + "TaskId\t\tTimetaken" + + LINE_SEPARATOR + + "task_1317928501754_0001_r_000008 0sec" + + LINE_SEPARATOR + + "The last reduce task task_1317928501754_0001_r_000008 finished at (relative to the Job launch time): 6-Oct-2011 19:15:18 (16sec)" + + LINE_SEPARATOR + + "=========" + + LINE_SEPARATOR + "\n" + "FAILED MAP task list for job_1317928501754_0001\n" + "TaskId\t\tStartTime\tFinishTime\tError\tInputSplits\n" + - "====================================================\n" + - "task_1317928501754_0001_m_000002\t6-Oct-2011 19:15:04\t6-Oct-2011 19:15:06 (2sec)\t\t\n" + + "====================================================" + + LINE_SEPARATOR + + "task_1317928501754_0001_m_000002\t6-Oct-2011 19:15:04\t6-Oct-2011 19:15:06 (2sec)\t\t" + + LINE_SEPARATOR + "\n" + "SUCCEEDED JOB_SETUP task list for job_1317928501754_0001\n" + "TaskId\t\tStartTime\tFinishTime\tError\n" + - "====================================================\n" + - "task_1317928501754_0001_s_000001\t6-Oct-2011 19:15:03\t6-Oct-2011 19:15:04 (1sec)\t\n" + + "====================================================" + + LINE_SEPARATOR + + "task_1317928501754_0001_s_000001\t6-Oct-2011 19:15:03\t6-Oct-2011 19:15:04 (1sec)\t" + + LINE_SEPARATOR + "\n" + "SUCCEEDED MAP task list for job_1317928501754_0001\n" + "TaskId\t\tStartTime\tFinishTime\tError\tInputSplits\n" + - "====================================================\n" + + "====================================================" + + LINE_SEPARATOR + "task_1317928501754_0001_m_000006\t6-Oct-2011 19:15:08\t6-Oct-2011 19:15:14 (6sec)\t\t\n" + + LINE_SEPARATOR + "\n" + "SUCCEEDED MAP task list for job_1317928501754_0001\n" + "TaskId\t\tStartTime\tFinishTime\tError\tInputSplits\n" + - "====================================================\n" + + 
"====================================================" + + LINE_SEPARATOR + "task_1317928501754_0001_m_000005\t6-Oct-2011 19:15:07\t6-Oct-2011 19:15:12 (5sec)\t\t\n" + + LINE_SEPARATOR + "\n" + "SUCCEEDED MAP task list for job_1317928501754_0001\n" + "TaskId\t\tStartTime\tFinishTime\tError\tInputSplits\n" + - "====================================================\n" + + "====================================================" + + LINE_SEPARATOR + "task_1317928501754_0001_m_000004\t6-Oct-2011 19:15:06\t6-Oct-2011 19:15:10 (4sec)\t\t\n" + + LINE_SEPARATOR + "\n" + "SUCCEEDED MAP task list for job_1317928501754_0001\n" + "TaskId\t\tStartTime\tFinishTime\tError\tInputSplits\n" + - "====================================================\n" + + "====================================================" + + LINE_SEPARATOR + "task_1317928501754_0001_m_000003\t6-Oct-2011 19:15:05\t6-Oct-2011 19:15:08 (3sec)\t\t\n" + + LINE_SEPARATOR + "\n" + "SUCCEEDED MAP task list for job_1317928501754_0001\n" + "TaskId\t\tStartTime\tFinishTime\tError\tInputSplits\n" + - "====================================================\n" + + "====================================================" + + LINE_SEPARATOR + "task_1317928501754_0001_m_000007\t6-Oct-2011 19:15:09\t6-Oct-2011 19:15:16 (7sec)\t\t\n" + + LINE_SEPARATOR + "\n" + "SUCCEEDED REDUCE task list for job_1317928501754_0001\n" + "TaskId\t\tStartTime\tFinishTime\tError\n" + - "====================================================\n" + - "task_1317928501754_0001_r_000008\t6-Oct-2011 19:15:10\t6-Oct-2011 19:15:18 (8sec)\t\n" + + "====================================================" + + LINE_SEPARATOR + + "task_1317928501754_0001_r_000008\t6-Oct-2011 19:15:10\t6-Oct-2011 19:15:18 (8sec)\t" + + LINE_SEPARATOR + "\n" + "SUCCEEDED JOB_CLEANUP task list for job_1317928501754_0001\n" + "TaskId\t\tStartTime\tFinishTime\tError\n" + - "====================================================\n" + - "task_1317928501754_0001_c_000009\t6-Oct-2011 19:15:11\t6-Oct-2011 19:15:20 (9sec)\t\n" + + "====================================================" + + LINE_SEPARATOR + + "task_1317928501754_0001_c_000009\t6-Oct-2011 19:15:11\t6-Oct-2011 19:15:20 (9sec)\t" + + LINE_SEPARATOR + "\n" + "JOB_SETUP task list for job_1317928501754_0001\n" + "TaskId\t\tStartTime\tFinishTime\tHostName\tError\tTaskLogs\n" + - "====================================================\n" + - "attempt_1317928501754_0001_s_000001_1\t6-Oct-2011 19:15:03\t6-Oct-2011 19:15:04 (1sec)\tlocalhost\thttp://t:1234/tasklog?attemptid=attempt_1317928501754_0001_s_000001_1\n" + + "====================================================" + + LINE_SEPARATOR + + "attempt_1317928501754_0001_s_000001_1\t6-Oct-2011 19:15:03\t6-Oct-2011 19:15:04 (1sec)\tlocalhost\thttp://t:1234/tasklog?attemptid=attempt_1317928501754_0001_s_000001_1" + + LINE_SEPARATOR + "\n" + "MAP task list for job_1317928501754_0001\n" + "TaskId\t\tStartTime\tFinishTime\tHostName\tError\tTaskLogs\n" + "====================================================\n" + + LINE_SEPARATOR + "attempt_1317928501754_0001_m_000002_1\t6-Oct-2011 19:15:04\t6-Oct-2011 19:15:06 (2sec)\tlocalhost\thttp://t:1234/tasklog?attemptid=attempt_1317928501754_0001_m_000002_1\n" + + LINE_SEPARATOR + "attempt_1317928501754_0001_m_000006_1\t6-Oct-2011 19:15:08\t6-Oct-2011 19:15:14 (6sec)\tlocalhost\thttp://t:1234/tasklog?attemptid=attempt_1317928501754_0001_m_000006_1\n" + + LINE_SEPARATOR + "attempt_1317928501754_0001_m_000005_1\t6-Oct-2011 19:15:07\t6-Oct-2011 19:15:12 
(5sec)\tlocalhost\thttp://t:1234/tasklog?attemptid=attempt_1317928501754_0001_m_000005_1\n" + + LINE_SEPARATOR + "attempt_1317928501754_0001_m_000004_1\t6-Oct-2011 19:15:06\t6-Oct-2011 19:15:10 (4sec)\tlocalhost\thttp://t:1234/tasklog?attemptid=attempt_1317928501754_0001_m_000004_1\n" + + LINE_SEPARATOR + "attempt_1317928501754_0001_m_000003_1\t6-Oct-2011 19:15:05\t6-Oct-2011 19:15:08 (3sec)\tlocalhost\thttp://t:1234/tasklog?attemptid=attempt_1317928501754_0001_m_000003_1\n" + + LINE_SEPARATOR + "attempt_1317928501754_0001_m_000007_1\t6-Oct-2011 19:15:09\t6-Oct-2011 19:15:16 (7sec)\tlocalhost\thttp://t:1234/tasklog?attemptid=attempt_1317928501754_0001_m_000007_1\n" + + LINE_SEPARATOR + "\n" + "REDUCE task list for job_1317928501754_0001\n" + "TaskId\t\tStartTime\tShuffleFinished\tSortFinished\tFinishTime\tHostName\tError\tTaskLogs\n" + - "====================================================\n" + - "attempt_1317928501754_0001_r_000008_1\t6-Oct-2011 19:15:10\t6-Oct-2011 19:15:18 (8sec)\t6-Oct-2011 19:15:18 (0sec)6-Oct-2011 19:15:18 (8sec)\tlocalhost\thttp://t:1234/tasklog?attemptid=attempt_1317928501754_0001_r_000008_1\n" + + "====================================================" + + LINE_SEPARATOR + + "attempt_1317928501754_0001_r_000008_1\t6-Oct-2011 19:15:10\t6-Oct-2011 19:15:18 (8sec)\t6-Oct-2011 19:15:18 (0sec)6-Oct-2011 19:15:18 (8sec)\tlocalhost\thttp://t:1234/tasklog?attemptid=attempt_1317928501754_0001_r_000008_1" + + LINE_SEPARATOR + "\n" + "JOB_CLEANUP task list for job_1317928501754_0001\n" + "TaskId\t\tStartTime\tFinishTime\tHostName\tError\tTaskLogs\n" + - "====================================================\n" + - "attempt_1317928501754_0001_c_000009_1\t6-Oct-2011 19:15:11\t6-Oct-2011 19:15:20 (9sec)\tlocalhost\thttp://t:1234/tasklog?attemptid=attempt_1317928501754_0001_c_000009_1\n" + + "====================================================" + + LINE_SEPARATOR + + "attempt_1317928501754_0001_c_000009_1\t6-Oct-2011 19:15:11\t6-Oct-2011 19:15:20 (9sec)\tlocalhost\thttp://t:1234/tasklog?attemptid=attempt_1317928501754_0001_c_000009_1" + + LINE_SEPARATOR + "\n" + "FAILED task attempts by nodes\n" + "Hostname\tFailedTasks\n" + - "===============================\n" + - "localhost\ttask_1317928501754_0001_m_000002, \n", outStr); + "===============================" + + LINE_SEPARATOR + + "localhost\ttask_1317928501754_0001_m_000002, " + + LINE_SEPARATOR, outStr); } else { Assert.assertEquals("\n" + "Hadoop job: job_1317928501754_0001\n" + @@ -275,12 +354,16 @@ public class TestHistoryViewerPrinter { "Counters: \n" + "\n" + "|Group Name |Counter name |Map Value |Reduce Value|Total Value|\n" + - "---------------------------------------------------------------------------------------\n" + - "|group1 |counter1 |5 |5 |5 \n" + - "|group1 |counter2 |10 |10 |10 \n" + + "---------------------------------------------------------------------------------------" + + LINE_SEPARATOR + + "|group1 |counter1 |5 |5 |5 " + + LINE_SEPARATOR + + "|group1 |counter2 |10 |10 |10 " + + LINE_SEPARATOR + "|group2 |counter1 |15 |15 |15 \n" + "\n" + - "=====================================\n" + + "=====================================" + + LINE_SEPARATOR + "\n" + "Task Summary\n" + "============================\n" + @@ -291,111 +374,160 @@ public class TestHistoryViewerPrinter { "Reduce\t1\t1\t\t0\t0\t6-Oct-2011 19:15:10\t6-Oct-2011 19:15:18 (8sec)\n" + "Cleanup\t1\t1\t\t0\t0\t6-Oct-2011 19:15:11\t6-Oct-2011 19:15:20 (9sec)\n" + "============================\n" + + LINE_SEPARATOR + "\n" + - "\n" + - "Analysis\n" 
+ - "=========\n" + + "Analysis" + + LINE_SEPARATOR + + "=========" + + LINE_SEPARATOR + "\n" + "Time taken by best performing map task task_1317928501754_0001_m_000003: 3sec\n" + "Average time taken by map tasks: 5sec\n" + "Worse performing map tasks: \n" + - "TaskId\t\tTimetaken\n" + - "task_1317928501754_0001_m_000007 7sec\n" + - "task_1317928501754_0001_m_000006 6sec\n" + - "task_1317928501754_0001_m_000005 5sec\n" + - "task_1317928501754_0001_m_000004 4sec\n" + - "task_1317928501754_0001_m_000003 3sec\n" + - "The last map task task_1317928501754_0001_m_000007 finished at (relative to the Job launch time): 6-Oct-2011 19:15:16 (14sec)\n" + + "TaskId\t\tTimetaken" + + LINE_SEPARATOR + + "task_1317928501754_0001_m_000007 7sec" + + LINE_SEPARATOR + + "task_1317928501754_0001_m_000006 6sec" + + LINE_SEPARATOR + + "task_1317928501754_0001_m_000005 5sec" + + LINE_SEPARATOR + + "task_1317928501754_0001_m_000004 4sec" + + LINE_SEPARATOR + + "task_1317928501754_0001_m_000003 3sec" + + LINE_SEPARATOR + + "The last map task task_1317928501754_0001_m_000007 finished at (relative to the Job launch time): 6-Oct-2011 19:15:16 (14sec)" + + LINE_SEPARATOR + "\n" + "Time taken by best performing shuffle task task_1317928501754_0001_r_000008: 8sec\n" + "Average time taken by shuffle tasks: 8sec\n" + "Worse performing shuffle tasks: \n" + - "TaskId\t\tTimetaken\n" + - "task_1317928501754_0001_r_000008 8sec\n" + - "The last shuffle task task_1317928501754_0001_r_000008 finished at (relative to the Job launch time): 6-Oct-2011 19:15:18 (16sec)\n" + + "TaskId\t\tTimetaken" + + LINE_SEPARATOR + + "task_1317928501754_0001_r_000008 8sec" + + LINE_SEPARATOR + + "The last shuffle task task_1317928501754_0001_r_000008 finished at (relative to the Job launch time): 6-Oct-2011 19:15:18 (16sec)" + + LINE_SEPARATOR + "\n" + "Time taken by best performing reduce task task_1317928501754_0001_r_000008: 0sec\n" + "Average time taken by reduce tasks: 0sec\n" + "Worse performing reduce tasks: \n" + - "TaskId\t\tTimetaken\n" + - "task_1317928501754_0001_r_000008 0sec\n" + - "The last reduce task task_1317928501754_0001_r_000008 finished at (relative to the Job launch time): 6-Oct-2011 19:15:18 (16sec)\n" + - "=========\n" + + "TaskId\t\tTimetaken" + + LINE_SEPARATOR + + "task_1317928501754_0001_r_000008 0sec" + + LINE_SEPARATOR + + "The last reduce task task_1317928501754_0001_r_000008 finished at (relative to the Job launch time): 6-Oct-2011 19:15:18 (16sec)" + + LINE_SEPARATOR + + "=========" + + LINE_SEPARATOR + "\n" + "FAILED MAP task list for job_1317928501754_0001\n" + "TaskId\t\tStartTime\tFinishTime\tError\tInputSplits\n" + - "====================================================\n" + - "task_1317928501754_0001_m_000002\t6-Oct-2011 19:15:04\t6-Oct-2011 19:15:06 (2sec)\t\t\n" + + "====================================================" + + LINE_SEPARATOR + + "task_1317928501754_0001_m_000002\t6-Oct-2011 19:15:04\t6-Oct-2011 19:15:06 (2sec)\t\t" + + LINE_SEPARATOR + "\n" + "SUCCEEDED JOB_SETUP task list for job_1317928501754_0001\n" + "TaskId\t\tStartTime\tFinishTime\tError\n" + - "====================================================\n" + - "task_1317928501754_0001_s_000001\t6-Oct-2011 19:15:03\t6-Oct-2011 19:15:04 (1sec)\t\n" + + "====================================================" + + LINE_SEPARATOR + + "task_1317928501754_0001_s_000001\t6-Oct-2011 19:15:03\t6-Oct-2011 19:15:04 (1sec)\t" + + LINE_SEPARATOR + "\n" + "SUCCEEDED MAP task list for job_1317928501754_0001\n" + 
"TaskId\t\tStartTime\tFinishTime\tError\tInputSplits\n" + - "====================================================\n" + - "task_1317928501754_0001_m_000007\t6-Oct-2011 19:15:09\t6-Oct-2011 19:15:16 (7sec)\t\t\n" + + "====================================================" + + LINE_SEPARATOR + + "task_1317928501754_0001_m_000007\t6-Oct-2011 19:15:09\t6-Oct-2011 19:15:16 (7sec)\t\t" + + LINE_SEPARATOR + "\n" + "SUCCEEDED MAP task list for job_1317928501754_0001\n" + "TaskId\t\tStartTime\tFinishTime\tError\tInputSplits\n" + - "====================================================\n" + - "task_1317928501754_0001_m_000006\t6-Oct-2011 19:15:08\t6-Oct-2011 19:15:14 (6sec)\t\t\n" + + "====================================================" + + LINE_SEPARATOR + + "task_1317928501754_0001_m_000006\t6-Oct-2011 19:15:08\t6-Oct-2011 19:15:14 (6sec)\t\t" + + LINE_SEPARATOR + "\n" + "SUCCEEDED MAP task list for job_1317928501754_0001\n" + "TaskId\t\tStartTime\tFinishTime\tError\tInputSplits\n" + - "====================================================\n" + - "task_1317928501754_0001_m_000005\t6-Oct-2011 19:15:07\t6-Oct-2011 19:15:12 (5sec)\t\t\n" + + "====================================================" + + LINE_SEPARATOR + + "task_1317928501754_0001_m_000005\t6-Oct-2011 19:15:07\t6-Oct-2011 19:15:12 (5sec)\t\t" + + LINE_SEPARATOR + "\n" + "SUCCEEDED MAP task list for job_1317928501754_0001\n" + "TaskId\t\tStartTime\tFinishTime\tError\tInputSplits\n" + - "====================================================\n" + - "task_1317928501754_0001_m_000004\t6-Oct-2011 19:15:06\t6-Oct-2011 19:15:10 (4sec)\t\t\n" + + "====================================================" + + LINE_SEPARATOR + + "task_1317928501754_0001_m_000004\t6-Oct-2011 19:15:06\t6-Oct-2011 19:15:10 (4sec)\t\t" + + LINE_SEPARATOR + "\n" + "SUCCEEDED MAP task list for job_1317928501754_0001\n" + "TaskId\t\tStartTime\tFinishTime\tError\tInputSplits\n" + - "====================================================\n" + - "task_1317928501754_0001_m_000003\t6-Oct-2011 19:15:05\t6-Oct-2011 19:15:08 (3sec)\t\t\n" + + "====================================================" + + LINE_SEPARATOR + + "task_1317928501754_0001_m_000003\t6-Oct-2011 19:15:05\t6-Oct-2011 19:15:08 (3sec)\t\t" + + LINE_SEPARATOR + "\n" + "SUCCEEDED REDUCE task list for job_1317928501754_0001\n" + "TaskId\t\tStartTime\tFinishTime\tError\n" + - "====================================================\n" + - "task_1317928501754_0001_r_000008\t6-Oct-2011 19:15:10\t6-Oct-2011 19:15:18 (8sec)\t\n" + + "====================================================" + + LINE_SEPARATOR + + "task_1317928501754_0001_r_000008\t6-Oct-2011 19:15:10\t6-Oct-2011 19:15:18 (8sec)\t" + + LINE_SEPARATOR + "\n" + "SUCCEEDED JOB_CLEANUP task list for job_1317928501754_0001\n" + "TaskId\t\tStartTime\tFinishTime\tError\n" + - "====================================================\n" + - "task_1317928501754_0001_c_000009\t6-Oct-2011 19:15:11\t6-Oct-2011 19:15:20 (9sec)\t\n" + + "====================================================" + + LINE_SEPARATOR + + "task_1317928501754_0001_c_000009\t6-Oct-2011 19:15:11\t6-Oct-2011 19:15:20 (9sec)\t" + + LINE_SEPARATOR + "\n" + "JOB_SETUP task list for job_1317928501754_0001\n" + "TaskId\t\tStartTime\tFinishTime\tHostName\tError\tTaskLogs\n" + - "====================================================\n" + - "attempt_1317928501754_0001_s_000001_1\t6-Oct-2011 19:15:03\t6-Oct-2011 19:15:04 (1sec)\tlocalhost\thttp://t:1234/tasklog?attemptid=attempt_1317928501754_0001_s_000001_1\n" + + 
"====================================================" + + LINE_SEPARATOR + + "attempt_1317928501754_0001_s_000001_1\t6-Oct-2011 19:15:03\t6-Oct-2011 19:15:04 (1sec)\tlocalhost\thttp://t:1234/tasklog?attemptid=attempt_1317928501754_0001_s_000001_1" + + LINE_SEPARATOR + "\n" + "MAP task list for job_1317928501754_0001\n" + "TaskId\t\tStartTime\tFinishTime\tHostName\tError\tTaskLogs\n" + - "====================================================\n" + - "attempt_1317928501754_0001_m_000007_1\t6-Oct-2011 19:15:09\t6-Oct-2011 19:15:16 (7sec)\tlocalhost\thttp://t:1234/tasklog?attemptid=attempt_1317928501754_0001_m_000007_1\n" + - "attempt_1317928501754_0001_m_000002_1\t6-Oct-2011 19:15:04\t6-Oct-2011 19:15:06 (2sec)\tlocalhost\thttp://t:1234/tasklog?attemptid=attempt_1317928501754_0001_m_000002_1\n" + - "attempt_1317928501754_0001_m_000006_1\t6-Oct-2011 19:15:08\t6-Oct-2011 19:15:14 (6sec)\tlocalhost\thttp://t:1234/tasklog?attemptid=attempt_1317928501754_0001_m_000006_1\n" + - "attempt_1317928501754_0001_m_000005_1\t6-Oct-2011 19:15:07\t6-Oct-2011 19:15:12 (5sec)\tlocalhost\thttp://t:1234/tasklog?attemptid=attempt_1317928501754_0001_m_000005_1\n" + - "attempt_1317928501754_0001_m_000004_1\t6-Oct-2011 19:15:06\t6-Oct-2011 19:15:10 (4sec)\tlocalhost\thttp://t:1234/tasklog?attemptid=attempt_1317928501754_0001_m_000004_1\n" + - "attempt_1317928501754_0001_m_000003_1\t6-Oct-2011 19:15:05\t6-Oct-2011 19:15:08 (3sec)\tlocalhost\thttp://t:1234/tasklog?attemptid=attempt_1317928501754_0001_m_000003_1\n" + + "====================================================" + + LINE_SEPARATOR + + "attempt_1317928501754_0001_m_000007_1\t6-Oct-2011 19:15:09\t6-Oct-2011 19:15:16 (7sec)\tlocalhost\thttp://t:1234/tasklog?attemptid=attempt_1317928501754_0001_m_000007_1" + + LINE_SEPARATOR + + "attempt_1317928501754_0001_m_000002_1\t6-Oct-2011 19:15:04\t6-Oct-2011 19:15:06 (2sec)\tlocalhost\thttp://t:1234/tasklog?attemptid=attempt_1317928501754_0001_m_000002_1" + + LINE_SEPARATOR + + "attempt_1317928501754_0001_m_000006_1\t6-Oct-2011 19:15:08\t6-Oct-2011 19:15:14 (6sec)\tlocalhost\thttp://t:1234/tasklog?attemptid=attempt_1317928501754_0001_m_000006_1" + + LINE_SEPARATOR + + "attempt_1317928501754_0001_m_000005_1\t6-Oct-2011 19:15:07\t6-Oct-2011 19:15:12 (5sec)\tlocalhost\thttp://t:1234/tasklog?attemptid=attempt_1317928501754_0001_m_000005_1" + + LINE_SEPARATOR + + "attempt_1317928501754_0001_m_000004_1\t6-Oct-2011 19:15:06\t6-Oct-2011 19:15:10 (4sec)\tlocalhost\thttp://t:1234/tasklog?attemptid=attempt_1317928501754_0001_m_000004_1" + + LINE_SEPARATOR + + "attempt_1317928501754_0001_m_000003_1\t6-Oct-2011 19:15:05\t6-Oct-2011 19:15:08 (3sec)\tlocalhost\thttp://t:1234/tasklog?attemptid=attempt_1317928501754_0001_m_000003_1" + + LINE_SEPARATOR + "\n" + "REDUCE task list for job_1317928501754_0001\n" + "TaskId\t\tStartTime\tShuffleFinished\tSortFinished\tFinishTime\tHostName\tError\tTaskLogs\n" + - "====================================================\n" + - "attempt_1317928501754_0001_r_000008_1\t6-Oct-2011 19:15:10\t6-Oct-2011 19:15:18 (8sec)\t6-Oct-2011 19:15:18 (0sec)6-Oct-2011 19:15:18 (8sec)\tlocalhost\thttp://t:1234/tasklog?attemptid=attempt_1317928501754_0001_r_000008_1\n" + + "====================================================" + + LINE_SEPARATOR + + "attempt_1317928501754_0001_r_000008_1\t6-Oct-2011 19:15:10\t6-Oct-2011 19:15:18 (8sec)\t6-Oct-2011 19:15:18 (0sec)6-Oct-2011 19:15:18 (8sec)\tlocalhost\thttp://t:1234/tasklog?attemptid=attempt_1317928501754_0001_r_000008_1" + + LINE_SEPARATOR + "\n" + "JOB_CLEANUP task list 
for job_1317928501754_0001\n" + "TaskId\t\tStartTime\tFinishTime\tHostName\tError\tTaskLogs\n" + - "====================================================\n" + - "attempt_1317928501754_0001_c_000009_1\t6-Oct-2011 19:15:11\t6-Oct-2011 19:15:20 (9sec)\tlocalhost\thttp://t:1234/tasklog?attemptid=attempt_1317928501754_0001_c_000009_1\n" + + "====================================================" + + LINE_SEPARATOR + + "attempt_1317928501754_0001_c_000009_1\t6-Oct-2011 19:15:11\t6-Oct-2011 19:15:20 (9sec)\tlocalhost\thttp://t:1234/tasklog?attemptid=attempt_1317928501754_0001_c_000009_1" + + LINE_SEPARATOR + "\n" + "FAILED task attempts by nodes\n" + "Hostname\tFailedTasks\n" + - "===============================\n" + - "localhost\ttask_1317928501754_0001_m_000002, \n", outStr); + "===============================" + + LINE_SEPARATOR + + "localhost\ttask_1317928501754_0001_m_000002, " + + LINE_SEPARATOR, outStr); } } From ba4011d64fadef3bee5920ccedbcdac01794cc23 Mon Sep 17 00:00:00 2001 From: Xiao Chen Date: Tue, 5 Jun 2018 21:17:42 -0700 Subject: [PATCH 051/113] HADOOP-15217. FsUrlConnection does not handle paths with spaces. Contributed by Joseph Fourny and Zsolt Venczel. --- .../org/apache/hadoop/fs/FsUrlConnection.java | 2 +- .../fs/TestUrlStreamHandlerFactory.java | 36 ++++++++++++++----- 2 files changed, 29 insertions(+), 9 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsUrlConnection.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsUrlConnection.java index 03c7aeddd9c..e62c86ff214 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsUrlConnection.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsUrlConnection.java @@ -57,7 +57,7 @@ class FsUrlConnection extends URLConnection { try { LOG.debug("Connecting to {}", url); FileSystem fs = FileSystem.get(url.toURI(), conf); - is = fs.open(new Path(url.getPath())); + is = fs.open(new Path(url.toURI())); } catch (URISyntaxException e) { throw new IOException(e.toString()); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandlerFactory.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandlerFactory.java index 910fee2b071..53cd557541a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandlerFactory.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandlerFactory.java @@ -18,14 +18,19 @@ package org.apache.hadoop.fs; +import org.apache.hadoop.test.GenericTestUtils; +import org.junit.Rule; import org.junit.Test; +import org.junit.rules.Timeout; +import java.io.File; +import java.io.IOException; +import java.net.URL; import java.util.ArrayList; import java.util.Random; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; -import java.util.concurrent.TimeUnit; /** * Test of the URL stream handler factory. 
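The expected-output strings rewritten above replace hard-coded `\n` with a `LINE_SEPARATOR` constant so the assertions also hold on platforms whose line ending is not `\n`. A minimal, self-contained sketch of the idea follows; it assumes `LINE_SEPARATOR` resolves to `System.lineSeparator()`, which may differ from the exact constant the patched test uses.

```java
public class LineSeparatorSketch {
  // Assumption: LINE_SEPARATOR in the patched test is the platform separator.
  private static final String LINE_SEPARATOR = System.lineSeparator();

  public static void main(String[] args) {
    // %n in String.format expands to the platform separator, like println().
    String actual = String.format("TaskId\t\tTimetaken%ntask_x 7sec%n");
    String expected = "TaskId\t\tTimetaken" + LINE_SEPARATOR
        + "task_x 7sec" + LINE_SEPARATOR;
    // With "\n" hard-coded in the expected string, this would fail on Windows.
    System.out.println(expected.equals(actual));
  }
}
```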
@@ -35,7 +40,9 @@ public class TestUrlStreamHandlerFactory { private static final int RUNS = 20; private static final int THREADS = 10; private static final int TASKS = 200; - private static final int TIMEOUT = 30; + + @Rule + public Timeout globalTimeout = new Timeout(30000); @Test public void testConcurrency() throws Exception { @@ -62,12 +69,6 @@ public class TestUrlStreamHandlerFactory { } executor.shutdown(); - try { - executor.awaitTermination(TIMEOUT, TimeUnit.SECONDS); - executor.shutdownNow(); - } catch (InterruptedException e) { - // pass - } // check for exceptions for (Future future : futures) { @@ -77,4 +78,23 @@ public class TestUrlStreamHandlerFactory { future.get(); } } + + @Test + public void testFsUrlStreamHandlerFactory() throws IOException { + File myFile = new File(GenericTestUtils.getTestDir(), "foo bar.txt"); + myFile.createNewFile(); + + // Create URL directly from File (JRE builds it). + URL myUrl = myFile.toURI().toURL(); + + // Succeeds. + myUrl.openStream().close(); + + // Replace handling of file: scheme with FsUrlStreamHandler. + URL.setURLStreamHandlerFactory(new FsUrlStreamHandlerFactory()); + + URL myUrl2 = myFile.toURI().toURL(); + + myUrl2.openStream(); + } } From 774c1f199e11d886d0c0a1069325f0284da35deb Mon Sep 17 00:00:00 2001 From: Xiao Chen Date: Tue, 5 Jun 2018 21:24:25 -0700 Subject: [PATCH 052/113] HDFS-13511. Provide specialized exception when block length cannot be obtained. Contributed by Gabor Bota. --- .../CannotObtainBlockLengthException.java | 55 +++++++++++++++++++ .../apache/hadoop/hdfs/DFSInputStream.java | 2 +- 2 files changed, 56 insertions(+), 1 deletion(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/CannotObtainBlockLengthException.java diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/CannotObtainBlockLengthException.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/CannotObtainBlockLengthException.java new file mode 100644 index 00000000000..6da1d67b9ae --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/CannotObtainBlockLengthException.java @@ -0,0 +1,55 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdfs; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hdfs.protocol.LocatedBlock; + +import java.io.IOException; + +/** + * This exception is thrown when the length of a LocatedBlock instance + * can not be obtained. 
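The HADOOP-15217 fix builds the `Path` from `url.toURI()` instead of `url.getPath()`. The difference matters once a file name contains a space: `URL.getPath()` keeps the percent-encoded form, so the resulting `Path` refers to a literal `foo%20bar.txt`, while going through the `URI` keeps the decoded name. A small illustrative sketch (not the patched test itself) using a throwaway temp file:

```java
import java.io.File;
import java.net.URL;

import org.apache.hadoop.fs.Path;

public class SpaceInPathSketch {
  public static void main(String[] args) throws Exception {
    File f = new File(System.getProperty("java.io.tmpdir"), "foo bar.txt");
    URL url = f.toURI().toURL();

    // URL.getPath() keeps the percent-encoding, so this Path names a
    // "foo%20bar.txt" that does not exist on disk.
    Path fromGetPath = new Path(url.getPath());

    // Going through the URI preserves the decoded file name "foo bar.txt".
    Path fromUri = new Path(url.toURI());

    System.out.println("from getPath(): " + fromGetPath);
    System.out.println("from toURI():   " + fromUri);
  }
}
```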
+ */ +@InterfaceAudience.Public +@InterfaceStability.Unstable +public class CannotObtainBlockLengthException extends IOException { + private static final long serialVersionUID = 1L; + + public CannotObtainBlockLengthException() { + super(); + } + + public CannotObtainBlockLengthException(String message){ + super(message); + } + + /** + * Constructs an {@code CannotObtainBlockLengthException} with the + * specified LocatedBlock that failed to obtain block length. + * + * @param locatedBlock + * The LocatedBlock instance which block length can not be obtained + */ + public CannotObtainBlockLengthException(LocatedBlock locatedBlock) { + super("Cannot obtain block length for " + locatedBlock); + } + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java index 4d70fee0357..573b860fab1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java @@ -360,7 +360,7 @@ public class DFSInputStream extends FSInputStream return 0; } - throw new IOException("Cannot obtain block length for " + locatedblock); + throw new CannotObtainBlockLengthException(locatedblock); } public long getFileLength() { From d1992ab9b01d6b2881abb2faae3dcab95d6fd996 Mon Sep 17 00:00:00 2001 From: Sunil G Date: Wed, 6 Jun 2018 13:34:55 +0530 Subject: [PATCH 053/113] HADOOP-15514. NoClassDefFoundError for TimelineCollectorManager when starting MiniYARNCluster. Contributed by Rohith Sharma K S. --- .../hadoop-client-minicluster/pom.xml | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/hadoop-client-modules/hadoop-client-minicluster/pom.xml b/hadoop-client-modules/hadoop-client-minicluster/pom.xml index ee3d050ff0e..b9363de5691 100644 --- a/hadoop-client-modules/hadoop-client-minicluster/pom.xml +++ b/hadoop-client-modules/hadoop-client-minicluster/pom.xml @@ -83,13 +83,6 @@ hadoop-minicluster true - - - org.apache.hadoop - hadoop-yarn-server-timelineservice - @@ -149,10 +142,6 @@ org.apache.hadoop hadoop-yarn-common - - org.apache.zookeeper - zookeeper - org.fusesource.leveldbjni leveldbjni-all @@ -668,8 +657,6 @@ commons-logging:commons-logging junit:junit com.google.code.findbugs:jsr305 - - org.apache.hadoop:hadoop-yarn-server-timelineservice log4j:log4j From 30f0582f2b8bdf2e9368e84078f5a7aba033f9ff Mon Sep 17 00:00:00 2001 From: Anu Engineer Date: Wed, 6 Jun 2018 08:39:18 -0700 Subject: [PATCH 054/113] HDDS-107. TestOzoneConfigurationFields is failing. Contributed by LiXin Ge & Mukul Kumar Singh. 
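With HDFS-13511 in place, callers can catch a dedicated exception type instead of string-matching the generic IOException message. A minimal sketch of how client code might use it; the path argument is assumed to be an HDFS file whose last block length is still unknown (for example, a file left open by a crashed writer), and the "try again later" handling is purely illustrative.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.CannotObtainBlockLengthException;

public class BlockLengthRetrySketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path path = new Path(args[0]);  // assumption: an hdfs:// file under recovery

    try (FSDataInputStream in = fs.open(path)) {
      System.out.println("first byte: " + in.read());
    } catch (CannotObtainBlockLengthException e) {
      // The typed exception lets callers back off and retry instead of
      // parsing "Cannot obtain block length for ..." out of an IOException.
      System.err.println("last block length not yet known, retry later: " + e);
    }
  }
}
```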
--- .../java/org/apache/hadoop/hdds/conf/HddsConfServlet.java | 5 ++++- .../main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java | 2 ++ .../apache/hadoop/ozone/TestOzoneConfigurationFields.java | 1 + 3 files changed, 7 insertions(+), 1 deletion(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/HddsConfServlet.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/HddsConfServlet.java index 521408b7016..677b752bd9c 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/HddsConfServlet.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/HddsConfServlet.java @@ -39,6 +39,8 @@ import com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_TAGS_SYSTEM_KEY; + /** * A servlet to print out the running configuration data. */ @@ -154,7 +156,8 @@ public class HddsConfServlet extends HttpServlet { switch (cmd) { case "getOzoneTags": - out.write(gson.toJson(config.get("ozone.tags.system").split(","))); + out.write(gson.toJson(config.get(OZONE_TAGS_SYSTEM_KEY) + .split(","))); break; case "getPropertyByTag": String tags = request.getParameter("tags"); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java index d1377becf96..856d088c792 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java @@ -31,6 +31,8 @@ import org.apache.ratis.util.TimeDuration; @InterfaceAudience.Public @InterfaceStability.Unstable public final class OzoneConfigKeys { + public static final String OZONE_TAGS_SYSTEM_KEY = + "ozone.tags.system"; public static final String DFS_CONTAINER_IPC_PORT = "dfs.container.ipc"; public static final int DFS_CONTAINER_IPC_PORT_DEFAULT = 9859; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java index 533a3b4a697..4898a1b6c27 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java @@ -34,5 +34,6 @@ public class TestOzoneConfigurationFields extends TestConfigurationFieldsBase { KSMConfigKeys.class}; errorIfMissingConfigProps = true; errorIfMissingXmlProps = true; + xmlPropsToSkipCompare.add("hadoop.tags.custom"); } } \ No newline at end of file From d901be679554eb6b323f3bc6e8de267d85dd2e06 Mon Sep 17 00:00:00 2001 From: Steve Loughran Date: Wed, 6 Jun 2018 18:28:14 +0100 Subject: [PATCH 055/113] HADOOP-15506. Upgrade Azure Storage Sdk version to 7.0.0 and update corresponding code blocks. Contributed by Esfandiar Manii. 
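The servlet change above replaces the hard-coded `"ozone.tags.system"` string with the new `OZONE_TAGS_SYSTEM_KEY` constant. A small sketch of reading the same property through the constant; it assumes the key is populated (for example via `ozone-site.xml`), and the empty-string default is only there so the sketch runs without one.

```java
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.OzoneConfigKeys;

public class OzoneTagsSketch {
  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();

    // Same lookup the servlet performs, but through the shared constant so a
    // typo in the key fails at compile time rather than as a missing tag list.
    String raw = conf.get(OzoneConfigKeys.OZONE_TAGS_SYSTEM_KEY, "");
    if (!raw.isEmpty()) {
      for (String tag : raw.split(",")) {
        System.out.println(tag.trim());
      }
    }
  }
}
```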
--- hadoop-project/pom.xml | 2 +- .../hadoop/fs/azure/NativeAzureFileSystemHelper.java | 4 +++- .../org/apache/hadoop/fs/azure/ITestContainerChecks.java | 9 ++++++--- 3 files changed, 10 insertions(+), 5 deletions(-) diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index 12897a7f3e1..8edfd76eb05 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -1333,7 +1333,7 @@ com.microsoft.azure azure-storage - 5.4.0 + 7.0.0 diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemHelper.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemHelper.java index 754f3431426..e4ad70cedb2 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemHelper.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemHelper.java @@ -87,8 +87,10 @@ final class NativeAzureFileSystemHelper { if (errorCode != null && (errorCode.equals(StorageErrorCodeStrings.BLOB_NOT_FOUND) || errorCode.equals(StorageErrorCodeStrings.RESOURCE_NOT_FOUND) + || errorCode.equals(StorageErrorCodeStrings.CONTAINER_NOT_FOUND) || errorCode.equals(StorageErrorCode.BLOB_NOT_FOUND.toString()) - || errorCode.equals(StorageErrorCode.RESOURCE_NOT_FOUND.toString()))) { + || errorCode.equals(StorageErrorCode.RESOURCE_NOT_FOUND.toString()) + || errorCode.equals(StorageErrorCode.CONTAINER_NOT_FOUND.toString()))) { return true; } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestContainerChecks.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestContainerChecks.java index cc3baf501d7..456e4b18bda 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestContainerChecks.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestContainerChecks.java @@ -75,7 +75,7 @@ public class ITestContainerChecks extends AbstractWasbTestWithTimeout { assertTrue("Should've thrown.", false); } catch (FileNotFoundException ex) { assertTrue("Unexpected exception: " + ex, - ex.getMessage().contains("does not exist.")); + ex.getMessage().contains("is not found")); } assertFalse(container.exists()); @@ -115,7 +115,7 @@ public class ITestContainerChecks extends AbstractWasbTestWithTimeout { assertTrue("Should've thrown.", false); } catch (FileNotFoundException ex) { assertTrue("Unexpected exception: " + ex, - ex.getMessage().contains("does not exist.")); + ex.getMessage().contains("is not found")); } assertFalse(container.exists()); @@ -143,7 +143,7 @@ public class ITestContainerChecks extends AbstractWasbTestWithTimeout { assertTrue("Should've thrown.", false); } catch (FileNotFoundException ex) { assertTrue("Unexpected exception: " + ex, - ex.getMessage().contains("does not exist.")); + ex.getMessage().contains("is not found")); } assertFalse(container.exists()); @@ -165,6 +165,9 @@ public class ITestContainerChecks extends AbstractWasbTestWithTimeout { assertFalse(fs.rename(foo, bar)); assertFalse(container.exists()); + // Create a container outside of the WASB FileSystem + container.create(); + // But a write should. assertTrue(fs.createNewFile(foo)); assertTrue(container.exists()); From db81f85e844b53595f40e5699776a336faad2f84 Mon Sep 17 00:00:00 2001 From: Inigo Goiri Date: Wed, 6 Jun 2018 11:44:17 -0700 Subject: [PATCH 056/113] HADOOP-15513. Add additional test cases to cover some corner cases for FileUtil#symlink. Contributed by Giovanni Matteo Fumarola. 
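The helper change above broadens the "not found" detection to cover the container-level error code surfaced by the 7.0.0 SDK. As a rough caller-side sketch of the same pattern (this mirrors, but is not, the patched `NativeAzureFileSystemHelper` method, and it assumes the `azure-storage` 7.0.0 classes are on the classpath):

```java
import com.microsoft.azure.storage.StorageErrorCodeStrings;
import com.microsoft.azure.storage.StorageException;

public final class AzureNotFoundSketch {
  private AzureNotFoundSketch() {
  }

  // Treat missing blobs and missing containers alike: as a FileNotFound-style
  // condition the caller can handle, rather than a hard failure.
  public static boolean isNotFound(StorageException e) {
    String code = e.getErrorCode();
    return StorageErrorCodeStrings.BLOB_NOT_FOUND.equals(code)
        || StorageErrorCodeStrings.RESOURCE_NOT_FOUND.equals(code)
        || StorageErrorCodeStrings.CONTAINER_NOT_FOUND.equals(code);
  }
}
```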
--- .../java/org/apache/hadoop/fs/FileUtil.java | 7 + .../org/apache/hadoop/fs/TestFileUtil.java | 154 ++++++++++++++++++ 2 files changed, 161 insertions(+) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java index 5ef78f2f6a0..ed10f1c2ecf 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java @@ -1033,6 +1033,13 @@ public class FileUtil { * @return 0 on success */ public static int symLink(String target, String linkname) throws IOException{ + + if (target == null || linkname == null) { + LOG.warn("Can not create a symLink with a target = " + target + + " and link =" + linkname); + return 1; + } + // Run the input paths through Java's File so that they are converted to the // native OS form File targetFile = new File( diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java index 7218a1bd221..01fa563020c 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java @@ -968,6 +968,160 @@ public class TestFileUtil { Assert.assertFalse(link.exists()); } + /** + * This test validates the correctness of + * {@link FileUtil#symLink(String, String)} in case of null pointer inputs. + * + * @throws IOException + */ + @Test + public void testSymlinkWithNullInput() throws IOException { + Assert.assertFalse(del.exists()); + del.mkdirs(); + + File file = new File(del, FILE); + File link = new File(del, "_link"); + + // Create the same symbolic link + // The operation should fail and returns 1 + int result = FileUtil.symLink(null, null); + Assert.assertEquals(1, result); + + // Create the same symbolic link + // The operation should fail and returns 1 + result = FileUtil.symLink(file.getAbsolutePath(), null); + Assert.assertEquals(1, result); + + // Create the same symbolic link + // The operation should fail and returns 1 + result = FileUtil.symLink(null, link.getAbsolutePath()); + Assert.assertEquals(1, result); + + file.delete(); + link.delete(); + } + + /** + * This test validates the correctness of + * {@link FileUtil#symLink(String, String)} in case the file already exists. + * + * @throws IOException + */ + @Test + public void testSymlinkFileAlreadyExists() throws IOException { + Assert.assertFalse(del.exists()); + del.mkdirs(); + + File file = new File(del, FILE); + File link = new File(del, "_link"); + + // Create a symbolic link + // The operation should succeed + int result1 = + FileUtil.symLink(file.getAbsolutePath(), link.getAbsolutePath()); + + Assert.assertEquals(0, result1); + + // Create the same symbolic link + // The operation should fail and returns 1 + result1 = FileUtil.symLink(file.getAbsolutePath(), link.getAbsolutePath()); + + Assert.assertEquals(1, result1); + + file.delete(); + link.delete(); + } + + /** + * This test validates the correctness of + * {@link FileUtil#symLink(String, String)} in case the file and the link are + * the same file. 
+ * + * @throws IOException + */ + @Test + public void testSymlinkSameFile() throws IOException { + Assert.assertFalse(del.exists()); + del.mkdirs(); + + File file = new File(del, FILE); + + // Create a symbolic link + // The operation should succeed + int result = + FileUtil.symLink(file.getAbsolutePath(), file.getAbsolutePath()); + + Assert.assertEquals(0, result); + + file.delete(); + } + + /** + * This test validates the correctness of + * {@link FileUtil#symLink(String, String)} in case we want to use a link for + * 2 different files. + * + * @throws IOException + */ + @Test + public void testSymlink2DifferentFile() throws IOException { + Assert.assertFalse(del.exists()); + del.mkdirs(); + File file = new File(del, FILE); + File fileSecond = new File(del, FILE + "_1"); + File link = new File(del, "_link"); + + // Create a symbolic link + // The operation should succeed + int result = + FileUtil.symLink(file.getAbsolutePath(), link.getAbsolutePath()); + + Assert.assertEquals(0, result); + + // The operation should fail and returns 1 + result = + FileUtil.symLink(fileSecond.getAbsolutePath(), link.getAbsolutePath()); + + Assert.assertEquals(1, result); + + file.delete(); + fileSecond.delete(); + link.delete(); + } + + /** + * This test validates the correctness of + * {@link FileUtil#symLink(String, String)} in case we want to use a 2 + * different links for the same file. + * + * @throws IOException + */ + @Test + public void testSymlink2DifferentLinks() throws IOException { + Assert.assertFalse(del.exists()); + del.mkdirs(); + File file = new File(del, FILE); + File link = new File(del, "_link"); + File linkSecond = new File(del, "_link_1"); + + // Create a symbolic link + // The operation should succeed + int result = + FileUtil.symLink(file.getAbsolutePath(), link.getAbsolutePath()); + + Assert.assertEquals(0, result); + + // The operation should succeed + result = + FileUtil.symLink(file.getAbsolutePath(), linkSecond.getAbsolutePath()); + + Assert.assertEquals(0, result); + + file.delete(); + link.delete(); + linkSecond.delete(); + } + private void doUntarAndVerify(File tarFile, File untarDir) throws IOException { if (untarDir.exists() && !FileUtil.fullyDelete(untarDir)) { From 9654dd1f472052c4bb4a48a7412149c2e4859a10 Mon Sep 17 00:00:00 2001 From: Brahma Reddy Battula Date: Thu, 7 Jun 2018 10:16:02 +0530 Subject: [PATCH 057/113] HDFS-12950. [oiv] ls will fail in secure cluster. Contributed by Wei-Chiu Chuang. --- .../OfflineImageViewerPB.java | 3 ++- .../offlineImageViewer/WebImageViewer.java | 17 ++++++++++++++++ .../src/site/markdown/HdfsImageViewer.md | 3 ++- .../TestOfflineImageViewer.java | 20 +++++++++++++++++++ 4 files changed, 41 insertions(+), 2 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java index 0f2ac81d8d5..e4afa994614 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java @@ -72,6 +72,7 @@ public class OfflineImageViewerPB { + " rather than a number of bytes. (false by default)\n" + " * Web: Run a viewer to expose read-only WebHDFS API.\n" + " -addr specifies the address to listen. 
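After HADOOP-15513, `FileUtil.symLink` degrades gracefully: a null target or link name is logged and reported through the `1` return code instead of a NullPointerException, and re-creating an existing link also returns `1`. A short usage sketch that branches on the exit code; the file locations are placeholders under the JVM temp directory.

```java
import java.io.File;
import java.io.IOException;

import org.apache.hadoop.fs.FileUtil;

public class SymLinkSketch {
  public static void main(String[] args) throws IOException {
    File dir = new File(System.getProperty("java.io.tmpdir"), "symlink-sketch");
    dir.mkdirs();

    File target = new File(dir, "data.txt");
    target.createNewFile();
    File link = new File(dir, "data-link");

    // 0 means the platform symlink command succeeded.
    int rc = FileUtil.symLink(target.getAbsolutePath(), link.getAbsolutePath());
    System.out.println("first attempt rc = " + rc);   // expect 0

    // Creating the same link again (or passing null) now returns 1 instead of
    // blowing up, so callers should check the return code.
    rc = FileUtil.symLink(target.getAbsolutePath(), link.getAbsolutePath());
    System.out.println("second attempt rc = " + rc);  // expect 1
  }
}
```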
(localhost:5978 by default)\n" + + " It does not support secure mode nor HTTPS.\n" + " * Delimited (experimental): Generate a text file with all of the elements common\n" + " to both inodes and inodes-under-construction, separated by a\n" + " delimiter. The default delimiter is \\t, though this may be\n" @@ -200,7 +201,7 @@ public class OfflineImageViewerPB { case "WEB": String addr = cmd.getOptionValue("addr", "localhost:5978"); try (WebImageViewer viewer = - new WebImageViewer(NetUtils.createSocketAddr(addr))) { + new WebImageViewer(NetUtils.createSocketAddr(addr), conf)) { viewer.start(inputFile); } break; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/WebImageViewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/WebImageViewer.java index 087972f94cc..a50e828e4a1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/WebImageViewer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/WebImageViewer.java @@ -34,6 +34,9 @@ import io.netty.handler.codec.string.StringEncoder; import io.netty.util.concurrent.GlobalEventExecutor; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; +import org.apache.hadoop.security.UserGroupInformation; import java.io.Closeable; import java.io.IOException; @@ -53,8 +56,12 @@ public class WebImageViewer implements Closeable { private final EventLoopGroup bossGroup; private final EventLoopGroup workerGroup; private final ChannelGroup allChannels; + private final Configuration conf; public WebImageViewer(InetSocketAddress address) { + this(address, new Configuration()); + } + public WebImageViewer(InetSocketAddress address, Configuration conf) { this.address = address; this.bossGroup = new NioEventLoopGroup(); this.workerGroup = new NioEventLoopGroup(); @@ -62,15 +69,25 @@ public class WebImageViewer implements Closeable { this.bootstrap = new ServerBootstrap() .group(bossGroup, workerGroup) .channel(NioServerSocketChannel.class); + this.conf = conf; + UserGroupInformation.setConfiguration(conf); } /** * Start WebImageViewer and wait until the thread is interrupted. * @param fsimage the fsimage to load. * @throws IOException if failed to load the fsimage. + * @throws RuntimeException if security is enabled in configuration. */ public void start(String fsimage) throws IOException { try { + if (UserGroupInformation.isSecurityEnabled()) { + throw new RuntimeException( + "WebImageViewer does not support secure mode. To start in " + + "non-secure mode, pass -D" + + CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION + + "=simple"); + } initServer(fsimage); channel.closeFuture().await(); } catch (InterruptedException e) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsImageViewer.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsImageViewer.md index bd3a797bfa0..6b0c27c8a29 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsImageViewer.md +++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsImageViewer.md @@ -26,7 +26,8 @@ The Offline Image Viewer provides several output processors: 1. Web is the default output processor. It launches a HTTP server that exposes read-only WebHDFS API. Users can investigate the namespace - interactively by using HTTP REST API. 
+ interactively by using HTTP REST API. It does not support secure mode, nor + HTTPS. 2. XML creates an XML document of the fsimage and includes all of the information within the fsimage. The diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java index b8078049be3..c84237cb836 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java @@ -18,6 +18,8 @@ package org.apache.hadoop.hdfs.tools.offlineImageViewer; import com.google.common.collect.ImmutableMap; + +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION; import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS; import static org.apache.hadoop.fs.permission.AclEntryType.GROUP; import static org.apache.hadoop.fs.permission.AclEntryType.OTHER; @@ -100,8 +102,10 @@ import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion; import org.apache.hadoop.hdfs.web.WebHdfsFileSystem; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.test.LambdaTestUtils; import org.apache.log4j.Level; import org.junit.AfterClass; import org.junit.Assert; @@ -583,6 +587,22 @@ public class TestOfflineImageViewer { } } + @Test + public void testWebImageViewerSecureMode() throws Exception { + Configuration conf = new Configuration(); + conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos"); + try (WebImageViewer viewer = + new WebImageViewer( + NetUtils.createSocketAddr("localhost:0"), conf)) { + RuntimeException ex = LambdaTestUtils.intercept(RuntimeException.class, + "WebImageViewer does not support secure mode.", + () -> viewer.start("foo")); + } finally { + conf.set(HADOOP_SECURITY_AUTHENTICATION, "simple"); + UserGroupInformation.setConfiguration(conf); + } + } + @Test public void testPBDelimitedWriter() throws IOException, InterruptedException { testPBDelimitedWriter(""); // Test in memory db. From 58bc34f1e347034af566d6968eb3b3439a91cc74 Mon Sep 17 00:00:00 2001 From: Rohith Sharma K S Date: Thu, 7 Jun 2018 10:25:47 +0530 Subject: [PATCH 058/113] YARN-8399. NodeManager is giving 403 GSS exception post upgrade to 3.1 in secure mode. Contributed by Sunil Govindan. 
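HDFS-12950 makes the oiv Web processor refuse to start when Kerberos is configured, so an embedded caller (or an operator passing `-Dhadoop.security.authentication=simple`) has to ensure simple authentication before launching it. The sketch below mirrors the shape of the new test; the fsimage path is a placeholder, and `start()` blocks until the thread is interrupted.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.hdfs.tools.offlineImageViewer.WebImageViewer;
import org.apache.hadoop.net.NetUtils;

public class OivWebSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // The Web processor only supports simple authentication; anything else
    // now fails fast with a RuntimeException instead of 403s at request time.
    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
        "simple");

    try (WebImageViewer viewer = new WebImageViewer(
        NetUtils.createSocketAddr("localhost:5978"), conf)) {
      // args[0]: placeholder path to an fsimage file; blocks until interrupted.
      viewer.start(args[0]);
    }
  }
}
```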
--- .../containermanager/AuxServices.java | 6 +-- .../containermanager/TestAuxServices.java | 48 +++++++++++++++++++ 2 files changed, 51 insertions(+), 3 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java index 3fe3cfd16ef..77c4dd9a6cb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java @@ -262,7 +262,7 @@ public class AuxServices extends AbstractService } } s = AuxiliaryServiceWithCustomClassLoader.getInstance( - conf, className, dest.toString()); + new Configuration(conf), className, dest.toString()); } LOG.info("The aux service:" + sName + " are using the custom classloader"); @@ -273,7 +273,7 @@ public class AuxServices extends AbstractService if (sClass == null) { throw new RuntimeException("No class defined for " + sName); } - s = ReflectionUtils.newInstance(sClass, conf); + s = ReflectionUtils.newInstance(sClass, new Configuration(conf)); } if (s == null) { throw new RuntimeException("No object created for " + sName); @@ -294,7 +294,7 @@ public class AuxServices extends AbstractService stateStoreFs.mkdirs(storePath, storeDirPerms); s.setRecoveryPath(storePath); } - s.init(conf); + s.init(new Configuration(conf)); } catch (RuntimeException e) { LOG.error("Failed to initialize " + sName, e); throw e; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestAuxServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestAuxServices.java index fcf92b509e0..ca0b32a7521 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestAuxServices.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestAuxServices.java @@ -678,4 +678,52 @@ public class TestAuxServices { super("RecoverableServiceB", "Bsrv"); } } + + static class ConfChangeAuxService extends AuxiliaryService + implements Service { + + ConfChangeAuxService() { + super("ConfChangeAuxService"); + } + + @Override + protected void serviceInit(Configuration conf) throws Exception { + conf.set("dummyConfig", "changedTestValue"); + super.serviceInit(conf); + } + + @Override + public void initializeApplication( + ApplicationInitializationContext initAppContext) { + } + + @Override + public void stopApplication(ApplicationTerminationContext stopAppContext) { + } + + @Override + public ByteBuffer getMetaData() { + return null; + } + } + + @Test + public void testAuxServicesConfChange() { + Configuration conf = new Configuration(); + conf.setStrings(YarnConfiguration.NM_AUX_SERVICES, + new String[]{"ConfChangeAuxService"}); + 
conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, + "ConfChangeAuxService"), ConfChangeAuxService.class, Service.class); + AuxServices aux = new AuxServices(MOCK_AUX_PATH_HANDLER, MOCK_CONTEXT, + MOCK_DEL_SERVICE); + conf.set("dummyConfig", "testValue"); + aux.init(conf); + aux.start(); + for (AuxiliaryService s : aux.getServices()) { + assertEquals(STARTED, s.getServiceState()); + assertEquals(conf.get("dummyConfig"), "testValue"); + } + + aux.stop(); + } } From f494f0b8968a61bf3aa32b7ca0851b8c744aa70f Mon Sep 17 00:00:00 2001 From: Steve Loughran Date: Thu, 7 Jun 2018 15:55:49 +0100 Subject: [PATCH 059/113] HADOOP-15512. Clean up Shell from JDK7 workarounds. Contributed by Zsolt Venczel. --- .../java/org/apache/hadoop/util/Shell.java | 18 ++---------------- 1 file changed, 2 insertions(+), 16 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java index 04b4b4fe394..0b76f0df2a0 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java @@ -22,7 +22,6 @@ import java.io.File; import java.io.FileNotFoundException; import java.io.IOException; import java.io.InputStreamReader; -import java.io.InputStream; import java.io.InterruptedIOException; import java.nio.charset.Charset; import java.util.Arrays; @@ -1018,17 +1017,7 @@ public abstract class Shell { } // close the input stream try { - // JDK 7 tries to automatically drain the input streams for us - // when the process exits, but since close is not synchronized, - // it creates a race if we close the stream first and the same - // fd is recycled. the stream draining thread will attempt to - // drain that fd!! it may block, OOM, or cause bizarre behavior - // see: https://bugs.openjdk.java.net/browse/JDK-8024521 - // issue is fixed in build 7u60 - InputStream stdout = process.getInputStream(); - synchronized (stdout) { - inReader.close(); - } + inReader.close(); } catch (IOException ioe) { LOG.warn("Error while closing the input stream", ioe); } @@ -1037,10 +1026,7 @@ public abstract class Shell { joinThread(errThread); } try { - InputStream stderr = process.getErrorStream(); - synchronized (stderr) { - errReader.close(); - } + errReader.close(); } catch (IOException ioe) { LOG.warn("Error while closing the error stream", ioe); } From e39b113db0f2e4bcf93f873801be770e472601da Mon Sep 17 00:00:00 2001 From: Wei-Chiu Chuang Date: Thu, 7 Jun 2018 08:30:06 -0700 Subject: [PATCH 060/113] HDFS-13659. Add more test coverage for contentSummary for snapshottable path. Contributed by Wei-Chiu Chuang. --- .../TestGetContentSummaryWithSnapshot.java | 26 +++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestGetContentSummaryWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestGetContentSummaryWithSnapshot.java index dc6f584a2b8..1c168188c01 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestGetContentSummaryWithSnapshot.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestGetContentSummaryWithSnapshot.java @@ -81,6 +81,9 @@ public class TestGetContentSummaryWithSnapshot { * 3. 
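The YARN-8399 fix hands each auxiliary service `new Configuration(conf)` so that a service mutating its configuration in `serviceInit` (as `ConfChangeAuxService` does above) cannot leak the change back into the NodeManager's shared configuration; in a secure cluster that leakage surfaced as 403/GSS failures. A minimal sketch of the copy-constructor behaviour, independent of YARN:

```java
import org.apache.hadoop.conf.Configuration;

public class ConfCopySketch {
  public static void main(String[] args) {
    Configuration shared = new Configuration(false); // skip default resources
    shared.set("dummyConfig", "testValue");

    // The copy constructor snapshots the properties; later writes to the copy
    // do not propagate back to the shared instance the caller keeps using.
    Configuration perService = new Configuration(shared);
    perService.set("dummyConfig", "changedTestValue");

    System.out.println(shared.get("dummyConfig"));     // testValue
    System.out.println(perService.get("dummyConfig")); // changedTestValue
  }
}
```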
create a 10 byte file /foo/bar/baz * Make sure for "/foo/bar" and "/foo/.snapshot/s1/bar" have correct results: * the 1 byte file is not included in snapshot s1. + * 4. create another snapshot, append to the file /foo/bar/baz, + * and make sure file count, directory count and file length is good. + * 5. delete the file, ensure contentSummary output too. */ @Test public void testGetContentSummary() throws IOException { @@ -118,6 +121,29 @@ public class TestGetContentSummaryWithSnapshot { Assert.assertEquals(0, summary.getFileCount()); Assert.assertEquals(0, summary.getLength()); + // create a new snapshot s2 and update the file + dfs.createSnapshot(foo, "s2"); + DFSTestUtil.appendFile(dfs, baz, 10); + summary = cluster.getNameNodeRpc().getContentSummary( + bar.toString()); + Assert.assertEquals(1, summary.getDirectoryCount()); + Assert.assertEquals(1, summary.getFileCount()); + Assert.assertEquals(20, summary.getLength()); + + final Path fooS2 = SnapshotTestHelper.getSnapshotRoot(foo, "s2"); + summary = cluster.getNameNodeRpc().getContentSummary(fooS2.toString()); + Assert.assertEquals(2, summary.getDirectoryCount()); + Assert.assertEquals(1, summary.getFileCount()); + Assert.assertEquals(10, summary.getLength()); + + cluster.getNameNodeRpc().delete(baz.toString(), false); + + summary = cluster.getNameNodeRpc().getContentSummary( + foo.toString()); + Assert.assertEquals(0, summary.getSnapshotDirectoryCount()); + Assert.assertEquals(1, summary.getSnapshotFileCount()); + Assert.assertEquals(20, summary.getSnapshotLength()); + final Path bazS1 = SnapshotTestHelper.getSnapshotPath(foo, "s1", "bar/baz"); try { cluster.getNameNodeRpc().getContentSummary(bazS1.toString()); From 377ea1bcdfd523f9353647388881b82699f2df12 Mon Sep 17 00:00:00 2001 From: Rohith Sharma K S Date: Thu, 7 Jun 2018 21:31:15 +0530 Subject: [PATCH 061/113] YARN-8386. App log can not be viewed from Logs tab in secure cluster. Contributed by Sunil Govindan. --- .../main/webapp/app/adapters/yarn-app-log.js | 68 +++++++++++++++++++ .../src/main/webapp/app/adapters/yarn-log.js | 6 -- .../webapp/app/controllers/yarn-app/logs.js | 17 +++-- .../main/webapp/app/models/yarn-app-log.js | 25 +++++++ .../webapp/app/serializers/yarn-app-log.js | 38 +++++++++++ .../src/main/webapp/app/utils/converter.js | 10 +++ .../tests/unit/adapters/yarn-app-log-test.js | 30 ++++++++ 7 files changed, 181 insertions(+), 13 deletions(-) create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-app-log.js create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app-log.js create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app-log.js create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/adapters/yarn-app-log-test.js diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-app-log.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-app-log.js new file mode 100644 index 00000000000..318b27319cf --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-app-log.js @@ -0,0 +1,68 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
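The new assertions above exercise the snapshot-specific counters on `ContentSummary`. A short sketch of reading them from client code; it assumes `args[0]` names a snapshottable directory on a running HDFS cluster (allowing snapshots is an admin step and is omitted here).

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SnapshotSummarySketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path path = new Path(args[0]);  // assumption: a snapshottable directory

    ContentSummary cs = fs.getContentSummary(path);
    // Counts for the current view of the directory.
    System.out.println("files: " + cs.getFileCount()
        + ", dirs: " + cs.getDirectoryCount()
        + ", length: " + cs.getLength());
    // Data retained only because an existing snapshot still references it.
    System.out.println("snapshot files: " + cs.getSnapshotFileCount()
        + ", snapshot length: " + cs.getSnapshotLength());
  }
}
```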
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import DS from 'ember-data'; +import Ember from 'ember'; +import Converter from 'yarn-ui/utils/converter'; +import RESTAbstractAdapter from './restabstract'; + +/** + * REST URL's response when fetching container logs will be + * in plain text format and not JSON. + */ +export default RESTAbstractAdapter.extend({ + address: "timelineV1WebAddress", + restNameSpace: "timeline", + serverName: "ATS", + + headers: { + Accept: 'text/plain' + }, + + urlForFindRecord(id/*, modelName, snapshot*/) { + var splits = Converter.splitForAppLogs(id); + var containerId = splits[0]; + var logFile = splits[1]; + var url = this._buildURL(); + url = url + '/containers/' + containerId + '/logs/' + logFile; + console.log('log url' + url); + return url; + }, + + /** + * Override options so that result is not expected to be JSON + */ + ajaxOptions: function (url, type, options) { + var hash = options || {}; + hash.url = url; + hash.type = type; + // Make sure jQuery does not try to convert response to JSON. + hash.dataType = 'text'; + hash.context = this; + + var headers = Ember.get(this, 'headers'); + if (headers !== undefined) { + hash.beforeSend = function (xhr) { + Object.keys(headers).forEach(function (key) { + return xhr.setRequestHeader(key, headers[key]); + }); + }; + } + return hash; + } +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-log.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-log.js index 979ec79155f..58cbea2a8bf 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-log.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-log.js @@ -29,11 +29,5 @@ export default AbstractAdapter.extend({ var containerId = query['containerId']; delete query.containerId; return url + '/containers/' + containerId + '/logs'; - }, - - fetchLogFileContent(containerId, logFile) { - var url = this._buildURL(); - url = url + '/containers/' + containerId + '/logs/' + logFile; - return Ember.$.ajax({url: url, type: 'GET', dataType: 'text'}); } }); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/logs.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/logs.js index 3b75f7696a0..5a6e25ec539 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/logs.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/logs.js @@ -17,6 +17,7 @@ */ import Ember from 'ember'; +import Constants from 'yarn-ui/constants'; export default Ember.Controller.extend({ queryParams: ["service"], @@ -118,16 +119,17 @@ export default Ember.Controller.extend({ if (logFile) { this.set("_isLoadingBottomPanel", true); this.set("selectedLogFileName", logFile); - this.fetchContentForLogFile(this.get("selectedContainerId"), logFile) + var 
id = this.get("selectedContainerId") + Constants.PARAM_SEPARATOR + logFile; + this.fetchContentForLogFile(id) .then( - content => { - this.set("selectedLogFileContent", content.trim()); + hash => { + this.set("selectedLogFileContent", hash.logs.get('logs').trim()); }, () => { this.set("selectedLogFileContent", ""); } ) - .always(() => { + .then(() => { this.set("_isLoadingBottomPanel", false); }); } else { @@ -224,9 +226,10 @@ export default Ember.Controller.extend({ }); }, - fetchContentForLogFile(containerId, logFile) { - let logAdapter = this.store.adapterFor("yarn-log"); - return logAdapter.fetchLogFileContent(containerId, logFile); + fetchContentForLogFile(id) { + return Ember.RSVP.hash({ + logs: this.store.findRecord('yarn-app-log', id) + }); }, resetAfterRefresh() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app-log.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app-log.js new file mode 100644 index 00000000000..31cf61ecbcd --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app-log.js @@ -0,0 +1,25 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import DS from 'ember-data'; + +export default DS.Model.extend({ + logs: DS.attr('string'), + containerID: DS.attr('string'), + logFileName: DS.attr('string') +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app-log.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app-log.js new file mode 100644 index 00000000000..4bfc5b470b4 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app-log.js @@ -0,0 +1,38 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import DS from 'ember-data'; +import Converter from 'yarn-ui/utils/converter'; + +export default DS.JSONAPISerializer.extend({ + normalizeSingleResponse(store, primaryModelClass, payload, id/*, requestType*/) { + // Convert plain text response into JSON. + // ID is of the form containerId!fileName + var splits = Converter.splitForAppLogs(id); + var convertedPayload = { + id: id, + type: primaryModelClass.modelName, + attributes: { + logs: payload, + containerID: splits[1], + logFileName: splits[2] + } + }; + return { data: convertedPayload }; + }, +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/converter.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/converter.js index d7de4e7dca1..d5acaa22eb1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/converter.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/converter.js @@ -112,6 +112,16 @@ export default { return [splits[0], splits[1], fileName]; } }, + splitForAppLogs: function(id) { + if (id) { + var splits = id.split(Constants.PARAM_SEPARATOR); + var splitLen = splits.length; + if (splitLen < 2) { + return null; + } + return [splits[0], splits[1]]; + } + }, memoryToSimpliedUnit: function(mb) { var unit = "MB"; var value = mb; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/adapters/yarn-app-log-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/adapters/yarn-app-log-test.js new file mode 100644 index 00000000000..83b3c59e4c4 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/adapters/yarn-app-log-test.js @@ -0,0 +1,30 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import { moduleFor, test } from 'ember-qunit'; + +moduleFor('adapter:yarn-app-log', 'Unit | Adapter | yarn app log', { + // Specify the other units that are required for this test. + // needs: ['serializer:foo'] +}); + +// Replace this with your real tests. +test('it exists', function(assert) { + let adapter = this.subject(); + assert.ok(adapter); +}); From b79ae5d93d71f14a052ce4e54d0215a74a4b4266 Mon Sep 17 00:00:00 2001 From: James Clampffer Date: Thu, 7 Jun 2018 14:16:17 -0400 Subject: [PATCH 062/113] HDFS-13534. libhdfs++: Fix GCC7 build. Contributed by James Clampffer. 
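The Ember adapter above fetches a single log file as plain text rather than JSON. For readers who want to hit the same kind of endpoint outside the UI, a rough Java sketch follows; the host, port, container id and file name are all placeholders, and the exact URL prefix depends on how the timeline/application-history web address is configured in the cluster.

```java
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class ContainerLogFetchSketch {
  public static void main(String[] args) throws Exception {
    // Placeholders: timeline web address, container id and log file name.
    String base = "http://timeline-host:8188/ws/v1/applicationhistory";
    String containerId = "container_1317928501754_0001_01_000001";
    String logFile = "syslog";

    URL url = new URL(base + "/containers/" + containerId + "/logs/" + logFile);
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestProperty("Accept", "text/plain"); // same header the adapter sends

    try (BufferedReader reader = new BufferedReader(
        new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
      String line;
      while ((line = reader.readLine()) != null) {
        System.out.println(line);
      }
    } finally {
      conn.disconnect();
    }
  }
}
```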
--- .../src/main/native/libhdfspp/include/hdfspp/ioservice.h | 1 + .../src/main/native/libhdfspp/lib/reader/datatransfer.h | 2 +- .../src/main/native/libhdfspp/tests/mock_connection.h | 2 +- .../src/main/native/libhdfspp/tests/remote_block_reader_test.cc | 2 +- 4 files changed, 4 insertions(+), 3 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/ioservice.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/ioservice.h index 9805bad1c0f..a6ec97ad491 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/ioservice.h +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/ioservice.h @@ -58,6 +58,7 @@ #ifndef INCLUDE_HDFSPP_IOSERVICE_H_ #define INCLUDE_HDFSPP_IOSERVICE_H_ +#include #include // forward decl diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/reader/datatransfer.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/reader/datatransfer.h index 2b36f590b4a..ea176532f23 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/reader/datatransfer.h +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/reader/datatransfer.h @@ -59,7 +59,7 @@ public: void Connect(std::function dn)> handler) override {(void)handler; /*TODO: Handshaking goes here*/}; - void Cancel(); + void Cancel() override; private: DataTransferSaslStream(const DataTransferSaslStream &) = delete; DataTransferSaslStream &operator=(const DataTransferSaslStream &) = delete; diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/mock_connection.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/mock_connection.h index de234efd8d8..82db7604213 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/mock_connection.h +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/mock_connection.h @@ -87,7 +87,7 @@ public: virtual void cancel() {} virtual void close() {} protected: - virtual ProducerResult Produce() = 0; + ProducerResult Produce() override = 0; ::asio::io_service *io_service_; private: diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/remote_block_reader_test.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/remote_block_reader_test.cc index 4b909b23942..3997e64be5c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/remote_block_reader_test.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/remote_block_reader_test.cc @@ -82,7 +82,7 @@ public: this->MockConnectionBase::async_write_some(buf, handler); } - void Cancel() { + void Cancel() override { /* no-op, declared pure virtual */ } }; From c4bbcd5634d9e32183ae20adbeafca258568bedd Mon Sep 17 00:00:00 2001 From: Bharat Viswanadham Date: Thu, 7 Jun 2018 12:47:58 -0700 Subject: [PATCH 063/113] HDDS-119:Skip Apache license header check for some ozone doc scripts. 
Contributed by Ajay Kumar --- .../src/main/resources/checkstyle/suppressions.xml | 1 + hadoop-ozone/pom.xml | 10 ++++++++++ 2 files changed, 11 insertions(+) diff --git a/hadoop-build-tools/src/main/resources/checkstyle/suppressions.xml b/hadoop-build-tools/src/main/resources/checkstyle/suppressions.xml index ccc89c8bf0f..084384d689c 100644 --- a/hadoop-build-tools/src/main/resources/checkstyle/suppressions.xml +++ b/hadoop-build-tools/src/main/resources/checkstyle/suppressions.xml @@ -18,4 +18,5 @@ + diff --git a/hadoop-ozone/pom.xml b/hadoop-ozone/pom.xml index f605da2ac92..5d57e10de78 100644 --- a/hadoop-ozone/pom.xml +++ b/hadoop-ozone/pom.xml @@ -155,6 +155,16 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> webapps/static/nvd3-1.8.5.min.js.map webapps/static/angular-1.6.4.min.js webapps/static/d3-3.5.17.min.js + static/OzoneOverview.svg + themes/ozonedoc/static/js/jquery.min.js + themes/ozonedoc/static/js/bootstrap.min.js + themes/ozonedoc/static/css/bootstrap.min.css + themes/ozonedoc/static/css/bootstrap.min.css.map + themes/ozonedoc/static/css/bootstrap-theme.min.css + themes/ozonedoc/static/css/bootstrap-theme.min.css.map + themes/ozonedoc/static/fonts/glyphicons-halflings-regular.svg + themes/ozonedoc/layouts/index.html + themes/ozonedoc/theme.toml From 7969cc4667343e5d3d738de66483e63af1955bbc Mon Sep 17 00:00:00 2001 From: James Clampffer Date: Thu, 7 Jun 2018 16:02:57 -0400 Subject: [PATCH 064/113] HDFS-13615. libhdfs++ SaslProtocol hanging while accessing invalid lock. Contributed by Mitchell Tracy --- .../native/libhdfspp/lib/rpc/sasl_protocol.cc | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/sasl_protocol.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/sasl_protocol.cc index 0957ea377c9..6fc04f754f6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/sasl_protocol.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/sasl_protocol.cc @@ -91,8 +91,10 @@ void SaslProtocol::Authenticate(std::function resp_msg = std::make_shared(); auto self(shared_from_this()); connection->AsyncRpc_locked(SASL_METHOD_NAME, req_msg.get(), resp_msg, - [self, req_msg, resp_msg] (const Status & status) { - self->OnServerResponse(status, resp_msg.get()); } ); + [self, req_msg, resp_msg, connection] (const Status & status) { + assert(connection); + self->OnServerResponse(status, resp_msg.get()); + }); } // authenticate() method AuthInfo::AuthMethod ParseMethod(const std::string & method) @@ -340,9 +342,10 @@ bool SaslProtocol::SendSaslMessage(RpcSaslProto & message) std::shared_ptr resp_msg = std::make_shared(); auto self(shared_from_this()); connection->AsyncRpc(SASL_METHOD_NAME, &message, resp_msg, - [self, resp_msg] (const Status & status) { - self->OnServerResponse(status, resp_msg.get()); - } ); + [self, resp_msg, connection] (const Status & status) { + assert(connection); + self->OnServerResponse(status, resp_msg.get()); + }); return true; } // SendSaslMessage() method @@ -370,7 +373,9 @@ bool SaslProtocol::AuthComplete(const Status & status, const AuthInfo & auth_inf void SaslProtocol::OnServerResponse(const Status & status, const hadoop::common::RpcSaslProto * response) { + std::lock_guard state_lock(sasl_state_lock_); + LOG_TRACE(kRPC, << "Received SASL response: " << status.ToString()); if (status.ok()) { From 12be8bad7debd67c9ea72b979a39c8cf42c5f37d Mon Sep 17 
00:00:00 2001 From: Inigo Goiri Date: Thu, 7 Jun 2018 13:34:52 -0700 Subject: [PATCH 065/113] HADOOP-15516. Add test cases to cover FileUtil#readLink. Contributed by Giovanni Matteo Fumarola. --- .../java/org/apache/hadoop/fs/FileUtil.java | 6 +++ .../org/apache/hadoop/fs/TestFileUtil.java | 52 +++++++++++++++++++ 2 files changed, 58 insertions(+) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java index ed10f1c2ecf..df89598e3c1 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java @@ -198,6 +198,12 @@ public class FileUtil { * use getCanonicalPath in File to get the target of the symlink but that * does not indicate if the given path refers to a symlink. */ + + if (f == null) { + LOG.warn("Can not read a null symLink"); + return ""; + } + try { return Shell.execCommand( Shell.getReadlinkCommand(f.toString())).trim(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java index 01fa563020c..f5571038a6b 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java @@ -1441,4 +1441,56 @@ public class TestFileUtil { } } + /** + * This test validates the correctness of {@link FileUtil#readLink(File)} in + * case of null pointer inputs. + */ + @Test + public void testReadSymlinkWithNullInput() { + String result = FileUtil.readLink(null); + Assert.assertEquals("", result); + } + + /** + * This test validates the correctness of {@link FileUtil#readLink(File)}. + * + * @throws IOException + */ + @Test + public void testReadSymlink() throws IOException { + Assert.assertFalse(del.exists()); + del.mkdirs(); + + File file = new File(del, FILE); + File link = new File(del, "_link"); + + // Create a symbolic link + FileUtil.symLink(file.getAbsolutePath(), link.getAbsolutePath()); + + String result = FileUtil.readLink(link); + Assert.assertEquals(file.getAbsolutePath(), result); + + file.delete(); + link.delete(); + } + + /** + * This test validates the correctness of {@link FileUtil#readLink(File)} when + * it gets a file in input. + * + * @throws IOException + */ + @Test + public void testReadSymlinkWithAFileAsInput() throws IOException { + Assert.assertFalse(del.exists()); + del.mkdirs(); + + File file = new File(del, FILE); + + String result = FileUtil.readLink(file); + Assert.assertEquals("", result); + + file.delete(); + } + } From ba303b1f890ccd4deb806cb030e26a77e316ebe4 Mon Sep 17 00:00:00 2001 From: Arpit Agarwal Date: Thu, 7 Jun 2018 14:10:52 -0700 Subject: [PATCH 066/113] HDDS-147. Update Ozone site docs. Contributed by Arpit Agarwal. 
--- hadoop-ozone/docs/content/CommandShell.md | 141 +++---- hadoop-ozone/docs/content/GettingStarted.md | 387 ++++++++++---------- 2 files changed, 271 insertions(+), 257 deletions(-) diff --git a/hadoop-ozone/docs/content/CommandShell.md b/hadoop-ozone/docs/content/CommandShell.md index d8a733a86bd..95820e99be1 100644 --- a/hadoop-ozone/docs/content/CommandShell.md +++ b/hadoop-ozone/docs/content/CommandShell.md @@ -15,139 +15,144 @@ menu: main See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. --> -Ozone Command Shell -=================== +# Ozone Command Shell -Ozone command shell gives a command shell interface to work against ozone. + +Ozone command shell gives a command shell interface to work against Ozone. Please note that this document assumes that cluster is deployed with simple authentication. The Ozone commands take the following format. +``` +ozone oz --command_ /volume/bucket/key -user [-root] +``` -* `ozone oz --command_ http://hostname:port/volume/bucket/key -user - -root` - -The *port* specified in command should match the port mentioned in the config +The `port` specified in command should match the port mentioned in the config property `hdds.rest.http-address`. This property can be set in `ozone-site.xml`. The default value for the port is `9880` and is used in below commands. -The *-root* option is a command line short cut that allows *ozone oz* +The `-root` option is a command line short cut that allows *ozone oz* commands to be run as the user that started the cluster. This is useful to indicate that you want the commands to be run as some admin user. The only reason for this option is that it makes the life of a lazy developer more easier. -Ozone Volume Commands --------------------- +## Volume Commands + The volume commands allow users to create, delete and list the volumes in the ozone cluster. ### Create Volume - -Volumes can be created only by Admins. Here is an example of creating a volume. - -* `ozone oz -createVolume http://localhost:9880/hive -user bilbo -quota -100TB -root` - +Volumes can be created only by administrators. Here is an example of creating a volume. +``` +ozone oz -createVolume hive -user bilbo -quota 100TB -root +``` The above command creates a volume called `hive` owned by user `bilbo`. The `-root` option allows the command to be executed as user `hdfs` which is an admin in the cluster. ### Update Volume - Updates information like ownership and quota on an existing volume. - -* `ozone oz -updateVolume http://localhost:9880/hive -quota 500TB -root` +``` +ozone oz -updateVolume hive -quota 500TB -root +``` The above command changes the volume quota of hive from 100TB to 500TB. ### Delete Volume Deletes a Volume if it is empty. - -* `ozone oz -deleteVolume http://localhost:9880/hive -root` - +``` +ozone oz -deleteVolume /hive -root +``` ### Info Volume -Info volume command allows the owner or the administrator of the cluster to read meta-data about a specific volume. - -* `ozone oz -infoVolume http://localhost:9880/hive -root` +Info volume command allows the owner or the administrator of the cluster +to read meta-data about a specific volume. +``` +ozone oz -infoVolume /hive -root +``` ### List Volumes - -List volume command can be used by administrator to list volumes of any user. It can also be used by a user to list volumes owned by him. 
- -* `ozone oz -listVolume http://localhost:9880/ -user bilbo -root` +List volume command can be used by administrator to list volumes of any +user. It can also be used by any user to list their own volumes. +``` +ozone oz -listVolume / -user bilbo +``` The above command lists all volumes owned by user bilbo. -Ozone Bucket Commands --------------------- - -Bucket commands follow a similar pattern as volume commands. However bucket commands are designed to be run by the owner of the volume. -Following examples assume that these commands are run by the owner of the volume or bucket. +## Bucket Commands +Bucket commands follow a similar pattern as volume commands. However bucket +commands are designed to be run by the owner of the volume. +Following examples assume that these commands are run by the owner of the +volume or bucket. ### Create Bucket - Create bucket call allows the owner of a volume to create a bucket. - -* `ozone oz -createBucket http://localhost:9880/hive/january` +``` +ozone oz -createBucket /hive/january +``` This call creates a bucket called `january` in the volume called `hive`. If the volume does not exist, then this call will fail. - ### Update Bucket Updates bucket meta-data, like ACLs. - -* `ozone oz -updateBucket http://localhost:9880/hive/january -addAcl -user:spark:rw` - +``` +ozone oz -updateBucket /hive/january -addAcl user:spark:rw +``` ### Delete Bucket Deletes a bucket if it is empty. - -* `ozone oz -deleteBucket http://localhost:9880/hive/january` +``` +ozone oz -deleteBucket /hive/january +``` ### Info Bucket Returns information about a given bucket. - -* `ozone oz -infoBucket http://localhost:9880/hive/january` +``` +ozone oz -infoBucket /hive/january +``` ### List Buckets -List buckets on a given volume. +List buckets in a given volume. +``` +ozone oz -listBucket /hive +``` -* `ozone oz -listBucket http://localhost:9880/hive` +## Ozone Key Commands -Ozone Key Commands ------------------- - -Ozone key commands allows users to put, delete and get keys from ozone buckets. +Ozone key commands allows users to put, delete and get keys from Ozone buckets. ### Put Key -Creates or overwrites a key in ozone store, -file points to the file you want +Creates or overwrites a key in Ozone store, -file points to the file you want to upload. - -* `ozone oz -putKey http://localhost:9880/hive/january/processed.orc -file -processed.orc` +``` +ozone oz -putKey /hive/january/processed.orc -file processed.orc +``` ### Get Key -Downloads a file from the ozone bucket. - -* `ozone oz -getKey http://localhost:9880/hive/january/processed.orc -file - processed.orc.copy` +Downloads a file from the Ozone bucket. +``` +ozone oz -getKey /hive/january/processed.orc -file processed.orc.copy +``` ### Delete Key -Deletes a key from the ozone store. - -* `ozone oz -deleteKey http://localhost:9880/hive/january/processed.orc` +Deletes a key from the Ozone store. +``` +ozone oz -deleteKey /hive/january/processed.orc +``` ### Info Key -Reads key metadata from the ozone store. - -* `ozone oz -infoKey http://localhost:9880/hive/january/processed.orc` +Reads key metadata from the Ozone store. +``` +ozone oz -infoKey /hive/january/processed.orc +``` ### List Keys -List all keys in an ozone bucket. +List all keys in an Ozone bucket. 
+``` +ozone oz -listKey /hive/january +``` -* `ozone oz -listKey http://localhost:9880/hive/january` diff --git a/hadoop-ozone/docs/content/GettingStarted.md b/hadoop-ozone/docs/content/GettingStarted.md index 6b2316ee85d..531d1924126 100644 --- a/hadoop-ozone/docs/content/GettingStarted.md +++ b/hadoop-ozone/docs/content/GettingStarted.md @@ -17,118 +17,144 @@ menu: main limitations under the License. See accompanying LICENSE file. --> -Ozone - Object store for Hadoop -============================== +# Ozone - Object store for Apache Hadoop -Introduction ------------- -Ozone is an object store for Hadoop. It is a redundant, distributed object -store build by leveraging primitives present in HDFS. Ozone supports REST -API for accessing the store. -Getting Started ---------------- -Ozone is a work in progress and currently lives in the hadoop source tree. -The subprojects (ozone/hdds) are part of the hadoop source tree but by default -not compiled and not part of the official releases. To -use it, you have to build a package by yourself and deploy a cluster. +## Introduction + +Ozone is a scalable distributed object store for Hadoop. Ozone supports RPC +and REST APIs for working with Volumes, Buckets and Keys. + +Existing Hadoop applications can use Ozone transparently via a Hadoop Compatible +FileSystem shim. + +### Basic terminology +1. **Volumes** - Volumes are a notion similar to accounts. Volumes can be +created or deleted only by administrators. +1. **Buckets** - A volume can contain zero or more buckets. +1. **Keys** - Keys are unique within a given bucket. + +### Services in a minimal Ozone cluster +1. **Ozone Manager (OM)** - stores Ozone Metadata namely Volumes, +Buckets and Key names. +1. **Storage Container Manager (SCM)** - handles Storage Container lifecycle. +Containers are the unit of replication in Ozone and not exposed to users. +1. **DataNodes** - These are HDFS DataNodes which understand how to store +Ozone Containers. Ozone has been designed to efficiently share storage space +with HDFS blocks. + +## Getting Started + +Ozone is currently work-in-progress and lives in the Hadoop source tree. +The sub-projects (`hadoop-ozone` and `hadoop-hdds`) are part of +the Hadoop source tree but they are not compiled by default and not +part of official Apache Hadoop releases. + +To use Ozone, you have to build a package by yourself and deploy a cluster. ### Building Ozone -To build Ozone, please checkout the hadoop sources from github. Then -checkout the trunk branch and build it. +To build Ozone, please checkout the Hadoop sources from the +[Apache Hadoop git repo](https://git-wip-us.apache.org/repos/asf?p=hadoop.git). +Then checkout the `trunk` branch and build it with the `hdds` profile enabled. -`mvn clean package -DskipTests=true -Dmaven.javadoc.skip=true -Pdist -Phdds -Dtar -DskipShade` +` +git checkout trunk +mvn clean package -DskipTests=true -Dmaven.javadoc.skip=true -Pdist -Phdds -Dtar -DskipShade +` -skipShade is just to make compilation faster and not really required. +`skipShade` is just to make compilation faster and not required. -This will give you a tarball in your distribution directory. This is the -tarball that can be used for deploying your hadoop cluster. Here is an -example of the tarball that will be generated. +This builds a tarball in your distribution directory which can be used to deploy your +Ozone cluster. The tarball path is `hadoop-dist/target/ozone-${project.version}.tar.gz`. 
-* `~/apache/hadoop/hadoop-dist/target/${project.version}.tar.gz` - -At this point we have an option to setup a physical cluster or run ozone via +At this point you can either setup a physical cluster or run Ozone via docker. -Running Ozone via Docker ------------------------- +### Running Ozone via Docker -This assumes that you have a running docker setup on the machine. Please run -these following commands to see ozone in action. +This is the quickest way to bring up an Ozone cluster for development/testing +or if you just want to get a feel for Ozone. It assumes that you have docker installed +on the machine. - Go to the directory where the docker compose files exist. +Go to the directory where the docker compose files exist and tell +`docker-compose` to start Ozone. This will start SCM, OM and a single datanode +in the background. +``` +cd hadoop-dist/target/compose/ozone + +docker-compose up -d +``` + +Now let us run some workload against Ozone. To do that we will run +_freon_, the Ozone load generator after logging into one of the docker +containers for OM, SCM or DataNode. Let's take DataNode for example:. +``` +docker-compose exec datanode bash + +ozone freon -mode offline -validateWrites -numOfVolumes 1 -numOfBuckets 10 -numOfKeys 100 +``` + +You can checkout the OM UI to see the requests information. +``` +http://localhost:9874/ +``` + +If you need more datanodes you can scale up: +``` +docker-compose up --scale datanode=3 -d +``` + +## Running Ozone using a real cluster + +### Configuration + +First initialize Hadoop cluster configuration files like hadoop-env.sh, +core-site.xml, hdfs-site.xml and any other configuration files that are +needed for your cluster. + +#### Update hdfs-site.xml + +The container manager part of Ozone runs inside DataNodes as a pluggable module. +To activate ozone you should define the service plugin implementation class. +**Important**: It should be added to the **hdfs-site.xml** as the plugin should +be activated as part of the normal HDFS Datanode bootstrap. +``` + + dfs.datanode.plugins + org.apache.hadoop.ozone.HddsDatanodeService + +``` - - `cd hadoop-dist/target/compose/ozone` +#### Create ozone-site.xml -Tell docker to start ozone, this will start a KSM, SCM and a single datanode in -the background. +Ozone relies on its own configuration file called `ozone-site.xml`. +The following are the most important settings. - - - `docker-compose up -d` - -Now let us run some work load against ozone, to do that we will run freon. - -This will log into the datanode and run bash. - - - `docker-compose exec datanode bash` - -Now you can run the `ozone` command shell or freon, the ozone load generator. - -This is the command to run freon. - - - `ozone freon -mode offline -validateWrites -numOfVolumes 1 -numOfBuckets 10 -numOfKeys 100` - -You can checkout the KSM UI to see the requests information. - - - `http://localhost:9874/` - -If you need more datanode you can scale up: - - - `docker-compose scale datanode=3` - -Running Ozone using a real cluster ----------------------------------- - -Please proceed to setup a hadoop cluster by creating the hdfs-site.xml and -other configuration files that are needed for your cluster. - - -### Ozone Configuration - -Ozone relies on its own configuration file called `ozone-site.xml`. It is -just for convenience and ease of management -- you can add these settings -to `hdfs-site.xml`, if you don't want to keep ozone settings separate. 
-This document refers to `ozone-site.xml` so that ozone settings are in one -place and not mingled with HDFS settings. - - * _*ozone.enabled*_ This is the most important setting for ozone. + 1. _*ozone.enabled*_ This is the most important setting for ozone. Currently, Ozone is an opt-in subsystem of HDFS. By default, Ozone is disabled. Setting this flag to `true` enables ozone in the HDFS cluster. Here is an example, - -``` + ``` ozone.enabled True -``` - * _*ozone.metadata.dirs*_ Ozone is designed with modern hardware - in mind. It tries to use SSDs effectively. So users can specify where the + ``` + 1. **ozone.metadata.dirs** Administrators can specify where the metadata must reside. Usually you pick your fastest disk (SSD if - you have them on your nodes). KSM, SCM and datanode will write the metadata + you have them on your nodes). OM, SCM and datanode will write the metadata to these disks. This is a required setting, if this is missing Ozone will fail to come up. Here is an example, - -``` + ``` ozone.metadata.dirs /data/disk1/meta -``` + ``` -* _*ozone.scm.names*_ Ozone is build on top of container framework. Storage +1. **ozone.scm.names** Ozone is build on top of container framework. Storage container manager(SCM) is a distributed block service which is used by ozone and other storage services. This property allows datanodes to discover where SCM is, so that @@ -136,129 +162,105 @@ place and not mingled with HDFS settings. and datanodes assume there are multiple instances of SCM which form a highly available ring. The HA feature of SCM is a work in progress. So we configure ozone.scm.names to be a single machine. Here is an example, - -``` + ``` ozone.scm.names scm.hadoop.apache.org -``` + ``` -* _*ozone.scm.datanode.id*_ Each datanode that speaks to SCM generates an ID -just like HDFS. This is an optional setting. Please note: +1. **ozone.scm.datanode.id** Each datanode that speaks to SCM generates an ID +just like HDFS. This is a mandatory setting. Please note: This path will be created by datanodes if it doesn't exist already. Here is an example, - -``` + ``` ozone.scm.datanode.id /data/disk1/scm/meta/node/datanode.id -``` + ``` -* _*ozone.scm.block.client.address*_ Storage Container Manager(SCM) offers a +1. **ozone.scm.block.client.address** Storage Container Manager(SCM) offers a set of services that can be used to build a distributed storage system. One - of the services offered is the block services. KSM and HDFS would use this - service. This property describes where KSM can discover SCM's block service + of the services offered is the block services. OM and HDFS would use this + service. This property describes where OM can discover SCM's block service endpoint. There is corresponding ports etc, but assuming that we are using default ports, the server address is the only required field. Here is an example, - -``` + ``` ozone.scm.block.client.address scm.hadoop.apache.org -``` + ``` -* _*ozone.ksm.address*_ KSM server address. This is used by Ozonehandler and +1. **ozone.ksm.address** OM server address. This is used by OzoneClient and Ozone File System. - -``` + ``` ozone.ksm.address ksm.hadoop.apache.org -``` + ``` -* _*dfs.datanode.plugin*_ Datanode service plugins: the container manager part - of ozone is running inside the datanode as a service plugin. To activate ozone - you should define the service plugin implementation class. 
**Important** - It should be added to the **hdfs-site.xml** as the plugin should be activated - as part of the normal HDFS Datanode bootstrap. - -``` - - dfs.datanode.plugins - org.apache.hadoop.ozone.HddsDatanodeService - -``` - -Here is a quick summary of settings needed by Ozone. +#### Ozone Settings Summary | Setting | Value | Comment | |--------------------------------|------------------------------|------------------------------------------------------------------| | ozone.enabled | True | This enables SCM and containers in HDFS cluster. | | ozone.metadata.dirs | file path | The metadata will be stored here. | | ozone.scm.names | SCM server name | Hostname:port or or IP:port address of SCM. | -| ozone.scm.block.client.address | SCM server name and port | Used by services like KSM | +| ozone.scm.block.client.address | SCM server name and port | Used by services like OM | | ozone.scm.client.address | SCM server name and port | Used by client side | | ozone.scm.datanode.address | SCM server name and port | Used by datanode to talk to SCM | -| ozone.ksm.address | KSM server name | Used by Ozone handler and Ozone file system. | +| ozone.ksm.address | OM server name | Used by Ozone handler and Ozone file system. | - Here is a working example of`ozone-site.xml`. + +#### Sample ozone-site.xml ``` - - - - - ozone.enabled - True - - - - ozone.metadata.dirs - /data/disk1/ozone/meta - - - - ozone.scm.names - 127.0.0.1 - - - - ozone.scm.client.address - 127.0.0.1:9860 - - - - ozone.scm.block.client.address - 127.0.0.1:9863 - - - - ozone.scm.datanode.address - 127.0.0.1:9861 - - - - ozone.ksm.address - 127.0.0.1:9874 - - -``` - -And don't forget to enable the datanode component with adding the -following configuration to the hdfs-site.xml: - -``` - - dfs.datanode.plugins - org.apache.hadoop.ozone.HddsDatanodeService + + + + + ozone.enabled + True + + + ozone.metadata.dirs + /data/disk1/ozone/meta + + + + ozone.scm.names + 127.0.0.1 + + + + ozone.scm.client.address + 127.0.0.1:9860 + + + + ozone.scm.block.client.address + 127.0.0.1:9863 + + + + ozone.scm.datanode.address + 127.0.0.1:9861 + + + + ozone.ksm.address + 127.0.0.1:9874 + + ``` + + ### Starting Ozone Ozone is designed to run concurrently with HDFS. The simplest way to [start @@ -270,35 +272,40 @@ is running, please verify it is fully functional by running some commands like - *./hdfs dfs -ls /* Once you are sure that HDFS is running, start Ozone. To start ozone, you - need to start SCM and KSM. Currently we assume that both KSM and SCM - is running on the same node, this will change in future. + need to start SCM and OM. - The first time you bring up Ozone, SCM must be initialized. +The first time you bring up Ozone, SCM must be initialized. +``` +ozone scm -init +``` - - `./ozone scm -init` +Start SCM. +``` +ozone --daemon start scm +``` - Start SCM. +Once SCM gets started, OM must be initialized. +``` +ozone ksm -createObjectStore +``` - - `./ozone --daemon start scm` +Start OM. +``` +ozone --daemon start ksm +``` - Once SCM gets started, KSM must be initialized. - - - `./ozone ksm -createObjectStore` - - Start KSM. - - - `./ozone --daemon start ksm` - -if you would like to start HDFS and Ozone together, you can do that by running +If you would like to start HDFS and Ozone together, you can do that by running a single command. - - `$HADOOP/sbin/start-ozone.sh` +``` +$HADOOP/sbin/start-ozone.sh +``` - This command will start HDFS and then start the ozone components. +This command will start HDFS and then start the ozone components. 
- Once you have ozone running you can use these ozone [shell](./OzoneCommandShell.html) - commands to create a volume, bucket and keys. +Once you have ozone running you can use these ozone [shell](./OzoneCommandShell.html) +commands to start creating a volume, bucket and keys. -### Diagnosing issues +## Diagnosing issues Ozone tries not to pollute the existing HDFS streams of configuration and logging. So ozone logs are by default configured to be written to a file @@ -337,16 +344,18 @@ Here is the log4j properties that are added by ozone. If you would like to have a single datanode log instead of ozone stuff getting written to ozone.log, please remove this line or set this to true. +``` +log4j.additivity.org.apache.hadoop.ozone=false +``` - ` log4j.additivity.org.apache.hadoop.ozone=false` +On the SCM/OM side, you will be able to see +1. `hadoop-hdfs-ksm-hostname.log` +1. `hadoop-hdfs-scm-hostname.log` -On the SCM/KSM side, you will be able to see - - - `hadoop-hdfs-ksm-hostname.log` - - `hadoop-hdfs-scm-hostname.log` - -Please file any issues you see under the related issues: +## Reporting Bugs +Please file any issues you see under [Apache HDDS Project Jira](https://issues.apache.org/jira/projects/HDDS/issues/). +## References - [Object store in HDFS: HDFS-7240](https://issues.apache.org/jira/browse/HDFS-7240) - [Ozone File System: HDFS-13074](https://issues.apache.org/jira/browse/HDFS-13074) - [Building HDFS on top of new storage layer (HDDS): HDFS-10419](https://issues.apache.org/jira/browse/HDFS-10419) From 78761e87a7f3012ef2d96e294d55b323b76b7c42 Mon Sep 17 00:00:00 2001 From: Anu Engineer Date: Thu, 7 Jun 2018 14:35:22 -0700 Subject: [PATCH 067/113] HDDS-127. Add CloseContainerEventHandler in SCM. Contributed by Shashikant Banerjee. --- .../container/CloseContainerEventHandler.java | 83 ++++++++ .../hdds/scm/container/ContainerMapping.java | 5 + .../scm/container/ContainerStateManager.java | 9 + .../hadoop/hdds/scm/container/Mapping.java | 6 + .../TestCloseContainerEventHandler.java | 177 ++++++++++++++++++ 5 files changed, 280 insertions(+) create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java create mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java new file mode 100644 index 00000000000..bc95b553f64 --- /dev/null +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java @@ -0,0 +1,83 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

<p> + * http://www.apache.org/licenses/LICENSE-2.0 + * <p> + *
Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.hdds.scm.container; + +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; +import org.apache.hadoop.hdds.scm.exceptions.SCMException; +import org.apache.hadoop.hdds.server.events.EventHandler; +import org.apache.hadoop.hdds.server.events.EventPublisher; +import org.apache.hadoop.hdds.server.events.TypedEvent; +import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * In case of a node failure, volume failure, volume out of spapce, node + * out of space etc, CLOSE_CONTAINER_EVENT will be triggered. + * CloseContainerEventHandler is the handler for CLOSE_CONTAINER_EVENT. + * When a close container event is fired, a close command for the container + * should be sent to all the datanodes in the pipeline and containerStateManager + * needs to update the container state to Closing. + */ +public class CloseContainerEventHandler implements EventHandler { + + public static final Logger LOG = + LoggerFactory.getLogger(CloseContainerEventHandler.class); + + public static final TypedEvent CLOSE_CONTAINER_EVENT = + new TypedEvent<>(ContainerID.class); + + private final Mapping containerManager; + + public CloseContainerEventHandler(Mapping containerManager) { + this.containerManager = containerManager; + } + + @Override + public void onMessage(ContainerID containerID, EventPublisher publisher) { + + LOG.info("Close container Event triggered for container : {}", + containerID.getId()); + ContainerStateManager stateManager = containerManager.getStateManager(); + ContainerInfo info = stateManager.getContainer(containerID); + if (info == null) { + LOG.info("Container with id : {} does not exist", containerID.getId()); + return; + } + if (info.getState() == HddsProtos.LifeCycleState.OPEN) { + for (DatanodeDetails datanode : info.getPipeline().getMachines()) { + containerManager.getNodeManager().addDatanodeCommand(datanode.getUuid(), + new CloseContainerCommand(containerID.getId())); + } + try { + // Finalize event will make sure the state of the container transitions + // from OPEN to CLOSING in containerStateManager. 
+ stateManager + .updateContainerState(info, HddsProtos.LifeCycleEvent.FINALIZE); + } catch (SCMException ex) { + LOG.error("Failed to update the container state for container : {}" + + containerID); + } + } else { + LOG.info("container with id : {} is in {} state and need not be closed.", + containerID.getId(), info.getState()); + } + + } +} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java index f5fe46a3a53..b961c38c133 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java @@ -588,6 +588,11 @@ public class ContainerMapping implements Mapping { } } + @Override + public NodeManager getNodeManager() { + return nodeManager; + } + @VisibleForTesting public MetadataStore getContainerStore() { return containerStore; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java index 4895b786f92..9dfa660fd16 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java @@ -445,6 +445,15 @@ public class ContainerStateManager implements Closeable { factor, type); } + /** + * Returns the containerInfo for the given container id. + * @param containerID id of the container + * @return ContainerInfo containerInfo + * @throws IOException + */ + public ContainerInfo getContainer(ContainerID containerID) { + return containers.getContainerInfo(containerID.getId()); + } @Override public void close() throws IOException { } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/Mapping.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/Mapping.java index ee8e344d0df..ab425205fac 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/Mapping.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/Mapping.java @@ -21,6 +21,7 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; +import org.apache.hadoop.hdds.scm.node.NodeManager; import java.io.Closeable; import java.io.IOException; @@ -103,4 +104,9 @@ public interface Mapping extends Closeable { ContainerReportsProto reports) throws IOException; + /** + * Returns the nodeManager. + * @return NodeManager + */ + NodeManager getNodeManager(); } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java new file mode 100644 index 00000000000..09ade3ea630 --- /dev/null +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java @@ -0,0 +1,177 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

<p> + * http://www.apache.org/licenses/LICENSE-2.0 + * <p>
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.hdds.scm.container; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; +import org.apache.hadoop.hdds.server.events.EventQueue; +import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.ozone.container.common.SCMTestUtils; +import org.apache.hadoop.test.GenericTestUtils; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.io.File; +import java.io.IOException; +import java.util.Random; + +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent.CREATE; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent.CREATED; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_GB; + +/** + * Tests the closeContainerEventHandler class. + */ +public class TestCloseContainerEventHandler { + + private static Configuration configuration; + private static MockNodeManager nodeManager; + private static ContainerMapping mapping; + private static long size; + private static File testDir; + private static EventQueue eventQueue; + + @BeforeClass + public static void setUp() throws Exception { + configuration = SCMTestUtils.getConf(); + size = configuration + .getLong(OZONE_SCM_CONTAINER_SIZE_GB, OZONE_SCM_CONTAINER_SIZE_DEFAULT) + * 1024 * 1024 * 1024; + testDir = GenericTestUtils + .getTestDir(TestCloseContainerEventHandler.class.getSimpleName()); + configuration + .set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath()); + nodeManager = new MockNodeManager(true, 10); + mapping = new ContainerMapping(configuration, nodeManager, 128); + eventQueue = new EventQueue(); + eventQueue.addHandler(CloseContainerEventHandler.CLOSE_CONTAINER_EVENT, + new CloseContainerEventHandler(mapping)); + } + + @AfterClass + public static void tearDown() throws Exception { + if (mapping != null) { + mapping.close(); + } + FileUtil.fullyDelete(testDir); + } + + @Test + public void testIfCloseContainerEventHadnlerInvoked() { + GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer + .captureLogs(CloseContainerEventHandler.LOG); + eventQueue.fireEvent(CloseContainerEventHandler.CLOSE_CONTAINER_EVENT, + new ContainerID(Math.abs(new Random().nextLong()))); + eventQueue.processAll(1000); + Assert.assertTrue(logCapturer.getOutput() + .contains("Close container Event triggered for container")); + } + + @Test + public void testCloseContainerEventWithInvalidContainer() { + long id = Math.abs(new Random().nextLong()); + GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer + .captureLogs(CloseContainerEventHandler.LOG); + eventQueue.fireEvent(CloseContainerEventHandler.CLOSE_CONTAINER_EVENT, + new ContainerID(id)); + eventQueue.processAll(1000); + Assert.assertTrue(logCapturer.getOutput() + .contains("Container with id : " + id + " does not exist")); + } + + @Test + public 
void testCloseContainerEventWithValidContainers() throws IOException { + + GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer + .captureLogs(CloseContainerEventHandler.LOG); + ContainerInfo info = mapping + .allocateContainer(HddsProtos.ReplicationType.STAND_ALONE, + HddsProtos.ReplicationFactor.ONE, "ozone"); + ContainerID id = new ContainerID(info.getContainerID()); + DatanodeDetails datanode = info.getPipeline().getLeader(); + int closeCount = nodeManager.getCommandCount(datanode); + eventQueue.fireEvent(CloseContainerEventHandler.CLOSE_CONTAINER_EVENT, id); + eventQueue.processAll(1000); + // At this point of time, the allocated container is not in open + // state, so firing close container event should not queue CLOSE + // command in the Datanode + Assert.assertEquals(0, nodeManager.getCommandCount(datanode)); + // Make sure the information is logged + Assert.assertTrue(logCapturer.getOutput().contains( + "container with id : " + id.getId() + + " is in ALLOCATED state and need not be closed")); + //Execute these state transitions so that we can close the container. + mapping.updateContainerState(id.getId(), CREATE); + mapping.updateContainerState(id.getId(), CREATED); + eventQueue.fireEvent(CloseContainerEventHandler.CLOSE_CONTAINER_EVENT, + new ContainerID(info.getContainerID())); + eventQueue.processAll(1000); + Assert.assertEquals(closeCount + 1, nodeManager.getCommandCount(datanode)); + Assert.assertEquals(HddsProtos.LifeCycleState.CLOSING, + mapping.getStateManager().getContainer(id).getState()); + } + + @Test + public void testCloseContainerEventWithRatis() throws IOException { + + GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer + .captureLogs(CloseContainerEventHandler.LOG); + ContainerInfo info = mapping + .allocateContainer(HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.THREE, "ozone"); + ContainerID id = new ContainerID(info.getContainerID()); + int[] closeCount = new int[3]; + eventQueue.fireEvent(CloseContainerEventHandler.CLOSE_CONTAINER_EVENT, id); + eventQueue.processAll(1000); + int i = 0; + for (DatanodeDetails details : info.getPipeline().getMachines()) { + closeCount[i] = nodeManager.getCommandCount(details); + i++; + } + i = 0; + for (DatanodeDetails details : info.getPipeline().getMachines()) { + Assert.assertEquals(closeCount[i], nodeManager.getCommandCount(details)); + i++; + } + // Make sure the information is logged + Assert.assertTrue(logCapturer.getOutput().contains( + "container with id : " + id.getId() + + " is in ALLOCATED state and need not be closed")); + //Execute these state transitions so that we can close the container. + mapping.updateContainerState(id.getId(), CREATE); + mapping.updateContainerState(id.getId(), CREATED); + eventQueue.fireEvent(CloseContainerEventHandler.CLOSE_CONTAINER_EVENT, + new ContainerID(info.getContainerID())); + eventQueue.processAll(1000); + i = 0; + // Make sure close is queued for each datanode on the pipeline + for (DatanodeDetails details : info.getPipeline().getMachines()) { + Assert.assertEquals(closeCount[i] + 1, + nodeManager.getCommandCount(details)); + Assert.assertEquals(HddsProtos.LifeCycleState.CLOSING, + mapping.getStateManager().getContainer(id).getState()); + i++; + } + } +} From 67fc70e09f941e9b43b022d9f42a9486ad759e6e Mon Sep 17 00:00:00 2001 From: Inigo Goiri Date: Thu, 7 Jun 2018 16:10:15 -0700 Subject: [PATCH 068/113] YARN-8400. Fix typos in YARN Federation documentation page. Contributed by Giovanni Matteo Fumarola. 
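Note on the `yarn.federation.machine-list` property documented in the Federation page edited below: the table describes the file as one node per line in the form `node, sub-cluster, rack`. As a rough illustration of consuming that format (this is a sketch only, not the actual `DefaultSubClusterResolverImpl`; the class and method names are invented for the example):

```java
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.HashMap;
import java.util.Map;

/** Illustrative only: parse a federation machine-list file. */
public class MachineListSketch {

  /** Returns a map from node name to sub-cluster id, e.g. "node1" -> "subcluster1". */
  public static Map<String, String> parse(String path) throws IOException {
    Map<String, String> nodeToSubCluster = new HashMap<>();
    for (String line : Files.readAllLines(Paths.get(path))) {
      if (line.trim().isEmpty()) {
        continue;                       // skip blank lines
      }
      String[] parts = line.split(","); // "node, sub-cluster, rack"
      if (parts.length >= 2) {
        nodeToSubCluster.put(parts[0].trim(), parts[1].trim());
      }
    }
    return nodeToSubCluster;
  }

  public static void main(String[] args) throws IOException {
    System.out.println(parse(args[0]));
  }
}
```

A file containing the four example rows from the table would map node1 through node4 to their sub-clusters; rack information is ignored in this sketch.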
--- .../src/site/markdown/Federation.md | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/Federation.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/Federation.md index 087a5b01268..953f8262ebf 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/Federation.md +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/Federation.md @@ -42,7 +42,7 @@ The applications running in this federated environment see a unified large YARN ![YARN Federation Architecture | width=800](./images/federation_architecture.png) ###YARN Sub-cluster -A sub-cluster is a YARN cluster with up to few thousands nodes. The exact size of the sub-cluster will be determined considering ease of deployment/maintenance, alignment +A sub-cluster is a YARN cluster with up to a few thousand nodes. The exact size of the sub-cluster will be determined considering ease of deployment/maintenance, alignment with network or availability zones and general best practices. The sub-cluster YARN RM will run with work-preserving high-availability turned-on, i.e., we should be able to tolerate YARN RM, NM failures with minimal disruption. @@ -80,7 +80,7 @@ to minimize overhead on the scheduling infrastructure (more in section on scalab ###Global Policy Generator Global Policy Generator overlooks the entire federation and ensures that the system is configured and tuned properly all the time. -A key design point is that the cluster availability does not depends on an always-on GPG. The GPG operates continuously but out-of-band from all cluster operations, +A key design point is that the cluster availability does not depend on an always-on GPG. The GPG operates continuously but out-of-band from all cluster operations, and provide us with a unique vantage point, that allows to enforce global invariants, affect load balancing, trigger draining of sub-clusters that will undergo maintenance, etc. More precisely the GPG will update user capacity allocation-to-subcluster mappings, and more rarely change the policies that run in Routers, AMRMProxy (and possible RMs). @@ -111,7 +111,7 @@ on the home sub-cluster. Only in certain cases it should need to ask for resourc The federation Policy Store is a logically separate store (while it might be backed by the same physical component), which contains information about how applications and resource requests are routed to different sub-clusters. The current implementation provides -several policies, ranging from random/hashing/roundrobin/priority to more sophisticated +several policies, ranging from random/hashing/round-robin/priority to more sophisticated ones which account for sub-cluster load, and request locality needs. @@ -218,7 +218,7 @@ SQL-Server scripts are located in **sbin/FederationStateStore/SQLServer/**. |`yarn.federation.policy-manager` | `org.apache.hadoop.yarn.server.federation.policies.manager.WeightedLocalityPolicyManager` | The choice of policy manager determines how Applications and ResourceRequests are routed through the system. | |`yarn.federation.policy-manager-params` | `` | The payload that configures the policy. In our example a set of weights for router and amrmproxy policies. This is typically generated by serializing a policymanager that has been configured programmatically, or by populating the state-store with the .json serialized form of it. 
| |`yarn.federation.subcluster-resolver.class` | `org.apache.hadoop.yarn.server.federation.resolver.DefaultSubClusterResolverImpl` | The class used to resolve which subcluster a node belongs to, and which subcluster(s) a rack belongs to. | -|`yarn.federation.machine-list` | `` | Path of machine-list file used by `SubClusterResolver`. Each line of the file is a node with sub-cluster and rack information. Below is the example:

node1, subcluster1, rack1 <br/> node2, subcluster2, rack1 <br/> node3, subcluster3, rack2 <br/>
node4, subcluster3, rack2 | +|`yarn.federation.machine-list` | `` | Path of machine-list file used by `SubClusterResolver`. Each line of the file is a node with sub-cluster and rack information. Below is the example:

node1, subcluster1, rack1 <br/> node2, subcluster2, rack1 <br/> node3, subcluster3, rack2 <br/>
node4, subcluster3, rack2 | ###ON RMs: @@ -242,7 +242,7 @@ These are extra configurations that should appear in the **conf/yarn-site.xml** | Property | Example | Description | |:---- |:---- | |`yarn.router.bind-host` | `0.0.0.0` | Host IP to bind the router to. The actual address the server will bind to. If this optional address is set, the RPC and webapp servers will bind to this address and the port specified in yarn.router.*.address respectively. This is most useful for making Router listen to all interfaces by setting to 0.0.0.0. | -| `yarn.router.clientrm.interceptor-class.pipeline` | `org.apache.hadoop.yarn.server.router.clientrm.FederationClientInterceptor` | A comma-seperated list of interceptor classes to be run at the router when interfacing with the client. The last step of this pipeline must be the Federation Client Interceptor. | +| `yarn.router.clientrm.interceptor-class.pipeline` | `org.apache.hadoop.yarn.server.router.clientrm.FederationClientInterceptor` | A comma-separated list of interceptor classes to be run at the router when interfacing with the client. The last step of this pipeline must be the Federation Client Interceptor. | Optional: @@ -273,13 +273,13 @@ Optional: | Property | Example | Description | |:---- |:---- | -| `yarn.nodemanager.amrmproxy.ha.enable` | `true` | Whether or not the AMRMProxy HA is enabled for multiple application attempt suppport. | +| `yarn.nodemanager.amrmproxy.ha.enable` | `true` | Whether or not the AMRMProxy HA is enabled for multiple application attempt support. | | `yarn.federation.statestore.max-connections` | `1` | The maximum number of parallel connections from each AMRMProxy to the state-store. This value is typically lower than the router one, since we have many AMRMProxy that could burn-through many DB connections quickly. | | `yarn.federation.cache-ttl.secs` | `300` | The time to leave for the AMRMProxy cache. Typically larger than at the router, as the number of AMRMProxy is large, and we want to limit the load to the centralized state-store. | Running a Sample Job -------------------- -In order to submit jobs to a Federation cluster one must create a seperate set of configs for the client from which jobs will be submitted. In these, the **conf/yarn-site.xml** should have the following additional configurations: +In order to submit jobs to a Federation cluster one must create a separate set of configs for the client from which jobs will be submitted. In these, the **conf/yarn-site.xml** should have the following additional configurations: | Property | Example | Description | |:--- |:--- | From d5eca1a6a0e3939eead6711805b7a61c364d254b Mon Sep 17 00:00:00 2001 From: Miklos Szegedi Date: Thu, 7 Jun 2018 14:58:56 -0700 Subject: [PATCH 069/113] YARN-6677. Preempt opportunistic containers when root container cgroup goes over memory limit. Contributed by Haibo Chen. 
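The selection policy introduced in the change below (OPPORTUNISTIC containers before GUARANTEED ones, over-limit containers before within-limit ones, and the most recently launched container first within each group) can be sketched as a standalone comparator. This is illustrative only and not code from the patch; the `Candidate` class, the `KILL_ORDER` comparator and the sample container names are invented for the example:

```java
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

/** Illustrative only: ordering used to pick an OOM kill victim. */
public class OomVictimOrderingSketch {

  static final class Candidate {
    final String id;
    final boolean opportunistic; // execution type
    final boolean overLimit;     // exceeds its memory request
    final long launchTime;       // when it was allowed to launch

    Candidate(String id, boolean opportunistic, boolean overLimit, long launchTime) {
      this.id = id;
      this.opportunistic = opportunistic;
      this.overLimit = overLimit;
      this.launchTime = launchTime;
    }
  }

  // OPPORTUNISTIC before GUARANTEED, over-limit before within-limit,
  // latest launch first; the first element after sorting is killed first.
  static final Comparator<Candidate> KILL_ORDER = (a, b) -> {
    int ret = Boolean.compare(b.opportunistic, a.opportunistic);
    if (ret == 0) {
      ret = Boolean.compare(b.overLimit, a.overLimit);
    }
    if (ret == 0) {
      ret = Long.compare(b.launchTime, a.launchTime);
    }
    return ret;
  };

  public static void main(String[] args) {
    List<Candidate> candidates = new ArrayList<>();
    candidates.add(new Candidate("guaranteed-over-limit", false, true, 100L));
    candidates.add(new Candidate("opportunistic-within-limit", true, false, 300L));
    candidates.add(new Candidate("opportunistic-over-limit", true, true, 200L));

    candidates.sort(KILL_ORDER);
    // Prints: opportunistic-over-limit, opportunistic-within-limit,
    // guaranteed-over-limit
    candidates.forEach(c -> System.out.println(c.id));
  }
}
```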
--- .../containermanager/container/Container.java | 8 + .../container/ContainerImpl.java | 5 + .../linux/resources/DefaultOOMHandler.java | 249 ++-- .../resources/TestDefaultOOMHandler.java | 1108 ++++++++++++++--- .../nodemanager/webapp/MockContainer.java | 5 + 5 files changed, 1082 insertions(+), 293 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/Container.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/Container.java index 86f2554af7d..5d48d8486b6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/Container.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/Container.java @@ -37,8 +37,16 @@ public interface Container extends EventHandler { ContainerId getContainerId(); + /** + * The timestamp when the container start request is received. + */ long getContainerStartTime(); + /** + * The timestamp when the container is allowed to be launched. + */ + long getContainerLaunchTime(); + Resource getResource(); ContainerTokenIdentifier getContainerTokenIdentifier(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java index 5527ac4a12c..95ab37408a0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java @@ -882,6 +882,11 @@ public class ContainerImpl implements Container { return this.startTime; } + @Override + public long getContainerLaunchTime() { + return this.containerLaunchStartTime; + } + @Override public Resource getResource() { return Resources.clone( diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/DefaultOOMHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/DefaultOOMHandler.java index c6902258514..202e7d0176e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/DefaultOOMHandler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/DefaultOOMHandler.java @@ -6,9 +6,9 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at - *

<p> + * * http://www.apache.org/licenses/LICENSE-2.0 - * <p>
+ * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -18,10 +18,12 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources; import com.google.common.annotations.VisibleForTesting; +import org.apache.commons.lang.builder.HashCodeBuilder; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.yarn.api.records.ExecutionType; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor; import org.apache.hadoop.yarn.server.nodemanager.Context; @@ -30,7 +32,7 @@ import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerSignalContext import java.io.IOException; import java.util.ArrayList; -import java.util.Comparator; +import java.util.Collections; import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.CGroupsHandler.CGROUP_FILE_TASKS; import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.CGroupsHandler.CGROUP_PARAM_MEMORY_MEMSW_USAGE_BYTES; @@ -46,66 +48,60 @@ import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.r public class DefaultOOMHandler implements Runnable { protected static final Log LOG = LogFactory .getLog(DefaultOOMHandler.class); - private Context context; - private boolean virtual; - private CGroupsHandler cgroups; + private final Context context; + private final String memoryStatFile; + private final CGroupsHandler cgroups; /** * Create an OOM handler. * This has to be public to be able to construct through reflection. * @param context node manager context to work with - * @param testVirtual Test virtual memory or physical + * @param enforceVirtualMemory true if virtual memory needs to be checked, + * false if physical memory needs to be checked instead */ - public DefaultOOMHandler(Context context, boolean testVirtual) { + public DefaultOOMHandler(Context context, boolean enforceVirtualMemory) { this.context = context; - this.virtual = testVirtual; - this.cgroups = ResourceHandlerModule.getCGroupsHandler(); + this.memoryStatFile = enforceVirtualMemory ? + CGROUP_PARAM_MEMORY_MEMSW_USAGE_BYTES : + CGROUP_PARAM_MEMORY_USAGE_BYTES; + this.cgroups = getCGroupsHandler(); } @VisibleForTesting - void setCGroupsHandler(CGroupsHandler handler) { - cgroups = handler; + protected CGroupsHandler getCGroupsHandler() { + return ResourceHandlerModule.getCGroupsHandler(); } /** - * Kill the container, if it has exceeded its request. - * - * @param container Container to check - * @param fileName CGroup filename (physical or swap/virtual) - * @return true, if the container was preempted + * Check if a given container exceeds its limits. 
*/ - private boolean killContainerIfOOM(Container container, String fileName) { + private boolean isContainerOutOfLimit(Container container) { + boolean outOfLimit = false; + String value = null; try { value = cgroups.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, - container.getContainerId().toString(), - fileName); + container.getContainerId().toString(), memoryStatFile); long usage = Long.parseLong(value); long request = container.getResource().getMemorySize() * 1024 * 1024; // Check if the container has exceeded its limits. if (usage > request) { - // Kill the container - // We could call the regular cleanup but that sends a - // SIGTERM first that cannot be handled by frozen processes. - // Walk through the cgroup - // tasks file and kill all processes in it - sigKill(container); + outOfLimit = true; String message = String.format( - "Container %s was killed by elastic cgroups OOM handler using %d " + + "Container %s is out of its limits, using %d " + "when requested only %d", container.getContainerId(), usage, request); LOG.warn(message); - return true; } } catch (ResourceHandlerException ex) { LOG.warn(String.format("Could not access memory resource for %s", container.getContainerId()), ex); } catch (NumberFormatException ex) { - LOG.warn(String.format("Could not parse %s in %s", - value, container.getContainerId())); + LOG.warn(String.format("Could not parse %s in %s", value, + container.getContainerId())); } - return false; + return outOfLimit; } /** @@ -168,21 +164,16 @@ public class DefaultOOMHandler implements Runnable { /** * It is called when the node is under an OOM condition. All processes in * all sub-cgroups are suspended. We need to act fast, so that we do not - * affect the overall system utilization. - * In general we try to find a newly run container that exceeded its limits. - * The justification is cost, since probably this is the one that has - * accumulated the least amount of uncommitted data so far. - * We continue the process until the OOM is resolved. + * affect the overall system utilization. In general we try to find a + * newly launched container that exceeded its limits. The justification is + * cost, since probably this is the one that has accumulated the least + * amount of uncommitted data so far. OPPORTUNISTIC containers are always + * killed before any GUARANTEED containers are considered. We continue the + * process until the OOM is resolved. */ @Override public void run() { try { - // Reverse order by start time - Comparator comparator = (Container o1, Container o2) -> { - long order = o1.getContainerStartTime() - o2.getContainerStartTime(); - return order > 0 ? -1 : order < 0 ? 
1 : 0; - }; - // We kill containers until the kernel reports the OOM situation resolved // Note: If the kernel has a delay this may kill more than necessary while (true) { @@ -194,61 +185,135 @@ public class DefaultOOMHandler implements Runnable { break; } - // The first pass kills a recent container - // that uses more than its request - ArrayList containers = new ArrayList<>(); - containers.addAll(context.getContainers().values()); - // Note: Sorting may take a long time with 10K+ containers - // but it is acceptable now with low number of containers per node - containers.sort(comparator); + boolean containerKilled = killContainer(); - // Kill the latest container that exceeded its request - boolean found = false; - for (Container container : containers) { - if (!virtual) { - if (killContainerIfOOM(container, - CGROUP_PARAM_MEMORY_USAGE_BYTES)) { - found = true; - break; - } - } else { - if (killContainerIfOOM(container, - CGROUP_PARAM_MEMORY_MEMSW_USAGE_BYTES)) { - found = true; - break; - } - } + if (!containerKilled) { + // This can happen, if SIGKILL did not clean up + // non-PGID or containers or containers launched by other users + // or if a process was put to the root YARN cgroup. + throw new YarnRuntimeException( + "Could not find any containers but CGroups " + + "reserved for containers ran out of memory. " + + "I am giving up"); } - if (found) { - continue; - } - - // We have not found any containers that ran out of their limit, - // so we will kill the latest one. This can happen, if all use - // close to their request and one of them requests a big block - // triggering the OOM freeze. - // Currently there is no other way to identify the outstanding one. - if (containers.size() > 0) { - Container container = containers.get(0); - sigKill(container); - String message = String.format( - "Newest container %s killed by elastic cgroups OOM handler using", - container.getContainerId()); - LOG.warn(message); - continue; - } - - // This can happen, if SIGKILL did not clean up - // non-PGID or containers or containers launched by other users - // or if a process was put to the root YARN cgroup. - throw new YarnRuntimeException( - "Could not find any containers but CGroups " + - "reserved for containers ran out of memory. " + - "I am giving up"); } } catch (ResourceHandlerException ex) { - LOG.warn("Could not fecth OOM status. " + + LOG.warn("Could not fetch OOM status. " + "This is expected at shutdown. Exiting.", ex); } } + + /** + * Choose and kill a container in case of OOM. We try to find the most + * recently launched OPPORTUNISTIC container that exceeds its limit + * and fall back to the most recently launched OPPORTUNISTIC container + * If there is no such container found, we choose to kill a GUARANTEED + * container in the same way. 
+ * @return true if a container is killed, false otherwise + */ + protected boolean killContainer() { + boolean containerKilled = false; + + ArrayList candidates = new ArrayList<>(0); + for (Container container : context.getContainers().values()) { + candidates.add( + new ContainerCandidate(container, isContainerOutOfLimit(container))); + } + Collections.sort(candidates); + + if (candidates.size() > 0) { + ContainerCandidate candidate = candidates.get(0); + sigKill(candidate.container); + String message = String.format( + "container %s killed by elastic cgroups OOM handler.", + candidate.container.getContainerId()); + LOG.warn(message); + containerKilled = true; + } + return containerKilled; + } + + /** + * Note: this class has a natural ordering that is inconsistent with equals. + */ + private static class ContainerCandidate + implements Comparable { + private final boolean outOfLimit; + final Container container; + + ContainerCandidate(Container container, boolean outOfLimit) { + this.outOfLimit = outOfLimit; + this.container = container; + } + + /** + * Order two containers by their execution type, followed by + * their out-of-limit status and then launch time. Opportunistic + * containers are ordered before Guaranteed containers. If two + * containers are of the same execution type, the one that is + * out of its limits is ordered before the one that isn't. If + * two containers have the same execution type and out-of-limit + * status, the one that's launched later is ordered before the + * other one. + */ + @Override + public int compareTo(ContainerCandidate o) { + boolean isThisOpportunistic = isOpportunistic(container); + boolean isOtherOpportunistic = isOpportunistic(o.container); + int ret = Boolean.compare(isOtherOpportunistic, isThisOpportunistic); + if (ret == 0) { + // the two containers are of the same execution type, order them + // by their out-of-limit status. + int outOfLimitRet = Boolean.compare(o.outOfLimit, outOfLimit); + if (outOfLimitRet == 0) { + // the two containers are also of the same out-of-limit status, + // order them by their launch time + ret = Long.compare(o.container.getContainerLaunchTime(), + this.container.getContainerLaunchTime()); + } else { + ret = outOfLimitRet; + } + } + return ret; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + if (this.getClass() != obj.getClass()) { + return false; + } + ContainerCandidate other = (ContainerCandidate) obj; + if (this.outOfLimit != other.outOfLimit) { + return false; + } + if (this.container == null) { + return other.container == null; + } else { + return this.container.equals(other.container); + } + } + + @Override + public int hashCode() { + return new HashCodeBuilder().append(container).append(outOfLimit) + .toHashCode(); + } + + /** + * Check if a container is OPPORTUNISTIC or not. A container is + * considered OPPORTUNISTIC only if its execution type is not + * null and is OPPORTUNISTIC. 
+ */ + private static boolean isOpportunistic(Container container) { + return container.getContainerTokenIdentifier() != null && + ExecutionType.OPPORTUNISTIC.equals( + container.getContainerTokenIdentifier().getExecutionType()); + } + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestDefaultOOMHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestDefaultOOMHandler.java index 60c38fef648..e2390678328 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestDefaultOOMHandler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestDefaultOOMHandler.java @@ -20,8 +20,8 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resourc import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.ExecutionType; import org.apache.hadoop.yarn.api.records.Resource; -import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.security.ContainerTokenIdentifier; import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor; @@ -31,14 +31,12 @@ import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerSignalContext import org.junit.Test; import java.io.IOException; -import java.util.LinkedHashMap; import java.util.concurrent.ConcurrentHashMap; import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.CGroupsHandler.CGROUP_FILE_TASKS; import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.CGroupsHandler.CGROUP_PARAM_MEMORY_MEMSW_USAGE_BYTES; import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.CGroupsHandler.CGROUP_PARAM_MEMORY_OOM_CONTROL; import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.CGroupsHandler.CGROUP_PARAM_MEMORY_USAGE_BYTES; -import static org.junit.Assert.assertEquals; import static org.mockito.Matchers.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; @@ -51,13 +49,13 @@ import static org.mockito.Mockito.when; public class TestDefaultOOMHandler { /** - * Test an OOM situation where no containers are running. + * Test an OOM situation where there are no containers that can be killed. 
*/ @Test(expected = YarnRuntimeException.class) - public void testNoContainers() throws Exception { + public void testExceptionThrownWithNoContainersToKill() throws Exception { Context context = mock(Context.class); - when(context.getContainers()).thenReturn(new ConcurrentHashMap<>()); + when(context.getContainers()).thenReturn(new ConcurrentHashMap<>(0)); CGroupsHandler cGroupsHandler = mock(CGroupsHandler.class); when(cGroupsHandler.getCGroupParam( @@ -66,222 +64,902 @@ public class TestDefaultOOMHandler { CGROUP_PARAM_MEMORY_OOM_CONTROL)) .thenReturn("under_oom 1").thenReturn("under_oom 0"); - DefaultOOMHandler handler = new DefaultOOMHandler(context, false); - handler.setCGroupsHandler(cGroupsHandler); + DefaultOOMHandler handler = new DefaultOOMHandler(context, false) { + @Override + protected CGroupsHandler getCGroupsHandler() { + return cGroupsHandler; + } + }; handler.run(); } /** - * We have two containers, both out of limit. We should kill the later one. - * - * @throws Exception exception + * We have two guaranteed containers, both of which are out of limit. + * We should kill the later one. */ @Test - public void testBothContainersOOM() throws Exception { + public void testBothGuaranteedContainersOverLimitUponOOM() throws Exception { ConcurrentHashMap containers = - new ConcurrentHashMap<>(new LinkedHashMap<>()); + new ConcurrentHashMap<>(); + Container c1 = createContainer(1, true, 1L); + containers.put(c1.getContainerId(), c1); + Container c2 = createContainer(2, true, 2L); + containers.put(c2.getContainerId(), c2); - Container c1 = mock(Container.class); - ContainerId cid1 = createContainerId(1); - when(c1.getContainerId()).thenReturn(cid1); - when(c1.getResource()).thenReturn(Resource.newInstance(10, 1)); - when(c1.getContainerStartTime()).thenReturn((long) 1); - containers.put(createContainerId(1), c1); - - Container c2 = mock(Container.class); - ContainerId cid2 = createContainerId(2); - when(c2.getContainerId()).thenReturn(cid2); - when(c2.getResource()).thenReturn(Resource.newInstance(10, 1)); - when(c2.getContainerStartTime()).thenReturn((long) 2); - containers.put(cid2, c2); - - CGroupsHandler cGroupsHandler = mock(CGroupsHandler.class); - when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, - cid1.toString(), CGROUP_FILE_TASKS)) - .thenReturn("1234").thenReturn(""); - when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, - cid1.toString(), CGROUP_PARAM_MEMORY_USAGE_BYTES)) - .thenReturn(getMB(11)); - when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, - cid1.toString(), CGROUP_PARAM_MEMORY_MEMSW_USAGE_BYTES)) - .thenReturn(getMB(11)); - when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, - cid2.toString(), CGROUP_FILE_TASKS)) - .thenReturn("1235").thenReturn(""); - when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, - cid2.toString(), CGROUP_PARAM_MEMORY_USAGE_BYTES)) - .thenReturn(getMB(11)); - when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, - cid2.toString(), CGROUP_PARAM_MEMORY_MEMSW_USAGE_BYTES)) - .thenReturn(getMB(11)); - - ContainerExecutor ex = mock(ContainerExecutor.class); - - runOOMHandler(containers, cGroupsHandler, ex); - - verify(ex, times(1)).signalContainer( - new ContainerSignalContext.Builder() - .setPid("1235") - .setContainer(c2) - .setSignal(ContainerExecutor.Signal.KILL) - .build() - ); - verify(ex, times(1)).signalContainer(any()); - } - - /** - * We have two containers, one out of limit. 
We should kill that one. - * This should happen even, if it was started earlier - * - * @throws Exception exception - */ - @Test - public void testOneContainerOOM() throws Exception { - ConcurrentHashMap containers = - new ConcurrentHashMap<>(new LinkedHashMap<>()); - - Container c1 = mock(Container.class); - ContainerId cid1 = createContainerId(1); - when(c1.getContainerId()).thenReturn(cid1); - when(c1.getResource()).thenReturn(Resource.newInstance(10, 1)); - when(c1.getContainerStartTime()).thenReturn((long) 2); - containers.put(createContainerId(1), c1); - - Container c2 = mock(Container.class); - ContainerId cid2 = createContainerId(2); - when(c2.getContainerId()).thenReturn(cid2); - when(c2.getResource()).thenReturn(Resource.newInstance(10, 1)); - when(c2.getContainerStartTime()).thenReturn((long) 1); - containers.put(cid2, c2); - - CGroupsHandler cGroupsHandler = mock(CGroupsHandler.class); - when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, - cid1.toString(), CGROUP_FILE_TASKS)) - .thenReturn("1234").thenReturn(""); - when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, - cid1.toString(), CGROUP_PARAM_MEMORY_USAGE_BYTES)) - .thenReturn(getMB(9)); - when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, - cid1.toString(), CGROUP_PARAM_MEMORY_MEMSW_USAGE_BYTES)) - .thenReturn(getMB(9)); - when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, - cid2.toString(), CGROUP_FILE_TASKS)) - .thenReturn("1235").thenReturn(""); - when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, - cid2.toString(), CGROUP_PARAM_MEMORY_USAGE_BYTES)) - .thenReturn(getMB(11)); - when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, - cid2.toString(), CGROUP_PARAM_MEMORY_MEMSW_USAGE_BYTES)) - .thenReturn(getMB(11)); - - ContainerExecutor ex = mock(ContainerExecutor.class); - runOOMHandler(containers, cGroupsHandler, ex); - - verify(ex, times(1)).signalContainer( - new ContainerSignalContext.Builder() - .setPid("1235") - .setContainer(c2) - .setSignal(ContainerExecutor.Signal.KILL) - .build() - ); - verify(ex, times(1)).signalContainer(any()); - } - - /** - * We have two containers, neither out of limit. We should kill the later one. 
- * - * @throws Exception exception - */ - @Test - public void testNoContainerOOM() throws Exception { - ConcurrentHashMap containers = - new ConcurrentHashMap<>(new LinkedHashMap<>()); - - Container c1 = mock(Container.class); - ContainerId cid1 = createContainerId(1); - when(c1.getContainerId()).thenReturn(cid1); - when(c1.getResource()).thenReturn(Resource.newInstance(10, 1)); - when(c1.getContainerStartTime()).thenReturn((long) 1); - containers.put(createContainerId(1), c1); - - Container c2 = mock(Container.class); - ContainerId cid2 = createContainerId(2); - when(c2.getContainerId()).thenReturn(cid2); - when(c2.getResource()).thenReturn(Resource.newInstance(10, 1)); - when(c2.getContainerStartTime()).thenReturn((long) 2); - containers.put(cid2, c2); - - CGroupsHandler cGroupsHandler = mock(CGroupsHandler.class); - when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, - cid1.toString(), CGROUP_FILE_TASKS)) - .thenReturn("1234").thenReturn(""); - when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, - cid1.toString(), CGROUP_PARAM_MEMORY_USAGE_BYTES)) - .thenReturn(getMB(9)); - when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, - cid1.toString(), CGROUP_PARAM_MEMORY_MEMSW_USAGE_BYTES)) - .thenReturn(getMB(9)); - when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, - cid2.toString(), CGROUP_FILE_TASKS)) - .thenReturn("1235").thenReturn(""); - when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, - cid2.toString(), CGROUP_PARAM_MEMORY_USAGE_BYTES)) - .thenReturn(getMB(9)); - when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, - cid2.toString(), CGROUP_PARAM_MEMORY_MEMSW_USAGE_BYTES)) - .thenReturn(getMB(9)); - - ContainerExecutor ex = mock(ContainerExecutor.class); - runOOMHandler(containers, cGroupsHandler, ex); - - verify(ex, times(1)).signalContainer( - new ContainerSignalContext.Builder() - .setPid("1235") - .setContainer(c2) - .setSignal(ContainerExecutor.Signal.KILL) - .build() - ); - verify(ex, times(1)).signalContainer(any()); - } - - private void runOOMHandler( - ConcurrentHashMap containers, - CGroupsHandler cGroupsHandler, ContainerExecutor ex) - throws IOException, ResourceHandlerException { + ContainerExecutor ex = createContainerExecutor(containers); Context context = mock(Context.class); when(context.getContainers()).thenReturn(containers); + when(context.getContainerExecutor()).thenReturn(ex); - when(ex.signalContainer(any())) - .thenAnswer(invocation -> { - assertEquals("Wrong pid killed", "1235", - ((ContainerSignalContext) invocation.getArguments()[0]).getPid()); - return true; - }); + CGroupsHandler cGroupsHandler = mock(CGroupsHandler.class); when(cGroupsHandler.getCGroupParam( CGroupsHandler.CGroupController.MEMORY, "", CGROUP_PARAM_MEMORY_OOM_CONTROL)) .thenReturn("under_oom 1").thenReturn("under_oom 0"); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c1.getContainerId().toString(), CGROUP_FILE_TASKS)) + .thenReturn("1234").thenReturn(""); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c1.getContainerId().toString(), CGROUP_PARAM_MEMORY_USAGE_BYTES)) + .thenReturn(getMB(11)); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c1.getContainerId().toString(), CGROUP_PARAM_MEMORY_MEMSW_USAGE_BYTES)) + .thenReturn(getMB(11)); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c2.getContainerId().toString(), 
CGROUP_FILE_TASKS)) + .thenReturn("1235").thenReturn(""); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c2.getContainerId().toString(), CGROUP_PARAM_MEMORY_USAGE_BYTES)) + .thenReturn(getMB(11)); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c2.getContainerId().toString(), CGROUP_PARAM_MEMORY_MEMSW_USAGE_BYTES)) + .thenReturn(getMB(11)); + DefaultOOMHandler handler = + new DefaultOOMHandler(context, false) { + @Override + protected CGroupsHandler getCGroupsHandler() { + return cGroupsHandler; + } + }; + handler.run(); + + + verify(ex, times(1)).signalContainer( + new ContainerSignalContext.Builder() + .setPid("1235") + .setContainer(c2) + .setSignal(ContainerExecutor.Signal.KILL) + .build() + ); + verify(ex, times(1)).signalContainer(any()); + } + + /** + * We have two GUARANTEED containers, one of which is out of limit. + * We should kill the one that's out of its limit. This should + * happen even if it was launched earlier than the other one. + */ + @Test + public void testOneGuaranteedContainerOverLimitUponOOM() throws Exception { + ConcurrentHashMap containers = + new ConcurrentHashMap<>(); + Container c1 = createContainer(1, true, 2L); + containers.put(c1.getContainerId(), c1); + Container c2 = createContainer(2, true, 1L); + containers.put(c2.getContainerId(), c2); + + ContainerExecutor ex = createContainerExecutor(containers); + Context context = mock(Context.class); + when(context.getContainers()).thenReturn(containers); when(context.getContainerExecutor()).thenReturn(ex); - DefaultOOMHandler handler = new DefaultOOMHandler(context, false); - handler.setCGroupsHandler(cGroupsHandler); + CGroupsHandler cGroupsHandler = mock(CGroupsHandler.class); + when(cGroupsHandler.getCGroupParam( + CGroupsHandler.CGroupController.MEMORY, + "", + CGROUP_PARAM_MEMORY_OOM_CONTROL)) + .thenReturn("under_oom 1").thenReturn("under_oom 0"); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c1.getContainerId().toString(), CGROUP_FILE_TASKS)) + .thenReturn("1234").thenReturn(""); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c1.getContainerId().toString(), CGROUP_PARAM_MEMORY_USAGE_BYTES)) + .thenReturn(getMB(9)); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c1.getContainerId().toString(), CGROUP_PARAM_MEMORY_MEMSW_USAGE_BYTES)) + .thenReturn(getMB(9)); + // container c2 is out of its limit + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c2.getContainerId().toString(), CGROUP_FILE_TASKS)) + .thenReturn("1235").thenReturn(""); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c2.getContainerId().toString(), CGROUP_PARAM_MEMORY_USAGE_BYTES)) + .thenReturn(getMB(11)); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c2.getContainerId().toString(), CGROUP_PARAM_MEMORY_MEMSW_USAGE_BYTES)) + .thenReturn(getMB(11)); + + DefaultOOMHandler handler = + new DefaultOOMHandler(context, false) { + @Override + protected CGroupsHandler getCGroupsHandler() { + return cGroupsHandler; + } + }; + handler.run(); + + verify(ex, times(1)).signalContainer( + new ContainerSignalContext.Builder() + .setPid("1235") + .setContainer(c2) + .setSignal(ContainerExecutor.Signal.KILL) + .build() + ); + verify(ex, times(1)).signalContainer(any()); + } + + /** + * We have two GUARANTEE containers, neither of which is out of limit. + * We should kill the later launched one. 
+ */ + @Test + public void testNoGuaranteedContainerOverLimitOOM() throws Exception { + ConcurrentHashMap containers = + new ConcurrentHashMap<>(); + Container c1 = createContainer(1, true, 1L); + containers.put(c1.getContainerId(), c1); + Container c2 = createContainer(2, true, 2L); + containers.put(c2.getContainerId(), c2); + + ContainerExecutor ex = createContainerExecutor(containers); + Context context = mock(Context.class); + when(context.getContainers()).thenReturn(containers); + when(context.getContainerExecutor()).thenReturn(ex); + + CGroupsHandler cGroupsHandler = mock(CGroupsHandler.class); + when(cGroupsHandler.getCGroupParam( + CGroupsHandler.CGroupController.MEMORY, + "", + CGROUP_PARAM_MEMORY_OOM_CONTROL)) + .thenReturn("under_oom 1").thenReturn("under_oom 0"); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c1.getContainerId().toString(), CGROUP_FILE_TASKS)) + .thenReturn("1234").thenReturn(""); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c1.getContainerId().toString(), CGROUP_PARAM_MEMORY_USAGE_BYTES)) + .thenReturn(getMB(9)); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c1.getContainerId().toString(), CGROUP_PARAM_MEMORY_MEMSW_USAGE_BYTES)) + .thenReturn(getMB(9)); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c2.getContainerId().toString(), CGROUP_FILE_TASKS)) + .thenReturn("1235").thenReturn(""); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c2.getContainerId().toString(), CGROUP_PARAM_MEMORY_USAGE_BYTES)) + .thenReturn(getMB(9)); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c2.getContainerId().toString(), CGROUP_PARAM_MEMORY_MEMSW_USAGE_BYTES)) + .thenReturn(getMB(9)); + + DefaultOOMHandler handler = + new DefaultOOMHandler(context, false) { + @Override + protected CGroupsHandler getCGroupsHandler() { + return cGroupsHandler; + } + }; + handler.run(); + + verify(ex, times(1)).signalContainer( + new ContainerSignalContext.Builder() + .setPid("1235") + .setContainer(c2) + .setSignal(ContainerExecutor.Signal.KILL) + .build() + ); + verify(ex, times(1)).signalContainer(any()); + } + + /** + * We have two opportunistic containers, both of which are out of limit. + * We should kill the later one. 
+ */ + @Test + public void testBothOpportunisticContainersOverLimitUponOOM() + throws Exception { + ConcurrentHashMap containers = + new ConcurrentHashMap<>(); + Container c1 = createContainer(1, false, 1L); + containers.put(c1.getContainerId(), c1); + Container c2 = createContainer(2, false, 2L); + containers.put(c2.getContainerId(), c2); + + ContainerExecutor ex = createContainerExecutor(containers); + Context context = mock(Context.class); + when(context.getContainers()).thenReturn(containers); + when(context.getContainerExecutor()).thenReturn(ex); + + + CGroupsHandler cGroupsHandler = mock(CGroupsHandler.class); + when(cGroupsHandler.getCGroupParam( + CGroupsHandler.CGroupController.MEMORY, + "", + CGROUP_PARAM_MEMORY_OOM_CONTROL)) + .thenReturn("under_oom 1").thenReturn("under_oom 0"); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c1.getContainerId().toString(), CGROUP_FILE_TASKS)) + .thenReturn("1234").thenReturn(""); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c1.getContainerId().toString(), CGROUP_PARAM_MEMORY_USAGE_BYTES)) + .thenReturn(getMB(11)); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c1.getContainerId().toString(), CGROUP_PARAM_MEMORY_MEMSW_USAGE_BYTES)) + .thenReturn(getMB(11)); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c2.getContainerId().toString(), CGROUP_FILE_TASKS)) + .thenReturn("1235").thenReturn(""); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c2.getContainerId().toString(), CGROUP_PARAM_MEMORY_USAGE_BYTES)) + .thenReturn(getMB(11)); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c2.getContainerId().toString(), CGROUP_PARAM_MEMORY_MEMSW_USAGE_BYTES)) + .thenReturn(getMB(11)); + + DefaultOOMHandler handler = + new DefaultOOMHandler(context, false) { + @Override + protected CGroupsHandler getCGroupsHandler() { + return cGroupsHandler; + } + }; + handler.run(); + + + verify(ex, times(1)).signalContainer( + new ContainerSignalContext.Builder() + .setPid("1235") + .setContainer(c2) + .setSignal(ContainerExecutor.Signal.KILL) + .build() + ); + verify(ex, times(1)).signalContainer(any()); + } + + /** + * We have two OPPORTUNISTIC containers, one of which is out of limit. + * We should kill the one that's out of its limit. This should + * happen even if it was launched earlier than the other one. 
+ */ + @Test + public void testOneOpportunisticContainerOverLimitUponOOM() throws Exception { + ConcurrentHashMap containers = + new ConcurrentHashMap<>(); + Container c1 = createContainer(1, false, 2L); + containers.put(c1.getContainerId(), c1); + Container c2 = createContainer(2, false, 1L); + containers.put(c2.getContainerId(), c2); + + ContainerExecutor ex = createContainerExecutor(containers); + Context context = mock(Context.class); + when(context.getContainers()).thenReturn(containers); + when(context.getContainerExecutor()).thenReturn(ex); + + CGroupsHandler cGroupsHandler = mock(CGroupsHandler.class); + when(cGroupsHandler.getCGroupParam( + CGroupsHandler.CGroupController.MEMORY, + "", + CGROUP_PARAM_MEMORY_OOM_CONTROL)) + .thenReturn("under_oom 1").thenReturn("under_oom 0"); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c1.getContainerId().toString(), CGROUP_FILE_TASKS)) + .thenReturn("1234").thenReturn(""); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c1.getContainerId().toString(), CGROUP_PARAM_MEMORY_USAGE_BYTES)) + .thenReturn(getMB(9)); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c1.getContainerId().toString(), CGROUP_PARAM_MEMORY_MEMSW_USAGE_BYTES)) + .thenReturn(getMB(9)); + // contnainer c2 is out of its limit + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c2.getContainerId().toString(), CGROUP_FILE_TASKS)) + .thenReturn("1235").thenReturn(""); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c2.getContainerId().toString(), CGROUP_PARAM_MEMORY_USAGE_BYTES)) + .thenReturn(getMB(11)); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c2.getContainerId().toString(), CGROUP_PARAM_MEMORY_MEMSW_USAGE_BYTES)) + .thenReturn(getMB(11)); + + DefaultOOMHandler handler = + new DefaultOOMHandler(context, false) { + @Override + protected CGroupsHandler getCGroupsHandler() { + return cGroupsHandler; + } + }; + handler.run(); + + verify(ex, times(1)).signalContainer( + new ContainerSignalContext.Builder() + .setPid("1235") + .setContainer(c2) + .setSignal(ContainerExecutor.Signal.KILL) + .build() + ); + verify(ex, times(1)).signalContainer(any()); + } + + /** + * We have two OPPORTUNISTIC containers, neither of which is out of limit. + * We should kill the later one. 
+ */ + @Test + public void testNoOpportunisticContainerOverLimitOOM() throws Exception { + ConcurrentHashMap containers = + new ConcurrentHashMap<>(); + Container c1 = createContainer(1, false, 1L); + containers.put(c1.getContainerId(), c1); + Container c2 = createContainer(2, false, 2L); + containers.put(c2.getContainerId(), c2); + + ContainerExecutor ex = createContainerExecutor(containers); + Context context = mock(Context.class); + when(context.getContainers()).thenReturn(containers); + when(context.getContainerExecutor()).thenReturn(ex); + + CGroupsHandler cGroupsHandler = mock(CGroupsHandler.class); + when(cGroupsHandler.getCGroupParam( + CGroupsHandler.CGroupController.MEMORY, + "", + CGROUP_PARAM_MEMORY_OOM_CONTROL)) + .thenReturn("under_oom 1").thenReturn("under_oom 0"); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c1.getContainerId().toString(), CGROUP_FILE_TASKS)) + .thenReturn("1234").thenReturn(""); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c1.getContainerId().toString(), CGROUP_PARAM_MEMORY_USAGE_BYTES)) + .thenReturn(getMB(9)); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c1.getContainerId().toString(), CGROUP_PARAM_MEMORY_MEMSW_USAGE_BYTES)) + .thenReturn(getMB(9)); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c2.getContainerId().toString(), CGROUP_FILE_TASKS)) + .thenReturn("1235").thenReturn(""); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c2.getContainerId().toString(), CGROUP_PARAM_MEMORY_USAGE_BYTES)) + .thenReturn(getMB(9)); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c2.getContainerId().toString(), CGROUP_PARAM_MEMORY_MEMSW_USAGE_BYTES)) + .thenReturn(getMB(9)); + + DefaultOOMHandler handler = + new DefaultOOMHandler(context, false) { + @Override + protected CGroupsHandler getCGroupsHandler() { + return cGroupsHandler; + } + }; + handler.run(); + + verify(ex, times(1)).signalContainer( + new ContainerSignalContext.Builder() + .setPid("1235") + .setContainer(c2) + .setSignal(ContainerExecutor.Signal.KILL) + .build() + ); + verify(ex, times(1)).signalContainer(any()); + } + + /** + * We have two OPPORTUNISTIC containers and one GUARANTEED container. + * One of the OPPORTUNISTIC container is out of limit. + * OOM is resolved after killing the OPPORTUNISTIC container that + * exceeded its limit even though it is launched earlier than the + * other OPPORTUNISTIC container. 
+ */ + @Test + public void testKillOneOverLimitOpportunisticContainerUponOOM() + throws Exception { + ConcurrentHashMap containers = + new ConcurrentHashMap<>(); + int currentContainerId = 0; + Container c1 = createContainer(currentContainerId++, false, 2); + containers.put(c1.getContainerId(), c1); + Container c2 = createContainer(currentContainerId++, false, 1); + containers.put(c2.getContainerId(), c2); + Container c3 = createContainer(currentContainerId++, true, 1); + containers.put(c3.getContainerId(), c3); + + ContainerExecutor ex = createContainerExecutor(containers); + Context context = mock(Context.class); + when(context.getContainers()).thenReturn(containers); + when(context.getContainerExecutor()).thenReturn(ex); + + CGroupsHandler cGroupsHandler = mock(CGroupsHandler.class); + when(cGroupsHandler.getCGroupParam( + CGroupsHandler.CGroupController.MEMORY, + "", + CGROUP_PARAM_MEMORY_OOM_CONTROL)) + .thenReturn("under_oom 1") + .thenReturn("under_oom 0"); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c1.getContainerId().toString(), CGROUP_FILE_TASKS)) + .thenReturn("1234").thenReturn(""); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c1.getContainerId().toString(), CGROUP_PARAM_MEMORY_USAGE_BYTES)) + .thenReturn(getMB(9)); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c1.getContainerId().toString(), CGROUP_PARAM_MEMORY_MEMSW_USAGE_BYTES)) + .thenReturn(getMB(9)); + + // container c2 is out of its limit + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c2.getContainerId().toString(), CGROUP_FILE_TASKS)) + .thenReturn("1235").thenReturn(""); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c2.getContainerId().toString(), CGROUP_PARAM_MEMORY_USAGE_BYTES)) + .thenReturn(getMB(11)); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c2.getContainerId().toString(), CGROUP_PARAM_MEMORY_MEMSW_USAGE_BYTES)) + .thenReturn(getMB(11)); + + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c3.getContainerId().toString(), CGROUP_FILE_TASKS)) + .thenReturn("1236").thenReturn(""); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c3.getContainerId().toString(), CGROUP_PARAM_MEMORY_USAGE_BYTES)) + .thenReturn(getMB(9)); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c3.getContainerId().toString(), CGROUP_PARAM_MEMORY_MEMSW_USAGE_BYTES)) + .thenReturn(getMB(9)); + + DefaultOOMHandler handler = + new DefaultOOMHandler(context, false) { + @Override + protected CGroupsHandler getCGroupsHandler() { + return cGroupsHandler; + } + }; + handler.run(); + + verify(ex, times(1)).signalContainer( + new ContainerSignalContext.Builder() + .setPid("1235") + .setContainer(c2) + .setSignal(ContainerExecutor.Signal.KILL) + .build() + ); + verify(ex, times(1)).signalContainer(any()); + } + /** + * We have two OPPORTUNISTIC containers and one GUARANTEED container. + * None of the containers exceeded its memory limit. + * OOM is resolved after killing the most recently launched OPPORTUNISTIC + * container. 
+ */ + @Test + public void testKillOneLaterOpportunisticContainerUponOOM() throws Exception { + ConcurrentHashMap containers = + new ConcurrentHashMap<>(); + int currentContainerId = 0; + Container c1 = createContainer(currentContainerId++, false, 1); + containers.put(c1.getContainerId(), c1); + Container c2 = createContainer(currentContainerId++, false, 2); + containers.put(c2.getContainerId(), c2); + Container c3 = createContainer(currentContainerId++, true, 1); + containers.put(c3.getContainerId(), c3); + + ContainerExecutor ex = createContainerExecutor(containers); + Context context = mock(Context.class); + when(context.getContainers()).thenReturn(containers); + when(context.getContainerExecutor()).thenReturn(ex); + + CGroupsHandler cGroupsHandler = mock(CGroupsHandler.class); + when(cGroupsHandler.getCGroupParam( + CGroupsHandler.CGroupController.MEMORY, + "", + CGROUP_PARAM_MEMORY_OOM_CONTROL)) + .thenReturn("under_oom 1") + .thenReturn("under_oom 0"); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c1.getContainerId().toString(), CGROUP_FILE_TASKS)) + .thenReturn("1234").thenReturn(""); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c1.getContainerId().toString(), CGROUP_PARAM_MEMORY_USAGE_BYTES)) + .thenReturn(getMB(9)); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c1.getContainerId().toString(), CGROUP_PARAM_MEMORY_MEMSW_USAGE_BYTES)) + .thenReturn(getMB(9)); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c2.getContainerId().toString(), CGROUP_FILE_TASKS)) + .thenReturn("1235").thenReturn(""); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c2.getContainerId().toString(), CGROUP_PARAM_MEMORY_USAGE_BYTES)) + .thenReturn(getMB(9)); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c2.getContainerId().toString(), CGROUP_PARAM_MEMORY_MEMSW_USAGE_BYTES)) + .thenReturn(getMB(9)); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c3.getContainerId().toString(), CGROUP_FILE_TASKS)) + .thenReturn("1236").thenReturn(""); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c3.getContainerId().toString(), CGROUP_PARAM_MEMORY_USAGE_BYTES)) + .thenReturn(getMB(9)); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c3.getContainerId().toString(), CGROUP_PARAM_MEMORY_MEMSW_USAGE_BYTES)) + .thenReturn(getMB(9)); + + DefaultOOMHandler handler = + new DefaultOOMHandler(context, false) { + @Override + protected CGroupsHandler getCGroupsHandler() { + return cGroupsHandler; + } + }; + handler.run(); + + verify(ex, times(1)).signalContainer( + new ContainerSignalContext.Builder() + .setPid("1235") + .setContainer(c2) + .setSignal(ContainerExecutor.Signal.KILL) + .build() + ); + verify(ex, times(1)).signalContainer(any()); + } + + /** + * We have two OPPORTUNISTIC containers and one GUARANTEED container. + * One of the OPPORTUNISTIC container is out of limit. + * OOM is resolved after killing both OPPORTUNISTIC containers. 
+ */ + @Test + public void testKillBothOpportunisticContainerUponOOM() throws Exception { + int currentContainerId = 0; + + ConcurrentHashMap containers = + new ConcurrentHashMap<>(); + Container c1 = createContainer(currentContainerId++, false, 2); + containers.put(c1.getContainerId(), c1); + Container c2 = createContainer(currentContainerId++, false, 1); + containers.put(c2.getContainerId(), c2); + Container c3 = createContainer(currentContainerId++, true, 1); + containers.put(c3.getContainerId(), c3); + + ContainerExecutor ex = createContainerExecutor(containers); + Context context = mock(Context.class); + when(context.getContainers()).thenReturn(containers); + when(context.getContainerExecutor()).thenReturn(ex); + + CGroupsHandler cGroupsHandler = mock(CGroupsHandler.class); + when(cGroupsHandler.getCGroupParam( + CGroupsHandler.CGroupController.MEMORY, + "", + CGROUP_PARAM_MEMORY_OOM_CONTROL)) + .thenReturn("under_oom 1") + .thenReturn("under_oom 1") + .thenReturn("under_oom 0"); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c1.getContainerId().toString(), CGROUP_FILE_TASKS)) + .thenReturn("1234").thenReturn(""); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c1.getContainerId().toString(), CGROUP_PARAM_MEMORY_USAGE_BYTES)) + .thenReturn(getMB(9)); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c1.getContainerId().toString(), CGROUP_PARAM_MEMORY_MEMSW_USAGE_BYTES)) + .thenReturn(getMB(9)); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c2.getContainerId().toString(), CGROUP_FILE_TASKS)) + .thenReturn("1235").thenReturn(""); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c2.getContainerId().toString(), CGROUP_PARAM_MEMORY_USAGE_BYTES)) + .thenReturn(getMB(11)); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c2.getContainerId().toString(), CGROUP_PARAM_MEMORY_MEMSW_USAGE_BYTES)) + .thenReturn(getMB(11)); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c3.getContainerId().toString(), CGROUP_FILE_TASKS)) + .thenReturn("1236").thenReturn(""); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c3.getContainerId().toString(), CGROUP_PARAM_MEMORY_USAGE_BYTES)) + .thenReturn(getMB(9)); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c3.getContainerId().toString(), CGROUP_PARAM_MEMORY_MEMSW_USAGE_BYTES)) + .thenReturn(getMB(9)); + + + DefaultOOMHandler handler = + new DefaultOOMHandler(context, false) { + @Override + protected CGroupsHandler getCGroupsHandler() { + return cGroupsHandler; + } + }; + handler.run(); + + verify(ex, times(1)).signalContainer( + new ContainerSignalContext.Builder() + .setPid("1235") + .setContainer(c1) + .setSignal(ContainerExecutor.Signal.KILL) + .build() + ); + verify(ex, times(1)).signalContainer( + new ContainerSignalContext.Builder() + .setPid("1234") + .setContainer(c2) + .setSignal(ContainerExecutor.Signal.KILL) + .build() + ); + verify(ex, times(2)).signalContainer(any()); + } + + /** + * We have two OPPORTUNISTIC containers and one GUARANTEED container. + * the GUARANTEED container is out of limit. OOM is resolved + * after first killing the two OPPORTUNISTIC containers and then the + * GUARANTEED container. 
+ */ + @Test + public void testKillGuaranteedContainerUponOOM() throws Exception { + int currentContainerId = 0; + + ConcurrentHashMap containers = + new ConcurrentHashMap<>(); + Container c1 = createContainer(currentContainerId++, false, 2); + containers.put(c1.getContainerId(), c1); + Container c2 = createContainer(currentContainerId++, false, 1); + containers.put(c2.getContainerId(), c2); + Container c3 = createContainer(currentContainerId++, true, 1); + containers.put(c3.getContainerId(), c3); + + ContainerExecutor ex = createContainerExecutor(containers); + Context context = mock(Context.class); + when(context.getContainers()).thenReturn(containers); + when(context.getContainerExecutor()).thenReturn(ex); + + CGroupsHandler cGroupsHandler = mock(CGroupsHandler.class); + when(cGroupsHandler.getCGroupParam( + CGroupsHandler.CGroupController.MEMORY, + "", + CGROUP_PARAM_MEMORY_OOM_CONTROL)) + .thenReturn("under_oom 1") + .thenReturn("under_oom 1") + .thenReturn("under_oom 1") + .thenReturn("under_oom 0"); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c1.getContainerId().toString(), CGROUP_FILE_TASKS)) + .thenReturn("1234").thenReturn(""); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c1.getContainerId().toString(), CGROUP_PARAM_MEMORY_USAGE_BYTES)) + .thenReturn(getMB(9)); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c1.getContainerId().toString(), CGROUP_PARAM_MEMORY_MEMSW_USAGE_BYTES)) + .thenReturn(getMB(9)); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c2.getContainerId().toString(), CGROUP_FILE_TASKS)) + .thenReturn("1235").thenReturn(""); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c2.getContainerId().toString(), CGROUP_PARAM_MEMORY_USAGE_BYTES)) + .thenReturn(getMB(9)); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c2.getContainerId().toString(), CGROUP_PARAM_MEMORY_MEMSW_USAGE_BYTES)) + .thenReturn(getMB(9)); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c3.getContainerId().toString(), CGROUP_FILE_TASKS)) + .thenReturn("1236").thenReturn(""); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c3.getContainerId().toString(), CGROUP_PARAM_MEMORY_USAGE_BYTES)) + .thenReturn(getMB(11)); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c3.getContainerId().toString(), CGROUP_PARAM_MEMORY_MEMSW_USAGE_BYTES)) + .thenReturn(getMB(11)); + + DefaultOOMHandler handler = + new DefaultOOMHandler(context, false) { + @Override + protected CGroupsHandler getCGroupsHandler() { + return cGroupsHandler; + } + }; + handler.run(); + + verify(ex, times(1)).signalContainer( + new ContainerSignalContext.Builder() + .setPid("1234") + .setContainer(c1) + .setSignal(ContainerExecutor.Signal.KILL) + .build() + ); + verify(ex, times(1)).signalContainer( + new ContainerSignalContext.Builder() + .setPid("1235") + .setContainer(c1) + .setSignal(ContainerExecutor.Signal.KILL) + .build() + ); + verify(ex, times(1)).signalContainer( + new ContainerSignalContext.Builder() + .setPid("1236") + .setContainer(c1) + .setSignal(ContainerExecutor.Signal.KILL) + .build() + ); + verify(ex, times(3)).signalContainer(any()); + } + + /** + * We have two OPPORTUNISTIC containers and one GUARANTEED container. + * None of the containers exceeded its memory limit. + * OOM is resolved after killing all running containers. 
+ */ + @Test + public void testKillAllContainersUponOOM() throws Exception { + int currentContainerId = 0; + + ConcurrentHashMap containers = + new ConcurrentHashMap<>(); + Container c1 = createContainer(currentContainerId++, false, 1); + containers.put(c1.getContainerId(), c1); + Container c2 = createContainer(currentContainerId++, false, 2); + containers.put(c2.getContainerId(), c2); + Container c3 = createContainer(currentContainerId++, true, 1); + containers.put(c3.getContainerId(), c3); + + ContainerExecutor ex = createContainerExecutor(containers); + Context context = mock(Context.class); + when(context.getContainers()).thenReturn(containers); + when(context.getContainerExecutor()).thenReturn(ex); + + CGroupsHandler cGroupsHandler = mock(CGroupsHandler.class); + when(cGroupsHandler.getCGroupParam( + CGroupsHandler.CGroupController.MEMORY, + "", + CGROUP_PARAM_MEMORY_OOM_CONTROL)) + .thenReturn("under_oom 1") + .thenReturn("under_oom 1") + .thenReturn("under_oom 1") + .thenReturn("under_oom 0"); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c1.getContainerId().toString(), CGROUP_FILE_TASKS)) + .thenReturn("1234").thenReturn(""); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c1.getContainerId().toString(), CGROUP_PARAM_MEMORY_USAGE_BYTES)) + .thenReturn(getMB(9)); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c1.getContainerId().toString(), CGROUP_PARAM_MEMORY_MEMSW_USAGE_BYTES)) + .thenReturn(getMB(9)); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c2.getContainerId().toString(), CGROUP_FILE_TASKS)) + .thenReturn("1235").thenReturn(""); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c2.getContainerId().toString(), CGROUP_PARAM_MEMORY_USAGE_BYTES)) + .thenReturn(getMB(9)); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c2.getContainerId().toString(), CGROUP_PARAM_MEMORY_MEMSW_USAGE_BYTES)) + .thenReturn(getMB(9)); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c3.getContainerId().toString(), CGROUP_FILE_TASKS)) + .thenReturn("1236").thenReturn(""); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c3.getContainerId().toString(), CGROUP_PARAM_MEMORY_USAGE_BYTES)) + .thenReturn(getMB(9)); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c3.getContainerId().toString(), CGROUP_PARAM_MEMORY_MEMSW_USAGE_BYTES)) + .thenReturn(getMB(9)); + + DefaultOOMHandler handler = + new DefaultOOMHandler(context, false) { + @Override + protected CGroupsHandler getCGroupsHandler() { + return cGroupsHandler; + } + }; + handler.run(); + + verify(ex, times(1)).signalContainer( + new ContainerSignalContext.Builder() + .setPid("1234") + .setContainer(c2) + .setSignal(ContainerExecutor.Signal.KILL) + .build() + ); + verify(ex, times(1)).signalContainer( + new ContainerSignalContext.Builder() + .setPid("1235") + .setContainer(c1) + .setSignal(ContainerExecutor.Signal.KILL) + .build() + ); + verify(ex, times(1)).signalContainer( + new ContainerSignalContext.Builder() + .setPid("1236") + .setContainer(c3) + .setSignal(ContainerExecutor.Signal.KILL) + .build() + ); + verify(ex, times(3)).signalContainer(any()); + } + + /** + * We have two OPPORTUNISTIC containers and one GUARANTEED container. + * None of the containers exceeded its memory limit. + * OOM is not resolved even after killing all running containers. 
+ * A YarnRuntimeException is excepted to be thrown. + */ + @Test(expected = YarnRuntimeException.class) + public void testOOMUnresolvedAfterKillingAllContainers() throws Exception { + int currentContainerId = 0; + + ConcurrentHashMap containers = + new ConcurrentHashMap<>(); + Container c1 = createContainer(currentContainerId++, false, 1); + containers.put(c1.getContainerId(), c1); + Container c2 = createContainer(currentContainerId++, false, 2); + containers.put(c2.getContainerId(), c2); + Container c3 = createContainer(currentContainerId++, true, 3); + containers.put(c3.getContainerId(), c3); + + ContainerExecutor ex = createContainerExecutor(containers); + Context context = mock(Context.class); + when(context.getContainers()).thenReturn(containers); + when(context.getContainerExecutor()).thenReturn(ex); + + CGroupsHandler cGroupsHandler = mock(CGroupsHandler.class); + when(cGroupsHandler.getCGroupParam( + CGroupsHandler.CGroupController.MEMORY, + "", + CGROUP_PARAM_MEMORY_OOM_CONTROL)) + .thenReturn("under_oom 1") + .thenReturn("under_oom 1") + .thenReturn("under_oom 1") + .thenReturn("under_oom 1"); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c1.getContainerId().toString(), CGROUP_FILE_TASKS)) + .thenReturn("1234").thenReturn(""); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c1.getContainerId().toString(), CGROUP_PARAM_MEMORY_USAGE_BYTES)) + .thenReturn(getMB(9)); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c1.getContainerId().toString(), CGROUP_PARAM_MEMORY_MEMSW_USAGE_BYTES)) + .thenReturn(getMB(9)); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c2.getContainerId().toString(), CGROUP_FILE_TASKS)) + .thenReturn("1235").thenReturn(""); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c2.getContainerId().toString(), CGROUP_PARAM_MEMORY_USAGE_BYTES)) + .thenReturn(getMB(9)); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c2.getContainerId().toString(), CGROUP_PARAM_MEMORY_MEMSW_USAGE_BYTES)) + .thenReturn(getMB(9)); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c3.getContainerId().toString(), CGROUP_FILE_TASKS)) + .thenReturn("1236").thenReturn(""); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c3.getContainerId().toString(), CGROUP_PARAM_MEMORY_USAGE_BYTES)) + .thenReturn(getMB(9)); + when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, + c3.getContainerId().toString(), CGROUP_PARAM_MEMORY_MEMSW_USAGE_BYTES)) + .thenReturn(getMB(9)); + + DefaultOOMHandler handler = + new DefaultOOMHandler(context, false) { + @Override + protected CGroupsHandler getCGroupsHandler() { + return cGroupsHandler; + } + }; handler.run(); } - private class AppId extends ApplicationIdPBImpl { - AppId(long clusterTs, int appId) { - this.setClusterTimestamp(clusterTs); - this.setId(appId); - } - } - - private ContainerId createContainerId(int id) { - ApplicationId applicationId = new AppId(1, 1); + private static ContainerId createContainerId(int id) { + ApplicationId applicationId = ApplicationId.newInstance(1, 1); ApplicationAttemptId applicationAttemptId = mock(ApplicationAttemptId.class); @@ -295,13 +973,41 @@ public class TestDefaultOOMHandler { return containerId; } - ContainerTokenIdentifier getToken() { - ContainerTokenIdentifier id = mock(ContainerTokenIdentifier.class); - when(id.getVersion()).thenReturn(1); - return id; 
+ private static Container createContainer(int containerId, + boolean guaranteed, long launchTime) { + Container c1 = mock(Container.class); + ContainerId cid1 = createContainerId(containerId); + when(c1.getContainerId()).thenReturn(cid1); + + ContainerTokenIdentifier token = mock(ContainerTokenIdentifier.class); + ExecutionType type = + guaranteed ? ExecutionType.GUARANTEED : ExecutionType.OPPORTUNISTIC; + when(token.getExecutionType()).thenReturn(type); + when(c1.getContainerTokenIdentifier()).thenReturn(token); + + when(c1.getResource()).thenReturn(Resource.newInstance(10, 1)); + when(c1.getContainerLaunchTime()).thenReturn(launchTime); + + return c1; } String getMB(long mb) { return Long.toString(mb * 1024 * 1024); } + + private static ContainerExecutor createContainerExecutor( + ConcurrentHashMap containers) + throws IOException { + ContainerExecutor ex = mock(ContainerExecutor.class); + when(ex.signalContainer(any())).thenAnswer( + invocation -> { + Object[] arguments = invocation.getArguments(); + Container container = ((ContainerSignalContext) + arguments[0]).getContainer(); + // remove container from NM context immediately + containers.remove(container.getContainerId()); + return true; + }); + return ex; + } } \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/MockContainer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/MockContainer.java index 77ebd347aab..325709b07ae 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/MockContainer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/MockContainer.java @@ -241,6 +241,11 @@ public class MockContainer implements Container { return 0; } + @Override + public long getContainerLaunchTime() { + return 0; + } + @Override public ResourceMappings getResourceMappings() { return null; From 351cf87c92872d90f62c476f85ae4d02e485769c Mon Sep 17 00:00:00 2001 From: Robert Kanter Date: Thu, 7 Jun 2018 17:09:34 -0700 Subject: [PATCH 070/113] Disable mounting cgroups by default (miklos.szegedi@cloudera.com via rkanter) --- .../impl/container-executor.c | 54 +++++++++++++------ .../impl/container-executor.h | 4 ++ .../native/container-executor/impl/main.c | 19 ++++--- .../src/site/markdown/NodeManagerCgroups.md | 2 +- 4 files changed, 55 insertions(+), 24 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c index 1b8842a01c9..baf0e8b4703 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c @@ -73,6 +73,7 @@ static const char* DEFAULT_BANNED_USERS[] = {"yarn", "mapred", "hdfs", "bin", 0} static const int DEFAULT_DOCKER_SUPPORT_ENABLED = 0; static const int DEFAULT_TC_SUPPORT_ENABLED = 0; +static const int 
DEFAULT_MOUNT_CGROUP_SUPPORT_ENABLED = 0; static const char* PROC_PATH = "/proc"; @@ -482,6 +483,12 @@ int is_tc_support_enabled() { DEFAULT_TC_SUPPORT_ENABLED, &executor_cfg); } +int is_mount_cgroups_support_enabled() { + return is_feature_enabled(MOUNT_CGROUP_SUPPORT_ENABLED_KEY, + DEFAULT_MOUNT_CGROUP_SUPPORT_ENABLED, + &executor_cfg); +} + /** * Utility function to concatenate argB to argA using the concat_pattern. */ @@ -2346,20 +2353,25 @@ void chown_dir_contents(const char *dir_path, uid_t uid, gid_t gid) { DIR *dp; struct dirent *ep; - char *path_tmp = malloc(strlen(dir_path) + NAME_MAX + 2); + size_t len = strlen(dir_path) + NAME_MAX + 2; + char *path_tmp = malloc(len); if (path_tmp == NULL) { return; } - char *buf = stpncpy(path_tmp, dir_path, strlen(dir_path)); - *buf++ = '/'; - dp = opendir(dir_path); if (dp != NULL) { while ((ep = readdir(dp)) != NULL) { - stpncpy(buf, ep->d_name, strlen(ep->d_name)); - buf[strlen(ep->d_name)] = '\0'; - change_owner(path_tmp, uid, gid); + if (strcmp(ep->d_name, ".") != 0 && + strcmp(ep->d_name, "..") != 0 && + strstr(ep->d_name, "..") == NULL) { + int result = snprintf(path_tmp, len, "%s/%s", dir_path, ep->d_name); + if (result > 0 && result < len) { + change_owner(path_tmp, uid, gid); + } else { + fprintf(LOGFILE, "Ignored %s/%s due to length", dir_path, ep->d_name); + } + } } closedir(dp); } @@ -2383,11 +2395,16 @@ int mount_cgroup(const char *pair, const char *hierarchy) { char *mount_path = malloc(len); char hier_path[EXECUTOR_PATH_MAX]; int result = 0; - struct stat sb; if (controller == NULL || mount_path == NULL) { fprintf(LOGFILE, "Failed to mount cgroup controller; not enough memory\n"); result = OUT_OF_MEMORY; + goto cleanup; + } + if (hierarchy == NULL || strstr(hierarchy, "..") != NULL) { + fprintf(LOGFILE, "Unsupported cgroup hierarhy path detected.\n"); + result = INVALID_COMMAND_PROVIDED; + goto cleanup; } if (get_kv_key(pair, controller, len) < 0 || get_kv_value(pair, mount_path, len) < 0) { @@ -2395,13 +2412,10 @@ int mount_cgroup(const char *pair, const char *hierarchy) { pair); result = -1; } else { - if (stat(mount_path, &sb) != 0) { - // Create mount point, if it does not exist - const mode_t mount_perms = S_IRWXU | S_IRGRP | S_IXGRP; - if (mkdirs(mount_path, mount_perms) == 0) { - fprintf(LOGFILE, "Failed to create cgroup mount point %s at %s\n", - controller, mount_path); - } + if (strstr(mount_path, "..") != NULL) { + fprintf(LOGFILE, "Unsupported cgroup mount path detected.\n"); + result = INVALID_COMMAND_PROVIDED; + goto cleanup; } if (mount("none", mount_path, "cgroup", 0, controller) == 0) { char *buf = stpncpy(hier_path, mount_path, strlen(mount_path)); @@ -2410,13 +2424,20 @@ int mount_cgroup(const char *pair, const char *hierarchy) { // create hierarchy as 0750 and chown to Hadoop NM user const mode_t perms = S_IRWXU | S_IRGRP | S_IXGRP; + struct stat sb; + if (stat(hier_path, &sb) == 0 && + (sb.st_uid != nm_uid || sb.st_gid != nm_gid)) { + fprintf(LOGFILE, "cgroup hierarchy %s already owned by another user %d\n", hier_path, sb.st_uid); + result = INVALID_COMMAND_PROVIDED; + goto cleanup; + } if (mkdirs(hier_path, perms) == 0) { change_owner(hier_path, nm_uid, nm_gid); chown_dir_contents(hier_path, nm_uid, nm_gid); } } else { fprintf(LOGFILE, "Failed to mount cgroup controller %s at %s - %s\n", - controller, mount_path, strerror(errno)); + controller, mount_path, strerror(errno)); // if controller is already mounted, don't stop trying to mount others if (errno != EBUSY) { result = -1; @@ -2424,6 +2445,7 @@ int 
mount_cgroup(const char *pair, const char *hierarchy) { } } +cleanup: free(controller); free(mount_path); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.h b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.h index 91366064fa6..32e953d89a6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.h +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.h @@ -64,6 +64,7 @@ enum operations { #define ALLOWED_SYSTEM_USERS_KEY "allowed.system.users" #define DOCKER_SUPPORT_ENABLED_KEY "feature.docker.enabled" #define TC_SUPPORT_ENABLED_KEY "feature.tc.enabled" +#define MOUNT_CGROUP_SUPPORT_ENABLED_KEY "feature.mount-cgroup.enabled" #define TMP_DIR "tmp" extern struct passwd *user_detail; @@ -238,6 +239,9 @@ int is_feature_enabled(const char* feature_key, int default_value, /** Check if tc (traffic control) support is enabled in configuration. */ int is_tc_support_enabled(); +/** Check if cgroup mount support is enabled in configuration. */ +int is_mount_cgroups_support_enabled(); + /** * Run a batch of tc commands that modify interface configuration */ diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c index c54fd3ea900..2099ace7a41 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c @@ -262,14 +262,19 @@ static int validate_arguments(int argc, char **argv , int *operation) { } if (strcmp("--mount-cgroups", argv[1]) == 0) { - if (argc < 4) { - display_usage(stdout); - return INVALID_ARGUMENT_NUMBER; + if (is_mount_cgroups_support_enabled()) { + if (argc < 4) { + display_usage(stdout); + return INVALID_ARGUMENT_NUMBER; + } + optind++; + cmd_input.cgroups_hierarchy = argv[optind++]; + *operation = MOUNT_CGROUPS; + return 0; + } else { + display_feature_disabled_message("mount cgroup"); + return FEATURE_DISABLED; } - optind++; - cmd_input.cgroups_hierarchy = argv[optind++]; - *operation = MOUNT_CGROUPS; - return 0; } if (strcmp("--tc-modify-state", argv[1]) == 0) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/NodeManagerCgroups.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/NodeManagerCgroups.md index d36280113cf..4a83dcea79b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/NodeManagerCgroups.md +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/NodeManagerCgroups.md @@ -50,7 +50,7 @@ YARN uses CGroups through a directory structure mounted into the file system by | Option | Description | |:---- |:---- | | Discover CGroups mounted already | This should be used on newer systems like RHEL7 or Ubuntu16 or if the administrator mounts CGroups before YARN starts. 
Set `yarn.nodemanager.linux-container-executor.cgroups.mount` to false and leave other settings set to their defaults. YARN will locate the mount points in `/proc/mounts`. Common locations include `/sys/fs/cgroup` and `/cgroup`. The default location can vary depending on the Linux distribution in use.| -| CGroups mounted by YARN | If the system does not have CGroups mounted or it is mounted to an inaccessible location then point `yarn.nodemanager.linux-container-executor.cgroups.mount-path` to an empty directory. Set `yarn.nodemanager.linux-container-executor.cgroups.mount` to true. A point to note here is that the container-executor binary will try to create and mount each subsystem as a subdirectory under this path. If `cpu` is already mounted somewhere with `cpuacct`, then the directory `cpu,cpuacct` will be created for the hierarchy.| +| CGroups mounted by YARN | IMPORTANT: This option is deprecated due to security reasons with the `container-executor.cfg` option `feature.mount-cgroup.enabled=0` by default. Please mount cgroups before launching YARN.| | CGroups mounted already or linked but not in `/proc/mounts` | If cgroups is accessible through lxcfs or simulated by another filesystem, then point `yarn.nodemanager.linux-container-executor.cgroups.mount-path` to your CGroups root directory. Set `yarn.nodemanager.linux-container-executor.cgroups.mount` to false. YARN tries to use this path first, before any CGroup mount point discovery. The path should have a subdirectory for each CGroup hierarchy named by the comma separated CGroup subsystems supported like `/cpu,cpuacct`. Valid subsystem names are `cpu, cpuacct, cpuset, memory, net_cls, blkio, freezer, devices`.| CGroups and security From 3b88fe25baf130cd7a77590f9ded5b0bf028ef75 Mon Sep 17 00:00:00 2001 From: Inigo Goiri Date: Thu, 7 Jun 2018 17:09:31 -0700 Subject: [PATCH 071/113] YARN-8359. Exclude containermanager.linux test classes on Windows. Contributed by Jason Lowe. --- .../hadoop-yarn-server-nodemanager/pom.xml | 21 +++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml index 514682009a2..26a5220ca74 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml @@ -294,6 +294,27 @@ + + native-win + + + Windows + + + + + + org.apache.maven.plugins + maven-surefire-plugin + + + org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.** + + + + + + From c42dcc7c47340d517563890269c6c112996e8897 Mon Sep 17 00:00:00 2001 From: Jitendra Pandey Date: Thu, 7 Jun 2018 23:00:26 -0700 Subject: [PATCH 072/113] HADOOP-15482. Upgrade jackson-databind to version 2.9.5. Contributed by Lokesh Jain. --- hadoop-project/pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index 8edfd76eb05..8cb5bfc48b4 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -69,7 +69,7 @@ 1.9.13 - 2.9.4 + 2.9.5 1.7.25 From a1272448bfa2f1a159d948b8635558e053b7be78 Mon Sep 17 00:00:00 2001 From: Anu Engineer Date: Fri, 8 Jun 2018 10:27:01 -0700 Subject: [PATCH 073/113] HDDS-157. Upgrade common-langs version to 3.7 in HDDS and Ozone. Contributed by Takanobu Asanuma. 
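Note on the migration: the lang-to-lang3 move is almost entirely a package rename in the imports (org.apache.commons.lang.* becomes org.apache.commons.lang3.*); StringUtils, RandomStringUtils, the Equals/HashCode/ToString builders and DurationFormatUtils keep the signatures used here. The one call-site adjustment is RandomUtils, whose lang3 variant takes an explicit half-open range instead of a single exclusive bound, which is why TestKeys.java changes. A minimal sketch of that difference, illustrative only and not part of the patch:

    import org.apache.commons.lang3.RandomStringUtils;
    import org.apache.commons.lang3.RandomUtils;

    // commons-lang 2.x wrote: RandomUtils.nextInt(5)    -> 0 <= x < 5 (bound only)
    // commons-lang3 writes:   RandomUtils.nextInt(0, 5) -> same range, explicit startInclusive/endExclusive
    static String randomPart() {
      int len = RandomUtils.nextInt(0, 5);               // endExclusive, as in the TestKeys change
      return RandomStringUtils.randomAlphanumeric(len);
    }
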
--- .../org/apache/hadoop/hdds/scm/XceiverClientHandler.java | 2 +- .../main/java/org/apache/hadoop/hdds/client/BlockID.java | 2 +- .../common/states/endpoint/RegisterEndpointTask.java | 2 +- .../apache/hadoop/ozone/client/rest/response/KeyInfo.java | 4 ++-- .../org/apache/hadoop/ozone/web/response/KeyInfo.java | 4 ++-- .../hadoop/ozone/TestStorageContainerManagerHelper.java | 2 +- .../hadoop/ozone/client/rpc/TestOzoneRpcClient.java | 2 +- .../hadoop/ozone/ksm/TestContainerReportWithKeys.java | 2 +- .../org/apache/hadoop/ozone/ksm/TestKeySpaceManager.java | 2 +- .../apache/hadoop/ozone/ksm/TestKsmBlockVersioning.java | 2 +- .../hadoop/ozone/ksm/TestMultipleContainerReadWrite.java | 2 +- .../org/apache/hadoop/ozone/ozShell/TestOzoneShell.java | 2 +- .../apache/hadoop/ozone/scm/TestAllocateContainer.java | 2 +- .../apache/hadoop/ozone/scm/TestXceiverClientManager.java | 2 +- .../hadoop/ozone/web/TestOzoneRestWithMiniCluster.java | 2 +- .../java/org/apache/hadoop/ozone/web/client/TestKeys.java | 8 ++++---- .../org/apache/hadoop/ozone/web/client/TestKeysRatis.java | 2 +- .../org/apache/hadoop/ozone/web/client/TestVolume.java | 2 +- .../org/apache/hadoop/ozone/ksm/TestChunkStreams.java | 2 +- .../main/java/org/apache/hadoop/ozone/freon/Freon.java | 6 +++--- .../hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java | 2 +- .../hadoop/ozone/genesis/BenchMarkMetadataStoreReads.java | 2 +- .../ozone/genesis/BenchMarkMetadataStoreWrites.java | 2 +- .../hadoop/ozone/genesis/BenchMarkRocksDbStore.java | 2 +- .../java/org/apache/hadoop/ozone/genesis/GenesisUtil.java | 2 +- 25 files changed, 32 insertions(+), 32 deletions(-) diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientHandler.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientHandler.java index 6a2286c523f..7c568f6b879 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientHandler.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientHandler.java @@ -21,7 +21,7 @@ import com.google.common.base.Preconditions; import org.apache.ratis.shaded.io.netty.channel.Channel; import org.apache.ratis.shaded.io.netty.channel.ChannelHandlerContext; import org.apache.ratis.shaded.io.netty.channel.SimpleChannelInboundHandler; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/BlockID.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/BlockID.java index 7bf8f01ee34..62b12e3e041 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/BlockID.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/BlockID.java @@ -16,7 +16,7 @@ */ package org.apache.hadoop.hdds.client; -import org.apache.commons.lang.builder.ToStringBuilder; +import org.apache.commons.lang3.builder.ToStringBuilder; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java index 12b48abb10f..b3d2b620091 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.container.common.states.endpoint; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.ozone.container.common.statemachine diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/KeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/KeyInfo.java index 2e1df5fc844..61c2abb2a82 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/KeyInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/KeyInfo.java @@ -22,8 +22,8 @@ import java.io.IOException; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.ObjectReader; -import org.apache.commons.lang.builder.EqualsBuilder; -import org.apache.commons.lang.builder.HashCodeBuilder; +import org.apache.commons.lang3.builder.EqualsBuilder; +import org.apache.commons.lang3.builder.HashCodeBuilder; /** * KeyInfo class is used used for parsing json response diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/KeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/KeyInfo.java index 34885f6e2be..ba47bee7679 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/KeyInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/KeyInfo.java @@ -19,8 +19,8 @@ package org.apache.hadoop.ozone.web.response; import java.io.IOException; -import org.apache.commons.lang.builder.EqualsBuilder; -import org.apache.commons.lang.builder.HashCodeBuilder; +import org.apache.commons.lang3.builder.EqualsBuilder; +import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.hadoop.ozone.web.utils.JsonUtils; import com.fasterxml.jackson.annotation.JsonAutoDetect; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java index e5873a5b4e3..99e69ecd266 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java @@ -20,7 +20,7 @@ import com.google.common.collect.Lists; import com.google.common.collect.Maps; import com.google.common.collect.Sets; import com.google.common.primitives.Longs; -import org.apache.commons.lang.RandomStringUtils; +import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java index e3823b34ff1..cafe5db0d07 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.client.rpc; -import org.apache.commons.lang.RandomStringUtils; +import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; import org.apache.hadoop.ozone.MiniOzoneCluster; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestContainerReportWithKeys.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestContainerReportWithKeys.java index 18482d1866c..bafba32008f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestContainerReportWithKeys.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestContainerReportWithKeys.java @@ -16,7 +16,7 @@ */ package org.apache.hadoop.ozone.ksm; -import org.apache.commons.lang.RandomStringUtils; +import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManager.java index 9c0a48808c2..f07a97de1ca 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManager.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.ksm; -import org.apache.commons.lang.RandomStringUtils; +import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKsmBlockVersioning.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKsmBlockVersioning.java index addd87b185a..15c3fd3b46d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKsmBlockVersioning.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKsmBlockVersioning.java @@ -16,7 +16,7 @@ */ package org.apache.hadoop.ozone.ksm; -import org.apache.commons.lang.RandomStringUtils; +import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdfs.DFSUtil; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestMultipleContainerReadWrite.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestMultipleContainerReadWrite.java index 765ec3f8134..1cb6e82c14a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestMultipleContainerReadWrite.java +++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestMultipleContainerReadWrite.java @@ -16,7 +16,7 @@ */ package org.apache.hadoop.ozone.ksm; -import org.apache.commons.lang.RandomStringUtils; +import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler; import org.apache.hadoop.metrics2.MetricsRecordBuilder; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java index 8ffe67da8db..0edfd4198bf 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java @@ -38,7 +38,7 @@ import java.util.Random; import java.util.UUID; import java.util.stream.Collectors; -import org.apache.commons.lang.RandomStringUtils; +import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestAllocateContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestAllocateContainer.java index ef6fd5f3c17..b1e9d26f105 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestAllocateContainer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestAllocateContainer.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.ozone.scm; -import org.apache.commons.lang.RandomStringUtils; +import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.ozone.MiniOzoneCluster; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientManager.java index 77e4e1bcfd7..478cf690c7b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientManager.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.scm; import com.google.common.cache.Cache; -import org.apache.commons.lang.RandomStringUtils; +import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; import org.apache.hadoop.io.IOUtils; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneRestWithMiniCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneRestWithMiniCluster.java index a9b8175f95d..0f49ade54cc 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneRestWithMiniCluster.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneRestWithMiniCluster.java @@ -24,7 +24,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.CHUNK_SIZE; import static org.junit.Assert.*; import org.apache.commons.io.IOUtils; -import org.apache.commons.lang.RandomStringUtils; +import 
org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.hdds.client.OzoneQuota; import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java index f8df7dceb36..28a138ec527 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java @@ -23,8 +23,8 @@ import org.apache.commons.codec.digest.DigestUtils; import org.apache.commons.collections.IteratorUtils; import org.apache.commons.io.FileUtils; import org.apache.commons.io.IOUtils; -import org.apache.commons.lang.RandomStringUtils; -import org.apache.commons.lang.math.RandomUtils; +import org.apache.commons.lang3.RandomStringUtils; +import org.apache.commons.lang3.RandomUtils; import org.apache.commons.lang3.tuple.ImmutablePair; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.client.ReplicationFactor; @@ -172,10 +172,10 @@ public class TestKeys { * @return Key composed of multiple parts delimited by "/" */ static String getMultiPartKey(String delimiter) { - int numParts = RandomUtils.nextInt(5) + 1; + int numParts = RandomUtils.nextInt(0, 5) + 1; String[] nameParts = new String[numParts]; for (int i = 0; i < numParts; i++) { - int stringLength = numParts == 1 ? 5 : RandomUtils.nextInt(5); + int stringLength = numParts == 1 ? 5 : RandomUtils.nextInt(0, 5); nameParts[i] = RandomStringUtils.randomAlphanumeric(stringLength); } return StringUtils.join(delimiter, nameParts); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeysRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeysRatis.java index 645b86610b1..6a9202272b7 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeysRatis.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeysRatis.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.ozone.web.client; -import org.apache.commons.lang.RandomStringUtils; +import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.RatisTestHelper; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolume.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolume.java index a5104302526..14fc5e3661c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolume.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolume.java @@ -19,7 +19,7 @@ package org.apache.hadoop.ozone.web.client; import org.apache.commons.io.FileUtils; -import org.apache.commons.lang.RandomStringUtils; +import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.hdds.client.OzoneQuota; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.hdds.conf.OzoneConfiguration; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/TestChunkStreams.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/TestChunkStreams.java index 
de4a85ac46b..e6158bddd0c 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/TestChunkStreams.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/TestChunkStreams.java @@ -16,7 +16,7 @@ */ package org.apache.hadoop.ozone.ksm; -import org.apache.commons.lang.RandomStringUtils; +import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.ozone.client.io.ChunkGroupInputStream; import org.apache.hadoop.ozone.client.io.ChunkGroupOutputStream; import org.apache.hadoop.hdds.scm.storage.ChunkInputStream; diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/Freon.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/Freon.java index d933e6f2e70..ab52b86c23a 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/Freon.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/Freon.java @@ -29,9 +29,9 @@ import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.Option; import org.apache.commons.cli.OptionBuilder; import org.apache.commons.cli.Options; -import org.apache.commons.lang.ArrayUtils; -import org.apache.commons.lang.RandomStringUtils; -import org.apache.commons.lang.time.DurationFormatUtils; +import org.apache.commons.lang3.ArrayUtils; +import org.apache.commons.lang3.RandomStringUtils; +import org.apache.commons.lang3.time.DurationFormatUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.hdds.client.OzoneQuota; diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java index a5d268d5089..13b04c31ee5 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java @@ -20,7 +20,7 @@ package org.apache.hadoop.ozone.genesis; import org.apache.ratis.shaded.com.google.protobuf.ByteString; import org.apache.commons.codec.digest.DigestUtils; import org.apache.commons.io.FileUtils; -import org.apache.commons.lang.RandomStringUtils; +import org.apache.commons.lang3.RandomStringUtils; import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.client.BlockID; diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkMetadataStoreReads.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkMetadataStoreReads.java index fc3dbcb2305..05e7920dbc4 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkMetadataStoreReads.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkMetadataStoreReads.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.ozone.genesis; -import org.apache.commons.lang.RandomStringUtils; +import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.utils.MetadataStore; import org.openjdk.jmh.annotations.Benchmark; import org.openjdk.jmh.annotations.Param; diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkMetadataStoreWrites.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkMetadataStoreWrites.java index f496a7d514f..4321287e185 100644 --- 
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkMetadataStoreWrites.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkMetadataStoreWrites.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.ozone.genesis; -import org.apache.commons.lang.RandomStringUtils; +import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.utils.MetadataStore; import org.openjdk.jmh.annotations.Benchmark; import org.openjdk.jmh.annotations.Param; diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkRocksDbStore.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkRocksDbStore.java index c4c6f9eed6f..5f4e035cd3e 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkRocksDbStore.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkRocksDbStore.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.genesis; import org.apache.commons.io.FileUtils; -import org.apache.commons.lang.RandomStringUtils; +import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.utils.MetadataStore; import org.apache.hadoop.utils.RocksDBStore; diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/GenesisUtil.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/GenesisUtil.java index 7f864ae6bf4..f4999254d50 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/GenesisUtil.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/GenesisUtil.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.ozone.genesis; -import org.apache.commons.lang.RandomStringUtils; +import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.hdds.protocol.DatanodeDetails; From cf4108313da83e28d07676078a33016ec8856ff6 Mon Sep 17 00:00:00 2001 From: Xiao Chen Date: Fri, 8 Jun 2018 15:13:38 -0700 Subject: [PATCH 074/113] HDFS-13642. Creating a file with block size smaller than EC policy's cell size should fail. 
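With this change the namenode validates the requested block size against the cell size of the effective erasure coding policy at create time and rejects the call with an IOException; replicated files (and EC replication policies) keep the existing verifyReplication path. A hedged client-side sketch of the new behavior, with the policy name and sizes chosen for illustration (RS-6-3-1024k uses a 1 MB cell), assuming fs is a DistributedFileSystem with erasure coding enabled:

    try (FSDataOutputStream out = fs.createFile(new Path("/ec/too-small"))
        .ecPolicyName("RS-6-3-1024k")
        .blockSize(512 * 1024)          // smaller than the 1 MB cell size
        .build()) {                     // create RPC happens here
      out.write(1);
    } catch (IOException e) {
      // message reads "Specified block size (...) is less than the cell size (...)"
    }
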
--- .../server/namenode/FSDirErasureCodingOp.java | 23 ++++++++++++++---- .../server/namenode/FSDirWriteFileOp.java | 10 ++------ .../hdfs/server/namenode/FSNamesystem.java | 21 ++++++++++++---- .../org/apache/hadoop/hdfs/DFSTestUtil.java | 5 ++-- .../hdfs/TestDFSStripedOutputStream.java | 16 ++++++++++++ .../hdfs/TestErasureCodingExerciseAPIs.java | 2 +- .../hdfs/TestErasureCodingPolicies.java | 2 +- .../src/test/resources/editsStored | Bin 7909 -> 7909 bytes .../src/test/resources/editsStored.xml | 2 +- 9 files changed, 58 insertions(+), 23 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java index 3a32db45875..7160b861f77 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.namenode; import com.google.common.base.Preconditions; import com.google.common.collect.Lists; +import org.apache.commons.lang.StringUtils; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.XAttr; @@ -344,16 +345,28 @@ final class FSDirErasureCodingOp { } /** - * Check if the file or directory has an erasure coding policy. + * Get the erasure coding policy information for specified path and policy + * name. If ec policy name is given, it will be parsed and the corresponding + * policy will be returned. Otherwise, get the policy from the parents of the + * iip. * * @param fsn namespace + * @param ecPolicyName the ec policy name * @param iip inodes in the path containing the file - * @return Whether the file or directory has an erasure coding policy. 
+ * @return {@link ErasureCodingPolicy}, or null if no policy is found * @throws IOException */ - static boolean hasErasureCodingPolicy(final FSNamesystem fsn, - final INodesInPath iip) throws IOException { - return unprotectedGetErasureCodingPolicy(fsn, iip) != null; + static ErasureCodingPolicy getErasureCodingPolicy(FSNamesystem fsn, + String ecPolicyName, INodesInPath iip) throws IOException { + ErasureCodingPolicy ecPolicy; + if (!StringUtils.isEmpty(ecPolicyName)) { + ecPolicy = FSDirErasureCodingOp.getErasureCodingPolicyByName( + fsn, ecPolicyName); + } else { + ecPolicy = FSDirErasureCodingOp.unprotectedGetErasureCodingPolicy( + fsn, iip); + } + return ecPolicy; } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java index 8f34e1c6217..03c349c3dee 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hdfs.server.namenode; import com.google.common.base.Preconditions; -import org.apache.commons.lang.StringUtils; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.fs.XAttrSetFlag; import org.apache.hadoop.hdfs.AddBlockFlag; @@ -543,13 +542,8 @@ class FSDirWriteFileOp { boolean isStriped = false; ErasureCodingPolicy ecPolicy = null; if (!shouldReplicate) { - if (!StringUtils.isEmpty(ecPolicyName)) { - ecPolicy = FSDirErasureCodingOp.getErasureCodingPolicyByName( - fsd.getFSNamesystem(), ecPolicyName); - } else { - ecPolicy = FSDirErasureCodingOp.unprotectedGetErasureCodingPolicy( - fsd.getFSNamesystem(), existing); - } + ecPolicy = FSDirErasureCodingOp.getErasureCodingPolicy( + fsd.getFSNamesystem(), ecPolicyName, existing); if (ecPolicy != null && (!ecPolicy.isReplicationPolicy())) { isStriped = true; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 19ff08d4165..a8c1926051a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -2403,11 +2403,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean, iip = FSDirWriteFileOp.resolvePathForStartFile( dir, pc, src, flag, createParent); - if (shouldReplicate || - (org.apache.commons.lang.StringUtils.isEmpty(ecPolicyName) && - !FSDirErasureCodingOp.hasErasureCodingPolicy(this, iip))) { - blockManager.verifyReplication(src, replication, clientMachine); - } if (blockSize < minBlockSize) { throw new IOException("Specified block size is less than configured" + @@ -2415,6 +2410,22 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean, + "): " + blockSize + " < " + minBlockSize); } + if (shouldReplicate) { + blockManager.verifyReplication(src, replication, clientMachine); + } else { + final ErasureCodingPolicy ecPolicy = FSDirErasureCodingOp + .getErasureCodingPolicy(this, ecPolicyName, iip); + if (ecPolicy != null && (!ecPolicy.isReplicationPolicy())) { + if (blockSize < ecPolicy.getCellSize()) { + throw new IOException("Specified block size (" + 
blockSize + + ") is less than the cell size (" + ecPolicy.getCellSize() + +") of the erasure coding policy (" + ecPolicy + ")."); + } + } else { + blockManager.verifyReplication(src, replication, clientMachine); + } + } + FileEncryptionInfo feInfo = null; if (!iip.isRaw() && provider != null) { EncryptionKeyInfo ezInfo = FSDirEncryptionZoneOp.getEncryptionKeyInfo( diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java index 7ddd24e4bfe..63199f31dd1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java @@ -1559,8 +1559,9 @@ public class DFSTestUtil { out.write("replicated".getBytes()); } - try (FSDataOutputStream out = filesystem.createFile( - new Path(ecDir, "RS-3-2")).ecPolicyName(ecPolicyRS32.getName()).build()) { + try (FSDataOutputStream out = filesystem + .createFile(new Path(ecDir, "RS-3-2")) + .ecPolicyName(ecPolicyRS32.getName()).blockSize(1024 * 1024).build()) { out.write("RS-3-2".getBytes()); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java index 3714542411d..4b9e8763880 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hdfs; import static org.junit.Assert.assertFalse; +import static org.junit.Assert.fail; import java.io.ByteArrayInputStream; import java.io.IOException; @@ -221,4 +222,19 @@ public class TestDFSStripedOutputStream { StripedFileTestUtil.checkData(fs, testPath, writeBytes, new ArrayList(), null, blockSize * dataBlocks); } + + @Test + public void testFileBlockSizeSmallerThanCellSize() throws Exception { + final Path path = new Path("testFileBlockSizeSmallerThanCellSize"); + final byte[] bytes = StripedFileTestUtil.generateBytes(cellSize * 2); + try { + DFSTestUtil.writeFile(fs, path, bytes, cellSize / 2); + fail("Creating a file with block size smaller than " + + "ec policy's cell size should fail"); + } catch (IOException expected) { + LOG.info("Caught expected exception", expected); + GenericTestUtils + .assertExceptionContains("less than the cell size", expected); + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingExerciseAPIs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingExerciseAPIs.java index c63ba347f73..de59a1d71e6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingExerciseAPIs.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingExerciseAPIs.java @@ -71,7 +71,7 @@ public class TestErasureCodingExerciseAPIs { private DistributedFileSystem fs; private HdfsAdmin dfsAdmin; private FileSystemTestWrapper fsWrapper; - private static final int BLOCK_SIZE = 1 << 14; // 16k + private static final int BLOCK_SIZE = 1 << 20; // 1MB private ErasureCodingPolicy ecPolicy; private static ErasureCodingPolicy getEcPolicy() { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java index 0b7d25932d5..7d97cce0b90 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java @@ -63,7 +63,7 @@ public class TestErasureCodingPolicies { private Configuration conf; private MiniDFSCluster cluster; private DistributedFileSystem fs; - private static final int BLOCK_SIZE = 16 * 1024; + private static final int BLOCK_SIZE = 1024 * 1024; private ErasureCodingPolicy ecPolicy; private FSNamesystem namesystem; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored index a0ae78eea82f1b65dec44c1d2fbc146e3397a06e..0382432dc86a725f50c1340c1692cea0ca1d6481 100644 GIT binary patch delta 28 kcmaEA`_y(rmMoJ1!{k6F-^n{=#aRxo6rQvBimV_b0Ggi)761SM delta 29 lcmaEA`_y(rmh9v_Ikw4OY@U;M%8IjWkl(Op^A%Y^MgX+H3qk+@ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml index 7e1881c74a5..cc72e0dd84d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml @@ -1510,7 +1510,7 @@ 1 1512607204120 1512607204120 - 512 + 1048576 DFSClient_NONMAPREDUCE_-923924783_1 127.0.0.1 true From fba1c42adc1c8ae57951e1865ec2ab05c8707bdf Mon Sep 17 00:00:00 2001 From: Chao Sun Date: Fri, 8 Jun 2018 16:36:42 -0700 Subject: [PATCH 075/113] HDFS-13664. Refactor ConfiguredFailoverProxyProvider to make inheritance easier. Contributed by Chao Sun. --- .../namenode/ha/ConfiguredFailoverProxyProvider.java | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java index e9c8791c5c5..58f49438cc7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java @@ -52,11 +52,11 @@ public class ConfiguredFailoverProxyProvider extends protected final Configuration conf; protected final List> proxies = new ArrayList>(); - private final UserGroupInformation ugi; + protected final UserGroupInformation ugi; protected final Class xface; private int currentProxyIndex = 0; - private final HAProxyFactory factory; + protected final HAProxyFactory factory; public ConfiguredFailoverProxyProvider(Configuration conf, URI uri, Class xface, HAProxyFactory factory) { @@ -122,6 +122,10 @@ public class ConfiguredFailoverProxyProvider extends @Override public synchronized ProxyInfo getProxy() { AddressRpcProxyPair current = proxies.get(currentProxyIndex); + return getProxy(current); + } + + protected ProxyInfo getProxy(AddressRpcProxyPair current) { if (current.namenode == null) { try { current.namenode = factory.createProxy(conf, @@ -147,7 +151,7 @@ public class ConfiguredFailoverProxyProvider extends * A little pair object to store the address and connected RPC proxy object to * an NN. 
Note that {@link AddressRpcProxyPair#namenode} may be null. */ - private static class AddressRpcProxyPair { + protected static class AddressRpcProxyPair { public final InetSocketAddress address; public T namenode; From 000a67839666bf7cb39d3955757bb05fa95f1b18 Mon Sep 17 00:00:00 2001 From: Arpit Agarwal Date: Fri, 8 Jun 2018 17:57:12 -0700 Subject: [PATCH 076/113] HDFS-12670. can't renew HDFS tokens with only the hdfs client jar. Contributed by Arpit Agarwal. --- .../services/org.apache.hadoop.security.token.TokenIdentifier | 0 .../services/org.apache.hadoop.security.token.TokenRenewer | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename hadoop-hdfs-project/{hadoop-hdfs => hadoop-hdfs-client}/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier (100%) rename hadoop-hdfs-project/{hadoop-hdfs => hadoop-hdfs-client}/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer (100%) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier similarity index 100% rename from hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer similarity index 100% rename from hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer From ef0118b91e384b9a6d96c2ae64480d9acf5aa6fb Mon Sep 17 00:00:00 2001 From: Steve Loughran Date: Sat, 9 Jun 2018 15:33:30 +0100 Subject: [PATCH 077/113] HADOOP-15520. Add tests for various org.apache.hadoop.util classes. 
Contributed by Arash Nabili --- .../util/TestCloseableReferenceCount.java | 91 +++++++++ .../hadoop/util/TestIntrusiveCollection.java | 193 ++++++++++++++++++ .../hadoop/util/TestLimitInputStream.java | 74 +++++++ .../org/apache/hadoop/util/TestShell.java | 8 + .../apache/hadoop/util/TestStringUtils.java | 27 +++ .../hadoop/util/TestUTF8ByteArrayUtils.java | 57 ++++++ 6 files changed, 450 insertions(+) create mode 100644 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestCloseableReferenceCount.java create mode 100644 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestIntrusiveCollection.java create mode 100644 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestLimitInputStream.java create mode 100644 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestUTF8ByteArrayUtils.java diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestCloseableReferenceCount.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestCloseableReferenceCount.java new file mode 100644 index 00000000000..31e1899421f --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestCloseableReferenceCount.java @@ -0,0 +1,91 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.util; + +import java.nio.channels.ClosedChannelException; + +import org.junit.Test; + +import org.apache.hadoop.test.HadoopTestBase; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +public class TestCloseableReferenceCount extends HadoopTestBase { + @Test + public void testReference() throws ClosedChannelException { + CloseableReferenceCount clr = new CloseableReferenceCount(); + clr.reference(); + assertEquals("Incorrect reference count", 1, clr.getReferenceCount()); + } + + @Test + public void testUnreference() throws ClosedChannelException { + CloseableReferenceCount clr = new CloseableReferenceCount(); + clr.reference(); + clr.reference(); + assertFalse("New reference count should not equal STATUS_CLOSED_MASK", + clr.unreference()); + assertEquals("Incorrect reference count", 1, clr.getReferenceCount()); + } + + @Test + public void testUnreferenceCheckClosed() throws ClosedChannelException { + CloseableReferenceCount clr = new CloseableReferenceCount(); + clr.reference(); + clr.reference(); + clr.unreferenceCheckClosed(); + assertEquals("Incorrect reference count", 1, clr.getReferenceCount()); + } + + @Test + public void testSetClosed() throws ClosedChannelException { + CloseableReferenceCount clr = new CloseableReferenceCount(); + assertTrue("Reference count should be open", clr.isOpen()); + clr.setClosed(); + assertFalse("Reference count should be closed", clr.isOpen()); + } + + @Test(expected = ClosedChannelException.class) + public void testReferenceClosedReference() throws ClosedChannelException { + CloseableReferenceCount clr = new CloseableReferenceCount(); + clr.setClosed(); + assertFalse("Reference count should be closed", clr.isOpen()); + clr.reference(); + } + + @Test(expected = ClosedChannelException.class) + public void testUnreferenceClosedReference() throws ClosedChannelException { + CloseableReferenceCount clr = new CloseableReferenceCount(); + clr.reference(); + clr.setClosed(); + assertFalse("Reference count should be closed", clr.isOpen()); + clr.unreferenceCheckClosed(); + } + + @Test(expected = ClosedChannelException.class) + public void testDoubleClose() throws ClosedChannelException { + CloseableReferenceCount clr = new CloseableReferenceCount(); + assertTrue("Reference count should be open", clr.isOpen()); + clr.setClosed(); + assertFalse("Reference count should be closed", clr.isOpen()); + clr.setClosed(); + } +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestIntrusiveCollection.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestIntrusiveCollection.java new file mode 100644 index 00000000000..03bbf7b12fe --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestIntrusiveCollection.java @@ -0,0 +1,193 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + *

+ * Story 1
+ * As a software developer,
+ *  I want to use the IntrusiveCollection class;
+ * So that I can save on memory usage during execution.
+ * 
+ */ +package org.apache.hadoop.util; + +import java.util.HashMap; +import java.util.Iterator; +import java.util.Map; + +import org.junit.Test; + +import org.apache.hadoop.test.HadoopTestBase; +import org.apache.hadoop.util.IntrusiveCollection.Element; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +public class TestIntrusiveCollection extends HadoopTestBase { + static class SimpleElement implements IntrusiveCollection.Element { + private Map, Element> + prevMap, nextMap; + private Map, Boolean> isMemberMap; + + public SimpleElement() { + prevMap = new HashMap<>(); + nextMap = new HashMap<>(); + isMemberMap = new HashMap<>(); + } + + @Override + public void insertInternal(IntrusiveCollection list, + Element prev, Element next) { + isMemberMap.put(list, true); + prevMap.put(list, prev); + nextMap.put(list, next); + } + + @Override + public void setPrev(IntrusiveCollection list, + Element prev) { + prevMap.put(list, prev); + } + + @Override + public void setNext(IntrusiveCollection list, + Element next) { + nextMap.put(list, next); + } + + @Override + public void removeInternal(IntrusiveCollection list) { + prevMap.remove(list); + nextMap.remove(list); + isMemberMap.remove(list); + } + + @Override + public Element getPrev(IntrusiveCollection list) { + return prevMap.getOrDefault(list, null); + } + + @Override + public Element getNext(IntrusiveCollection list) { + return nextMap.getOrDefault(list, null); + } + + @Override + public boolean isInList(IntrusiveCollection list) { + return isMemberMap.getOrDefault(list, false); + } + } + + /** + *
+   * Scenario S1.1: Adding an element
+   * Given  an IntrusiveCollection has been created
+   *  and    the IntrusiveCollection is empty
+   * When    I insert an element
+   * Then    the IntrusiveCollection contains the newly added element.
+   * 
+ */ + @Test + public void testShouldAddElement() { + IntrusiveCollection intrusiveCollection = + new IntrusiveCollection<>(); + + SimpleElement element = new SimpleElement(); + intrusiveCollection.add(element); + + assertFalse("Collection should not be empty", + intrusiveCollection.isEmpty()); + assertTrue("Collection should contain added element", + intrusiveCollection.contains(element)); + } + + /** + *
+   * Scenario S1.2: Removing an element
+   * Given  an IntrusiveCollection has been created
+   *  and    the InstrusiveCollection contains a single element
+   * When    I remove the element
+   * Then    the IntrusiveCollection is empty.
+   * 
+ */ + @Test + public void testShouldRemoveElement() { + IntrusiveCollection intrusiveCollection = + new IntrusiveCollection<>(); + SimpleElement element = new SimpleElement(); + intrusiveCollection.add(element); + + intrusiveCollection.remove(element); + + assertTrue("Collection should be empty", intrusiveCollection.isEmpty()); + assertFalse("Collection should not contain removed element", + intrusiveCollection.contains(element)); + } + + /** + *
+   * Scenario S1.3: Removing all elements
+   * Given  an IntrusiveCollection has been created
+   *  and    the IntrusiveCollection contains multiple elements
+   * When    I remove all elements
+   * Then    the IntrusiveCollection is empty.
+   * 
+ */ + @Test + public void testShouldRemoveAllElements() { + IntrusiveCollection intrusiveCollection = + new IntrusiveCollection<>(); + intrusiveCollection.add(new SimpleElement()); + intrusiveCollection.add(new SimpleElement()); + intrusiveCollection.add(new SimpleElement()); + + intrusiveCollection.clear(); + + assertTrue("Collection should be empty", intrusiveCollection.isEmpty()); + } + + /** + *
+   * Scenario S1.4: Iterating through elements
+   * Given  an IntrusiveCollection has been created
+   *  and    the IntrusiveCollection contains multiple elements
+   * When    I iterate through the IntrusiveCollection
+   * Then    I get each element in the collection, successively.
+   * 
+ */ + @Test + public void testIterateShouldReturnAllElements() { + IntrusiveCollection intrusiveCollection = + new IntrusiveCollection<>(); + SimpleElement elem1 = new SimpleElement(); + SimpleElement elem2 = new SimpleElement(); + SimpleElement elem3 = new SimpleElement(); + intrusiveCollection.add(elem1); + intrusiveCollection.add(elem2); + intrusiveCollection.add(elem3); + + Iterator iterator = intrusiveCollection.iterator(); + + assertEquals("First element returned is incorrect", elem1, iterator.next()); + assertEquals("Second element returned is incorrect", elem2, + iterator.next()); + assertEquals("Third element returned is incorrect", elem3, iterator.next()); + assertFalse("Iterator should not have next element", iterator.hasNext()); + } +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestLimitInputStream.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestLimitInputStream.java new file mode 100644 index 00000000000..368fa37b7bd --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestLimitInputStream.java @@ -0,0 +1,74 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.util; + +import java.io.IOException; +import java.io.InputStream; +import java.util.Random; + +import org.junit.Test; + +import org.apache.hadoop.test.HadoopTestBase; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; + +public class TestLimitInputStream extends HadoopTestBase { + static class RandomInputStream extends InputStream { + private Random rn = new Random(0); + + @Override + public int read() { return rn.nextInt(); } + } + + @Test + public void testRead() throws IOException { + try (LimitInputStream limitInputStream = + new LimitInputStream(new RandomInputStream(), 0)) { + assertEquals("Reading byte after reaching limit should return -1", -1, + limitInputStream.read()); + } + try (LimitInputStream limitInputStream = + new LimitInputStream(new RandomInputStream(), 4)) { + assertEquals("Incorrect byte returned", new Random(0).nextInt(), + limitInputStream.read()); + } + } + + @Test(expected = IOException.class) + public void testResetWithoutMark() throws IOException { + try (LimitInputStream limitInputStream = + new LimitInputStream(new RandomInputStream(), 128)) { + limitInputStream.reset(); + } + } + + @Test + public void testReadBytes() throws IOException { + try (LimitInputStream limitInputStream = + new LimitInputStream(new RandomInputStream(), 128)) { + Random r = new Random(0); + byte[] data = new byte[4]; + byte[] expected = { (byte) r.nextInt(), (byte) r.nextInt(), + (byte) r.nextInt(), (byte) r.nextInt() }; + limitInputStream.read(data, 0, 4); + assertArrayEquals("Incorrect bytes returned", expected, data); + } + } +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestShell.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestShell.java index d0ebc2b83f7..578d2671141 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestShell.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestShell.java @@ -27,6 +27,7 @@ import java.io.File; import java.io.FileNotFoundException; import java.io.FileOutputStream; import java.io.IOException; +import java.io.InterruptedIOException; import java.io.PrintWriter; import java.lang.management.ManagementFactory; import java.lang.management.ThreadInfo; @@ -38,6 +39,8 @@ import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.test.GenericTestUtils; import static org.apache.hadoop.util.Shell.*; +import static org.junit.Assume.assumeTrue; + import org.junit.Assume; import org.junit.Before; import org.junit.Rule; @@ -528,4 +531,9 @@ public class TestShell extends Assert { public void testIsJavaVersionAtLeast() { assertTrue(Shell.isJavaVersionAtLeast(8)); } + + @Test + public void testIsBashSupported() throws InterruptedIOException { + assumeTrue("Bash is not supported", Shell.checkIsBashSupported()); + } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java index 96a64823639..3fdc1bb8f8b 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java @@ -25,6 +25,7 @@ import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotEquals; import static 
org.junit.Assert.assertTrue; +import static org.junit.Assert.assertFalse; import static org.junit.Assert.fail; import java.util.ArrayList; @@ -476,6 +477,32 @@ public class TestStringUtils extends UnitTestcaseTimeLimit { executorService.awaitTermination(50, TimeUnit.SECONDS); } + @Test + public void testFormatTimeSortable() { + long timeDiff = 523452311; + String timeDiffStr = "99hrs, 59mins, 59sec"; + + assertEquals("Incorrect time diff string returned", timeDiffStr, + StringUtils.formatTimeSortable(timeDiff)); + } + + @Test + public void testIsAlpha() { + assertTrue("Reported hello as non-alpha string", + StringUtils.isAlpha("hello")); + assertFalse("Reported hello1 as alpha string", + StringUtils.isAlpha("hello1")); + } + + @Test + public void testEscapeHTML() { + String htmlStr = "
<p>Hello. How are you?</p>
"; + String escapedStr = "<p>Hello. How are you?</p>"; + + assertEquals("Incorrect escaped HTML string returned", + escapedStr, StringUtils.escapeHTML(htmlStr)); + } + // Benchmark for StringUtils split public static void main(String []args) { final String TO_SPLIT = "foo,bar,baz,blah,blah"; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestUTF8ByteArrayUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestUTF8ByteArrayUtils.java new file mode 100644 index 00000000000..3aa549a4ca4 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestUTF8ByteArrayUtils.java @@ -0,0 +1,57 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.util; + +import org.junit.Test; + +import org.apache.hadoop.test.HadoopTestBase; + +import static org.junit.Assert.assertEquals; + +public class TestUTF8ByteArrayUtils extends HadoopTestBase { + @Test + public void testFindByte() { + byte[] data = "Hello, world!".getBytes(); + assertEquals("Character 'a' does not exist in string", -1, + UTF8ByteArrayUtils.findByte(data, 0, data.length, (byte) 'a')); + assertEquals("Did not find first occurrence of character 'o'", 4, + UTF8ByteArrayUtils.findByte(data, 0, data.length, (byte) 'o')); + } + + @Test + public void testFindBytes() { + byte[] data = "Hello, world!".getBytes(); + assertEquals("Did not find first occurrence of pattern 'ello'", 1, + UTF8ByteArrayUtils.findBytes(data, 0, data.length, "ello".getBytes())); + assertEquals( + "Substring starting at position 2 does not contain pattern 'ello'", -1, + UTF8ByteArrayUtils.findBytes(data, 2, data.length, "ello".getBytes())); + } + + @Test + public void testFindNthByte() { + byte[] data = "Hello, world!".getBytes(); + assertEquals("Did not find 2nd occurrence of character 'l'", 3, + UTF8ByteArrayUtils.findNthByte(data, 0, data.length, (byte) 'l', 2)); + assertEquals("4th occurrence of character 'l' does not exist", -1, + UTF8ByteArrayUtils.findNthByte(data, 0, data.length, (byte) 'l', 4)); + assertEquals("Did not find 3rd occurrence of character 'l'", 10, + UTF8ByteArrayUtils.findNthByte(data, (byte) 'l', 3)); + } +} From ccfb816d39878abf4172933327d788c59b9eb082 Mon Sep 17 00:00:00 2001 From: Bharat Viswanadham Date: Sat, 9 Jun 2018 16:39:09 -0700 Subject: [PATCH 078/113] HDFS-13667:Typo: Marking all datandoes as stale. 
Contributed by Nanda Kumar --- .../hadoop/hdfs/server/blockmanagement/DatanodeManager.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java index e6cd513881f..9ebc693a235 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java @@ -1834,7 +1834,7 @@ public class DatanodeManager { } public void markAllDatanodesStale() { - LOG.info("Marking all datandoes as stale"); + LOG.info("Marking all datanodes as stale"); synchronized (this) { for (DatanodeDescriptor dn : datanodeMap.values()) { for(DatanodeStorageInfo storage : dn.getStorageInfos()) { From 18201b882a38ad875358c5d23c09b0ef903c2f91 Mon Sep 17 00:00:00 2001 From: Sammi Chen Date: Mon, 11 Jun 2018 13:53:37 +0800 Subject: [PATCH 079/113] HADOOP-15499. Performance severe drops when running RawErasureCoderBenchmark with NativeRSRawErasureCoder. Contributed by Sammi Chen. --- .../rawcoder/AbstractNativeRawDecoder.java | 53 +++++++++++-------- .../rawcoder/AbstractNativeRawEncoder.java | 49 ++++++++++------- .../rawcoder/NativeRSRawDecoder.java | 19 +++++-- .../rawcoder/NativeRSRawEncoder.java | 19 +++++-- .../rawcoder/NativeXORRawDecoder.java | 19 +++++-- .../rawcoder/NativeXORRawEncoder.java | 19 +++++-- .../rawcoder/RawErasureCoderBenchmark.java | 6 +++ 7 files changed, 128 insertions(+), 56 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractNativeRawDecoder.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractNativeRawDecoder.java index e84574709f7..cb71a80d5f5 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractNativeRawDecoder.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractNativeRawDecoder.java @@ -25,6 +25,7 @@ import org.slf4j.LoggerFactory; import java.io.IOException; import java.nio.ByteBuffer; +import java.util.concurrent.locks.ReentrantReadWriteLock; /** * Abstract native raw decoder for all native coders to extend with. @@ -34,36 +35,46 @@ abstract class AbstractNativeRawDecoder extends RawErasureDecoder { public static Logger LOG = LoggerFactory.getLogger(AbstractNativeRawDecoder.class); + // Protect ISA-L coder data structure in native layer from being accessed and + // updated concurrently by the init, release and decode functions. 
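The locking scheme introduced in this hunk follows a common pattern for guarding a native resource: encode/decode calls share a read lock so they can proceed concurrently, while init and release take the write lock so the native coder state can never be created or destroyed while an operation is in flight. A minimal standalone sketch of that pattern follows (illustrative only; the GuardedNativeCoder class, its nativeHandle field, and its doWork method are hypothetical stand-ins, not Hadoop APIs).

import java.io.IOException;
import java.util.concurrent.locks.ReentrantReadWriteLock;

/** Illustrative sketch of the read/write-lock guard described above. */
class GuardedNativeCoder {
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
  private long nativeHandle;                     // 0 means released/closed

  GuardedNativeCoder() {
    lock.writeLock().lock();                     // exclusive, like initImpl()
    try {
      nativeHandle = 1L;                         // stand-in for the native init call
    } finally {
      lock.writeLock().unlock();
    }
  }

  void doWork() throws IOException {             // stand-in for encode()/decode()
    lock.readLock().lock();                      // shared: many operations may overlap
    try {
      if (nativeHandle == 0) {
        throw new IOException("coder closed");
      }
      // ... call into the native layer here ...
    } finally {
      lock.readLock().unlock();
    }
  }

  void release() {
    lock.writeLock().lock();                     // exclusive, like destroyImpl()
    try {
      nativeHandle = 0;                          // stand-in for the native destroy call
    } finally {
      lock.writeLock().unlock();
    }
  }
}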
+ protected final ReentrantReadWriteLock decoderLock = + new ReentrantReadWriteLock(); + public AbstractNativeRawDecoder(ErasureCoderOptions coderOptions) { super(coderOptions); } @Override - protected synchronized void doDecode(ByteBufferDecodingState decodingState) + protected void doDecode(ByteBufferDecodingState decodingState) throws IOException { - if (nativeCoder == 0) { - throw new IOException(String.format("%s closed", - getClass().getSimpleName())); - } - int[] inputOffsets = new int[decodingState.inputs.length]; - int[] outputOffsets = new int[decodingState.outputs.length]; - - ByteBuffer buffer; - for (int i = 0; i < decodingState.inputs.length; ++i) { - buffer = decodingState.inputs[i]; - if (buffer != null) { - inputOffsets[i] = buffer.position(); + decoderLock.readLock().lock(); + try { + if (nativeCoder == 0) { + throw new IOException(String.format("%s closed", + getClass().getSimpleName())); } - } + int[] inputOffsets = new int[decodingState.inputs.length]; + int[] outputOffsets = new int[decodingState.outputs.length]; - for (int i = 0; i < decodingState.outputs.length; ++i) { - buffer = decodingState.outputs[i]; - outputOffsets[i] = buffer.position(); - } + ByteBuffer buffer; + for (int i = 0; i < decodingState.inputs.length; ++i) { + buffer = decodingState.inputs[i]; + if (buffer != null) { + inputOffsets[i] = buffer.position(); + } + } - performDecodeImpl(decodingState.inputs, inputOffsets, - decodingState.decodeLength, decodingState.erasedIndexes, - decodingState.outputs, outputOffsets); + for (int i = 0; i < decodingState.outputs.length; ++i) { + buffer = decodingState.outputs[i]; + outputOffsets[i] = buffer.position(); + } + + performDecodeImpl(decodingState.inputs, inputOffsets, + decodingState.decodeLength, decodingState.erasedIndexes, + decodingState.outputs, outputOffsets); + } finally { + decoderLock.readLock().unlock(); + } } protected abstract void performDecodeImpl(ByteBuffer[] inputs, diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractNativeRawEncoder.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractNativeRawEncoder.java index cab53839b34..44d89c2a1c0 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractNativeRawEncoder.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractNativeRawEncoder.java @@ -25,6 +25,7 @@ import org.slf4j.LoggerFactory; import java.io.IOException; import java.nio.ByteBuffer; +import java.util.concurrent.locks.ReentrantReadWriteLock; /** * Abstract native raw encoder for all native coders to extend with. @@ -34,34 +35,44 @@ abstract class AbstractNativeRawEncoder extends RawErasureEncoder { public static Logger LOG = LoggerFactory.getLogger(AbstractNativeRawEncoder.class); + // Protect ISA-L coder data structure in native layer from being accessed and + // updated concurrently by the init, release and encode functions. 
+ protected final ReentrantReadWriteLock encoderLock = + new ReentrantReadWriteLock(); + public AbstractNativeRawEncoder(ErasureCoderOptions coderOptions) { super(coderOptions); } @Override - protected synchronized void doEncode(ByteBufferEncodingState encodingState) + protected void doEncode(ByteBufferEncodingState encodingState) throws IOException { - if (nativeCoder == 0) { - throw new IOException(String.format("%s closed", - getClass().getSimpleName())); - } - int[] inputOffsets = new int[encodingState.inputs.length]; - int[] outputOffsets = new int[encodingState.outputs.length]; - int dataLen = encodingState.inputs[0].remaining(); + encoderLock.readLock().lock(); + try { + if (nativeCoder == 0) { + throw new IOException(String.format("%s closed", + getClass().getSimpleName())); + } + int[] inputOffsets = new int[encodingState.inputs.length]; + int[] outputOffsets = new int[encodingState.outputs.length]; + int dataLen = encodingState.inputs[0].remaining(); - ByteBuffer buffer; - for (int i = 0; i < encodingState.inputs.length; ++i) { - buffer = encodingState.inputs[i]; - inputOffsets[i] = buffer.position(); - } + ByteBuffer buffer; + for (int i = 0; i < encodingState.inputs.length; ++i) { + buffer = encodingState.inputs[i]; + inputOffsets[i] = buffer.position(); + } - for (int i = 0; i < encodingState.outputs.length; ++i) { - buffer = encodingState.outputs[i]; - outputOffsets[i] = buffer.position(); - } + for (int i = 0; i < encodingState.outputs.length; ++i) { + buffer = encodingState.outputs[i]; + outputOffsets[i] = buffer.position(); + } - performEncodeImpl(encodingState.inputs, inputOffsets, dataLen, - encodingState.outputs, outputOffsets); + performEncodeImpl(encodingState.inputs, inputOffsets, dataLen, + encodingState.outputs, outputOffsets); + } finally { + encoderLock.readLock().unlock(); + } } protected abstract void performEncodeImpl( diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/NativeRSRawDecoder.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/NativeRSRawDecoder.java index 85722223039..dc2c33a6bf6 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/NativeRSRawDecoder.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/NativeRSRawDecoder.java @@ -36,19 +36,30 @@ public class NativeRSRawDecoder extends AbstractNativeRawDecoder { public NativeRSRawDecoder(ErasureCoderOptions coderOptions) { super(coderOptions); - initImpl(coderOptions.getNumDataUnits(), coderOptions.getNumParityUnits()); + decoderLock.writeLock().lock(); + try { + initImpl(coderOptions.getNumDataUnits(), + coderOptions.getNumParityUnits()); + } finally { + decoderLock.writeLock().unlock(); + } } @Override - protected synchronized void performDecodeImpl( + protected void performDecodeImpl( ByteBuffer[] inputs, int[] inputOffsets, int dataLen, int[] erased, ByteBuffer[] outputs, int[] outputOffsets) throws IOException { decodeImpl(inputs, inputOffsets, dataLen, erased, outputs, outputOffsets); } @Override - public synchronized void release() { - destroyImpl(); + public void release() { + decoderLock.writeLock().lock(); + try { + destroyImpl(); + } finally { + decoderLock.writeLock().unlock(); + } } @Override diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/NativeRSRawEncoder.java 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/NativeRSRawEncoder.java index 754ec884102..ad06927ffe3 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/NativeRSRawEncoder.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/NativeRSRawEncoder.java @@ -36,19 +36,30 @@ public class NativeRSRawEncoder extends AbstractNativeRawEncoder { public NativeRSRawEncoder(ErasureCoderOptions coderOptions) { super(coderOptions); - initImpl(coderOptions.getNumDataUnits(), coderOptions.getNumParityUnits()); + encoderLock.writeLock().lock(); + try { + initImpl(coderOptions.getNumDataUnits(), + coderOptions.getNumParityUnits()); + } finally { + encoderLock.writeLock().unlock(); + } } @Override - protected synchronized void performEncodeImpl( + protected void performEncodeImpl( ByteBuffer[] inputs, int[] inputOffsets, int dataLen, ByteBuffer[] outputs, int[] outputOffsets) throws IOException { encodeImpl(inputs, inputOffsets, dataLen, outputs, outputOffsets); } @Override - public synchronized void release() { - destroyImpl(); + public void release() { + encoderLock.writeLock().lock(); + try { + destroyImpl(); + } finally { + encoderLock.writeLock().unlock(); + } } @Override diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/NativeXORRawDecoder.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/NativeXORRawDecoder.java index 17630424985..dd708eb53e1 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/NativeXORRawDecoder.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/NativeXORRawDecoder.java @@ -36,19 +36,30 @@ public class NativeXORRawDecoder extends AbstractNativeRawDecoder { public NativeXORRawDecoder(ErasureCoderOptions coderOptions) { super(coderOptions); - initImpl(coderOptions.getNumDataUnits(), coderOptions.getNumParityUnits()); + decoderLock.writeLock().lock(); + try { + initImpl(coderOptions.getNumDataUnits(), + coderOptions.getNumParityUnits()); + } finally { + decoderLock.writeLock().unlock(); + } } @Override - protected synchronized void performDecodeImpl( + protected void performDecodeImpl( ByteBuffer[] inputs, int[] inputOffsets, int dataLen, int[] erased, ByteBuffer[] outputs, int[] outputOffsets) throws IOException { decodeImpl(inputs, inputOffsets, dataLen, erased, outputs, outputOffsets); } @Override - public synchronized void release() { - destroyImpl(); + public void release() { + decoderLock.writeLock().lock(); + try { + destroyImpl(); + } finally { + decoderLock.writeLock().unlock(); + } } private native void initImpl(int numDataUnits, int numParityUnits); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/NativeXORRawEncoder.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/NativeXORRawEncoder.java index 7f4265b2fa5..66b0a1bff7d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/NativeXORRawEncoder.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/NativeXORRawEncoder.java @@ -36,19 +36,30 @@ public class NativeXORRawEncoder extends AbstractNativeRawEncoder { public NativeXORRawEncoder(ErasureCoderOptions coderOptions) { 
super(coderOptions); - initImpl(coderOptions.getNumDataUnits(), coderOptions.getNumParityUnits()); + encoderLock.writeLock().lock(); + try { + initImpl(coderOptions.getNumDataUnits(), + coderOptions.getNumParityUnits()); + } finally { + encoderLock.writeLock().unlock(); + } } @Override - protected synchronized void performEncodeImpl( + protected void performEncodeImpl( ByteBuffer[] inputs, int[] inputOffsets, int dataLen, ByteBuffer[] outputs, int[] outputOffsets) throws IOException { encodeImpl(inputs, inputOffsets, dataLen, outputs, outputOffsets); } @Override - public synchronized void release() { - destroyImpl(); + public void release() { + encoderLock.writeLock().lock(); + try { + destroyImpl(); + } finally { + encoderLock.writeLock().unlock(); + } } private native void initImpl(int numDataUnits, int numParityUnits); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureCoderBenchmark.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureCoderBenchmark.java index c005e77cb3a..df8c54b9cdd 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureCoderBenchmark.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureCoderBenchmark.java @@ -230,6 +230,12 @@ public final class RawErasureCoderBenchmark { throw e; } finally { executor.shutdown(); + if (encoder != null) { + encoder.release(); + } + if (decoder != null) { + decoder.release(); + } } } From c190ac2be88e574b3322cdc73a7c0af0cef708b2 Mon Sep 17 00:00:00 2001 From: Haibo Chen Date: Mon, 11 Jun 2018 11:12:44 -0700 Subject: [PATCH 080/113] YARN-8323. FairScheduler.allocConf should be declared as volatile. (Szilard Nemeth via Haibo Chen) --- .../server/resourcemanager/scheduler/fair/FairScheduler.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java index 557e684b673..eb9f6af7101 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java @@ -199,7 +199,7 @@ public class FairScheduler extends private AllocationFileLoaderService allocsLoader; @VisibleForTesting - AllocationConfiguration allocConf; + volatile AllocationConfiguration allocConf; // Container size threshold for making a reservation. @VisibleForTesting From 676dcffff575fdf1c4b49aeae4e000bd60ca0a83 Mon Sep 17 00:00:00 2001 From: Haibo Chen Date: Mon, 11 Jun 2018 11:16:21 -0700 Subject: [PATCH 081/113] YARN-8322. Change log level when there is an IOException when the allocation file is loaded. 
(Szilard Nemeth via Haibo Chen) --- .../scheduler/fair/AllocationFileLoaderService.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java index e541ab7b0dd..32cb2363607 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java @@ -109,7 +109,7 @@ public class AllocationFileLoaderService extends AbstractService { @Override public void serviceInit(Configuration conf) throws Exception { this.allocFile = getAllocationFile(conf); - if(this.allocFile != null) { + if (this.allocFile != null) { this.fs = allocFile.getFileSystem(conf); reloadThread = new Thread(() -> { while (running) { @@ -138,7 +138,7 @@ public class AllocationFileLoaderService extends AbstractService { lastReloadAttemptFailed = true; } } catch (IOException e) { - LOG.info("Exception while loading allocation file: " + e); + LOG.error("Exception while loading allocation file: " + e); } try { Thread.sleep(reloadIntervalMs); From 180b3c960bb693a68431c677d8c8b18821fb4361 Mon Sep 17 00:00:00 2001 From: Haibo Chen Date: Mon, 11 Jun 2018 11:18:44 -0700 Subject: [PATCH 082/113] YARN-8321. AllocationFileLoaderService.getAllocationFile() should be declared as VisibleForTest. (Szilard Nemeth via Haibo Chen) --- .../scheduler/fair/AllocationFileLoaderService.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java index 32cb2363607..56cc8873f65 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java @@ -181,7 +181,8 @@ public class AllocationFileLoaderService extends AbstractService { * path is relative, it is searched for in the * classpath, but loaded like a regular File. */ - public Path getAllocationFile(Configuration conf) + @VisibleForTesting + Path getAllocationFile(Configuration conf) throws UnsupportedFileSystemException { String allocFilePath = conf.get(FairSchedulerConfiguration.ALLOCATION_FILE, FairSchedulerConfiguration.DEFAULT_ALLOCATION_FILE); From 2e5cfe6df338c70965cfb0212a93617de3a6bd79 Mon Sep 17 00:00:00 2001 From: Inigo Goiri Date: Mon, 11 Jun 2018 11:16:52 -0700 Subject: [PATCH 083/113] HDFS-13653. 
Make dfs.client.failover.random.order a per nameservice configuration. Contributed by Ekanth Sethuramalingam. --- .../ha/ConfiguredFailoverProxyProvider.java | 29 +- .../TestConfiguredFailoverProxyProvider.java | 264 ++++++++++++++++++ .../src/main/resources/hdfs-default.xml | 12 + .../hadoop/tools/TestHdfsConfigFields.java | 1 + 4 files changed, 303 insertions(+), 3 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConfiguredFailoverProxyProvider.java diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java index 58f49438cc7..96722fcfab3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java @@ -94,9 +94,7 @@ public class ConfiguredFailoverProxyProvider extends proxies.add(new AddressRpcProxyPair(address)); } // Randomize the list to prevent all clients pointing to the same one - boolean randomized = conf.getBoolean( - HdfsClientConfigKeys.Failover.RANDOM_ORDER, - HdfsClientConfigKeys.Failover.RANDOM_ORDER_DEFAULT); + boolean randomized = getRandomOrder(conf, uri); if (randomized) { Collections.shuffle(proxies); } @@ -111,6 +109,31 @@ public class ConfiguredFailoverProxyProvider extends } } + /** + * Check whether random order is configured for failover proxy provider + * for the namenode/nameservice. + * + * @param conf Configuration + * @param nameNodeUri The URI of namenode/nameservice + * @return random order configuration + */ + private static boolean getRandomOrder( + Configuration conf, URI nameNodeUri) { + String host = nameNodeUri.getHost(); + String configKeyWithHost = HdfsClientConfigKeys.Failover.RANDOM_ORDER + + "." + host; + + if (conf.get(configKeyWithHost) != null) { + return conf.getBoolean( + configKeyWithHost, + HdfsClientConfigKeys.Failover.RANDOM_ORDER_DEFAULT); + } + + return conf.getBoolean( + HdfsClientConfigKeys.Failover.RANDOM_ORDER, + HdfsClientConfigKeys.Failover.RANDOM_ORDER_DEFAULT); + } + @Override public Class getInterface() { return xface; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConfiguredFailoverProxyProvider.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConfiguredFailoverProxyProvider.java new file mode 100644 index 00000000000..d7a5db6b893 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConfiguredFailoverProxyProvider.java @@ -0,0 +1,264 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode.ha; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; +import org.apache.hadoop.hdfs.protocol.ClientProtocol; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.Time; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; +import org.slf4j.event.Level; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.net.URI; +import java.net.URISyntaxException; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +/** + * Test {@link ConfiguredFailoverProxyProvider}. + * This manages failover logic for a given set of nameservices/namenodes + * (aka proxies). + */ +public class TestConfiguredFailoverProxyProvider { + private Configuration conf; + private int rpcPort = 8020; + private URI ns1Uri; + private URI ns2Uri; + private String ns1; + private String ns1nn1Hostname = "machine1.foo.bar"; + private InetSocketAddress ns1nn1 = + new InetSocketAddress(ns1nn1Hostname, rpcPort); + private String ns1nn2Hostname = "machine2.foo.bar"; + private InetSocketAddress ns1nn2 = + new InetSocketAddress(ns1nn2Hostname, rpcPort); + private String ns2; + private String ns2nn1Hostname = "router1.foo.bar"; + private InetSocketAddress ns2nn1 = + new InetSocketAddress(ns2nn1Hostname, rpcPort); + private String ns2nn2Hostname = "router2.foo.bar"; + private InetSocketAddress ns2nn2 = + new InetSocketAddress(ns2nn2Hostname, rpcPort); + private String ns2nn3Hostname = "router3.foo.bar"; + private InetSocketAddress ns2nn3 = + new InetSocketAddress(ns2nn3Hostname, rpcPort); + private static final int NUM_ITERATIONS = 50; + + @BeforeClass + public static void setupClass() throws Exception { + GenericTestUtils.setLogLevel(RequestHedgingProxyProvider.LOG, Level.TRACE); + } + + @Before + public void setup() throws URISyntaxException { + ns1 = "mycluster-1-" + Time.monotonicNow(); + ns1Uri = new URI("hdfs://" + ns1); + conf = new Configuration(); + conf.set( + HdfsClientConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX + "." + ns1, + "nn1,nn2,nn3"); + conf.set( + HdfsClientConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + "." + ns1 + ".nn1", + ns1nn1Hostname + ":" + rpcPort); + conf.set( + HdfsClientConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + "." + ns1 + ".nn2", + ns1nn2Hostname + ":" + rpcPort); + conf.set( + HdfsClientConfigKeys.Failover.PROXY_PROVIDER_KEY_PREFIX + "." + ns1, + ConfiguredFailoverProxyProvider.class.getName()); + conf.setBoolean( + HdfsClientConfigKeys.Failover.RANDOM_ORDER + "." 
+ ns1, + false); + + ns2 = "myroutercluster-2-" + Time.monotonicNow(); + ns2Uri = new URI("hdfs://" + ns2); + conf.set( + HdfsClientConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX + "." + ns2, + "nn1,nn2,nn3"); + conf.set( + HdfsClientConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + "." + ns2 + ".nn1", + ns2nn1Hostname + ":" + rpcPort); + conf.set( + HdfsClientConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + "." + ns2 + ".nn2", + ns2nn2Hostname + ":" + rpcPort); + conf.set( + HdfsClientConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + "." + ns2 + ".nn3", + ns2nn3Hostname + ":" + rpcPort); + conf.set( + HdfsClientConfigKeys.Failover.PROXY_PROVIDER_KEY_PREFIX + "." + ns2, + ConfiguredFailoverProxyProvider.class.getName()); + conf.setBoolean( + HdfsClientConfigKeys.Failover.RANDOM_ORDER + "." + ns2, + true); + + conf.set(HdfsClientConfigKeys.DFS_NAMESERVICES, ns1 + "," + ns2); + conf.set("fs.defaultFS", "hdfs://" + ns1); + } + + /** + * Tests getProxy with random.order configuration set to false. + * This expects the proxy order to be consistent every time a new + * ConfiguredFailoverProxyProvider is created. + */ + @Test + public void testNonRandomGetProxy() throws Exception { + final AtomicInteger nn1Count = new AtomicInteger(0); + final AtomicInteger nn2Count = new AtomicInteger(0); + + Map proxyMap = new HashMap<>(); + + final ClientProtocol nn1Mock = mock(ClientProtocol.class); + when(nn1Mock.getStats()).thenAnswer(createAnswer(nn1Count, 1)); + proxyMap.put(ns1nn1, nn1Mock); + + final ClientProtocol nn2Mock = mock(ClientProtocol.class); + when(nn2Mock.getStats()).thenAnswer(createAnswer(nn2Count, 2)); + proxyMap.put(ns1nn2, nn2Mock); + + ConfiguredFailoverProxyProvider provider1 = + new ConfiguredFailoverProxyProvider<>(conf, ns1Uri, + ClientProtocol.class, createFactory(proxyMap)); + ClientProtocol proxy1 = provider1.getProxy().proxy; + proxy1.getStats(); + assertEquals(1, nn1Count.get()); + assertEquals(0, nn2Count.get()); + proxy1.getStats(); + assertEquals(2, nn1Count.get()); + assertEquals(0, nn2Count.get()); + nn1Count.set(0); + nn2Count.set(0); + + for (int i = 0; i < NUM_ITERATIONS; i++) { + ConfiguredFailoverProxyProvider provider2 = + new ConfiguredFailoverProxyProvider<>(conf, ns1Uri, + ClientProtocol.class, createFactory(proxyMap)); + ClientProtocol proxy2 = provider2.getProxy().proxy; + proxy2.getStats(); + } + assertEquals(NUM_ITERATIONS, nn1Count.get()); + assertEquals(0, nn2Count.get()); + } + + /** + * Tests getProxy with random.order configuration set to true. + * This expects the proxy order to be random every time a new + * ConfiguredFailoverProxyProvider is created. 
+ */ + @Test + public void testRandomGetProxy() throws Exception { + final AtomicInteger nn1Count = new AtomicInteger(0); + final AtomicInteger nn2Count = new AtomicInteger(0); + final AtomicInteger nn3Count = new AtomicInteger(0); + + Map proxyMap = new HashMap<>(); + + final ClientProtocol nn1Mock = mock(ClientProtocol.class); + when(nn1Mock.getStats()).thenAnswer(createAnswer(nn1Count, 1)); + proxyMap.put(ns2nn1, nn1Mock); + + final ClientProtocol nn2Mock = mock(ClientProtocol.class); + when(nn2Mock.getStats()).thenAnswer(createAnswer(nn2Count, 2)); + proxyMap.put(ns2nn2, nn2Mock); + + final ClientProtocol nn3Mock = mock(ClientProtocol.class); + when(nn3Mock.getStats()).thenAnswer(createAnswer(nn3Count, 3)); + proxyMap.put(ns2nn3, nn3Mock); + + + for (int i = 0; i < NUM_ITERATIONS; i++) { + ConfiguredFailoverProxyProvider provider = + new ConfiguredFailoverProxyProvider<>(conf, ns2Uri, + ClientProtocol.class, createFactory(proxyMap)); + ClientProtocol proxy = provider.getProxy().proxy; + proxy.getStats(); + } + + assertTrue(nn1Count.get() < NUM_ITERATIONS && nn1Count.get() > 0); + assertTrue(nn2Count.get() < NUM_ITERATIONS && nn2Count.get() > 0); + assertTrue(nn3Count.get() < NUM_ITERATIONS && nn3Count.get() > 0); + assertEquals(NUM_ITERATIONS, + nn1Count.get() + nn2Count.get() + nn3Count.get()); + } + + /** + * createAnswer creates an Answer for using with the ClientProtocol mocks. + * @param counter counter to increment + * @param retVal return value from answer + * @return + */ + private Answer createAnswer(final AtomicInteger counter, + final long retVal) { + return new Answer() { + @Override + public long[] answer(InvocationOnMock invocation) throws Throwable { + counter.incrementAndGet(); + return new long[]{retVal}; + } + }; + } + + /** + * createFactory returns a HAProxyFactory for tests. + * This uses a map of name node address to ClientProtocol to route calls to + * different ClientProtocol objects. The tests could create ClientProtocol + * mocks and create name node mappings to use with + * ConfiguredFailoverProxyProvider. + */ + private HAProxyFactory createFactory( + final Map proxies) { + final Map proxyMap = proxies; + return new HAProxyFactory() { + @Override + public ClientProtocol createProxy(Configuration cfg, + InetSocketAddress nnAddr, Class xface, + UserGroupInformation ugi, boolean withRetries, + AtomicBoolean fallbackToSimpleAuth) throws IOException { + if (proxyMap.containsKey(nnAddr)) { + return proxyMap.get(nnAddr); + } else { + throw new IOException("Name node address not found"); + } + } + + @Override + public ClientProtocol createProxy(Configuration cfg, + InetSocketAddress nnAddr, Class xface, + UserGroupInformation ugi, boolean withRetries) throws IOException { + if (proxyMap.containsKey(nnAddr)) { + return proxyMap.get(nnAddr); + } else { + throw new IOException("Name node address not found"); + } + } + }; + } +} \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml index 7a437abcfa5..b55421c162e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml @@ -3634,6 +3634,18 @@
+ + dfs.client.failover.random.order + false + + Determines if the failover proxies are picked in random order instead of the + configured order. The prefix can be used with an optional nameservice ID + (of form dfs.client.failover.random.order[.nameservice]) in case multiple + nameservices exist and random order should be enabled for specific + nameservices. + + + dfs.client.key.provider.cache.expiry 864000000 diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java index 47db565e37f..5cae2fcd772 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java @@ -41,6 +41,7 @@ public class TestHdfsConfigFields extends TestConfigurationFieldsBase { public void initializeMemberVariables() { xmlFilename = new String("hdfs-default.xml"); configurationClasses = new Class[] { HdfsClientConfigKeys.class, + HdfsClientConfigKeys.Failover.class, HdfsClientConfigKeys.StripedRead.class, DFSConfigKeys.class, HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.class }; From 7c3dc39083ef6608f8a8fe7699195d4d369ec5e4 Mon Sep 17 00:00:00 2001 From: Yongjun Zhang Date: Sun, 10 Jun 2018 23:07:24 -0700 Subject: [PATCH 084/113] Update CHANGES, RELEASENOTES, and jdiff for 3.0.3 release. (cherry picked from commit 5a82f10e3254bc0745c2dda6bcef888d3ff0d1c3) --- .../markdown/release/3.0.3/CHANGES.3.0.3.md | 309 +++++++++++++++++ .../release/3.0.3/RELEASENOTES.3.0.3.md | 31 ++ .../jdiff/Apache_Hadoop_HDFS_3.0.3.xml | 322 ++++++++++++++++++ hadoop-project-dist/pom.xml | 2 +- 4 files changed, 663 insertions(+), 1 deletion(-) create mode 100644 hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.3/CHANGES.3.0.3.md create mode 100644 hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.3/RELEASENOTES.3.0.3.md create mode 100644 hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.0.3.xml diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.3/CHANGES.3.0.3.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.3/CHANGES.3.0.3.md new file mode 100644 index 00000000000..48065430f0b --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.3/CHANGES.3.0.3.md @@ -0,0 +1,309 @@ + + +# Apache Hadoop Changelog + +## Release 3.0.3 - 2018-05-31 + +### INCOMPATIBLE CHANGES: + +| JIRA | Summary | Priority | Component | Reporter | Contributor | +|:---- |:---- | :--- |:---- |:---- |:---- | +| [HDFS-13099](https://issues.apache.org/jira/browse/HDFS-13099) | RBF: Use the ZooKeeper as the default State Store | Minor | documentation | Yiqun Lin | Yiqun Lin | + + +### NEW FEATURES: + +| JIRA | Summary | Priority | Component | Reporter | Contributor | +|:---- |:---- | :--- |:---- |:---- |:---- | +| [HDFS-13283](https://issues.apache.org/jira/browse/HDFS-13283) | Percentage based Reserved Space Calculation for DataNode | Major | datanode, hdfs | Lukas Majercak | Lukas Majercak | + + +### IMPROVEMENTS: + +| JIRA | Summary | Priority | Component | Reporter | Contributor | +|:---- |:---- | :--- |:---- |:---- |:---- | +| [HDFS-12455](https://issues.apache.org/jira/browse/HDFS-12455) | WebHDFS - Adding "snapshot enabled" status to ListStatus query result. 
| Major | snapshots, webhdfs | Ajay Kumar | Ajay Kumar | +| [HDFS-13062](https://issues.apache.org/jira/browse/HDFS-13062) | Provide support for JN to use separate journal disk per namespace | Major | federation, journal-node | Bharat Viswanadham | Bharat Viswanadham | +| [HDFS-12933](https://issues.apache.org/jira/browse/HDFS-12933) | Improve logging when DFSStripedOutputStream failed to write some blocks | Minor | erasure-coding | Xiao Chen | chencan | +| [HADOOP-13972](https://issues.apache.org/jira/browse/HADOOP-13972) | ADLS to support per-store configuration | Major | fs/adl | John Zhuge | Sharad Sonker | +| [YARN-7813](https://issues.apache.org/jira/browse/YARN-7813) | Capacity Scheduler Intra-queue Preemption should be configurable for each queue | Major | capacity scheduler, scheduler preemption | Eric Payne | Eric Payne | +| [HDFS-13175](https://issues.apache.org/jira/browse/HDFS-13175) | Add more information for checking argument in DiskBalancerVolume | Minor | diskbalancer | Lei (Eddy) Xu | Lei (Eddy) Xu | +| [HDFS-11187](https://issues.apache.org/jira/browse/HDFS-11187) | Optimize disk access for last partial chunk checksum of Finalized replica | Major | datanode | Wei-Chiu Chuang | Gabor Bota | +| [MAPREDUCE-7061](https://issues.apache.org/jira/browse/MAPREDUCE-7061) | SingleCluster setup document needs to be updated | Major | . | Bharat Viswanadham | Bharat Viswanadham | +| [HADOOP-15263](https://issues.apache.org/jira/browse/HADOOP-15263) | hadoop cloud-storage module to mark hadoop-common as provided; add azure-datalake | Minor | build | Steve Loughran | Steve Loughran | +| [MAPREDUCE-7060](https://issues.apache.org/jira/browse/MAPREDUCE-7060) | Cherry Pick PathOutputCommitter class/factory to branch-3.0 | Minor | . | Steve Loughran | Steve Loughran | +| [HADOOP-15279](https://issues.apache.org/jira/browse/HADOOP-15279) | increase maven heap size recommendations | Minor | build, documentation, test | Allen Wittenauer | Allen Wittenauer | +| [HDFS-13170](https://issues.apache.org/jira/browse/HDFS-13170) | Port webhdfs unmaskedpermission parameter to HTTPFS | Major | . | Stephen O'Donnell | Stephen O'Donnell | +| [HDFS-13225](https://issues.apache.org/jira/browse/HDFS-13225) | StripeReader#checkMissingBlocks() 's IOException info is incomplete | Major | erasure-coding, hdfs-client | lufei | lufei | +| [HDFS-11394](https://issues.apache.org/jira/browse/HDFS-11394) | Support for getting erasure coding policy through WebHDFS#FileStatus | Major | erasure-coding, namenode | Kai Sasaki | Kai Sasaki | +| [HADOOP-15311](https://issues.apache.org/jira/browse/HADOOP-15311) | HttpServer2 needs a way to configure the acceptor/selector count | Major | common | Erik Krogen | Erik Krogen | +| [HDFS-11600](https://issues.apache.org/jira/browse/HDFS-11600) | Refactor TestDFSStripedOutputStreamWithFailure test classes | Minor | erasure-coding, test | Andrew Wang | SammiChen | +| [HDFS-12884](https://issues.apache.org/jira/browse/HDFS-12884) | BlockUnderConstructionFeature.truncateBlock should be of type BlockInfo | Major | namenode | Konstantin Shvachko | chencan | +| [HADOOP-15334](https://issues.apache.org/jira/browse/HADOOP-15334) | Upgrade Maven surefire plugin | Major | build | Arpit Agarwal | Arpit Agarwal | +| [HADOOP-15312](https://issues.apache.org/jira/browse/HADOOP-15312) | Undocumented KeyProvider configuration keys | Major | . 
| Wei-Chiu Chuang | LiXin Ge | +| [YARN-7623](https://issues.apache.org/jira/browse/YARN-7623) | Fix the CapacityScheduler Queue configuration documentation | Major | . | Arun Suresh | Jonathan Hung | +| [HDFS-13314](https://issues.apache.org/jira/browse/HDFS-13314) | NameNode should optionally exit if it detects FsImage corruption | Major | namenode | Arpit Agarwal | Arpit Agarwal | +| [HADOOP-15342](https://issues.apache.org/jira/browse/HADOOP-15342) | Update ADLS connector to use the current SDK version (2.2.7) | Major | fs/adl | Atul Sikaria | Atul Sikaria | +| [HDFS-13462](https://issues.apache.org/jira/browse/HDFS-13462) | Add BIND\_HOST configuration for JournalNode's HTTP and RPC Servers | Major | hdfs, journal-node | Lukas Majercak | Lukas Majercak | +| [HADOOP-14841](https://issues.apache.org/jira/browse/HADOOP-14841) | Kms client should disconnect if unable to get output stream from connection. | Major | kms | Xiao Chen | Rushabh S Shah | +| [HDFS-12981](https://issues.apache.org/jira/browse/HDFS-12981) | renameSnapshot a Non-Existent snapshot to itself should throw error | Minor | hdfs | Sailesh Patel | Kitti Nanasi | +| [YARN-8201](https://issues.apache.org/jira/browse/YARN-8201) | Skip stacktrace of few exception from ClientRMService | Minor | . | Bibin A Chundatt | Bilwa S T | +| [HADOOP-15441](https://issues.apache.org/jira/browse/HADOOP-15441) | Log kms url and token service at debug level. | Minor | . | Wei-Chiu Chuang | Gabor Bota | +| [HDFS-13544](https://issues.apache.org/jira/browse/HDFS-13544) | Improve logging for JournalNode in federated cluster | Major | federation, hdfs | Hanisha Koneru | Hanisha Koneru | +| [HADOOP-15486](https://issues.apache.org/jira/browse/HADOOP-15486) | Make NetworkTopology#netLock fair | Major | net | Nanda kumar | Nanda kumar | +| [HDFS-13493](https://issues.apache.org/jira/browse/HDFS-13493) | Reduce the HttpServer2 thread count on DataNodes | Major | datanode | Erik Krogen | Erik Krogen | +| [HADOOP-15449](https://issues.apache.org/jira/browse/HADOOP-15449) | Increase default timeout of ZK session to avoid frequent NameNode failover | Critical | common | Karthik Palanisamy | Karthik Palanisamy | + + +### BUG FIXES: + +| JIRA | Summary | Priority | Component | Reporter | Contributor | +|:---- |:---- | :--- |:---- |:---- |:---- | +| [HDFS-11968](https://issues.apache.org/jira/browse/HDFS-11968) | ViewFS: StoragePolicies commands fail with HDFS federation | Major | hdfs | Mukul Kumar Singh | Mukul Kumar Singh | +| [HDFS-12813](https://issues.apache.org/jira/browse/HDFS-12813) | RequestHedgingProxyProvider can hide Exception thrown from the Namenode for proxy size of 1 | Major | ha | Mukul Kumar Singh | Mukul Kumar Singh | +| [HDFS-13048](https://issues.apache.org/jira/browse/HDFS-13048) | LowRedundancyReplicatedBlocks metric can be negative | Major | metrics | Akira Ajisaka | Akira Ajisaka | +| [HDFS-13115](https://issues.apache.org/jira/browse/HDFS-13115) | In getNumUnderConstructionBlocks(), ignore the inodeIds for which the inodes have been deleted | Major | . | Yongjun Zhang | Yongjun Zhang | +| [HDFS-10453](https://issues.apache.org/jira/browse/HDFS-10453) | ReplicationMonitor thread could stuck for long time due to the race between replication and delete of same file in a large cluster. | Major | namenode | He Xiaoqiao | He Xiaoqiao | +| [MAPREDUCE-7053](https://issues.apache.org/jira/browse/MAPREDUCE-7053) | Timed out tasks can fail to produce thread dump | Major | . 
| Jason Lowe | Jason Lowe | +| [HADOOP-15206](https://issues.apache.org/jira/browse/HADOOP-15206) | BZip2 drops and duplicates records when input split size is small | Major | . | Aki Tanaka | Aki Tanaka | +| [YARN-7937](https://issues.apache.org/jira/browse/YARN-7937) | Fix http method name in Cluster Application Timeout Update API example request | Minor | docs, documentation | Charan Hebri | Charan Hebri | +| [YARN-7947](https://issues.apache.org/jira/browse/YARN-7947) | Capacity Scheduler intra-queue preemption can NPE for non-schedulable apps | Major | capacity scheduler, scheduler preemption | Eric Payne | Eric Payne | +| [HADOOP-10571](https://issues.apache.org/jira/browse/HADOOP-10571) | Use Log.\*(Object, Throwable) overload to log exceptions | Major | . | Arpit Agarwal | Andras Bokor | +| [HDFS-12781](https://issues.apache.org/jira/browse/HDFS-12781) | After Datanode down, In Namenode UI Datanode tab is throwing warning message. | Major | datanode | Harshakiran Reddy | Brahma Reddy Battula | +| [HDFS-12070](https://issues.apache.org/jira/browse/HDFS-12070) | Failed block recovery leaves files open indefinitely and at risk for data loss | Major | . | Daryn Sharp | Kihwal Lee | +| [HDFS-13145](https://issues.apache.org/jira/browse/HDFS-13145) | SBN crash when transition to ANN with in-progress edit tailing enabled | Major | ha, namenode | Chao Sun | Chao Sun | +| [HDFS-13114](https://issues.apache.org/jira/browse/HDFS-13114) | CryptoAdmin#ReencryptZoneCommand should resolve Namespace info from path | Major | encryption, hdfs | Hanisha Koneru | Hanisha Koneru | +| [HDFS-13081](https://issues.apache.org/jira/browse/HDFS-13081) | Datanode#checkSecureConfig should allow SASL and privileged HTTP | Major | datanode, security | Xiaoyu Yao | Ajay Kumar | +| [MAPREDUCE-7059](https://issues.apache.org/jira/browse/MAPREDUCE-7059) | Downward Compatibility issue: MR job fails because of unknown setErasureCodingPolicy method from 3.x client to HDFS 2.x cluster | Critical | job submission | Jiandan Yang | Jiandan Yang | +| [HADOOP-15275](https://issues.apache.org/jira/browse/HADOOP-15275) | Incorrect javadoc for return type of RetryPolicy#shouldRetry | Minor | documentation | Nanda kumar | Nanda kumar | +| [YARN-7511](https://issues.apache.org/jira/browse/YARN-7511) | NPE in ContainerLocalizer when localization failed for running container | Major | nodemanager | Tao Yang | Tao Yang | +| [MAPREDUCE-7023](https://issues.apache.org/jira/browse/MAPREDUCE-7023) | TestHadoopArchiveLogs.testCheckFilesAndSeedApps fails on rerun | Minor | test | Gergely Novák | Gergely Novák | +| [HDFS-13040](https://issues.apache.org/jira/browse/HDFS-13040) | Kerberized inotify client fails despite kinit properly | Major | namenode | Wei-Chiu Chuang | Xiao Chen | +| [YARN-7736](https://issues.apache.org/jira/browse/YARN-7736) | Fix itemization in YARN federation document | Minor | documentation | Akira Ajisaka | Sen Zhao | +| [HDFS-13164](https://issues.apache.org/jira/browse/HDFS-13164) | File not closed if streamer fail with DSQuotaExceededException | Major | hdfs-client | Xiao Chen | Xiao Chen | +| [HADOOP-15289](https://issues.apache.org/jira/browse/HADOOP-15289) | FileStatus.readFields() assertion incorrect | Critical | . 
| Steve Loughran | Steve Loughran | +| [HDFS-13109](https://issues.apache.org/jira/browse/HDFS-13109) | Support fully qualified hdfs path in EZ commands | Major | hdfs | Hanisha Koneru | Hanisha Koneru | +| [HADOOP-15296](https://issues.apache.org/jira/browse/HADOOP-15296) | Fix a wrong link for RBF in the top page | Minor | documentation | Takanobu Asanuma | Takanobu Asanuma | +| [HADOOP-15273](https://issues.apache.org/jira/browse/HADOOP-15273) | distcp can't handle remote stores with different checksum algorithms | Critical | tools/distcp | Steve Loughran | Steve Loughran | +| [MAPREDUCE-6930](https://issues.apache.org/jira/browse/MAPREDUCE-6930) | mapreduce.map.cpu.vcores and mapreduce.reduce.cpu.vcores are both present twice in mapred-default.xml | Major | mrv2 | Daniel Templeton | Sen Zhao | +| [HDFS-13190](https://issues.apache.org/jira/browse/HDFS-13190) | Document WebHDFS support for snapshot diff | Major | documentation, webhdfs | Xiaoyu Yao | Lokesh Jain | +| [HDFS-13244](https://issues.apache.org/jira/browse/HDFS-13244) | Add stack, conf, metrics links to utilities dropdown in NN webUI | Major | . | Bharat Viswanadham | Bharat Viswanadham | +| [HDFS-12156](https://issues.apache.org/jira/browse/HDFS-12156) | TestFSImage fails without -Pnative | Major | test | Akira Ajisaka | Akira Ajisaka | +| [HDFS-13239](https://issues.apache.org/jira/browse/HDFS-13239) | Fix non-empty dir warning message when setting default EC policy | Minor | . | Hanisha Koneru | Bharat Viswanadham | +| [YARN-8022](https://issues.apache.org/jira/browse/YARN-8022) | ResourceManager UI cluster/app/\ page fails to render | Blocker | webapp | Tarun Parimi | Tarun Parimi | +| [MAPREDUCE-7064](https://issues.apache.org/jira/browse/MAPREDUCE-7064) | Flaky test TestTaskAttempt#testReducerCustomResourceTypes | Major | client, test | Peter Bacsko | Peter Bacsko | +| [HDFS-12723](https://issues.apache.org/jira/browse/HDFS-12723) | TestReadStripedFileWithMissingBlocks#testReadFileWithMissingBlocks failing consistently. | Major | . | Rushabh S Shah | Ajay Kumar | +| [YARN-7636](https://issues.apache.org/jira/browse/YARN-7636) | Re-reservation count may overflow when cluster resource exhausted for a long time | Major | capacityscheduler | Tao Yang | Tao Yang | +| [HDFS-12886](https://issues.apache.org/jira/browse/HDFS-12886) | Ignore minReplication for block recovery | Major | hdfs, namenode | Lukas Majercak | Lukas Majercak | +| [HDFS-13296](https://issues.apache.org/jira/browse/HDFS-13296) | GenericTestUtils generates paths with drive letter in Windows and fail webhdfs related test cases | Major | . | Xiao Liang | Xiao Liang | +| [HDFS-13268](https://issues.apache.org/jira/browse/HDFS-13268) | TestWebHdfsFileContextMainOperations fails on Windows | Major | . | Íñigo Goiri | Xiao Liang | +| [YARN-8054](https://issues.apache.org/jira/browse/YARN-8054) | Improve robustness of the LocalDirsHandlerService MonitoringTimerTask thread | Major | . | Jonathan Eagles | Jonathan Eagles | +| [HDFS-13195](https://issues.apache.org/jira/browse/HDFS-13195) | DataNode conf page cannot display the current value after reconfig | Minor | datanode | maobaolong | maobaolong | +| [YARN-8063](https://issues.apache.org/jira/browse/YARN-8063) | DistributedShellTimelinePlugin wrongly check for entityId instead of entityType | Major | . | Rohith Sharma K S | Rohith Sharma K S | +| [YARN-8062](https://issues.apache.org/jira/browse/YARN-8062) | yarn rmadmin -getGroups returns group from which the user has been removed | Critical | . 
| Sumana Sathish | Sunil Govindan | +| [YARN-8068](https://issues.apache.org/jira/browse/YARN-8068) | Application Priority field causes NPE in app timeline publish when Hadoop 2.7 based clients to 2.8+ | Blocker | yarn | Sunil Govindan | Sunil Govindan | +| [YARN-7734](https://issues.apache.org/jira/browse/YARN-7734) | YARN-5418 breaks TestContainerLogsPage.testContainerLogPageAccess | Major | . | Miklos Szegedi | Tao Yang | +| [HDFS-13087](https://issues.apache.org/jira/browse/HDFS-13087) | Snapshotted encryption zone information should be immutable | Major | encryption | LiXin Ge | LiXin Ge | +| [HADOOP-12862](https://issues.apache.org/jira/browse/HADOOP-12862) | LDAP Group Mapping over SSL can not specify trust store | Major | . | Wei-Chiu Chuang | Wei-Chiu Chuang | +| [HDFS-13349](https://issues.apache.org/jira/browse/HDFS-13349) | Unresolved merge conflict in ViewFs.md | Blocker | documentation | Gera Shegalov | Yiqun Lin | +| [HADOOP-15317](https://issues.apache.org/jira/browse/HADOOP-15317) | Improve NetworkTopology chooseRandom's loop | Major | . | Xiao Chen | Xiao Chen | +| [HADOOP-15355](https://issues.apache.org/jira/browse/HADOOP-15355) | TestCommonConfigurationFields is broken by HADOOP-15312 | Major | test | Konstantin Shvachko | LiXin Ge | +| [HDFS-13350](https://issues.apache.org/jira/browse/HDFS-13350) | Negative legacy block ID will confuse Erasure Coding to be considered as striped block | Major | erasure-coding | Lei (Eddy) Xu | Lei (Eddy) Xu | +| [YARN-7905](https://issues.apache.org/jira/browse/YARN-7905) | Parent directory permission incorrect during public localization | Critical | . | Bibin A Chundatt | Bilwa S T | +| [HDFS-13420](https://issues.apache.org/jira/browse/HDFS-13420) | License header is displayed in ArchivalStorage/MemoryStorage html pages | Minor | documentation | Akira Ajisaka | Akira Ajisaka | +| [HDFS-13328](https://issues.apache.org/jira/browse/HDFS-13328) | Abstract ReencryptionHandler recursive logic in separate class. | Major | namenode | Surendra Singh Lilhore | Surendra Singh Lilhore | +| [HADOOP-15357](https://issues.apache.org/jira/browse/HADOOP-15357) | Configuration.getPropsWithPrefix no longer does variable substitution | Major | . | Jim Brennan | Jim Brennan | +| [MAPREDUCE-7062](https://issues.apache.org/jira/browse/MAPREDUCE-7062) | Update mapreduce.job.tags description for making use for ATSv2 purpose. | Major | . | Charan Hebri | Charan Hebri | +| [YARN-8073](https://issues.apache.org/jira/browse/YARN-8073) | TimelineClientImpl doesn't honor yarn.timeline-service.versions configuration | Major | . 
| Rohith Sharma K S | Rohith Sharma K S | +| [HDFS-13427](https://issues.apache.org/jira/browse/HDFS-13427) | Fix the section titles of transparent encryption document | Minor | documentation | Akira Ajisaka | Akira Ajisaka | +| [YARN-7527](https://issues.apache.org/jira/browse/YARN-7527) | Over-allocate node resource in async-scheduling mode of CapacityScheduler | Major | capacityscheduler | Tao Yang | Tao Yang | +| [YARN-8120](https://issues.apache.org/jira/browse/YARN-8120) | JVM can crash with SIGSEGV when exiting due to custom leveldb logger | Major | nodemanager, resourcemanager | Jason Lowe | Jason Lowe | +| [YARN-8147](https://issues.apache.org/jira/browse/YARN-8147) | TestClientRMService#testGetApplications sporadically fails | Major | test | Jason Lowe | Jason Lowe | +| [HDFS-13436](https://issues.apache.org/jira/browse/HDFS-13436) | Fix javadoc of package-info.java | Major | documentation | Akira Ajisaka | Akira Ajisaka | +| [HADOOP-14970](https://issues.apache.org/jira/browse/HADOOP-14970) | MiniHadoopClusterManager doesn't respect lack of format option | Minor | . | Erik Krogen | Erik Krogen | +| [HDFS-13330](https://issues.apache.org/jira/browse/HDFS-13330) | ShortCircuitCache#fetchOrCreate never retries | Major | . | Wei-Chiu Chuang | Gabor Bota | +| [YARN-8156](https://issues.apache.org/jira/browse/YARN-8156) | Increase the default value of yarn.timeline-service.app-collector.linger-period.ms | Major | . | Rohith Sharma K S | Charan Hebri | +| [YARN-8165](https://issues.apache.org/jira/browse/YARN-8165) | Incorrect queue name logging in AbstractContainerAllocator | Trivial | capacityscheduler | Weiwei Yang | Weiwei Yang | +| [HDFS-12828](https://issues.apache.org/jira/browse/HDFS-12828) | OIV ReverseXML Processor fails with escaped characters | Critical | hdfs | Erik Krogen | Erik Krogen | +| [HADOOP-15396](https://issues.apache.org/jira/browse/HADOOP-15396) | Some java source files are executable | Minor | . | Akira Ajisaka | Shashikant Banerjee | +| [YARN-6827](https://issues.apache.org/jira/browse/YARN-6827) | [ATS1/1.5] NPE exception while publishing recovering applications into ATS during RM restart. | Major | resourcemanager | Rohith Sharma K S | Rohith Sharma K S | +| [YARN-7786](https://issues.apache.org/jira/browse/YARN-7786) | NullPointerException while launching ApplicationMaster | Major | . | lujie | lujie | +| [HDFS-10183](https://issues.apache.org/jira/browse/HDFS-10183) | Prevent race condition during class initialization | Minor | fs | Pavel Avgustinov | Pavel Avgustinov | +| [HDFS-13388](https://issues.apache.org/jira/browse/HDFS-13388) | RequestHedgingProxyProvider calls multiple configured NNs all the time | Major | hdfs-client | Jinglun | Jinglun | +| [HDFS-13433](https://issues.apache.org/jira/browse/HDFS-13433) | webhdfs requests can be routed incorrectly in federated cluster | Critical | webhdfs | Arpit Agarwal | Arpit Agarwal | +| [HDFS-13408](https://issues.apache.org/jira/browse/HDFS-13408) | MiniDFSCluster to support being built on randomized base directory | Major | test | Xiao Liang | Xiao Liang | +| [HADOOP-15390](https://issues.apache.org/jira/browse/HADOOP-15390) | Yarn RM logs flooded by DelegationTokenRenewer trying to renew KMS tokens | Critical | . | Xiao Chen | Xiao Chen | +| [HDFS-13336](https://issues.apache.org/jira/browse/HDFS-13336) | Test cases of TestWriteToReplica failed in windows | Major | . 
| Xiao Liang | Xiao Liang | +| [YARN-7598](https://issues.apache.org/jira/browse/YARN-7598) | Document how to use classpath isolation for aux-services in YARN | Major | . | Xuan Gong | Xuan Gong | +| [YARN-8183](https://issues.apache.org/jira/browse/YARN-8183) | Fix ConcurrentModificationException inside RMAppAttemptMetrics#convertAtomicLongMaptoLongMap | Critical | yarn | Sumana Sathish | Suma Shivaprasad | +| [HADOOP-15411](https://issues.apache.org/jira/browse/HADOOP-15411) | AuthenticationFilter should use Configuration.getPropsWithPrefix instead of iterator | Critical | . | Suma Shivaprasad | Suma Shivaprasad | +| [MAPREDUCE-7042](https://issues.apache.org/jira/browse/MAPREDUCE-7042) | Killed MR job data does not move to mapreduce.jobhistory.done-dir when ATS v2 is enabled | Major | . | Yesha Vora | Xuan Gong | +| [YARN-8205](https://issues.apache.org/jira/browse/YARN-8205) | Application State is not updated to ATS if AM launching is delayed. | Critical | . | Sumana Sathish | Rohith Sharma K S | +| [YARN-8004](https://issues.apache.org/jira/browse/YARN-8004) | Add unit tests for inter queue preemption for dominant resource calculator | Critical | yarn | Sumana Sathish | Zian Chen | +| [YARN-8221](https://issues.apache.org/jira/browse/YARN-8221) | RMWebServices also need to honor yarn.resourcemanager.display.per-user-apps | Major | webapp | Sunil Govindan | Sunil Govindan | +| [YARN-8210](https://issues.apache.org/jira/browse/YARN-8210) | AMRMClient logging on every heartbeat to track updation of AM RM token causes too many log lines to be generated in AM logs | Major | yarn | Suma Shivaprasad | Suma Shivaprasad | +| [HDFS-13509](https://issues.apache.org/jira/browse/HDFS-13509) | Bug fix for breakHardlinks() of ReplicaInfo/LocalReplica, and fix TestFileAppend failures on Windows | Major | . | Xiao Liang | Xiao Liang | +| [MAPREDUCE-7073](https://issues.apache.org/jira/browse/MAPREDUCE-7073) | Optimize TokenCache#obtainTokensForNamenodesInternal | Major | . | Bibin A Chundatt | Bibin A Chundatt | +| [HADOOP-15406](https://issues.apache.org/jira/browse/HADOOP-15406) | hadoop-nfs dependencies for mockito and junit are not test scope | Major | nfs | Jason Lowe | Jason Lowe | +| [YARN-6385](https://issues.apache.org/jira/browse/YARN-6385) | Fix checkstyle warnings in TestFileSystemApplicationHistoryStore | Minor | . | Yiqun Lin | Yiqun Lin | +| [YARN-8222](https://issues.apache.org/jira/browse/YARN-8222) | Fix potential NPE when gets RMApp from RM context | Critical | . | Tao Yang | Tao Yang | +| [HDFS-13481](https://issues.apache.org/jira/browse/HDFS-13481) | TestRollingFileSystemSinkWithHdfs#testFlushThread: test failed intermittently | Major | hdfs | Gabor Bota | Gabor Bota | +| [YARN-8217](https://issues.apache.org/jira/browse/YARN-8217) | RmAuthenticationFilterInitializer /TimelineAuthenticationFilterInitializer should use Configuration.getPropsWithPrefix instead of iterator | Major | . 
| Suma Shivaprasad | Suma Shivaprasad | +| [YARN-8025](https://issues.apache.org/jira/browse/YARN-8025) | UsersManangers#getComputedResourceLimitForActiveUsers throws NPE due to preComputedActiveUserLimit is empty | Major | yarn | Jiandan Yang | Tao Yang | +| [YARN-8232](https://issues.apache.org/jira/browse/YARN-8232) | RMContainer lost queue name when RM HA happens | Major | resourcemanager | Hu Ziqian | Hu Ziqian | +| [HDFS-13136](https://issues.apache.org/jira/browse/HDFS-13136) | Avoid taking FSN lock while doing group member lookup for FSD permission check | Major | namenode | Xiaoyu Yao | Xiaoyu Yao | +| [HDFS-13537](https://issues.apache.org/jira/browse/HDFS-13537) | TestHdfsHelper does not generate jceks path properly for relative path in Windows | Major | . | Xiao Liang | Xiao Liang | +| [YARN-7003](https://issues.apache.org/jira/browse/YARN-7003) | DRAINING state of queues is not recovered after RM restart | Major | capacityscheduler | Tao Yang | Tao Yang | +| [YARN-8244](https://issues.apache.org/jira/browse/YARN-8244) | TestContainerSchedulerQueuing.testStartMultipleContainers failed | Major | . | Miklos Szegedi | Jim Brennan | +| [YARN-8288](https://issues.apache.org/jira/browse/YARN-8288) | Fix wrong number of table columns in Resource Model doc | Major | . | Weiwei Yang | Weiwei Yang | +| [HDFS-13539](https://issues.apache.org/jira/browse/HDFS-13539) | DFSStripedInputStream NPE when reportCheckSumFailure | Major | . | Xiao Chen | Xiao Chen | +| [YARN-8278](https://issues.apache.org/jira/browse/YARN-8278) | DistributedScheduling is not working in HA | Blocker | . | Bibin A Chundatt | Bibin A Chundatt | +| [HDFS-13581](https://issues.apache.org/jira/browse/HDFS-13581) | DN UI logs link is broken when https is enabled | Minor | datanode | Namit Maheshwari | Shashikant Banerjee | +| [HDFS-13586](https://issues.apache.org/jira/browse/HDFS-13586) | Fsync fails on directories on Windows | Critical | datanode, hdfs | Lukas Majercak | Lukas Majercak | +| [YARN-8179](https://issues.apache.org/jira/browse/YARN-8179) | Preemption does not happen due to natural\_termination\_factor when DRF is used | Major | . | kyungwan nam | kyungwan nam | +| [HADOOP-15450](https://issues.apache.org/jira/browse/HADOOP-15450) | Avoid fsync storm triggered by DiskChecker and handle disk full situation | Blocker | . | Kihwal Lee | Arpit Agarwal | +| [HDFS-13601](https://issues.apache.org/jira/browse/HDFS-13601) | Optimize ByteString conversions in PBHelper | Major | . | Andrew Wang | Andrew Wang | +| [HDFS-13540](https://issues.apache.org/jira/browse/HDFS-13540) | DFSStripedInputStream should only allocate new buffers when reading | Major | . | Xiao Chen | Xiao Chen | +| [HDFS-13588](https://issues.apache.org/jira/browse/HDFS-13588) | Fix TestFsDatasetImpl test failures on Windows | Major | . | Xiao Liang | Xiao Liang | +| [YARN-8310](https://issues.apache.org/jira/browse/YARN-8310) | Handle old NMTokenIdentifier, AMRMTokenIdentifier, and ContainerTokenIdentifier formats | Major | . | Robert Kanter | Robert Kanter | +| [YARN-8344](https://issues.apache.org/jira/browse/YARN-8344) | Missing nm.stop() in TestNodeManagerResync to fix testKillContainersOnResync | Major | . 
| Giovanni Matteo Fumarola | Giovanni Matteo Fumarola | +| [YARN-8327](https://issues.apache.org/jira/browse/YARN-8327) | Fix TestAggregatedLogFormat#testReadAcontainerLogs1 on Windows | Major | log-aggregation | Giovanni Matteo Fumarola | Giovanni Matteo Fumarola | +| [YARN-8346](https://issues.apache.org/jira/browse/YARN-8346) | Upgrading to 3.1 kills running containers with error "Opportunistic container queue is full" | Blocker | . | Rohith Sharma K S | Jason Lowe | +| [HDFS-13611](https://issues.apache.org/jira/browse/HDFS-13611) | Unsafe use of Text as a ConcurrentHashMap key in PBHelperClient | Major | . | Andrew Wang | Andrew Wang | +| [HDFS-13618](https://issues.apache.org/jira/browse/HDFS-13618) | Fix TestDataNodeFaultInjector test failures on Windows | Major | test | Xiao Liang | Xiao Liang | +| [HADOOP-15473](https://issues.apache.org/jira/browse/HADOOP-15473) | Configure serialFilter in KeyProvider to avoid UnrecoverableKeyException caused by JDK-8189997 | Critical | kms | Gabor Bota | Gabor Bota | +| [YARN-8338](https://issues.apache.org/jira/browse/YARN-8338) | TimelineService V1.5 doesn't come up after HADOOP-15406 | Critical | . | Vinod Kumar Vavilapalli | Vinod Kumar Vavilapalli | + + +### TESTS: + +| JIRA | Summary | Priority | Component | Reporter | Contributor | +|:---- |:---- | :--- |:---- |:---- |:---- | +| [HADOOP-15313](https://issues.apache.org/jira/browse/HADOOP-15313) | TestKMS should close providers | Major | kms, test | Xiao Chen | Xiao Chen | +| [HDFS-13503](https://issues.apache.org/jira/browse/HDFS-13503) | Fix TestFsck test failures on Windows | Major | hdfs | Xiao Liang | Xiao Liang | +| [HDFS-13542](https://issues.apache.org/jira/browse/HDFS-13542) | TestBlockManager#testNeededReplicationWhileAppending fails due to improper cluster shutdown in TestBlockManager#testBlockManagerMachinesArray on Windows | Minor | . | Anbang Hu | Anbang Hu | +| [HDFS-13551](https://issues.apache.org/jira/browse/HDFS-13551) | TestMiniDFSCluster#testClusterSetStorageCapacity does not shut down cluster | Minor | . | Anbang Hu | Anbang Hu | +| [HDFS-11700](https://issues.apache.org/jira/browse/HDFS-11700) | TestHDFSServerPorts#testBackupNodePorts doesn't pass on Windows | Minor | . | Anbang Hu | Anbang Hu | +| [HDFS-13548](https://issues.apache.org/jira/browse/HDFS-13548) | TestResolveHdfsSymlink#testFcResolveAfs fails on Windows | Minor | . | Anbang Hu | Anbang Hu | +| [HDFS-13567](https://issues.apache.org/jira/browse/HDFS-13567) | TestNameNodeMetrics#testGenerateEDEKTime,TestNameNodeMetrics#testResourceCheck should use a different cluster basedir | Minor | . | Anbang Hu | Anbang Hu | +| [HDFS-13557](https://issues.apache.org/jira/browse/HDFS-13557) | TestDFSAdmin#testListOpenFiles fails on Windows | Minor | . | Anbang Hu | Anbang Hu | +| [HDFS-13550](https://issues.apache.org/jira/browse/HDFS-13550) | TestDebugAdmin#testComputeMetaCommand fails on Windows | Minor | . | Anbang Hu | Anbang Hu | +| [HDFS-13559](https://issues.apache.org/jira/browse/HDFS-13559) | TestBlockScanner does not close TestContext properly | Minor | . | Anbang Hu | Anbang Hu | +| [HDFS-13570](https://issues.apache.org/jira/browse/HDFS-13570) | TestQuotaByStorageType,TestQuota,TestDFSOutputStream fail on Windows | Minor | . | Anbang Hu | Anbang Hu | +| [HDFS-13558](https://issues.apache.org/jira/browse/HDFS-13558) | TestDatanodeHttpXFrame does not shut down cluster | Minor | . 
| Anbang Hu | Anbang Hu | +| [HDFS-13554](https://issues.apache.org/jira/browse/HDFS-13554) | TestDatanodeRegistration#testForcedRegistration does not shut down cluster | Minor | . | Anbang Hu | Anbang Hu | +| [HDFS-13556](https://issues.apache.org/jira/browse/HDFS-13556) | TestNestedEncryptionZones does not shut down cluster | Minor | . | Anbang Hu | Anbang Hu | +| [HDFS-13560](https://issues.apache.org/jira/browse/HDFS-13560) | Insufficient system resources exist to complete the requested service for some tests on Windows | Minor | . | Anbang Hu | Anbang Hu | +| [HDFS-13592](https://issues.apache.org/jira/browse/HDFS-13592) | TestNameNodePrunesMissingStorages#testNameNodePrunesUnreportedStorages does not shut down cluster properly | Minor | . | Anbang Hu | Anbang Hu | +| [HDFS-13593](https://issues.apache.org/jira/browse/HDFS-13593) | TestBlockReaderLocalLegacy#testBlockReaderLocalLegacyWithAppend fails on Windows | Minor | test | Anbang Hu | Anbang Hu | +| [HDFS-13587](https://issues.apache.org/jira/browse/HDFS-13587) | TestQuorumJournalManager fails on Windows | Minor | . | Anbang Hu | Anbang Hu | +| [HDFS-13619](https://issues.apache.org/jira/browse/HDFS-13619) | TestAuditLoggerWithCommands fails on Windows | Minor | test | Anbang Hu | Anbang Hu | +| [HDFS-13620](https://issues.apache.org/jira/browse/HDFS-13620) | Randomize the test directory path for TestHDFSFileSystemContract | Minor | . | Anbang Hu | Anbang Hu | +| [HDFS-13591](https://issues.apache.org/jira/browse/HDFS-13591) | TestDFSShell#testSetrepLow fails on Windows | Minor | . | Anbang Hu | Anbang Hu | +| [HDFS-13632](https://issues.apache.org/jira/browse/HDFS-13632) | Randomize baseDir for MiniJournalCluster in MiniQJMHACluster for TestDFSAdminWithHA | Minor | . | Anbang Hu | Anbang Hu | + + +### SUB-TASKS: + +| JIRA | Summary | Priority | Component | Reporter | Contributor | +|:---- |:---- | :--- |:---- |:---- |:---- | +| [HDFS-13068](https://issues.apache.org/jira/browse/HDFS-13068) | RBF: Add router admin option to manage safe mode | Major | . | Íñigo Goiri | Yiqun Lin | +| [HADOOP-15040](https://issues.apache.org/jira/browse/HADOOP-15040) | Upgrade AWS SDK to 1.11.271: NPE bug spams logs w/ Yarn Log Aggregation | Blocker | fs/s3 | Aaron Fabbri | Aaron Fabbri | +| [HDFS-13119](https://issues.apache.org/jira/browse/HDFS-13119) | RBF: Manage unavailable clusters | Major | . | Íñigo Goiri | Yiqun Lin | +| [HADOOP-15247](https://issues.apache.org/jira/browse/HADOOP-15247) | Move commons-net up to 3.6 | Minor | fs | Steve Loughran | Steve Loughran | +| [HDFS-13187](https://issues.apache.org/jira/browse/HDFS-13187) | RBF: Fix Routers information shown in the web UI | Minor | . | Wei Yan | Wei Yan | +| [HDFS-13184](https://issues.apache.org/jira/browse/HDFS-13184) | RBF: Improve the unit test TestRouterRPCClientRetries | Minor | test | Yiqun Lin | Yiqun Lin | +| [HDFS-13199](https://issues.apache.org/jira/browse/HDFS-13199) | RBF: Fix the hdfs router page missing label icon issue | Major | federation, hdfs | maobaolong | maobaolong | +| [HADOOP-15264](https://issues.apache.org/jira/browse/HADOOP-15264) | AWS "shaded" SDK 1.11.271 is pulling in netty 4.1.17 | Blocker | fs/s3 | Steve Loughran | Steve Loughran | +| [HADOOP-15090](https://issues.apache.org/jira/browse/HADOOP-15090) | Add ADL troubleshooting doc | Major | documentation, fs/adl | Steve Loughran | Steve Loughran | +| [HDFS-13214](https://issues.apache.org/jira/browse/HDFS-13214) | RBF: Complete document of Router configuration | Major | . 
| Tao Jie | Yiqun Lin | +| [HADOOP-15267](https://issues.apache.org/jira/browse/HADOOP-15267) | S3A multipart upload fails when SSE-C encryption is enabled | Critical | fs/s3 | Anis Elleuch | Anis Elleuch | +| [HDFS-13230](https://issues.apache.org/jira/browse/HDFS-13230) | RBF: ConnectionManager's cleanup task will compare each pool's own active conns with its total conns | Minor | . | Wei Yan | Chao Sun | +| [HDFS-13233](https://issues.apache.org/jira/browse/HDFS-13233) | RBF: MountTableResolver doesn't return the correct mount point of the given path | Major | hdfs | wangzhiyuan | wangzhiyuan | +| [HADOOP-15277](https://issues.apache.org/jira/browse/HADOOP-15277) | remove .FluentPropertyBeanIntrospector from CLI operation log output | Minor | conf | Steve Loughran | Steve Loughran | +| [HDFS-13212](https://issues.apache.org/jira/browse/HDFS-13212) | RBF: Fix router location cache issue | Major | federation, hdfs | Weiwei Wu | Weiwei Wu | +| [HDFS-13232](https://issues.apache.org/jira/browse/HDFS-13232) | RBF: ConnectionPool should return first usable connection | Minor | . | Wei Yan | Ekanth S | +| [HDFS-13240](https://issues.apache.org/jira/browse/HDFS-13240) | RBF: Update some inaccurate document descriptions | Minor | . | Yiqun Lin | Yiqun Lin | +| [HDFS-11399](https://issues.apache.org/jira/browse/HDFS-11399) | Many tests fails in Windows due to injecting disk failures | Major | . | Yiqun Lin | Yiqun Lin | +| [HDFS-13241](https://issues.apache.org/jira/browse/HDFS-13241) | RBF: TestRouterSafemode failed if the port 8888 is in use | Major | hdfs, test | maobaolong | maobaolong | +| [HDFS-13253](https://issues.apache.org/jira/browse/HDFS-13253) | RBF: Quota management incorrect parent-child relationship judgement | Major | . | Yiqun Lin | Yiqun Lin | +| [HDFS-13226](https://issues.apache.org/jira/browse/HDFS-13226) | RBF: Throw the exception if mount table entry validated failed | Major | hdfs | maobaolong | maobaolong | +| [HDFS-12505](https://issues.apache.org/jira/browse/HDFS-12505) | Extend TestFileStatusWithECPolicy with a random EC policy | Major | erasure-coding, test | Takanobu Asanuma | Takanobu Asanuma | +| [HDFS-12587](https://issues.apache.org/jira/browse/HDFS-12587) | Use Parameterized tests in TestBlockInfoStriped and TestLowRedundancyBlockQueues to test all EC policies | Major | erasure-coding, test | Takanobu Asanuma | Takanobu Asanuma | +| [HADOOP-15308](https://issues.apache.org/jira/browse/HADOOP-15308) | TestConfiguration fails on Windows because of paths | Major | test | Íñigo Goiri | Xiao Liang | +| [HDFS-12773](https://issues.apache.org/jira/browse/HDFS-12773) | RBF: Improve State Store FS implementation | Major | . | Íñigo Goiri | Íñigo Goiri | +| [HDFS-13198](https://issues.apache.org/jira/browse/HDFS-13198) | RBF: RouterHeartbeatService throws out CachedStateStore related exceptions when starting router | Minor | . | Wei Yan | Wei Yan | +| [HDFS-13224](https://issues.apache.org/jira/browse/HDFS-13224) | RBF: Resolvers to support mount points across multiple subclusters | Major | . | Íñigo Goiri | Íñigo Goiri | +| [HADOOP-15262](https://issues.apache.org/jira/browse/HADOOP-15262) | AliyunOSS: move files under a directory in parallel when rename a directory | Major | fs/oss | wujinhu | wujinhu | +| [HDFS-13215](https://issues.apache.org/jira/browse/HDFS-13215) | RBF: Move Router to its own module | Major | . 
| Íñigo Goiri | Wei Yan | +| [HDFS-13250](https://issues.apache.org/jira/browse/HDFS-13250) | RBF: Router to manage requests across multiple subclusters | Major | . | Íñigo Goiri | Íñigo Goiri | +| [HDFS-13318](https://issues.apache.org/jira/browse/HDFS-13318) | RBF: Fix FindBugs in hadoop-hdfs-rbf | Minor | . | Íñigo Goiri | Ekanth S | +| [HDFS-12792](https://issues.apache.org/jira/browse/HDFS-12792) | RBF: Test Router-based federation using HDFSContract | Major | . | Íñigo Goiri | Íñigo Goiri | +| [YARN-7986](https://issues.apache.org/jira/browse/YARN-7986) | ATSv2 REST API queries do not return results for uppercase application tags | Critical | . | Charan Hebri | Charan Hebri | +| [HDFS-12512](https://issues.apache.org/jira/browse/HDFS-12512) | RBF: Add WebHDFS | Major | fs | Íñigo Goiri | Wei Yan | +| [HDFS-13291](https://issues.apache.org/jira/browse/HDFS-13291) | RBF: Implement available space based OrderResolver | Major | . | Yiqun Lin | Yiqun Lin | +| [HDFS-13204](https://issues.apache.org/jira/browse/HDFS-13204) | RBF: Optimize name service safe mode icon | Minor | . | liuhongtong | liuhongtong | +| [HDFS-13352](https://issues.apache.org/jira/browse/HDFS-13352) | RBF: Add xsl stylesheet for hdfs-rbf-default.xml | Major | documentation | Takanobu Asanuma | Takanobu Asanuma | +| [HDFS-13347](https://issues.apache.org/jira/browse/HDFS-13347) | RBF: Cache datanode reports | Minor | . | Íñigo Goiri | Íñigo Goiri | +| [HDFS-13289](https://issues.apache.org/jira/browse/HDFS-13289) | RBF: TestConnectionManager#testCleanup() test case need correction | Minor | . | Dibyendu Karmakar | Dibyendu Karmakar | +| [HDFS-13364](https://issues.apache.org/jira/browse/HDFS-13364) | RBF: Support NamenodeProtocol in the Router | Major | . | Íñigo Goiri | Íñigo Goiri | +| [HADOOP-14651](https://issues.apache.org/jira/browse/HADOOP-14651) | Update okhttp version to 2.7.5 | Major | fs/adl | Ray Chiang | Ray Chiang | +| [YARN-6936](https://issues.apache.org/jira/browse/YARN-6936) | [Atsv2] Retrospect storing entities into sub application table from client perspective | Major | . | Rohith Sharma K S | Rohith Sharma K S | +| [HDFS-13353](https://issues.apache.org/jira/browse/HDFS-13353) | RBF: TestRouterWebHDFSContractCreate failed | Major | test | Takanobu Asanuma | Takanobu Asanuma | +| [YARN-8107](https://issues.apache.org/jira/browse/YARN-8107) | Give an informative message when incorrect format is used in ATSv2 filter attributes | Major | ATSv2 | Charan Hebri | Rohith Sharma K S | +| [HDFS-13402](https://issues.apache.org/jira/browse/HDFS-13402) | RBF: Fix java doc for StateStoreFileSystemImpl | Minor | hdfs | Yiran Wu | Yiran Wu | +| [HDFS-13410](https://issues.apache.org/jira/browse/HDFS-13410) | RBF: Support federation with no subclusters | Minor | . | Íñigo Goiri | Íñigo Goiri | +| [HDFS-13384](https://issues.apache.org/jira/browse/HDFS-13384) | RBF: Improve timeout RPC call mechanism | Minor | . | Íñigo Goiri | Íñigo Goiri | +| [HDFS-13045](https://issues.apache.org/jira/browse/HDFS-13045) | RBF: Improve error message returned from subcluster | Minor | . | Wei Yan | Íñigo Goiri | +| [HDFS-13428](https://issues.apache.org/jira/browse/HDFS-13428) | RBF: Remove LinkedList From StateStoreFileImpl.java | Trivial | federation | BELUGA BEHR | BELUGA BEHR | +| [HDFS-13386](https://issues.apache.org/jira/browse/HDFS-13386) | RBF: Wrong date information in list file(-ls) result | Minor | . 
| Dibyendu Karmakar | Dibyendu Karmakar | +| [YARN-8027](https://issues.apache.org/jira/browse/YARN-8027) | Setting hostname of docker container breaks for --net=host in docker 1.13 | Major | yarn | Jim Brennan | Jim Brennan | +| [HADOOP-14999](https://issues.apache.org/jira/browse/HADOOP-14999) | AliyunOSS: provide one asynchronous multi-part based uploading mechanism | Major | fs/oss | Genmao Yu | Genmao Yu | +| [YARN-7810](https://issues.apache.org/jira/browse/YARN-7810) | TestDockerContainerRuntime test failures due to UID lookup of a non-existent user | Major | . | Shane Kumpf | Shane Kumpf | +| [HDFS-13435](https://issues.apache.org/jira/browse/HDFS-13435) | RBF: Improve the error loggings for printing the stack trace | Major | . | Yiqun Lin | Yiqun Lin | +| [YARN-7189](https://issues.apache.org/jira/browse/YARN-7189) | Container-executor doesn't remove Docker containers that error out early | Major | yarn | Eric Badger | Eric Badger | +| [HDFS-13466](https://issues.apache.org/jira/browse/HDFS-13466) | RBF: Add more router-related information to the UI | Minor | . | Wei Yan | Wei Yan | +| [HDFS-13453](https://issues.apache.org/jira/browse/HDFS-13453) | RBF: getMountPointDates should fetch latest subdir time/date when parent dir is not present but /parent/child dirs are present in mount table | Major | . | Dibyendu Karmakar | Dibyendu Karmakar | +| [HDFS-13478](https://issues.apache.org/jira/browse/HDFS-13478) | RBF: Disabled Nameservice store API | Major | . | Íñigo Goiri | Íñigo Goiri | +| [HDFS-13490](https://issues.apache.org/jira/browse/HDFS-13490) | RBF: Fix setSafeMode in the Router | Major | . | Íñigo Goiri | Íñigo Goiri | +| [HDFS-13484](https://issues.apache.org/jira/browse/HDFS-13484) | RBF: Disable Nameservices from the federation | Major | . | Íñigo Goiri | Íñigo Goiri | +| [HDFS-13326](https://issues.apache.org/jira/browse/HDFS-13326) | RBF: Improve the interfaces to modify and view mount tables | Minor | . | Wei Yan | Gang Li | +| [HDFS-13499](https://issues.apache.org/jira/browse/HDFS-13499) | RBF: Show disabled name services in the UI | Minor | . | Íñigo Goiri | Íñigo Goiri | +| [YARN-8215](https://issues.apache.org/jira/browse/YARN-8215) | ATS v2 returns invalid YARN\_CONTAINER\_ALLOCATED\_HOST\_HTTP\_ADDRESS from NM | Critical | ATSv2 | Yesha Vora | Rohith Sharma K S | +| [HDFS-13508](https://issues.apache.org/jira/browse/HDFS-13508) | RBF: Normalize paths (automatically) when adding, updating, removing or listing mount table entries | Minor | . | Ekanth S | Ekanth S | +| [HDFS-13434](https://issues.apache.org/jira/browse/HDFS-13434) | RBF: Fix dead links in RBF document | Major | documentation | Akira Ajisaka | Chetna Chaudhari | +| [YARN-8212](https://issues.apache.org/jira/browse/YARN-8212) | Pending backlog for async allocation threads should be configurable | Major | . | Weiwei Yang | Tao Yang | +| [HDFS-13488](https://issues.apache.org/jira/browse/HDFS-13488) | RBF: Reject requests when a Router is overloaded | Major | . | Íñigo Goiri | Íñigo Goiri | +| [HDFS-13525](https://issues.apache.org/jira/browse/HDFS-13525) | RBF: Add unit test TestStateStoreDisabledNameservice | Major | . 
| Yiqun Lin | Yiqun Lin | +| [YARN-8253](https://issues.apache.org/jira/browse/YARN-8253) | HTTPS Ats v2 api call fails with "bad HTTP parsed" | Critical | ATSv2 | Yesha Vora | Charan Hebri | +| [HADOOP-15454](https://issues.apache.org/jira/browse/HADOOP-15454) | TestRollingFileSystemSinkWithLocal fails on Windows | Major | test | Xiao Liang | Xiao Liang | +| [YARN-8247](https://issues.apache.org/jira/browse/YARN-8247) | Incorrect HTTP status code returned by ATSv2 for non-whitelisted users | Critical | ATSv2 | Charan Hebri | Rohith Sharma K S | +| [YARN-8130](https://issues.apache.org/jira/browse/YARN-8130) | Race condition when container events are published for KILLED applications | Major | ATSv2 | Charan Hebri | Rohith Sharma K S | +| [HADOOP-15498](https://issues.apache.org/jira/browse/HADOOP-15498) | TestHadoopArchiveLogs (#testGenerateScript, #testPrepareWorkingDir) fails on Windows | Minor | . | Anbang Hu | Anbang Hu | +| [HADOOP-15497](https://issues.apache.org/jira/browse/HADOOP-15497) | TestTrash should use proper test path to avoid failing on Windows | Minor | . | Anbang Hu | Anbang Hu | + + +### OTHER: + +| JIRA | Summary | Priority | Component | Reporter | Contributor | +|:---- |:---- | :--- |:---- |:---- |:---- | +| [HDFS-13052](https://issues.apache.org/jira/browse/HDFS-13052) | WebHDFS: Add support for snasphot diff | Major | . | Lokesh Jain | Lokesh Jain | +| [HADOOP-14742](https://issues.apache.org/jira/browse/HADOOP-14742) | Document multi-URI replication Inode for ViewFS | Major | documentation, viewfs | Chris Douglas | Gera Shegalov | + + diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.3/RELEASENOTES.3.0.3.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.3/RELEASENOTES.3.0.3.md new file mode 100644 index 00000000000..9f35dbe15e4 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.3/RELEASENOTES.3.0.3.md @@ -0,0 +1,31 @@ + + +# Apache Hadoop 3.0.3 Release Notes + +These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements. + + +--- + +* [HDFS-13099](https://issues.apache.org/jira/browse/HDFS-13099) | *Minor* | **RBF: Use the ZooKeeper as the default State Store** + +Change default State Store from local file to ZooKeeper. This will require additional zk address to be configured. + + + diff --git a/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.0.3.xml b/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.0.3.xml new file mode 100644 index 00000000000..eea6c599a22 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.0.3.xml @@ -0,0 +1,322 @@ + + + + + + + + + + + A distributed implementation of {@link +org.apache.hadoop.fs.FileSystem}. This is loosely modelled after +Google's GFS.

+ +

The most important difference is that unlike GFS, Hadoop DFS files +have strictly one writer at any one time. Bytes are always appended +to the end of the writer's stream. There is no notion of "record appends" +or "mutations" that are then checked or reordered. Writers simply emit +a byte stream. That byte stream is guaranteed to be stored in the +order written.

]]> +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + This method must return as quickly as possible, since it's called + in a critical section of the NameNode's operation. + + @param succeeded Whether authorization succeeded. + @param userName Name of the user executing the request. + @param addr Remote address of the request. + @param cmd The requested command. + @param src Path of affected source file. + @param dst Path of affected destination file (if any). + @param stat File information for operations that change the file's + metadata (permissions, owner, times, etc).]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
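[Editorial note, not part of the patch series] The package-level javadoc captured in the jdiff snapshot above describes the HDFS write model: a file has strictly one writer at a time, and bytes are only ever appended to the end of the writer's stream. As a purely illustrative aside, the sketch below shows that model from a client's point of view; the file path and the assumption of a reachable HDFS configured through fs.defaultFS are hypothetical, and the calls used are the standard FileSystem create/append API.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SingleWriterSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();            // picks up fs.defaultFS
    FileSystem fs = FileSystem.get(conf);
    Path file = new Path("/tmp/single-writer-demo.txt");  // hypothetical path

    // The only writer: bytes are emitted as a stream and stored in the
    // order written; there are no in-place mutations or "record appends".
    try (FSDataOutputStream out = fs.create(file, true)) {
      out.writeBytes("first batch of bytes\n");
    }

    // A later writer may only append to the end of the existing byte stream;
    // a second concurrent writer on the same file is rejected while the
    // first writer's lease is still held.
    try (FSDataOutputStream out = fs.append(file)) {
      out.writeBytes("appended bytes\n");
    }

    fs.close();
  }
}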
diff --git a/hadoop-project-dist/pom.xml b/hadoop-project-dist/pom.xml index 5f83da3ec1f..5e21b4a5971 100644 --- a/hadoop-project-dist/pom.xml +++ b/hadoop-project-dist/pom.xml @@ -145,7 +145,7 @@ false - 2.9.1 + 3.0.3 -unstable From 23bfd9f7e4ef672613ec59c83d4b47b051949cd1 Mon Sep 17 00:00:00 2001 From: Xiaoyu Yao Date: Mon, 11 Jun 2018 16:02:32 -0700 Subject: [PATCH 085/113] HDDS-72. Add deleteTransactionId field in ContainerInfo. Contributed by Lokesh Jain. --- .../common/helpers/ContainerInfo.java | 27 +++- .../org/apache/hadoop/ozone/OzoneConsts.java | 2 + .../hadoop/utils/MetadataKeyFilters.java | 118 ++++++++++++++---- hadoop-hdds/common/src/main/proto/hdds.proto | 1 + .../hadoop/ozone/TestMetadataStore.java | 61 ++++++++- .../common/helpers/ContainerData.java | 21 ++++ .../common/helpers/ContainerReport.java | 12 ++ .../common/impl/ContainerManagerImpl.java | 15 ++- .../background/BlockDeletingService.java | 16 ++- .../DeleteBlocksCommandHandler.java | 3 + .../StorageContainerDatanodeProtocol.proto | 1 + .../hdds/scm/block/BlockManagerImpl.java | 9 +- .../hdds/scm/block/DeletedBlockLog.java | 3 +- .../hdds/scm/block/DeletedBlockLogImpl.java | 8 +- .../hdds/scm/container/ContainerMapping.java | 35 ++++++ .../scm/container/ContainerStateManager.java | 12 ++ .../hadoop/hdds/scm/container/Mapping.java | 11 ++ .../hdds/scm/block/TestBlockManager.java | 16 +++ .../scm/container/TestContainerMapping.java | 6 +- .../container/closer/TestContainerCloser.java | 3 +- .../TestStorageContainerManagerHelper.java | 6 +- .../common/TestBlockDeletingService.java | 31 ++++- .../hadoop/ozone/ksm/TestKeySpaceManager.java | 3 +- .../ozone/ksm/KSMMetadataManagerImpl.java | 9 +- .../genesis/BenchMarkContainerStateMap.java | 44 +++++-- 25 files changed, 402 insertions(+), 71 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java index 10fd96c175a..2c38d457284 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java @@ -32,6 +32,8 @@ import org.apache.hadoop.util.Time; import java.io.IOException; import java.util.Comparator; +import static java.lang.Math.max; + /** * Class wraps ozone container info. 
*/ @@ -60,6 +62,7 @@ public class ContainerInfo private long stateEnterTime; private String owner; private long containerID; + private long deleteTransactionId; ContainerInfo( long containerID, HddsProtos.LifeCycleState state, @@ -68,7 +71,8 @@ public class ContainerInfo long usedBytes, long numberOfKeys, long stateEnterTime, - String owner) { + String owner, + long deleteTransactionId) { this.containerID = containerID; this.pipeline = pipeline; this.allocatedBytes = allocatedBytes; @@ -78,6 +82,7 @@ public class ContainerInfo this.state = state; this.stateEnterTime = stateEnterTime; this.owner = owner; + this.deleteTransactionId = deleteTransactionId; } /** @@ -96,6 +101,7 @@ public class ContainerInfo builder.setStateEnterTime(info.getStateEnterTime()); builder.setOwner(info.getOwner()); builder.setContainerID(info.getContainerID()); + builder.setDeleteTransactionId(info.getDeleteTransactionId()); return builder.build(); } @@ -141,6 +147,14 @@ public class ContainerInfo return numberOfKeys; } + public long getDeleteTransactionId() { + return deleteTransactionId; + } + + public void updateDeleteTransactionId(long transactionId) { + deleteTransactionId = max(transactionId, deleteTransactionId); + } + public ContainerID containerID() { return new ContainerID(getContainerID()); } @@ -174,6 +188,7 @@ public class ContainerInfo builder.setState(state); builder.setStateEnterTime(stateEnterTime); builder.setContainerID(getContainerID()); + builder.setDeleteTransactionId(deleteTransactionId); if (getOwner() != null) { builder.setOwner(getOwner()); @@ -292,6 +307,7 @@ public class ContainerInfo private long stateEnterTime; private String owner; private long containerID; + private long deleteTransactionId; public Builder setContainerID(long id) { Preconditions.checkState(id >= 0); @@ -334,10 +350,15 @@ public class ContainerInfo return this; } + public Builder setDeleteTransactionId(long deleteTransactionId) { + this.deleteTransactionId = deleteTransactionId; + return this; + } + public ContainerInfo build() { return new - ContainerInfo(containerID, state, pipeline, - allocated, used, keys, stateEnterTime, owner); + ContainerInfo(containerID, state, pipeline, allocated, + used, keys, stateEnterTime, owner, deleteTransactionId); } } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java index 451a08f6089..c40dc8e4ee1 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java @@ -114,6 +114,8 @@ public final class OzoneConsts { public static final String OZONE_HANDLER_LOCAL = "local"; public static final String DELETING_KEY_PREFIX = "#deleting#"; + public static final String DELETED_KEY_PREFIX = "#deleted#"; + public static final String DELETE_TRANSACTION_KEY_PREFIX = "#delTX#"; public static final String OPEN_KEY_PREFIX = "#open#"; public static final String OPEN_KEY_ID_DELIMINATOR = "#"; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataKeyFilters.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataKeyFilters.java index 153e2f79441..a3430f868d0 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataKeyFilters.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataKeyFilters.java @@ -22,17 +22,27 @@ import com.google.common.base.Strings; import org.apache.hadoop.hdfs.DFSUtil; import 
org.apache.hadoop.ozone.OzoneConsts; +import java.util.ArrayList; +import java.util.List; + /** * An utility class to filter levelDB keys. */ public final class MetadataKeyFilters { private static KeyPrefixFilter deletingKeyFilter = - new MetadataKeyFilters.KeyPrefixFilter(OzoneConsts.DELETING_KEY_PREFIX); + new MetadataKeyFilters.KeyPrefixFilter() + .addFilter(OzoneConsts.DELETING_KEY_PREFIX); + + private static KeyPrefixFilter deletedKeyFilter = + new MetadataKeyFilters.KeyPrefixFilter() + .addFilter(OzoneConsts.DELETED_KEY_PREFIX); private static KeyPrefixFilter normalKeyFilter = - new MetadataKeyFilters.KeyPrefixFilter(OzoneConsts.DELETING_KEY_PREFIX, - true); + new MetadataKeyFilters.KeyPrefixFilter() + .addFilter(OzoneConsts.DELETING_KEY_PREFIX, true) + .addFilter(OzoneConsts.DELETED_KEY_PREFIX, true) + .addFilter(OzoneConsts.DELETE_TRANSACTION_KEY_PREFIX, true); private MetadataKeyFilters() { } @@ -41,6 +51,10 @@ public final class MetadataKeyFilters { return deletingKeyFilter; } + public static KeyPrefixFilter getDeletedKeyFilter() { + return deletedKeyFilter; + } + public static KeyPrefixFilter getNormalKeyFilter() { return normalKeyFilter; } @@ -73,37 +87,95 @@ public final class MetadataKeyFilters { */ public static class KeyPrefixFilter implements MetadataKeyFilter { - private String keyPrefix = null; + private List positivePrefixList = new ArrayList<>(); + private List negativePrefixList = new ArrayList<>(); + private boolean atleastOnePositiveMatch; private int keysScanned = 0; private int keysHinted = 0; - private Boolean negative; - public KeyPrefixFilter(String keyPrefix) { - this(keyPrefix, false); + public KeyPrefixFilter() {} + + /** + * KeyPrefixFilter constructor. It is made of positive and negative prefix + * list. PositivePrefixList is the list of prefixes which are accepted + * whereas negativePrefixList contains the list of prefixes which are + * rejected. + * + * @param atleastOnePositiveMatch if positive it requires key to be accepted + * by atleast one positive filter. + */ + public KeyPrefixFilter(boolean atleastOnePositiveMatch) { + this.atleastOnePositiveMatch = atleastOnePositiveMatch; } - public KeyPrefixFilter(String keyPrefix, boolean negative) { - this.keyPrefix = keyPrefix; - this.negative = negative; + public KeyPrefixFilter addFilter(String keyPrefix) { + addFilter(keyPrefix, false); + return this; + } + + public KeyPrefixFilter addFilter(String keyPrefix, boolean negative) { + Preconditions.checkArgument(!Strings.isNullOrEmpty(keyPrefix), + "KeyPrefix is null or empty: " + keyPrefix); + // keyPrefix which needs to be added should not be prefix of any opposing + // filter already present. If keyPrefix is a negative filter it should not + // be a prefix of any positive filter. Nor should any opposing filter be + // a prefix of keyPrefix. + // For example if b0 is accepted b can not be rejected and + // if b is accepted b0 can not be rejected. If these scenarios need to be + // handled we need to add priorities. 
+ if (negative) { + Preconditions.checkArgument(positivePrefixList.stream().noneMatch( + prefix -> prefix.startsWith(keyPrefix) || keyPrefix + .startsWith(prefix)), + "KeyPrefix: " + keyPrefix + " already accepted."); + this.negativePrefixList.add(keyPrefix); + } else { + Preconditions.checkArgument(negativePrefixList.stream().noneMatch( + prefix -> prefix.startsWith(keyPrefix) || keyPrefix + .startsWith(prefix)), + "KeyPrefix: " + keyPrefix + " already rejected."); + this.positivePrefixList.add(keyPrefix); + } + return this; } @Override public boolean filterKey(byte[] preKey, byte[] currentKey, byte[] nextKey) { keysScanned++; - boolean accept = false; - if (Strings.isNullOrEmpty(keyPrefix)) { - accept = true; - } else { - byte [] prefixBytes = DFSUtil.string2Bytes(keyPrefix); - if (currentKey != null && prefixMatch(prefixBytes, currentKey)) { - keysHinted++; - accept = true; - } else { - accept = false; - } + if (currentKey == null) { + return false; } - return (negative) ? !accept : accept; + boolean accept; + + // There are no filters present + if (positivePrefixList.isEmpty() && negativePrefixList.isEmpty()) { + return true; + } + + accept = !positivePrefixList.isEmpty() && positivePrefixList.stream() + .anyMatch(prefix -> { + byte[] prefixBytes = DFSUtil.string2Bytes(prefix); + return prefixMatch(prefixBytes, currentKey); + }); + if (accept) { + keysHinted++; + return true; + } else if (atleastOnePositiveMatch) { + return false; + } + + accept = !negativePrefixList.isEmpty() && negativePrefixList.stream() + .allMatch(prefix -> { + byte[] prefixBytes = DFSUtil.string2Bytes(prefix); + return !prefixMatch(prefixBytes, currentKey); + }); + if (accept) { + keysHinted++; + return true; + } + + return false; } @Override @@ -116,7 +188,7 @@ public final class MetadataKeyFilters { return keysHinted; } - private boolean prefixMatch(byte[] prefix, byte[] key) { + private static boolean prefixMatch(byte[] prefix, byte[] key) { Preconditions.checkNotNull(prefix); Preconditions.checkNotNull(key); if (key.length < prefix.length) { diff --git a/hadoop-hdds/common/src/main/proto/hdds.proto b/hadoop-hdds/common/src/main/proto/hdds.proto index f834c73e59b..a9a703eb000 100644 --- a/hadoop-hdds/common/src/main/proto/hdds.proto +++ b/hadoop-hdds/common/src/main/proto/hdds.proto @@ -146,6 +146,7 @@ message SCMContainerInfo { required uint64 numberOfKeys = 6; optional int64 stateEnterTime = 7; required string owner = 8; + optional int64 deleteTransactionId = 9; } message GetScmInfoRequestProto { diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/TestMetadataStore.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/TestMetadataStore.java index a946c097a78..a5f2f93a044 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/TestMetadataStore.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/TestMetadataStore.java @@ -275,7 +275,7 @@ public class TestMetadataStore { // Filter keys by prefix. // It should returns all "b*" entries. 
- MetadataKeyFilter filter1 = new KeyPrefixFilter("b"); + MetadataKeyFilter filter1 = new KeyPrefixFilter().addFilter("b"); result = store.getRangeKVs(null, 100, filter1); Assert.assertEquals(10, result.size()); Assert.assertTrue(result.stream().allMatch(entry -> @@ -422,4 +422,63 @@ public class TestMetadataStore { Assert.assertEquals(8, count.get()); } + + @Test + public void testKeyPrefixFilter() throws IOException { + List> result = null; + RuntimeException exception = null; + + try { + new KeyPrefixFilter().addFilter("b0", true).addFilter("b"); + } catch (IllegalArgumentException e) { + exception = e; + } + Assert.assertTrue( + exception.getMessage().contains("KeyPrefix: b already rejected")); + + try { + new KeyPrefixFilter().addFilter("b0").addFilter("b", true); + } catch (IllegalArgumentException e) { + exception = e; + } + Assert.assertTrue( + exception.getMessage().contains("KeyPrefix: b already accepted")); + + try { + new KeyPrefixFilter().addFilter("b", true).addFilter("b0"); + } catch (IllegalArgumentException e) { + exception = e; + } + Assert.assertTrue( + exception.getMessage().contains("KeyPrefix: b0 already rejected")); + + try { + new KeyPrefixFilter().addFilter("b").addFilter("b0", true); + } catch (IllegalArgumentException e) { + exception = e; + } + Assert.assertTrue( + exception.getMessage().contains("KeyPrefix: b0 already accepted")); + + MetadataKeyFilter filter1 = new KeyPrefixFilter(true) + .addFilter("a0") + .addFilter("a1") + .addFilter("b", true); + result = store.getRangeKVs(null, 100, filter1); + Assert.assertEquals(2, result.size()); + Assert.assertTrue(result.stream() + .anyMatch(entry -> new String(entry.getKey()).startsWith("a0")) + && result.stream() + .anyMatch(entry -> new String(entry.getKey()).startsWith("a1"))); + + filter1 = new KeyPrefixFilter(true).addFilter("b", true); + result = store.getRangeKVs(null, 100, filter1); + Assert.assertEquals(0, result.size()); + + filter1 = new KeyPrefixFilter().addFilter("b", true); + result = store.getRangeKVs(null, 100, filter1); + Assert.assertEquals(10, result.size()); + Assert.assertTrue(result.stream() + .allMatch(entry -> new String(entry.getKey()).startsWith("a"))); + } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java index 020f45d5895..5767f76b3dc 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java @@ -33,6 +33,8 @@ import java.util.Map; import java.util.TreeMap; import java.util.concurrent.atomic.AtomicLong; +import static java.lang.Math.max; + /** * This class maintains the information about a container in the ozone world. *

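[Editorial note, not part of the patch series] Stepping back briefly from the ContainerData hunks: the reworked KeyPrefixFilter above replaces the single-prefix constructor with chained positive and negative prefixes. The sketch below illustrates how the new API is driven; it reuses only the addFilter and getRangeKVs calls visible in this diff and in TestMetadataStore, the package locations are taken from the file paths in the diff, the wrapper class is hypothetical, and the scan limit of 100 is an arbitrary example value.

import java.io.IOException;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.utils.MetadataKeyFilters.KeyPrefixFilter;
import org.apache.hadoop.utils.MetadataStore;

public class KeyPrefixFilterSketch {

  // Accept only keys still marked for deletion (one positive prefix),
  // as BlockDeletingService does when it scans a container's DB.
  static List<Map.Entry<byte[], byte[]>> pendingDeletes(MetadataStore store)
      throws IOException {
    KeyPrefixFilter deleting = new KeyPrefixFilter()
        .addFilter(OzoneConsts.DELETING_KEY_PREFIX);
    return store.getRangeKVs(null, 100, deleting);
  }

  // Accept "normal" keys by rejecting every bookkeeping prefix this patch
  // introduces; a key passes only if it matches none of the negative prefixes.
  static List<Map.Entry<byte[], byte[]>> liveKeys(MetadataStore store)
      throws IOException {
    KeyPrefixFilter normal = new KeyPrefixFilter()
        .addFilter(OzoneConsts.DELETING_KEY_PREFIX, true)
        .addFilter(OzoneConsts.DELETED_KEY_PREFIX, true)
        .addFilter(OzoneConsts.DELETE_TRANSACTION_KEY_PREFIX, true);
    return store.getRangeKVs(null, 100, normal);
  }
}

Mixing a prefix with its opposite (for example accepting "b0" while rejecting "b") fails with an IllegalArgumentException, which is exactly what the new testKeyPrefixFilter case above exercises.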
@@ -57,6 +59,7 @@ public class ContainerData { * Number of pending deletion blocks in container. */ private int numPendingDeletionBlocks; + private long deleteTransactionId; private AtomicLong readBytes; private AtomicLong writeBytes; private AtomicLong readCount; @@ -78,6 +81,7 @@ public class ContainerData { this.containerID = containerID; this.state = ContainerLifeCycleState.OPEN; this.numPendingDeletionBlocks = 0; + this.deleteTransactionId = 0; this.readCount = new AtomicLong(0L); this.readBytes = new AtomicLong(0L); this.writeCount = new AtomicLong(0L); @@ -101,6 +105,7 @@ public class ContainerData { this.containerID = containerID; this.state = state; this.numPendingDeletionBlocks = 0; + this.deleteTransactionId = 0; this.readCount = new AtomicLong(0L); this.readBytes = new AtomicLong(0L); this.writeCount = new AtomicLong(0L); @@ -425,6 +430,22 @@ public class ContainerData { return this.numPendingDeletionBlocks; } + /** + * Sets deleteTransactionId to latest delete transactionId for the container. + * + * @param transactionId latest transactionId of the container. + */ + public void updateDeleteTransactionId(long transactionId) { + deleteTransactionId = max(transactionId, deleteTransactionId); + } + + /** + * Return the latest deleteTransactionId of the container. + */ + public long getDeleteTransactionId() { + return deleteTransactionId; + } + /** * Get the number of bytes read from the container. * @return the number of bytes read from the container. diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerReport.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerReport.java index 19634f48b81..b2427549cf6 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerReport.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerReport.java @@ -20,6 +20,8 @@ package org.apache.hadoop.ozone.container.common.helpers; import com.google.common.base.Preconditions; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerInfo; +import static java.lang.Math.max; + /** * Container Report iterates the closed containers and sends a container report * to SCM. @@ -35,6 +37,7 @@ public class ContainerReport { private long readBytes; private long writeBytes; private long containerID; + private long deleteTransactionId; public long getContainerID() { return containerID; @@ -63,6 +66,7 @@ public class ContainerReport { this.readBytes = 0L; this.writeCount = 0L; this.writeBytes = 0L; + this.deleteTransactionId = 0; } /** @@ -96,6 +100,9 @@ public class ContainerReport { if (info.hasWriteBytes()) { report.setWriteBytes(info.getWriteBytes()); } + if (info.hasDeleteTransactionId()) { + report.updateDeleteTransactionId(info.getDeleteTransactionId()); + } report.setContainerID(info.getContainerID()); return report; @@ -186,6 +193,10 @@ public class ContainerReport { this.bytesUsed = bytesUsed; } + public void updateDeleteTransactionId(long transactionId) { + this.deleteTransactionId = max(transactionId, deleteTransactionId); + } + /** * Gets a containerInfo protobuf message from ContainerReports. 
* @@ -202,6 +213,7 @@ public class ContainerReport { .setWriteBytes(this.getWriteBytes()) .setFinalhash(this.getFinalhash()) .setContainerID(this.getContainerID()) + .setDeleteTransactionId(this.deleteTransactionId) .build(); } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java index eb437afc02b..02572a8fa2b 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java @@ -20,11 +20,13 @@ package org.apache.hadoop.ozone.container.common.impl; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; +import com.google.common.primitives.Longs; import org.apache.commons.io.FileUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.container.common.helpers .StorageContainerException; +import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.server.datanode.StorageLocation; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; @@ -246,12 +248,18 @@ public class ContainerManagerImpl implements ContainerManager { } containerData = ContainerData.getFromProtBuf(containerDataProto, conf); - // Initialize pending deletion blocks count in in-memory - // container status. + // Initialize pending deletion blocks and deleted blocks count in + // in-memory containerData. MetadataStore metadata = KeyUtils.getDB(containerData, conf); List> underDeletionBlocks = metadata .getSequentialRangeKVs(null, Integer.MAX_VALUE, MetadataKeyFilters.getDeletingKeyFilter()); + byte[] transactionID = metadata.get(DFSUtil.string2Bytes( + OzoneConsts.DELETE_TRANSACTION_KEY_PREFIX + containerID)); + if (transactionID != null) { + containerData + .updateDeleteTransactionId(Longs.fromByteArray(transactionID)); + } containerData.incrPendingDeletionBlocks(underDeletionBlocks.size()); List> liveKeys = metadata @@ -908,7 +916,8 @@ public class ContainerManagerImpl implements ContainerManager { .setWriteCount(container.getWriteCount()) .setReadBytes(container.getReadBytes()) .setWriteBytes(container.getWriteBytes()) - .setState(getState(containerId)); + .setState(getState(containerId)) + .setDeleteTransactionId(container.getDeleteTransactionId()); crBuilder.addReports(ciBuilder.build()); } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/background/BlockDeletingService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/background/BlockDeletingService.java index 99845fa1e40..63f57b48458 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/background/BlockDeletingService.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/background/BlockDeletingService.java @@ -175,8 +175,8 @@ public class BlockDeletingService extends BackgroundService{ // Scan container's db and get list of under deletion blocks MetadataStore meta = KeyUtils.getDB(containerData, conf); // # of blocks to delete is throttled - KeyPrefixFilter filter = new 
KeyPrefixFilter( - OzoneConsts.DELETING_KEY_PREFIX); + KeyPrefixFilter filter = + new KeyPrefixFilter().addFilter(OzoneConsts.DELETING_KEY_PREFIX); List> toDeleteBlocks = meta.getSequentialRangeKVs(null, blockLimitPerTask, filter); if (toDeleteBlocks.isEmpty()) { @@ -214,10 +214,16 @@ public class BlockDeletingService extends BackgroundService{ } }); - // Once files are deleted ... clean up DB + // Once files are deleted... replace deleting entries with deleted entries BatchOperation batch = new BatchOperation(); - succeedBlocks.forEach(entry -> - batch.delete(DFSUtil.string2Bytes(entry))); + succeedBlocks.forEach(entry -> { + String blockId = + entry.substring(OzoneConsts.DELETING_KEY_PREFIX.length()); + String deletedEntry = OzoneConsts.DELETED_KEY_PREFIX + blockId; + batch.put(DFSUtil.string2Bytes(deletedEntry), + DFSUtil.string2Bytes(blockId)); + batch.delete(DFSUtil.string2Bytes(entry)); + }); meta.writeBatch(batch); // update count of pending deletion blocks in in-memory container status containerManager.decrPendingDeletionBlocks(succeedBlocks.size(), diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java index ab69bdc38a6..f954d98f939 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java @@ -186,6 +186,9 @@ public class DeleteBlocksCommandHandler implements CommandHandler { LOG.debug("Block {} not found or already under deletion in" + " container {}, skip deleting it.", blk, containerId); } + containerDB.put(DFSUtil.string2Bytes( + OzoneConsts.DELETE_TRANSACTION_KEY_PREFIX + delTX.getContainerID()), + Longs.toByteArray(delTX.getTxID())); } // update pending deletion blocks count in in-memory container status diff --git a/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto b/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto index ac2314e2c02..95e210e9c8f 100644 --- a/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto +++ b/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto @@ -160,6 +160,7 @@ message ContainerInfo { optional int64 writeBytes = 8; optional string finalhash = 9; optional hadoop.hdds.LifeCycleState state = 10; + optional int64 deleteTransactionId = 11; } /* diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java index d17d6c07f06..7cfbdabe1a9 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java @@ -361,13 +361,10 @@ public class BlockManagerImpl implements BlockManager, BlockmanagerMXBean { } } - // We update SCM DB first, so if this step fails, we end up here, - // nothing gets into the delLog so no blocks will be accidentally - // removed. 
If we write the log first, once log is written, the - // async deleting service will start to scan and might be picking - // up some blocks to do real deletions, that might cause data loss. try { - deletedBlockLog.addTransactions(containerBlocks); + Map deleteTransactionsMap = + deletedBlockLog.addTransactions(containerBlocks); + containerManager.updateDeleteTransactionId(deleteTransactionsMap); } catch (IOException e) { throw new IOException( "Skip writing the deleted blocks info to" diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java index cc32b35a4da..4f4c75563b7 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java @@ -108,9 +108,10 @@ public interface DeletedBlockLog extends Closeable { * number of containers) together (on success) or non (on failure). * * @param containerBlocksMap a map of containerBlocks. + * @return Mapping from containerId to latest transactionId for the container. * @throws IOException */ - void addTransactions(Map> containerBlocksMap) + Map addTransactions(Map> containerBlocksMap) throws IOException; /** diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java index cedc5060352..48fa2eb1112 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java @@ -36,6 +36,7 @@ import java.io.File; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.concurrent.atomic.AtomicInteger; @@ -306,12 +307,15 @@ public class DeletedBlockLogImpl implements DeletedBlockLog { * {@inheritDoc} * * @param containerBlocksMap a map of containerBlocks. + * @return Mapping from containerId to latest transactionId for the container. 
* @throws IOException */ @Override - public void addTransactions(Map> containerBlocksMap) + public Map addTransactions( + Map> containerBlocksMap) throws IOException { BatchOperation batch = new BatchOperation(); + Map deleteTransactionsMap = new HashMap<>(); lock.lock(); try { long currentLatestID = lastTxID; @@ -321,11 +325,13 @@ public class DeletedBlockLogImpl implements DeletedBlockLog { byte[] key = Longs.toByteArray(currentLatestID); DeletedBlocksTransaction tx = constructNewTransaction(currentLatestID, entry.getKey(), entry.getValue()); + deleteTransactionsMap.put(entry.getKey(), currentLatestID); batch.put(key, tx.toByteArray()); } lastTxID = currentLatestID; batch.put(LATEST_TXID, Longs.toByteArray(lastTxID)); deletedStore.writeBatch(batch); + return deleteTransactionsMap; } finally { lock.unlock(); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java index b961c38c133..b563e90e765 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java @@ -341,6 +341,39 @@ public class ContainerMapping implements Mapping { } } + /** + * Update deleteTransactionId according to deleteTransactionMap. + * + * @param deleteTransactionMap Maps the containerId to latest delete + * transaction id for the container. + * @throws IOException + */ + public void updateDeleteTransactionId(Map deleteTransactionMap) + throws IOException { + lock.lock(); + try { + for (Map.Entry entry : deleteTransactionMap.entrySet()) { + long containerID = entry.getKey(); + byte[] dbKey = Longs.toByteArray(containerID); + byte[] containerBytes = containerStore.get(dbKey); + if (containerBytes == null) { + throw new SCMException( + "Failed to increment number of deleted blocks for container " + + containerID + ", reason : " + "container doesn't exist.", + SCMException.ResultCodes.FAILED_TO_FIND_CONTAINER); + } + ContainerInfo containerInfo = ContainerInfo.fromProtobuf( + HddsProtos.SCMContainerInfo.parseFrom(containerBytes)); + containerInfo.updateDeleteTransactionId(entry.getValue()); + containerStore.put(dbKey, containerInfo.getProtobuf().toByteArray()); + containerStateManager + .updateDeleteTransactionId(containerID, entry.getValue()); + } + } finally { + lock.unlock(); + } + } + /** * Returns the container State Manager. 
* @@ -441,6 +474,7 @@ public class ContainerMapping implements Mapping { builder.setState(knownState.getState()); builder.setStateEnterTime(knownState.getStateEnterTime()); builder.setContainerID(knownState.getContainerID()); + builder.setDeleteTransactionId(knownState.getDeleteTransactionId()); if (knownState.getOwner() != null) { builder.setOwner(knownState.getOwner()); } @@ -571,6 +605,7 @@ public class ContainerMapping implements Mapping { .setPipeline(oldInfo.getPipeline()) .setState(oldInfo.getState()) .setUsedBytes(oldInfo.getUsedBytes()) + .setDeleteTransactionId(oldInfo.getDeleteTransactionId()) .build(); containerStore.put(dbKey, newInfo.getProtobuf().toByteArray()); } else { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java index 9dfa660fd16..08733bd707d 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java @@ -304,6 +304,7 @@ public class ContainerStateManager implements Closeable { .setStateEnterTime(Time.monotonicNow()) .setOwner(owner) .setContainerID(containerCount.incrementAndGet()) + .setDeleteTransactionId(0) .build(); Preconditions.checkNotNull(containerInfo); containers.addContainer(containerInfo); @@ -351,6 +352,17 @@ public class ContainerStateManager implements Closeable { return containers.getContainerInfo(info); } + /** + * Update deleteTransactionId for a container. + * + * @param containerID ContainerID of the container whose delete + * transactionId needs to be updated. + * @param transactionId latest transactionId to be updated for the container + */ + public void updateDeleteTransactionId(Long containerID, long transactionId) { + containers.getContainerMap().get(ContainerID.valueof(containerID)) + .updateDeleteTransactionId(transactionId); + } /** * Return a container matching the attributes specified. diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/Mapping.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/Mapping.java index ab425205fac..e77a4b60f25 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/Mapping.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/Mapping.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hdds.scm.node.NodeManager; import java.io.Closeable; import java.io.IOException; import java.util.List; +import java.util.Map; /** * Mapping class contains the mapping from a name to a pipeline mapping. This is @@ -104,6 +105,16 @@ public interface Mapping extends Closeable { ContainerReportsProto reports) throws IOException; + /** + * Update deleteTransactionId according to deleteTransactionMap. + * + * @param deleteTransactionMap Maps the containerId to latest delete + * transaction id for the container. + * @throws IOException + */ + void updateDeleteTransactionId(Map deleteTransactionMap) + throws IOException; + /** * Returns the nodeManager. 
* @return NodeManager diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java index f3e42ea7316..9fbb9fa5f55 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java @@ -109,8 +109,24 @@ public class TestBlockManager { AllocatedBlock block = blockManager.allocateBlock(DEFAULT_BLOCK_SIZE, type, factor, containerOwner); Assert.assertNotNull(block); + long transactionId = + mapping.getContainer(block.getBlockID().getContainerID()) + .getDeleteTransactionId(); + Assert.assertEquals(0, transactionId); blockManager.deleteBlocks(Collections.singletonList( block.getBlockID())); + Assert.assertEquals(++transactionId, + mapping.getContainer(block.getBlockID().getContainerID()) + .getDeleteTransactionId()); + + block = blockManager.allocateBlock(DEFAULT_BLOCK_SIZE, + type, factor, containerOwner); + Assert.assertNotNull(block); + blockManager.deleteBlocks(Collections.singletonList( + block.getBlockID())); + Assert.assertEquals(++transactionId, + mapping.getContainer(block.getBlockID().getContainerID()) + .getDeleteTransactionId()); } @Test diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java index ba2ab64a766..eefb639d59e 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java @@ -203,7 +203,8 @@ public class TestContainerMapping { .setWriteCount(100000000L) .setReadBytes(2000000000L) .setWriteBytes(2000000000L) - .setContainerID(info.getContainerID()); + .setContainerID(info.getContainerID()) + .setDeleteTransactionId(0); reports.add(ciBuilder.build()); @@ -237,7 +238,8 @@ public class TestContainerMapping { .setWriteCount(500000000L) .setReadBytes(5368705120L) .setWriteBytes(5368705120L) - .setContainerID(info.getContainerID()); + .setContainerID(info.getContainerID()) + .setDeleteTransactionId(0); reports.add(ciBuilder.build()); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/TestContainerCloser.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/TestContainerCloser.java index 0a3efda8302..0d7848f7987 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/TestContainerCloser.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/TestContainerCloser.java @@ -212,7 +212,8 @@ public class TestContainerCloser { .setReadCount(100000000L) .setWriteCount(100000000L) .setReadBytes(2000000000L) - .setWriteBytes(2000000000L); + .setWriteBytes(2000000000L) + .setDeleteTransactionId(0); reports.addReports(ciBuilder); mapping.processContainerReports(TestUtils.getDatanodeDetails(), reports.build()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java index 99e69ecd266..c937980f517 100644 --- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java @@ -38,6 +38,7 @@ import org.apache.hadoop.ozone.web.handlers.UserArgs; import org.apache.hadoop.ozone.web.handlers.VolumeArgs; import org.apache.hadoop.ozone.web.interfaces.StorageHandler; import org.apache.hadoop.ozone.web.utils.OzoneUtils; +import org.apache.hadoop.utils.MetadataKeyFilters; import org.apache.hadoop.utils.MetadataKeyFilters.KeyPrefixFilter; import org.apache.hadoop.utils.MetadataKeyFilters.MetadataKeyFilter; import org.apache.hadoop.utils.MetadataStore; @@ -120,7 +121,7 @@ public class TestStorageContainerManagerHelper { List pendingDeletionBlocks = Lists.newArrayList(); MetadataStore meta = getContainerMetadata(containerID); KeyPrefixFilter filter = - new KeyPrefixFilter(OzoneConsts.DELETING_KEY_PREFIX); + new KeyPrefixFilter().addFilter(OzoneConsts.DELETING_KEY_PREFIX); List> kvs = meta .getRangeKVs(null, Integer.MAX_VALUE, filter); kvs.forEach(entry -> { @@ -147,7 +148,8 @@ public class TestStorageContainerManagerHelper { (preKey, currentKey, nextKey) -> !DFSUtil.bytes2String(currentKey) .startsWith(OzoneConsts.DELETING_KEY_PREFIX); List> kvs = - meta.getRangeKVs(null, Integer.MAX_VALUE, filter); + meta.getRangeKVs(null, Integer.MAX_VALUE, + MetadataKeyFilters.getNormalKeyFilter()); kvs.forEach(entry -> { allBlocks.add(Longs.fromByteArray(entry.getKey())); }); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java index 56fd0b179ac..0686e4e5d30 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java @@ -66,8 +66,6 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys .OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER; import static org.apache.hadoop.ozone.OzoneConfigKeys .OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL; -import static org.apache.hadoop.ozone.container - .ContainerTestHelper.createSingleNodePipeline; /** * Tests to test block deleting service. 
@@ -183,8 +181,15 @@ public class TestBlockDeletingService { private int getUnderDeletionBlocksCount(MetadataStore meta) throws IOException { List> underDeletionBlocks = - meta.getRangeKVs(null, 100, new MetadataKeyFilters.KeyPrefixFilter( - OzoneConsts.DELETING_KEY_PREFIX)); + meta.getRangeKVs(null, 100, new MetadataKeyFilters.KeyPrefixFilter() + .addFilter(OzoneConsts.DELETING_KEY_PREFIX)); + return underDeletionBlocks.size(); + } + + private int getDeletedBlocksCount(MetadataStore db) throws IOException { + List> underDeletionBlocks = + db.getRangeKVs(null, 100, new MetadataKeyFilters.KeyPrefixFilter() + .addFilter(OzoneConsts.DELETED_KEY_PREFIX)); return underDeletionBlocks.size(); } @@ -205,20 +210,34 @@ public class TestBlockDeletingService { List containerData = Lists.newArrayList(); containerManager.listContainer(0L, 1, containerData); Assert.assertEquals(1, containerData.size()); - MetadataStore meta = KeyUtils.getDB(containerData.get(0), conf); - // Ensure there is 100 blocks under deletion + MetadataStore meta = KeyUtils.getDB(containerData.get(0), conf); + Map containerMap = + ((ContainerManagerImpl) containerManager).getContainerMap(); + long transactionId = + containerMap.get(containerData.get(0).getContainerID()) + .getDeleteTransactionId(); + + // Number of deleted blocks in container should be equal to 0 before + // block delete + Assert.assertEquals(0, transactionId); + + // Ensure there are 3 blocks under deletion and 0 deleted blocks Assert.assertEquals(3, getUnderDeletionBlocksCount(meta)); + Assert.assertEquals(0, getDeletedBlocksCount(meta)); // An interval will delete 1 * 2 blocks deleteAndWait(svc, 1); Assert.assertEquals(1, getUnderDeletionBlocksCount(meta)); + Assert.assertEquals(2, getDeletedBlocksCount(meta)); deleteAndWait(svc, 2); Assert.assertEquals(0, getUnderDeletionBlocksCount(meta)); + Assert.assertEquals(3, getDeletedBlocksCount(meta)); deleteAndWait(svc, 3); Assert.assertEquals(0, getUnderDeletionBlocksCount(meta)); + Assert.assertEquals(3, getDeletedBlocksCount(meta)); svc.shutdown(); shutdownContainerMangaer(containerManager); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManager.java index f07a97de1ca..36e4b866427 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManager.java @@ -638,7 +638,8 @@ public class TestKeySpaceManager { MetadataStore store = cluster.getKeySpaceManager(). getMetadataManager().getStore(); List> list = store.getRangeKVs(null, 10, - new MetadataKeyFilters.KeyPrefixFilter(DELETING_KEY_PREFIX)); + new MetadataKeyFilters.KeyPrefixFilter() + .addFilter(DELETING_KEY_PREFIX)); Assert.assertEquals(1, list.size()); // Delete the key again to test deleting non-existing key. 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMMetadataManagerImpl.java index 13cc40bb344..6664a324620 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMMetadataManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMMetadataManagerImpl.java @@ -352,8 +352,8 @@ public class KSMMetadataManagerImpl implements KSMMetadataManager { ResultCodes.FAILED_BUCKET_NOT_FOUND); } - MetadataKeyFilter filter = new KeyPrefixFilter( - getKeyWithDBPrefix(volumeName, bucketName, keyPrefix)); + MetadataKeyFilter filter = new KeyPrefixFilter() + .addFilter(getKeyWithDBPrefix(volumeName, bucketName, keyPrefix)); List> rangeResult; if (!Strings.isNullOrEmpty(startKey)) { @@ -449,7 +449,8 @@ public class KSMMetadataManagerImpl implements KSMMetadataManager { private VolumeList getAllVolumes() throws IOException { // Scan all users in database - KeyPrefixFilter filter = new KeyPrefixFilter(OzoneConsts.KSM_USER_PREFIX); + KeyPrefixFilter filter = + new KeyPrefixFilter().addFilter(OzoneConsts.KSM_USER_PREFIX); // We are not expecting a huge number of users per cluster, // it should be fine to scan all users in db and return us a // list of volume names in string per user. @@ -497,7 +498,7 @@ public class KSMMetadataManagerImpl implements KSMMetadataManager { List keyBlocksList = Lists.newArrayList(); long now = Time.now(); final MetadataKeyFilter openKeyFilter = - new KeyPrefixFilter(OPEN_KEY_PREFIX); + new KeyPrefixFilter().addFilter(OPEN_KEY_PREFIX); List> rangeResult = store.getSequentialRangeKVs(null, Integer.MAX_VALUE, openKeyFilter); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkContainerStateMap.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkContainerStateMap.java index e3f6cc95d89..1b1153b18a1 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkContainerStateMap.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkContainerStateMap.java @@ -64,9 +64,14 @@ public class BenchMarkContainerStateMap { .setPipeline(pipeline) // This is bytes allocated for blocks inside container, not the // container size - .setAllocatedBytes(0).setUsedBytes(0).setNumberOfKeys(0) - .setStateEnterTime(Time.monotonicNow()).setOwner("OZONE") - .setContainerID(x).build(); + .setAllocatedBytes(0) + .setUsedBytes(0) + .setNumberOfKeys(0) + .setStateEnterTime(Time.monotonicNow()) + .setOwner("OZONE") + .setContainerID(x) + .setDeleteTransactionId(0) + .build(); stateMap.addContainer(containerInfo); currentCount++; } catch (SCMException e) { @@ -80,9 +85,14 @@ public class BenchMarkContainerStateMap { .setPipeline(pipeline) // This is bytes allocated for blocks inside container, not the // container size - .setAllocatedBytes(0).setUsedBytes(0).setNumberOfKeys(0) - .setStateEnterTime(Time.monotonicNow()).setOwner("OZONE") - .setContainerID(y).build(); + .setAllocatedBytes(0) + .setUsedBytes(0) + .setNumberOfKeys(0) + .setStateEnterTime(Time.monotonicNow()) + .setOwner("OZONE") + .setContainerID(y) + .setDeleteTransactionId(0) + .build(); stateMap.addContainer(containerInfo); currentCount++; } catch (SCMException e) { @@ -95,9 +105,14 @@ public class BenchMarkContainerStateMap { .setPipeline(pipeline) // This is bytes allocated for blocks inside container, not the // container size - 
.setAllocatedBytes(0).setUsedBytes(0).setNumberOfKeys(0) - .setStateEnterTime(Time.monotonicNow()).setOwner("OZONE") - .setContainerID(currentCount++).build(); + .setAllocatedBytes(0) + .setUsedBytes(0) + .setNumberOfKeys(0) + .setStateEnterTime(Time.monotonicNow()) + .setOwner("OZONE") + .setContainerID(currentCount++) + .setDeleteTransactionId(0) + .build(); stateMap.addContainer(containerInfo); } catch (SCMException e) { e.printStackTrace(); @@ -155,9 +170,14 @@ public class BenchMarkContainerStateMap { .setPipeline(pipeline) // This is bytes allocated for blocks inside container, not the // container size - .setAllocatedBytes(0).setUsedBytes(0).setNumberOfKeys(0) - .setStateEnterTime(Time.monotonicNow()).setOwner("OZONE") - .setContainerID(cid).build(); + .setAllocatedBytes(0) + .setUsedBytes(0) + .setNumberOfKeys(0) + .setStateEnterTime(Time.monotonicNow()) + .setOwner("OZONE") + .setContainerID(cid) + .setDeleteTransactionId(0) + .build(); state.stateMap.addContainer(containerInfo); } From 2b2f672022547e8c19658213ac5a4090bf5b6c72 Mon Sep 17 00:00:00 2001 From: Inigo Goiri Date: Mon, 11 Jun 2018 19:25:56 -0700 Subject: [PATCH 086/113] YARN-8370. Some Node Manager tests fail on Windows due to improper path/file separator. Contributed by Anbang Hu. --- .../yarn/server/nodemanager/TestLocalDirsHandlerService.java | 4 ++-- .../logaggregation/TestAppLogAggregatorImpl.java | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLocalDirsHandlerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLocalDirsHandlerService.java index e704c8fe36b..4183fbedfec 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLocalDirsHandlerService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLocalDirsHandlerService.java @@ -150,9 +150,9 @@ public class TestLocalDirsHandlerService { Assert .assertEquals(utilizationPerc, nm.getGoodLogDirsDiskUtilizationPerc()); - Assert.assertEquals(localDir2, + Assert.assertEquals(new Path(localDir2).toString(), dirSvc.getConfig().get(LocalDirsHandlerService.NM_GOOD_LOCAL_DIRS)); - Assert.assertEquals(logDir2, + Assert.assertEquals(new Path(logDir2).toString(), dirSvc.getConfig().get(LocalDirsHandlerService.NM_GOOD_LOG_DIRS)); Assert.assertEquals(localDir1 + "," + localDir2, dirSvc.getConfig().get(YarnConfiguration.NM_LOCAL_DIRS)); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestAppLogAggregatorImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestAppLogAggregatorImpl.java index 95f4c320cbc..b74eabc0d98 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestAppLogAggregatorImpl.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestAppLogAggregatorImpl.java @@ -324,7 +324,7 @@ public class TestAppLogAggregatorImpl { for(int i = 0; i < tasks.length; i++) { FileDeletionTask task = (FileDeletionTask) tasks[i]; for (Path path: task.getBaseDirs()) { - paths.add(path.toUri().getRawPath()); + paths.add(new File(path.toUri().getRawPath()).getAbsolutePath()); } } verifyFilesToDelete(expectedPathsForDeletion, paths); From 2df73dace06cfd2b3193a14cd455297f8f989617 Mon Sep 17 00:00:00 2001 From: Rohith Sharma K S Date: Tue, 12 Jun 2018 17:13:09 +0530 Subject: [PATCH 087/113] =?UTF-8?q?YARN-8405.=20RM=20zk-state-store.parent?= =?UTF-8?q?-path=20ACLs=20has=20been=20changed=20since=20HADOOP-14773.=20C?= =?UTF-8?q?ontributed=20by=20=20=C3=8D=C3=B1igo=20Goiri.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../hadoop/util/curator/ZKCuratorManager.java | 14 ++++++++- .../driver/impl/StateStoreZooKeeperImpl.java | 6 +++- .../impl/ZookeeperFederationStateStore.java | 8 +++-- .../recovery/ZKRMStateStore.java | 2 +- .../capacity/conf/ZKConfigurationStore.java | 2 +- .../recovery/TestZKRMStateStore.java | 29 ++++++++++++------- ...TestZKRMStateStoreZKClientConnections.java | 22 +++++++------- 7 files changed, 53 insertions(+), 30 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/curator/ZKCuratorManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/curator/ZKCuratorManager.java index 11d38c2298f..8276b6e29c6 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/curator/ZKCuratorManager.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/curator/ZKCuratorManager.java @@ -290,6 +290,18 @@ public final class ZKCuratorManager { * @throws Exception If it cannot create the file. */ public void createRootDirRecursively(String path) throws Exception { + createRootDirRecursively(path, null); + } + + /** + * Utility function to ensure that the configured base znode exists. + * This recursively creates the znode as well as all of its parents. + * @param path Path of the znode to create. + * @param zkAcl ACLs for ZooKeeper. + * @throws Exception If it cannot create the file. 
+ */ + public void createRootDirRecursively(String path, List zkAcl) + throws Exception { String[] pathParts = path.split("/"); Preconditions.checkArgument( pathParts.length >= 1 && pathParts[0].isEmpty(), @@ -298,7 +310,7 @@ public final class ZKCuratorManager { for (int i = 1; i < pathParts.length; i++) { sb.append("/").append(pathParts[i]); - create(sb.toString()); + create(sb.toString(), zkAcl); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreZooKeeperImpl.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreZooKeeperImpl.java index cd5372d6eed..c6441caf821 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreZooKeeperImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreZooKeeperImpl.java @@ -35,6 +35,7 @@ import org.apache.hadoop.hdfs.server.federation.store.records.BaseRecord; import org.apache.hadoop.hdfs.server.federation.store.records.Query; import org.apache.hadoop.hdfs.server.federation.store.records.QueryResult; import org.apache.hadoop.util.curator.ZKCuratorManager; +import org.apache.zookeeper.data.ACL; import org.apache.zookeeper.data.Stat; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -70,6 +71,8 @@ public class StateStoreZooKeeperImpl extends StateStoreSerializableImpl { /** Interface to ZooKeeper. */ private ZKCuratorManager zkManager; + /** ACLs for ZooKeeper. */ + private List zkAcl; @Override @@ -83,6 +86,7 @@ public class StateStoreZooKeeperImpl extends StateStoreSerializableImpl { try { this.zkManager = new ZKCuratorManager(conf); this.zkManager.start(); + this.zkAcl = ZKCuratorManager.getZKAcls(conf); } catch (IOException e) { LOG.error("Cannot initialize the ZK connection", e); return false; @@ -95,7 +99,7 @@ public class StateStoreZooKeeperImpl extends StateStoreSerializableImpl { String className, Class clazz) { try { String checkPath = getNodePath(baseZNode, className); - zkManager.createRootDirRecursively(checkPath); + zkManager.createRootDirRecursively(checkPath, zkAcl); return true; } catch (Exception e) { LOG.error("Cannot initialize ZK node for {}: {}", diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/ZookeeperFederationStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/ZookeeperFederationStateStore.java index 6ae7d3c688a..fde5f2c54da 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/ZookeeperFederationStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/ZookeeperFederationStateStore.java @@ -73,6 +73,7 @@ import org.apache.hadoop.yarn.server.federation.store.utils.FederationMembership import org.apache.hadoop.yarn.server.federation.store.utils.FederationPolicyStoreInputValidator; import org.apache.hadoop.yarn.server.federation.store.utils.FederationStateStoreUtils; import org.apache.hadoop.yarn.server.records.Version; +import org.apache.zookeeper.data.ACL; import org.slf4j.Logger; import 
org.slf4j.LoggerFactory; @@ -133,9 +134,10 @@ public class ZookeeperFederationStateStore implements FederationStateStore { // Create base znode for each entity try { - zkManager.createRootDirRecursively(membershipZNode); - zkManager.createRootDirRecursively(appsZNode); - zkManager.createRootDirRecursively(policiesZNode); + List zkAcl = ZKCuratorManager.getZKAcls(conf); + zkManager.createRootDirRecursively(membershipZNode, zkAcl); + zkManager.createRootDirRecursively(appsZNode, zkAcl); + zkManager.createRootDirRecursively(policiesZNode, zkAcl); } catch (Exception e) { String errMsg = "Cannot create base directories: " + e.getMessage(); FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java index de1f1ada367..bd76a8c434b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java @@ -379,7 +379,7 @@ public class ZKRMStateStore extends RMStateStore { @Override public synchronized void startInternal() throws Exception { // ensure root dirs exist - zkManager.createRootDirRecursively(znodeWorkingPath); + zkManager.createRootDirRecursively(znodeWorkingPath, zkAcl); create(zkRootNodePath); setRootNodeAcls(); delete(fencingNodePath); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/ZKConfigurationStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/ZKConfigurationStore.java index 497ebfeff9e..d9fd0e4a414 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/ZKConfigurationStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/ZKConfigurationStore.java @@ -90,7 +90,7 @@ public class ZKConfigurationStore extends YarnConfigurationStore { this.confStorePath = getNodePath(znodeParentPath, CONF_STORE_PATH); this.fencingNodePath = getNodePath(znodeParentPath, FENCING_PATH); - zkManager.createRootDirRecursively(znodeParentPath); + zkManager.createRootDirRecursively(znodeParentPath, zkAcl); zkManager.delete(fencingNodePath); if (!zkManager.exists(logsPath)) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java index 4cba2664d15..11be3b15ac4 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java @@ -25,6 +25,7 @@ import org.apache.curator.test.TestingServer; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.ha.HAServiceProtocol; import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo; import org.apache.hadoop.io.Text; @@ -206,7 +207,7 @@ public class TestZKRMStateStore extends RMStateStoreTestBase { private RMStateStore createStore(Configuration conf) throws Exception { workingZnode = "/jira/issue/3077/rmstore"; - conf.set(YarnConfiguration.RM_ZK_ADDRESS, + conf.set(CommonConfigurationKeys.ZK_ADDRESS, curatorTestingServer.getConnectString()); conf.set(YarnConfiguration.ZK_RM_STATE_STORE_PARENT_PATH, workingZnode); conf.setLong(YarnConfiguration.RM_EPOCH, epoch); @@ -339,7 +340,7 @@ public class TestZKRMStateStore extends RMStateStoreTestBase { public RMStateStore getRMStateStore() throws Exception { YarnConfiguration conf = new YarnConfiguration(); workingZnode = "/jira/issue/3077/rmstore"; - conf.set(YarnConfiguration.RM_ZK_ADDRESS, + conf.set(CommonConfigurationKeys.ZK_ADDRESS, curatorTestingServer.getConnectString()); conf.set(YarnConfiguration.ZK_RM_STATE_STORE_PARENT_PATH, workingZnode); this.store = new TestZKRMStateStoreInternal(conf, workingZnode) { @@ -380,9 +381,9 @@ public class TestZKRMStateStore extends RMStateStoreTestBase { conf.set(YarnConfiguration.RM_HA_IDS, rmIds); conf.setBoolean(YarnConfiguration.RECOVERY_ENABLED, true); conf.set(YarnConfiguration.RM_STORE, ZKRMStateStore.class.getName()); - conf.set(YarnConfiguration.RM_ZK_ADDRESS, + conf.set(CommonConfigurationKeys.ZK_ADDRESS, curatorTestServer.getConnectString()); - conf.setInt(YarnConfiguration.RM_ZK_TIMEOUT_MS, ZK_TIMEOUT_MS); + conf.setInt(CommonConfigurationKeys.ZK_TIMEOUT_MS, ZK_TIMEOUT_MS); conf.set(YarnConfiguration.RM_HA_ID, rmId); conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS, "localhost:0"); conf.setBoolean( @@ -419,31 +420,37 @@ public class TestZKRMStateStore extends RMStateStoreTestBase { public void testZKRootPathAcls() throws Exception { StateChangeRequestInfo req = new StateChangeRequestInfo( HAServiceProtocol.RequestSource.REQUEST_BY_USER); - String rootPath = - YarnConfiguration.DEFAULT_ZK_RM_STATE_STORE_PARENT_PATH + "/" + - ZKRMStateStore.ROOT_ZNODE_NAME; + String parentPath = YarnConfiguration.DEFAULT_ZK_RM_STATE_STORE_PARENT_PATH; + String rootPath = parentPath + "/" + ZKRMStateStore.ROOT_ZNODE_NAME; // Start RM with HA enabled Configuration conf = createHARMConf("rm1,rm2", "rm1", 1234, false, curatorTestingServer); + conf.set(YarnConfiguration.RM_ZK_ACL, "world:anyone:rwca"); + int perm = 23;// rwca=1+2+4+16 ResourceManager rm = new MockRM(conf); rm.start(); rm.getRMContext().getRMAdminService().transitionToActive(req); - List acls = - ((ZKRMStateStore)rm.getRMContext().getStateStore()).getACL(rootPath); + ZKRMStateStore stateStore = (ZKRMStateStore) rm.getRMContext().getStateStore(); + List acls = stateStore.getACL(rootPath); assertEquals(acls.size(), 2); // CREATE and DELETE permissions for root node based on RM ID verifyZKACL("digest", "localhost", 
Perms.CREATE | Perms.DELETE, acls); verifyZKACL( "world", "anyone", Perms.ALL ^ (Perms.CREATE | Perms.DELETE), acls); + + acls = stateStore.getACL(parentPath); + assertEquals(1, acls.size()); + assertEquals(perm, acls.get(0).getPerms()); rm.close(); // Now start RM with HA disabled. NoAuth Exception should not be thrown. conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, false); + conf.set(YarnConfiguration.RM_ZK_ACL, YarnConfiguration.DEFAULT_RM_ZK_ACL); rm = new MockRM(conf); rm.start(); rm.getRMContext().getRMAdminService().transitionToActive(req); - acls = ((ZKRMStateStore)rm.getRMContext().getStateStore()).getACL(rootPath); + acls = stateStore.getACL(rootPath); assertEquals(acls.size(), 1); verifyZKACL("world", "anyone", Perms.ALL, acls); rm.close(); @@ -453,7 +460,7 @@ public class TestZKRMStateStore extends RMStateStoreTestBase { rm = new MockRM(conf); rm.start(); rm.getRMContext().getRMAdminService().transitionToActive(req); - acls = ((ZKRMStateStore)rm.getRMContext().getStateStore()).getACL(rootPath); + acls = stateStore.getACL(rootPath); assertEquals(acls.size(), 2); verifyZKACL("digest", "localhost", Perms.CREATE | Perms.DELETE, acls); verifyZKACL( diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStoreZKClientConnections.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStoreZKClientConnections.java index 6b19be3904b..cb4e521516f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStoreZKClientConnections.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStoreZKClientConnections.java @@ -22,13 +22,12 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.curator.test.TestingServer; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStoreTestBase.TestDispatcher; import org.apache.hadoop.util.ZKUtil; -import org.apache.zookeeper.CreateMode; -import org.apache.zookeeper.ZooDefs; import org.apache.zookeeper.server.auth.DigestAuthenticationProvider; import org.junit.After; import org.junit.Assert; @@ -38,7 +37,6 @@ import org.junit.Test; import java.security.NoSuchAlgorithmException; import java.util.concurrent.atomic.AtomicBoolean; -import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -90,7 +88,7 @@ public class TestZKRMStateStoreZKClientConnections { public RMStateStore getRMStateStore(Configuration conf) throws Exception { String workingZnode = "/Test"; - conf.set(YarnConfiguration.RM_ZK_ADDRESS, + conf.set(CommonConfigurationKeys.ZK_ADDRESS, testingServer.getConnectString()); conf.set(YarnConfiguration.ZK_RM_STATE_STORE_PARENT_PATH, workingZnode); this.store = new TestZKRMStateStore(conf, workingZnode); @@ -103,8 +101,8 @@ public class TestZKRMStateStoreZKClientConnections { TestZKClient zkClientTester = new 
TestZKClient(); final String path = "/test"; YarnConfiguration conf = new YarnConfiguration(); - conf.setInt(YarnConfiguration.RM_ZK_TIMEOUT_MS, ZK_TIMEOUT_MS); - conf.setLong(YarnConfiguration.RM_ZK_RETRY_INTERVAL_MS, 100); + conf.setInt(CommonConfigurationKeys.ZK_TIMEOUT_MS, ZK_TIMEOUT_MS); + conf.setLong(CommonConfigurationKeys.ZK_RETRY_INTERVAL_MS, 100); final ZKRMStateStore store = (ZKRMStateStore) zkClientTester.getRMStateStore(conf); TestDispatcher dispatcher = new TestDispatcher(); @@ -133,7 +131,7 @@ public class TestZKRMStateStoreZKClientConnections { public void testSetZKAcl() { TestZKClient zkClientTester = new TestZKClient(); YarnConfiguration conf = new YarnConfiguration(); - conf.set(YarnConfiguration.RM_ZK_ACL, "world:anyone:rwca"); + conf.set(CommonConfigurationKeys.ZK_ACL, "world:anyone:rwca"); try { zkClientTester.store.delete(zkClientTester.store .znodeWorkingPath); @@ -146,7 +144,7 @@ public class TestZKRMStateStoreZKClientConnections { public void testInvalidZKAclConfiguration() { TestZKClient zkClientTester = new TestZKClient(); YarnConfiguration conf = new YarnConfiguration(); - conf.set(YarnConfiguration.RM_ZK_ACL, "randomstring&*"); + conf.set(CommonConfigurationKeys.ZK_ACL, "randomstring&*"); try { zkClientTester.getRMStateStore(conf); fail("ZKRMStateStore created with bad ACL"); @@ -163,10 +161,10 @@ public class TestZKRMStateStoreZKClientConnections { public void testZKAuths() throws Exception { TestZKClient zkClientTester = new TestZKClient(); YarnConfiguration conf = new YarnConfiguration(); - conf.setInt(YarnConfiguration.RM_ZK_NUM_RETRIES, 1); - conf.setInt(YarnConfiguration.RM_ZK_TIMEOUT_MS, ZK_TIMEOUT_MS); - conf.set(YarnConfiguration.RM_ZK_ACL, TEST_ACL); - conf.set(YarnConfiguration.RM_ZK_AUTH, TEST_AUTH_GOOD); + conf.setInt(CommonConfigurationKeys.ZK_NUM_RETRIES, 1); + conf.setInt(CommonConfigurationKeys.ZK_TIMEOUT_MS, ZK_TIMEOUT_MS); + conf.set(CommonConfigurationKeys.ZK_ACL, TEST_ACL); + conf.set(CommonConfigurationKeys.ZK_AUTH, TEST_AUTH_GOOD); zkClientTester.getRMStateStore(conf); } From 95303f50d3467fd9bc64e504c9e64a64f118b25f Mon Sep 17 00:00:00 2001 From: Rohith Sharma K S Date: Tue, 12 Jun 2018 17:14:22 +0530 Subject: [PATCH 088/113] YARN-8413. Flow activity page is failing with 'Timeline server failed with an error'. Contributed by Sunil Govindan. --- .../hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js index 01daa7a6a27..66428697758 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js @@ -105,7 +105,7 @@ function updateConfigs(application) { $.ajax({ type: 'GET', dataType: 'json', - async: true, + async: false, context: this, url: getTimeLineURL(rmhost, isHttpsSchemeEnabled), success: function(data) { From b3612dd90c419c6ac5d59ab0793b4adec40ef5c6 Mon Sep 17 00:00:00 2001 From: Sunil G Date: Tue, 12 Jun 2018 17:40:41 +0530 Subject: [PATCH 089/113] YARN-8419. [UI2] User cannot submit a new service as submit button is always disabled. Contributed by Suma Shivaprasad. 
--- .../src/main/webapp/app/components/deploy-service.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/deploy-service.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/deploy-service.js index ff939855e24..f73f33fca08 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/deploy-service.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/deploy-service.js @@ -167,7 +167,7 @@ export default Ember.Component.extend({ if (this.get('isLoading')) { return false; } - if (this.get('isUserNameGiven')) { + if (this.get('isSecurityNotEnabled') && this.get('isUserNameGiven')) { return false; } if (this.get('isStandardViewType')) { From aa7614c5f36060a3630b742590845da7efd85caa Mon Sep 17 00:00:00 2001 From: Mukul Kumar Singh Date: Tue, 12 Jun 2018 20:36:23 +0530 Subject: [PATCH 090/113] HDDS-158. DatanodeStateMachine endPoint task throws NullPointerException. Contributed by Nanda Kumar. --- .../endpoint/HeartbeatEndpointTask.java | 12 ++- .../common/report/TestReportPublisher.java | 79 +++++++++++++++++++ 2 files changed, 88 insertions(+), 3 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java index 3986faf37ff..1ee6375a562 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java @@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.container.common.states.endpoint; import com.google.common.base.Preconditions; +import com.google.protobuf.Descriptors; import com.google.protobuf.GeneratedMessage; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; @@ -125,9 +126,14 @@ public class HeartbeatEndpointTask */ private void addReports(SCMHeartbeatRequestProto.Builder requestBuilder) { for (GeneratedMessage report : context.getAllAvailableReports()) { - requestBuilder.setField( - SCMHeartbeatRequestProto.getDescriptor().findFieldByName( - report.getDescriptorForType().getName()), report); + String reportName = report.getDescriptorForType().getFullName(); + for (Descriptors.FieldDescriptor descriptor : + SCMHeartbeatRequestProto.getDescriptor().getFields()) { + String heartbeatFieldName = descriptor.getMessageType().getFullName(); + if (heartbeatFieldName.equals(reportName)) { + requestBuilder.setField(descriptor, report); + } + } } } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java index 067c5624f63..5fd9cf60479 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java @@ -18,13 +18,25 @@ package org.apache.hadoop.ozone.container.common.report; import com.google.common.util.concurrent.ThreadFactoryBuilder; +import 
com.google.protobuf.Descriptors; import com.google.protobuf.GeneratedMessage; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.NodeReportProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; import org.apache.hadoop.util.concurrent.HadoopExecutors; import org.junit.Assert; import org.junit.Test; import org.mockito.Mockito; +import java.util.Random; +import java.util.UUID; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; @@ -103,4 +115,71 @@ public class TestReportPublisher { } + @Test + public void testAddingReportToHeartbeat() { + Configuration conf = new OzoneConfiguration(); + ReportPublisherFactory factory = new ReportPublisherFactory(conf); + ReportPublisher nodeReportPublisher = factory.getPublisherFor( + NodeReportProto.class); + ReportPublisher containerReportPubliser = factory.getPublisherFor( + ContainerReportsProto.class); + GeneratedMessage nodeReport = nodeReportPublisher.getReport(); + GeneratedMessage containerReport = containerReportPubliser.getReport(); + SCMHeartbeatRequestProto.Builder heartbeatBuilder = + SCMHeartbeatRequestProto.newBuilder(); + heartbeatBuilder.setDatanodeDetails( + getDatanodeDetails().getProtoBufMessage()); + addReport(heartbeatBuilder, nodeReport); + addReport(heartbeatBuilder, containerReport); + SCMHeartbeatRequestProto heartbeat = heartbeatBuilder.build(); + Assert.assertTrue(heartbeat.hasNodeReport()); + Assert.assertTrue(heartbeat.hasContainerReport()); + } + + /** + * Get a datanode details. + * + * @return DatanodeDetails + */ + private static DatanodeDetails getDatanodeDetails() { + String uuid = UUID.randomUUID().toString(); + Random random = new Random(); + String ipAddress = + random.nextInt(256) + "." + random.nextInt(256) + "." + random + .nextInt(256) + "." + random.nextInt(256); + + DatanodeDetails.Port containerPort = DatanodeDetails.newPort( + DatanodeDetails.Port.Name.STANDALONE, 0); + DatanodeDetails.Port ratisPort = DatanodeDetails.newPort( + DatanodeDetails.Port.Name.RATIS, 0); + DatanodeDetails.Port restPort = DatanodeDetails.newPort( + DatanodeDetails.Port.Name.REST, 0); + DatanodeDetails.Builder builder = DatanodeDetails.newBuilder(); + builder.setUuid(uuid) + .setHostName("localhost") + .setIpAddress(ipAddress) + .addPort(containerPort) + .addPort(ratisPort) + .addPort(restPort); + return builder.build(); + } + + /** + * Adds the report to heartbeat. + * + * @param requestBuilder builder to which the report has to be added. + * @param report the report to be added. 
+ */ + private static void addReport(SCMHeartbeatRequestProto.Builder requestBuilder, + GeneratedMessage report) { + String reportName = report.getDescriptorForType().getFullName(); + for (Descriptors.FieldDescriptor descriptor : + SCMHeartbeatRequestProto.getDescriptor().getFields()) { + String heartbeatFieldName = descriptor.getMessageType().getFullName(); + if (heartbeatFieldName.equals(reportName)) { + requestBuilder.setField(descriptor, report); + } + } + } + } From 5c7ad52573819728ed0b57d1f84258b5fff7c43c Mon Sep 17 00:00:00 2001 From: Anu Engineer Date: Tue, 12 Jun 2018 08:25:18 -0700 Subject: [PATCH 091/113] HDDS-111. Include tests for Rest Client in TestVolume and TestBucket. Contributed by Lokesh Jain. --- .../hadoop/ozone/client/OzoneClientUtils.java | 18 ++++----- .../apache/hadoop/ozone/RatisTestHelper.java | 8 +++- .../hadoop/ozone/ksm/TestKeySpaceManager.java | 11 +++-- .../hadoop/ozone/web/client/TestBuckets.java | 40 +++++++++++++++++-- .../ozone/web/client/TestBucketsRatis.java | 35 +++++++++++++++- .../hadoop/ozone/web/client/TestVolume.java | 36 ++++++++++++++++- .../ozone/web/client/TestVolumeRatis.java | 29 +++++++++++++- .../storage/DistributedStorageHandler.java | 21 +++++----- 8 files changed, 165 insertions(+), 33 deletions(-) diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientUtils.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientUtils.java index 6be61e227a3..0aaee31ffb9 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientUtils.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientUtils.java @@ -18,14 +18,12 @@ package org.apache.hadoop.ozone.client; import org.apache.hadoop.hdds.client.OzoneQuota; +import org.apache.hadoop.hdds.scm.client.HddsClientUtils; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.rest.response.BucketInfo; import org.apache.hadoop.ozone.client.rest.response.KeyInfo; import org.apache.hadoop.ozone.client.rest.response.VolumeInfo; import org.apache.hadoop.ozone.client.rest.response.VolumeOwner; -import org.apache.hadoop.ozone.web.utils.OzoneUtils; - -import static org.apache.hadoop.ozone.web.utils.OzoneUtils.formatTime; /** A utility class for OzoneClient. 
*/ public final class OzoneClientUtils { @@ -43,7 +41,8 @@ public final class OzoneClientUtils { public static BucketInfo asBucketInfo(OzoneBucket bucket) { BucketInfo bucketInfo = new BucketInfo(bucket.getVolumeName(), bucket.getName()); - bucketInfo.setCreatedOn(OzoneUtils.formatTime(bucket.getCreationTime())); + bucketInfo + .setCreatedOn(HddsClientUtils.formatDateTime(bucket.getCreationTime())); bucketInfo.setStorageType(bucket.getStorageType()); bucketInfo.setVersioning( OzoneConsts.Versioning.getVersioning(bucket.getVersioning())); @@ -60,9 +59,9 @@ public final class OzoneClientUtils { * @return VolumeInfo instance */ public static VolumeInfo asVolumeInfo(OzoneVolume volume) { - VolumeInfo volumeInfo = - new VolumeInfo(volume.getName(), formatTime(volume.getCreationTime()), - volume.getOwner()); + VolumeInfo volumeInfo = new VolumeInfo(volume.getName(), + HddsClientUtils.formatDateTime(volume.getCreationTime()), + volume.getOwner()); volumeInfo.setQuota(OzoneQuota.getOzoneQuota(volume.getQuota())); volumeInfo.setOwner(new VolumeOwner(volume.getOwner())); return volumeInfo; @@ -79,8 +78,9 @@ public final class OzoneClientUtils { public static KeyInfo asKeyInfo(OzoneKey key) { KeyInfo keyInfo = new KeyInfo(); keyInfo.setKeyName(key.getName()); - keyInfo.setCreatedOn(formatTime(key.getCreationTime())); - keyInfo.setModifiedOn(formatTime(key.getModificationTime())); + keyInfo.setCreatedOn(HddsClientUtils.formatDateTime(key.getCreationTime())); + keyInfo.setModifiedOn( + HddsClientUtils.formatDateTime(key.getModificationTime())); keyInfo.setSize(key.getDataSize()); return keyInfo; } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java index fce9e77947b..7a9a09a1e1d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java @@ -33,6 +33,7 @@ import org.slf4j.LoggerFactory; import java.io.Closeable; import java.io.IOException; import java.net.URISyntaxException; +import java.util.concurrent.TimeoutException; /** * Helpers for Ratis tests. @@ -54,7 +55,8 @@ public interface RatisTestHelper { * RATIS_ENABLED = true, and * OZONE_HANDLER_TYPE_KEY = "distributed". 
*/ - public RatisTestSuite(final Class clazz) throws IOException { + public RatisTestSuite(final Class clazz) + throws IOException, TimeoutException, InterruptedException { conf = newOzoneConfiguration(clazz, RPC); cluster = newMiniOzoneCluster(NUM_DATANODES, conf); } @@ -99,9 +101,11 @@ public interface RatisTestHelper { } static MiniOzoneCluster newMiniOzoneCluster( - int numDatanodes, OzoneConfiguration conf) throws IOException { + int numDatanodes, OzoneConfiguration conf) + throws IOException, TimeoutException, InterruptedException { final MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(numDatanodes).build(); + cluster.waitForClusterToBeReady(); return cluster; } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManager.java index 36e4b866427..8a16bfe86d2 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManager.java @@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.ksm; import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.fs.StorageType; +import org.apache.hadoop.hdds.scm.client.HddsClientUtils; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler; import org.apache.hadoop.net.NetUtils; @@ -1115,10 +1116,12 @@ public class TestKeySpaceManager { KeyInfo keyInfo = storageHandler.getKeyInfo(keyArgs); // Compare the time in second unit since the date string reparsed to // millisecond will lose precision. - Assert.assertTrue((OzoneUtils.formatDate(keyInfo.getCreatedOn()) - / 1000) >= (currentTime / 1000)); - Assert.assertTrue((OzoneUtils.formatDate(keyInfo.getModifiedOn()) - / 1000) >= (currentTime / 1000)); + Assert.assertTrue( + (HddsClientUtils.formatDateTime(keyInfo.getCreatedOn()) / 1000) >= ( + currentTime / 1000)); + Assert.assertTrue( + (HddsClientUtils.formatDateTime(keyInfo.getModifiedOn()) / 1000) >= ( + currentTime / 1000)); Assert.assertEquals(keyName, keyInfo.getKeyName()); // with out data written, the size would be 0 Assert.assertEquals(0, keyInfo.getSize()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestBuckets.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestBuckets.java index 684f4d35a3a..3861699c85e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestBuckets.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestBuckets.java @@ -27,23 +27,29 @@ import org.apache.hadoop.ozone.client.protocol.ClientProtocol; import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.rest.OzoneException; +import org.apache.hadoop.ozone.client.rest.RestClient; import org.apache.hadoop.ozone.client.rpc.RpcClient; import org.apache.hadoop.ozone.web.request.OzoneQuota; import org.apache.hadoop.ozone.web.utils.OzoneUtils; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Time; import org.junit.AfterClass; +import org.junit.Before; import org.junit.BeforeClass; import org.junit.Rule; import org.junit.Test; import org.junit.rules.Timeout; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; import 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestBuckets.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestBuckets.java
index 684f4d35a3a..3861699c85e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestBuckets.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestBuckets.java
@@ -27,23 +27,29 @@ import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
 import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.client.OzoneBucket;
 import org.apache.hadoop.ozone.client.rest.OzoneException;
+import org.apache.hadoop.ozone.client.rest.RestClient;
 import org.apache.hadoop.ozone.client.rpc.RpcClient;
 import org.apache.hadoop.ozone.web.request.OzoneQuota;
 import org.apache.hadoop.ozone.web.utils.OzoneUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
 import org.junit.AfterClass;
+import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.Timeout;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
 
 import java.io.IOException;
 import java.net.URISyntaxException;
 import java.text.ParseException;
 import java.util.Arrays;
+import java.util.Collection;
 import java.util.Iterator;
 import java.util.List;
+import java.util.concurrent.TimeoutException;
 import java.util.stream.Collectors;
 
 import static org.junit.Assert.assertEquals;
@@ -54,6 +60,7 @@ import static org.junit.Assert.fail;
 /**
  * Test Ozone Bucket Lifecycle.
  */
+@RunWith(value = Parameterized.class)
 public class TestBuckets {
   /**
    * Set the timeout for every test.
@@ -63,6 +70,18 @@ public class TestBuckets {
 
   private static MiniOzoneCluster cluster = null;
   private static ClientProtocol client = null;
+  private static OzoneConfiguration conf;
+
+  @Parameterized.Parameters
+  public static Collection clientProtocol() {
+    Object[][] params = new Object[][] {
+        {RpcClient.class},
+        {RestClient.class}};
+    return Arrays.asList(params);
+  }
+
+  @Parameterized.Parameter
+  public static Class clientProtocol;
 
   /**
    * Create a MiniDFSCluster for testing.
@@ -74,9 +93,10 @@ public class TestBuckets {
    * @throws IOException
    */
   @BeforeClass
-  public static void init() throws IOException,
-      URISyntaxException, OzoneException {
-    OzoneConfiguration conf = new OzoneConfiguration();
+  public static void init()
+      throws IOException, URISyntaxException, OzoneException, TimeoutException,
+      InterruptedException {
+    conf = new OzoneConfiguration();
 
     String path = GenericTestUtils
         .getTempPath(TestBuckets.class.getSimpleName());
@@ -87,7 +107,16 @@ public class TestBuckets {
     cluster = MiniOzoneCluster.newBuilder(conf)
         .setNumDatanodes(3)
         .build();
-    client = new RpcClient(conf);
+    cluster.waitForClusterToBeReady();
+  }
+
+  @Before
+  public void setup() throws Exception {
+    if (clientProtocol.equals(RestClient.class)) {
+      client = new RestClient(conf);
+    } else {
+      client = new RpcClient(conf);
+    }
   }
 
   /**
@@ -260,6 +289,9 @@ public class TestBuckets {
 
   @Test
   public void testListBucket() throws Exception {
+    if (clientProtocol.equals(RestClient.class)) {
+      return;
+    }
     runTestListBucket(client);
   }
 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestBucketsRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestBucketsRatis.java
index 9f80184226b..39d2e0ce082 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestBucketsRatis.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestBucketsRatis.java
@@ -17,31 +17,61 @@
  */
 package org.apache.hadoop.ozone.web.client;
 
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.RatisTestHelper;
 import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
 import org.apache.hadoop.ozone.client.rest.OzoneException;
+import org.apache.hadoop.ozone.client.rest.RestClient;
+import org.apache.hadoop.ozone.client.rpc.RpcClient;
 import org.junit.AfterClass;
+import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Ignore;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.Timeout;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
 
 import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collection;
 
 /** The same as {@link TestBuckets} except that this test is Ratis enabled.
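
TestBuckets, TestBucketsRatis, TestVolume and TestVolumeRatis are all converted to JUnit 4 parameterized tests so that each case runs once against the RPC client and once against the REST client. A stripped-down sketch of how that runner wiring behaves (the class and field values below are illustrative, not the actual test code):

import java.util.Arrays;
import java.util.Collection;

import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;

@RunWith(Parameterized.class)
public class ParameterizedClientExample {

  // The runner instantiates the test class once per row returned here.
  @Parameterized.Parameters
  public static Collection<Object[]> clientProtocol() {
    return Arrays.asList(new Object[][] {{"rpc"}, {"rest"}});
  }

  // Injected by the runner before each test method runs.
  @Parameterized.Parameter
  public String clientProtocol;

  private String client;

  @Before
  public void setup() {
    // The real tests pick RpcClient or RestClient here based on the parameter.
    client = clientProtocol.equals("rest") ? "rest-client" : "rpc-client";
  }

  @Test
  public void runsOncePerProtocol() {
    Assert.assertNotNull(client);
  }
}
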
*/ @Ignore("Disabling Ratis tests for pipeline work.") +@RunWith(value = Parameterized.class) public class TestBucketsRatis { @Rule public Timeout testTimeout = new Timeout(300000); private static RatisTestHelper.RatisTestSuite suite; private static ClientProtocol client; + private static OzoneConfiguration conf; + + @Parameterized.Parameters + public static Collection clientProtocol() { + Object[][] params = new Object[][] { + {RpcClient.class}, + {RestClient.class}}; + return Arrays.asList(params); + } + + @Parameterized.Parameter + public static Class clientProtocol; @BeforeClass public static void init() throws Exception { suite = new RatisTestHelper.RatisTestSuite(TestBucketsRatis.class); - client = suite.newOzoneClient(); + conf = suite.getConf(); + } + + @Before + public void setup() throws Exception { + if (clientProtocol.equals(RestClient.class)) { + client = new RestClient(conf); + } else { + client = new RpcClient(conf); + } } @AfterClass @@ -72,6 +102,9 @@ public class TestBucketsRatis { } @Test public void testListBucket() throws Exception { + if (clientProtocol.equals(RestClient.class)) { + return; + } TestBuckets.runTestListBucket(client); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolume.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolume.java index 14fc5e3661c..1c58c9bd8a2 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolume.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolume.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.client.VolumeArgs; import org.apache.hadoop.ozone.client.protocol.ClientProtocol; +import org.apache.hadoop.ozone.client.rest.RestClient; import org.apache.hadoop.ozone.client.rpc.RpcClient; import org.apache.hadoop.ozone.client.rest.OzoneException; import org.apache.hadoop.ozone.client.OzoneVolume; @@ -35,14 +36,19 @@ import org.apache.hadoop.util.Time; import org.apache.log4j.Level; import org.apache.log4j.Logger; import org.junit.AfterClass; +import org.junit.Before; import org.junit.BeforeClass; import org.junit.Assert; import org.junit.Test; import org.junit.Ignore; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; import java.io.File; import java.io.IOException; import java.text.ParseException; +import java.util.Arrays; +import java.util.Collection; import java.util.List; import static org.junit.Assert.assertEquals; @@ -53,9 +59,22 @@ import static org.junit.Assert.assertTrue; /** * Test Ozone Volumes Lifecycle. */ +@RunWith(value = Parameterized.class) public class TestVolume { private static MiniOzoneCluster cluster = null; private static ClientProtocol client = null; + private static OzoneConfiguration conf; + + @Parameterized.Parameters + public static Collection clientProtocol() { + Object[][] params = new Object[][] { + {RpcClient.class}, + {RestClient.class}}; + return Arrays.asList(params); + } + + @Parameterized.Parameter + public Class clientProtocol; /** * Create a MiniDFSCluster for testing. 
@@ -68,7 +87,7 @@ public class TestVolume { */ @BeforeClass public static void init() throws Exception { - OzoneConfiguration conf = new OzoneConfiguration(); + conf = new OzoneConfiguration(); String path = GenericTestUtils .getTempPath(TestVolume.class.getSimpleName()); @@ -81,8 +100,15 @@ public class TestVolume { cluster = MiniOzoneCluster.newBuilder(conf).build(); cluster.waitForClusterToBeReady(); + } - client = new RpcClient(conf); + @Before + public void setup() throws Exception { + if (clientProtocol.equals(RestClient.class)) { + client = new RestClient(conf); + } else { + client = new RpcClient(conf); + } } /** @@ -202,6 +228,9 @@ public class TestVolume { @Test public void testListVolume() throws OzoneException, IOException { + if (clientProtocol.equals(RestClient.class)) { + return; + } runTestListVolume(client); } @@ -286,6 +315,9 @@ public class TestVolume { @Test public void testListVolumes() throws Exception { + if (clientProtocol.equals(RestClient.class)) { + return; + } runTestListVolumes(client); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolumeRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolumeRatis.java index dcb4030c24d..d6783ad8e5c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolumeRatis.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolumeRatis.java @@ -26,27 +26,45 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.client.protocol.ClientProtocol; import org.apache.hadoop.ozone.client.rest.OzoneException; +import org.apache.hadoop.ozone.client.rest.RestClient; import org.apache.hadoop.ozone.client.rpc.RpcClient; import org.apache.hadoop.test.GenericTestUtils; import org.apache.log4j.Level; import org.apache.log4j.Logger; import org.junit.*; import org.junit.rules.Timeout; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; import java.io.File; import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; /** The same as {@link TestVolume} except that this test is Ratis enabled. */ @Ignore("Disabling Ratis tests for pipeline work.") +@RunWith(value = Parameterized.class) public class TestVolumeRatis { @Rule public Timeout testTimeout = new Timeout(300000); private static ClientProtocol client; private static MiniOzoneCluster cluster; + private static OzoneConfiguration conf; + + @Parameterized.Parameters + public static Collection clientProtocol() { + Object[][] params = new Object[][] { + {RpcClient.class}, + {RestClient.class}}; + return Arrays.asList(params); + } + + @Parameterized.Parameter + public Class clientProtocol; @BeforeClass public static void init() throws Exception { - OzoneConfiguration conf = new OzoneConfiguration(); + conf = new OzoneConfiguration(); // This enables Ratis in the cluster. 
conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY, true); @@ -66,8 +84,15 @@ public class TestVolumeRatis { final int port = cluster.getHddsDatanodes().get(0) .getDatanodeDetails() .getPort(DatanodeDetails.Port.Name.REST).getValue(); + } - client = new RpcClient(conf); + @Before + public void setup() throws Exception { + if (clientProtocol.equals(RestClient.class)) { + client = new RestClient(conf); + } else { + client = new RpcClient(conf); + } } @AfterClass diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java index 45270ea4a0f..fedc0f007a0 100644 --- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java +++ b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java @@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.web.storage; import com.google.common.base.Strings; +import org.apache.hadoop.hdds.scm.client.HddsClientUtils; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.ozone.client.io.LengthInputStream; import org.apache.hadoop.ozone.ksm.helpers.KsmBucketArgs; @@ -55,7 +56,6 @@ import org.apache.hadoop.ozone.web.interfaces.StorageHandler; import org.apache.hadoop.ozone.web.response.ListVolumes; import org.apache.hadoop.ozone.web.response.VolumeInfo; import org.apache.hadoop.ozone.web.response.VolumeOwner; -import org.apache.hadoop.ozone.web.utils.OzoneUtils; import org.apache.hadoop.ozone.web.response.ListBuckets; import org.apache.hadoop.ozone.web.response.BucketInfo; import org.apache.hadoop.ozone.web.response.KeyInfo; @@ -209,7 +209,8 @@ public final class DistributedStorageHandler implements StorageHandler { info.setOwner(new VolumeOwner(infoProto.getOwnerName())); info.setQuota(OzoneQuota.getOzoneQuota(infoProto.getQuotaInBytes())); info.setVolumeName(infoProto.getVolume()); - info.setCreatedOn(OzoneUtils.formatTime(infoProto.getCreationTime())); + info.setCreatedOn( + HddsClientUtils.formatDateTime(infoProto.getCreationTime())); result.addVolume(info); } @@ -233,7 +234,8 @@ public final class DistributedStorageHandler implements StorageHandler { volumeArgs.getAdminName()); volInfo.setOwner(new VolumeOwner(volumeArgs.getOwnerName())); volInfo.setQuota(OzoneQuota.getOzoneQuota(volumeArgs.getQuotaInBytes())); - volInfo.setCreatedOn(OzoneUtils.formatTime(volumeArgs.getCreationTime())); + volInfo.setCreatedOn( + HddsClientUtils.formatDateTime(volumeArgs.getCreationTime())); return volInfo; } @@ -363,7 +365,8 @@ public final class DistributedStorageHandler implements StorageHandler { bk.setBucketName(bucketInfo.getBucketName()); bk.setStorageType(bucketInfo.getStorageType()); bk.setAcls(bucketInfo.getAcls()); - bk.setCreatedOn(OzoneUtils.formatTime(bucketInfo.getCreationTime())); + bk.setCreatedOn( + HddsClientUtils.formatDateTime(bucketInfo.getCreationTime())); result.addBucket(bk); } return result; @@ -391,7 +394,7 @@ public final class DistributedStorageHandler implements StorageHandler { bucketInfo.setStorageType(ksmBucketInfo.getStorageType()); bucketInfo.setAcls(ksmBucketInfo.getAcls()); bucketInfo.setCreatedOn( - OzoneUtils.formatTime(ksmBucketInfo.getCreationTime())); + HddsClientUtils.formatDateTime(ksmBucketInfo.getCreationTime())); return bucketInfo; } @@ -481,9 +484,9 @@ public final class DistributedStorageHandler implements StorageHandler { 
keyInfo.setKeyName(ksmKeyInfo.getKeyName()); keyInfo.setSize(ksmKeyInfo.getDataSize()); keyInfo.setCreatedOn( - OzoneUtils.formatTime(ksmKeyInfo.getCreationTime())); + HddsClientUtils.formatDateTime(ksmKeyInfo.getCreationTime())); keyInfo.setModifiedOn( - OzoneUtils.formatTime(ksmKeyInfo.getModificationTime())); + HddsClientUtils.formatDateTime(ksmKeyInfo.getModificationTime())); return keyInfo; } @@ -524,9 +527,9 @@ public final class DistributedStorageHandler implements StorageHandler { tempInfo.setKeyName(info.getKeyName()); tempInfo.setSize(info.getDataSize()); tempInfo.setCreatedOn( - OzoneUtils.formatTime(info.getCreationTime())); + HddsClientUtils.formatDateTime(info.getCreationTime())); tempInfo.setModifiedOn( - OzoneUtils.formatTime(info.getModificationTime())); + HddsClientUtils.formatDateTime(info.getModificationTime())); result.addKey(tempInfo); } From 652bcbb3e4950758e61ce123ecc1798ae2b60a7f Mon Sep 17 00:00:00 2001 From: Akira Ajisaka Date: Wed, 13 Jun 2018 00:34:57 +0900 Subject: [PATCH 092/113] YARN-8363. Upgrade commons-lang version to 3.7 in hadoop-yarn-project. Contributed by Takanobu Asanuma. --- .../hadoop-yarn/hadoop-yarn-api/pom.xml | 4 -- .../GetApplicationsRequest.java | 22 +++--- .../hadoop/yarn/api/records/Resource.java | 2 +- .../yarn/util/resource/ResourceUtils.java | 4 +- .../pom.xml | 4 -- .../applications/distributedshell/Client.java | 2 +- .../yarn/service/client/ApiServiceClient.java | 2 +- .../hadoop-yarn-services-core/pom.xml | 5 -- .../service/api/records/Configuration.java | 2 +- .../yarn/service/client/ServiceClient.java | 2 +- .../yarn/service/monitor/probe/Probe.java | 2 +- .../provider/AbstractClientProvider.java | 2 +- .../provider/AbstractProviderService.java | 2 +- .../provider/docker/DockerClientProvider.java | 2 +- .../docker/DockerProviderService.java | 2 +- .../tarball/TarballClientProvider.java | 2 +- .../yarn/service/utils/ServiceApiUtil.java | 2 +- .../yarn/service/utils/ServiceUtils.java | 4 +- .../hadoop-yarn/hadoop-yarn-client/pom.xml | 4 -- .../hadoop/yarn/client/cli/ClusterCLI.java | 2 +- .../hadoop/yarn/client/cli/LogsCLI.java | 2 +- .../hadoop/yarn/client/cli/NodeCLI.java | 4 +- .../hadoop/yarn/client/cli/RMAdminCLI.java | 2 +- .../apache/hadoop/yarn/client/cli/TopCLI.java | 6 +- .../hadoop/yarn/client/cli/TestLogsCLI.java | 2 +- .../hadoop/yarn/client/cli/TestYarnCLI.java | 2 +- .../hadoop-yarn/hadoop-yarn-common/pom.xml | 4 -- .../impl/pb/GetApplicationsRequestPBImpl.java | 30 ++++---- .../yarn/logaggregation/LogCLIHelpers.java | 2 +- .../yarn/logaggregation/LogToolUtils.java | 2 +- .../LogAggregationFileController.java | 2 +- .../LogAggregationFileControllerFactory.java | 2 +- .../LogAggregationIndexedFileController.java | 2 +- .../nodelabels/CommonNodeLabelsManager.java | 2 +- .../hadoop/yarn/nodelabels/RMNodeLabel.java | 2 +- .../org/apache/hadoop/yarn/state/Graph.java | 2 +- .../Log4jWarningErrorMetricsAppender.java | 2 +- .../yarn/util/ProcfsBasedProcessTree.java | 2 +- .../org/apache/hadoop/yarn/webapp/Router.java | 2 +- .../apache/hadoop/yarn/webapp/WebApps.java | 2 +- .../hadoop/yarn/webapp/hamlet/HamletImpl.java | 6 +- .../yarn/webapp/hamlet2/HamletImpl.java | 6 +- .../hadoop/yarn/webapp/view/JQueryUI.java | 6 +- .../hadoop/yarn/webapp/view/TextView.java | 6 +- .../yarn/api/BasePBImplRecordsTest.java | 8 ++- .../yarn/api/TestGetApplicationsRequest.java | 7 +- .../hadoop/yarn/api/TestPBImplRecords.java | 4 +- .../TestCommonNodeLabelsManager.java | 2 +- .../apache/hadoop/yarn/webapp/TestWebApp.java | 2 +- 
.../hadoop-yarn/hadoop-yarn-registry/pom.xml | 5 -- .../client/api/RegistryOperationsFactory.java | 2 +- .../registry/client/binding/JsonSerDeser.java | 2 +- .../client/binding/RegistryUtils.java | 2 +- .../impl/FSRegistryOperationsService.java | 6 +- .../client/impl/zk/RegistrySecurity.java | 2 +- .../registry/server/dns/ReverseZoneUtils.java | 2 +- .../integration/SelectByYarnPersistence.java | 2 +- .../services/MicroZookeeperService.java | 2 +- .../server/services/RegistryAdminService.java | 2 +- .../hadoop/registry/RegistryTestHelper.java | 2 +- .../ApplicationHistoryClientService.java | 4 +- .../yarn/server/timeline/RollingLevelDB.java | 2 +- .../timeline/RollingLevelDBTimelineStore.java | 2 +- .../webapp/TestAHSWebServices.java | 2 +- .../store/impl/SQLFederationStateStore.java | 6 +- .../utils/FederationStateStoreFacade.java | 4 +- .../OpportunisticContainerAllocator.java | 2 +- .../uam/UnmanagedApplicationManager.java | 2 +- .../util/timeline/TimelineServerUtils.java | 2 +- .../yarn/server/webapp/AppAttemptBlock.java | 6 +- .../hadoop/yarn/server/webapp/AppBlock.java | 4 +- .../hadoop/yarn/server/webapp/AppsBlock.java | 14 ++-- .../yarn/server/webapp/WebServices.java | 4 +- .../hadoop-yarn-server-nodemanager/pom.xml | 4 -- .../nodemanager/DefaultContainerExecutor.java | 2 +- .../WindowsSecureContainerExecutor.java | 2 +- .../container/ContainerImpl.java | 2 +- .../PrivilegedOperationExecutor.java | 2 +- .../linux/resources/DefaultOOMHandler.java | 2 +- .../executor/ContainerSignalContext.java | 2 +- .../nodemanager/webapp/NMWebServices.java | 2 +- .../launcher/TestContainerLaunch.java | 48 ++++++------- .../TestLogAggregationService.java | 2 +- .../TestNonAggregatingLogHandler.java | 4 +- .../pom.xml | 4 -- .../resourcemanager/ClientRMService.java | 12 ++-- .../ProportionalCapacityPreemptionPolicy.java | 2 +- .../placement/QueuePlacementRuleUtils.java | 2 +- .../UserGroupMappingPlacementRule.java | 2 +- .../resourcemanager/rmapp/RMAppImpl.java | 2 +- .../rmapp/attempt/RMAppAttemptMetrics.java | 2 +- .../SchedulerApplicationAttempt.java | 6 +- .../scheduler/SchedulerUtils.java | 2 +- .../scheduler/capacity/AbstractCSQueue.java | 2 +- .../scheduler/capacity/CapacityScheduler.java | 4 +- .../scheduler/capacity/LeafQueue.java | 2 +- .../scheduler/capacity/ParentQueue.java | 2 +- .../scheduler/capacity/QueueCapacities.java | 2 +- .../allocator/RegularContainerAllocator.java | 2 +- .../constraint/AllocationTagsManager.java | 2 +- .../algorithm/LocalAllocationTagsManager.java | 2 +- .../webapp/FairSchedulerAppsBlock.java | 10 +-- .../webapp/RMAppAttemptBlock.java | 8 +-- .../resourcemanager/webapp/RMAppBlock.java | 4 +- .../resourcemanager/webapp/RMAppsBlock.java | 14 ++-- .../webapp/dao/AppAttemptInfo.java | 2 +- .../TestContainerResourceUsage.java | 2 +- .../TestRMAppAttemptImplDiagnostics.java | 2 +- ...CapacitySchedulerAutoCreatedQueueBase.java | 4 +- .../clientrm/FederationClientInterceptor.java | 58 +++++++-------- .../yarn/server/router/webapp/AppsBlock.java | 6 +- .../webapp/FederationInterceptorREST.java | 70 +++++++++---------- .../pom.xml | 5 -- .../storage/TimelineSchemaCreator.java | 2 +- .../pom.xml | 5 -- .../reader/TimelineReaderUtils.java | 2 +- .../TimelineReaderWebServicesUtils.java | 2 +- 117 files changed, 281 insertions(+), 318 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml index 8750d58fcec..3994104ff69 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml @@ -34,10 +34,6 @@ - - commons-lang - commons-lang - com.google.guava guava diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetApplicationsRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetApplicationsRequest.java index 5cc9cc12d71..a52b4059126 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetApplicationsRequest.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetApplicationsRequest.java @@ -21,7 +21,7 @@ package org.apache.hadoop.yarn.api.protocolrecords; import java.util.EnumSet; import java.util.Set; -import org.apache.commons.lang.math.LongRange; +import org.apache.commons.lang3.Range; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Stable; @@ -80,8 +80,8 @@ public abstract class GetApplicationsRequest { Set applicationTypes, Set applicationTags, EnumSet applicationStates, - LongRange startRange, - LongRange finishRange, + Range startRange, + Range finishRange, Long limit) { GetApplicationsRequest request = Records.newRecord(GetApplicationsRequest.class); @@ -95,11 +95,11 @@ public abstract class GetApplicationsRequest { request.setApplicationStates(applicationStates); if (startRange != null) { request.setStartRange( - startRange.getMinimumLong(), startRange.getMaximumLong()); + startRange.getMinimum(), startRange.getMaximum()); } if (finishRange != null) { request.setFinishRange( - finishRange.getMinimumLong(), finishRange.getMaximumLong()); + finishRange.getMinimum(), finishRange.getMaximum()); } if (limit != null) { request.setLimit(limit); @@ -302,11 +302,11 @@ public abstract class GetApplicationsRequest { /** * Get the range of start times to filter applications on * - * @return {@link LongRange} of start times to filter applications on + * @return {@link Range} of start times to filter applications on */ @Private @Unstable - public abstract LongRange getStartRange(); + public abstract Range getStartRange(); /** * Set the range of start times to filter applications on @@ -315,7 +315,7 @@ public abstract class GetApplicationsRequest { */ @Private @Unstable - public abstract void setStartRange(LongRange range); + public abstract void setStartRange(Range range); /** * Set the range of start times to filter applications on @@ -332,11 +332,11 @@ public abstract class GetApplicationsRequest { /** * Get the range of finish times to filter applications on * - * @return {@link LongRange} of finish times to filter applications on + * @return {@link Range} of finish times to filter applications on */ @Private @Unstable - public abstract LongRange getFinishRange(); + public abstract Range getFinishRange(); /** * Set the range of finish times to filter applications on @@ -345,7 +345,7 @@ public abstract class GetApplicationsRequest { */ @Private @Unstable - public abstract void setFinishRange(LongRange range); + public abstract void setFinishRange(Range range); /** * Set the range of finish times to filter applications on diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java 
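
The LongRange-to-Range change above is the one substantive API difference in this migration: commons-lang's LongRange (with getMinimumLong/getMaximumLong) has no counterpart in commons-lang3, which instead offers the generic Range built via Range.between and read via getMinimum/getMaximum. A small sketch of the replacement calls used by GetApplicationsRequest (the wrapper class below is illustrative only):

import org.apache.commons.lang3.Range;

public final class RangeMigrationExample {
  public static void main(String[] args) {
    // commons-lang 2.x (removed by this patch):
    //   LongRange start = new LongRange(beginTime, endTime);
    //   long min = start.getMinimumLong();
    //   long max = start.getMaximumLong();

    // commons-lang3 replacement, as used in GetApplicationsRequest:
    Range<Long> start = Range.between(0L, Long.MAX_VALUE);
    long min = start.getMinimum();
    long max = start.getMaximum();
    System.out.println("start range: " + min + " .. " + max);
  }
}
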
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java index 78a691be61a..71a6b54352b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java @@ -23,7 +23,7 @@ import java.util.Arrays; import java.util.List; import java.util.Map; -import org.apache.commons.lang.NotImplementedException; +import org.apache.commons.lang3.NotImplementedException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceAudience.Public; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java index f211f49ff05..3dbd609b534 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java @@ -19,7 +19,7 @@ package org.apache.hadoop.yarn.util.resource; import com.google.common.annotations.VisibleForTesting; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes; import org.apache.hadoop.yarn.api.records.Resource; @@ -457,7 +457,7 @@ public class ResourceUtils { } String units = resourceValue.substring(i); - if((StringUtils.isAlpha(units))) { + if (StringUtils.isAlpha(units) || units.equals("")) { resource[0] = units; resource[1] = resourceValue.substring(0, i); return resource; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml index f3a8712a031..7cb7ac7323f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml @@ -49,10 +49,6 @@ log4j log4j - - commons-lang - commons-lang - com.google.guava guava diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java index 976e6a33362..c8a71b320c0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java @@ -39,7 +39,7 @@ import org.apache.commons.cli.Option; import org.apache.commons.cli.Options; import org.apache.commons.cli.ParseException; import org.apache.commons.io.IOUtils; -import org.apache.commons.lang.StringUtils; +import 
org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/ApiServiceClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/ApiServiceClient.java index 18d45fae781..9232fc81f66 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/ApiServiceClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/ApiServiceClient.java @@ -27,7 +27,7 @@ import java.util.Map; import javax.ws.rs.core.MediaType; import com.google.common.base.Preconditions; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/pom.xml index 7efe8bd8200..80a03f9a95b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/pom.xml @@ -197,11 +197,6 @@ commons-io - - commons-lang - commons-lang - - org.apache.curator curator-client diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Configuration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Configuration.java index 859c1ea7cd0..3ff020f24be 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Configuration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Configuration.java @@ -21,7 +21,7 @@ import com.fasterxml.jackson.annotation.JsonInclude; import com.fasterxml.jackson.annotation.JsonProperty; import io.swagger.annotations.ApiModel; import io.swagger.annotations.ApiModelProperty; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.yarn.service.utils.ServiceUtils; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java 
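
One behavioral difference worth calling out from the ResourceUtils hunk earlier: commons-lang3's StringUtils.isAlpha returns false for an empty string, whereas commons-lang 2.x returned true, which is why the migrated check adds an explicit units.equals("") branch for resource values with no unit suffix. A tiny sketch of the difference (the wrapper class is illustrative only):

import org.apache.commons.lang3.StringUtils;

public final class IsAlphaExample {
  public static void main(String[] args) {
    String units = "";
    System.out.println(StringUtils.isAlpha("Gi"));   // true
    System.out.println(StringUtils.isAlpha(units));  // false in lang3, true in lang 2.x
    // The migrated ResourceUtils check therefore accepts the empty-unit case explicitly:
    System.out.println(StringUtils.isAlpha(units) || units.equals(""));  // true
  }
}
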
index 3f6e8966e4f..b86120f21e6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java @@ -19,7 +19,7 @@ package org.apache.hadoop.yarn.service.client; import com.google.common.annotations.VisibleForTesting; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.curator.framework.CuratorFramework; import org.apache.curator.framework.CuratorFrameworkFactory; import org.apache.curator.retry.RetryNTimes; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/Probe.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/Probe.java index 341a0c8f46d..78c79c7535b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/Probe.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/Probe.java @@ -17,7 +17,7 @@ package org.apache.hadoop.yarn.service.monitor.probe; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.yarn.service.component.instance.ComponentInstance; import java.io.IOException; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractClientProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractClientProvider.java index d16c698d6a6..672c4352433 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractClientProvider.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractClientProvider.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.service.provider; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractProviderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractProviderService.java index 6d213c86652..9c71e66823d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractProviderService.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractProviderService.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.yarn.service.provider; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.ApplicationConstants; import org.apache.hadoop.yarn.api.records.Container; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/docker/DockerClientProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/docker/DockerClientProvider.java index d4a2254ff42..f91742edb36 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/docker/DockerClientProvider.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/docker/DockerClientProvider.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.yarn.service.provider.docker; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.yarn.service.api.records.Artifact; import org.apache.hadoop.yarn.service.api.records.ConfigFile; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/docker/DockerProviderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/docker/DockerProviderService.java index 071b30a29f1..7844621b4ae 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/docker/DockerProviderService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/docker/DockerProviderService.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.yarn.service.provider.docker; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.service.component.instance.ComponentInstance; import org.apache.hadoop.yarn.service.provider.AbstractProviderService; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/tarball/TarballClientProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/tarball/TarballClientProvider.java index 01f7b209aae..3b890fd85a6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/tarball/TarballClientProvider.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/tarball/TarballClientProvider.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.yarn.service.provider.tarball; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.yarn.service.api.records.Artifact; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java index 6101bf01363..549927327d6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java @@ -23,7 +23,7 @@ import com.google.common.base.Preconditions; import com.google.common.collect.ArrayListMultimap; import com.google.common.collect.Multimap; import com.google.common.collect.Sets; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceUtils.java index 707bbf06264..0f0b3265e2d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceUtils.java @@ -21,8 +21,8 @@ package org.apache.hadoop.yarn.service.utils; import com.google.common.base.Preconditions; import org.apache.commons.compress.archivers.tar.TarArchiveEntry; import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream; -import org.apache.commons.lang.ArrayUtils; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.ArrayUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileUtil; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml index 37e5d73f89d..913c3832249 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml @@ -46,10 +46,6 @@ commons-logging commons-logging - - commons-lang - commons-lang - commons-cli commons-cli diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ClusterCLI.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ClusterCLI.java index b930014f95d..a29b0db7362 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ClusterCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ClusterCLI.java @@ -32,7 +32,7 @@ import org.apache.commons.cli.GnuParser; import org.apache.commons.cli.HelpFormatter; import org.apache.commons.cli.MissingArgumentException; import org.apache.commons.cli.Options; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.util.ToolRunner; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java index 6953a4d89d2..cfd4c79fe7b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java @@ -59,7 +59,7 @@ import org.apache.commons.cli.Option; import org.apache.commons.cli.Options; import org.apache.commons.cli.ParseException; import org.apache.commons.io.IOUtils; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.commons.math3.util.Pair; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceAudience.Public; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/NodeCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/NodeCLI.java index 288a5d2c50d..e9253eb6909 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/NodeCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/NodeCLI.java @@ -35,8 +35,8 @@ import org.apache.commons.cli.HelpFormatter; import org.apache.commons.cli.MissingArgumentException; import org.apache.commons.cli.Option; import org.apache.commons.cli.Options; -import org.apache.commons.lang.StringUtils; -import org.apache.commons.lang.time.DateFormatUtils; +import org.apache.commons.lang3.StringUtils; +import org.apache.commons.lang3.time.DateFormatUtils; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.util.ToolRunner; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java index 027a786db33..8d1d56b3f93 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java @@ -36,7 +36,7 @@ import org.apache.commons.cli.MissingArgumentException; import org.apache.commons.cli.Option; import 
org.apache.commons.cli.Options; import org.apache.commons.cli.ParseException; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/TopCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/TopCLI.java index d6c33f4f5a8..b890bee216f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/TopCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/TopCLI.java @@ -52,9 +52,9 @@ import org.apache.commons.cli.HelpFormatter; import org.apache.commons.cli.Options; import org.apache.commons.cli.ParseException; import org.apache.commons.io.IOUtils; -import org.apache.commons.lang.StringUtils; -import org.apache.commons.lang.time.DateFormatUtils; -import org.apache.commons.lang.time.DurationFormatUtils; +import org.apache.commons.lang3.StringUtils; +import org.apache.commons.lang3.time.DateFormatUtils; +import org.apache.commons.lang3.time.DurationFormatUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.http.HttpConfig.Policy; import org.apache.hadoop.security.UserGroupInformation; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java index c292430bb1c..d8440b88205 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java @@ -54,7 +54,7 @@ import java.util.List; import java.util.Map; import java.util.Set; import org.apache.commons.io.IOUtils; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java index fc2eeab8333..518cd1cc4f4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java @@ -47,7 +47,7 @@ import java.util.Set; import java.util.regex.Pattern; import org.apache.commons.cli.Options; -import org.apache.commons.lang.time.DateFormatUtils; +import org.apache.commons.lang3.time.DateFormatUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationTimeoutsRequest; import org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationTimeoutsResponse; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml index a25c5244eb8..3338052c2f5 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml @@ -65,10 +65,6 @@ org.apache.commons commons-compress - - commons-lang - commons-lang - javax.servlet javax.servlet-api diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationsRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationsRequestPBImpl.java index ad009d6f42d..a6abb99b439 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationsRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationsRequestPBImpl.java @@ -23,7 +23,7 @@ import java.util.HashSet; import java.util.List; import java.util.Set; -import org.apache.commons.lang.math.LongRange; +import org.apache.commons.lang3.Range; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.util.StringUtils; @@ -51,8 +51,8 @@ public class GetApplicationsRequestPBImpl extends GetApplicationsRequest { Set users = null; Set queues = null; long limit = Long.MAX_VALUE; - LongRange start = null; - LongRange finish = null; + Range start = null; + Range finish = null; private Set applicationTags; private ApplicationsRequestScope scope; @@ -103,12 +103,12 @@ public class GetApplicationsRequestPBImpl extends GetApplicationsRequest { builder.setScope(ProtoUtils.convertToProtoFormat(scope)); } if (start != null) { - builder.setStartBegin(start.getMinimumLong()); - builder.setStartEnd(start.getMaximumLong()); + builder.setStartBegin(start.getMinimum()); + builder.setStartEnd(start.getMaximum()); } if (finish != null) { - builder.setFinishBegin(finish.getMinimumLong()); - builder.setFinishEnd(finish.getMaximumLong()); + builder.setFinishBegin(finish.getMinimum()); + builder.setFinishEnd(finish.getMaximum()); } if (limit != Long.MAX_VALUE) { builder.setLimit(limit); @@ -316,20 +316,20 @@ public class GetApplicationsRequestPBImpl extends GetApplicationsRequest { } @Override - public LongRange getStartRange() { + public Range getStartRange() { if (this.start == null) { GetApplicationsRequestProtoOrBuilder p = viaProto ? proto: builder; if (p.hasStartBegin() || p.hasStartEnd()) { long begin = p.hasStartBegin() ? p.getStartBegin() : 0L; long end = p.hasStartEnd() ? p.getStartEnd() : Long.MAX_VALUE; - this.start = new LongRange(begin, end); + this.start = Range.between(begin, end); } } return this.start; } @Override - public void setStartRange(LongRange range) { + public void setStartRange(Range range) { this.start = range; } @@ -340,24 +340,24 @@ public class GetApplicationsRequestPBImpl extends GetApplicationsRequest { throw new IllegalArgumentException("begin > end in range (begin, " + "end): (" + begin + ", " + end + ")"); } - this.start = new LongRange(begin, end); + this.start = Range.between(begin, end); } @Override - public LongRange getFinishRange() { + public Range getFinishRange() { if (this.finish == null) { GetApplicationsRequestProtoOrBuilder p = viaProto ? proto: builder; if (p.hasFinishBegin() || p.hasFinishEnd()) { long begin = p.hasFinishBegin() ? p.getFinishBegin() : 0L; long end = p.hasFinishEnd() ? 
p.getFinishEnd() : Long.MAX_VALUE; - this.finish = new LongRange(begin, end); + this.finish = Range.between(begin, end); } } return this.finish; } @Override - public void setFinishRange(LongRange range) { + public void setFinishRange(Range range) { this.finish = range; } @@ -367,7 +367,7 @@ public class GetApplicationsRequestPBImpl extends GetApplicationsRequest { throw new IllegalArgumentException("begin > end in range (begin, " + "end): (" + begin + ", " + end + ")"); } - this.finish = new LongRange(begin, end); + this.finish = Range.between(begin, end); } @Override diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogCLIHelpers.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogCLIHelpers.java index 887d92d8702..9dae7b9ce26 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogCLIHelpers.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogCLIHelpers.java @@ -27,7 +27,7 @@ import java.util.HashSet; import java.util.List; import java.util.Set; import org.apache.commons.io.IOUtils; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogToolUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogToolUtils.java index 90faa19e261..a6cc159f85a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogToolUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogToolUtils.java @@ -28,7 +28,7 @@ import java.nio.channels.WritableByteChannel; import java.nio.charset.Charset; import java.nio.file.Files; import java.nio.file.Paths; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.fs.Path; /** diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java index d342e3fba9b..5005b39f2f4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java @@ -34,7 +34,7 @@ import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.conf.Configuration; diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileControllerFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileControllerFactory.java index 746bf5a4c11..cf40209de1e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileControllerFactory.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileControllerFactory.java @@ -28,7 +28,7 @@ import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.regex.Pattern; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java index a8ae06f876c..59b8e2c0b32 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java @@ -43,7 +43,7 @@ import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.Set; -import org.apache.commons.lang.SerializationUtils; +import org.apache.commons.lang3.SerializationUtils; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java index 66e945fc2ff..612b7010d65 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java @@ -37,7 +37,7 @@ import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock; import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock; import java.util.regex.Pattern; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/RMNodeLabel.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/RMNodeLabel.java index feeeaf1134e..693a58a772c 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/RMNodeLabel.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/RMNodeLabel.java @@ -21,7 +21,7 @@ package org.apache.hadoop.yarn.nodelabels; import java.util.HashSet; import java.util.Set; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.NodeLabel; import org.apache.hadoop.yarn.api.records.Resource; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/Graph.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/Graph.java index 6fb9119e21d..ab884fa6037 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/Graph.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/Graph.java @@ -26,7 +26,7 @@ import java.util.HashSet; import java.util.List; import java.util.Set; -import org.apache.commons.lang.StringEscapeUtils; +import org.apache.commons.lang3.StringEscapeUtils; import org.apache.hadoop.classification.InterfaceAudience.Private; @Private diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Log4jWarningErrorMetricsAppender.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Log4jWarningErrorMetricsAppender.java index 0366ae0353d..9f5de8c1517 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Log4jWarningErrorMetricsAppender.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Log4jWarningErrorMetricsAppender.java @@ -153,7 +153,7 @@ public class Log4jWarningErrorMetricsAppender extends AppenderSkeleton { if (throwableStr != null) { message = message + "\n" + StringUtils.join("\n", throwableStr); message = - org.apache.commons.lang.StringUtils.left(message, MAX_MESSAGE_SIZE); + org.apache.commons.lang3.StringUtils.left(message, MAX_MESSAGE_SIZE); } int level = event.getLevel().toInt(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java index 55be001443b..9ae890a02db 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java @@ -42,7 +42,7 @@ import org.apache.commons.io.IOUtils; import org.apache.commons.io.filefilter.AndFileFilter; import org.apache.commons.io.filefilter.DirectoryFileFilter; import org.apache.commons.io.filefilter.RegexFileFilter; -import org.apache.commons.lang.ArrayUtils; +import org.apache.commons.lang3.ArrayUtils; import org.apache.commons.lang3.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Router.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Router.java index 
f2eca041763..b1c78344213 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Router.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Router.java @@ -31,7 +31,7 @@ import java.util.Map; import java.util.TreeMap; import java.util.regex.Pattern; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java index 73644452140..0d045f36a9d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java @@ -32,7 +32,7 @@ import java.util.Map; import javax.servlet.http.HttpServlet; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration.IntegerRanges; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/HamletImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/HamletImpl.java index 289ad704cc5..1562b1e4aa0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/HamletImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/HamletImpl.java @@ -28,7 +28,7 @@ import java.util.EnumSet; import static java.util.EnumSet.*; import java.util.Iterator; -import static org.apache.commons.lang.StringEscapeUtils.*; +import static org.apache.commons.lang3.StringEscapeUtils.*; import static org.apache.hadoop.yarn.webapp.hamlet.HamletImpl.EOpt.*; import org.apache.hadoop.classification.InterfaceAudience; @@ -106,7 +106,7 @@ public class HamletImpl extends HamletSpec { if (!opts.contains(PRE)) { indent(opts); } - out.print(quote ? escapeHtml(String.valueOf(s)) + out.print(quote ? 
escapeHtml4(String.valueOf(s)) : String.valueOf(s)); if (!opts.contains(INLINE) && !opts.contains(PRE)) { out.println(); @@ -309,7 +309,7 @@ public class HamletImpl extends HamletSpec { sb.setLength(0); sb.append(' ').append(name); if (value != null) { - sb.append("=\"").append(escapeHtml(value)).append("\""); + sb.append("=\"").append(escapeHtml4(value)).append("\""); } out.print(sb.toString()); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet2/HamletImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet2/HamletImpl.java index 995e9fb4912..1fcab23803b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet2/HamletImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet2/HamletImpl.java @@ -28,7 +28,7 @@ import java.util.EnumSet; import static java.util.EnumSet.*; import java.util.Iterator; -import static org.apache.commons.lang.StringEscapeUtils.*; +import static org.apache.commons.lang3.StringEscapeUtils.*; import static org.apache.hadoop.yarn.webapp.hamlet2.HamletImpl.EOpt.*; import org.apache.hadoop.classification.InterfaceAudience; @@ -104,7 +104,7 @@ public class HamletImpl extends HamletSpec { if (!opts.contains(PRE)) { indent(opts); } - out.print(quote ? escapeHtml(String.valueOf(s)) + out.print(quote ? escapeHtml4(String.valueOf(s)) : String.valueOf(s)); if (!opts.contains(INLINE) && !opts.contains(PRE)) { out.println(); @@ -307,7 +307,7 @@ public class HamletImpl extends HamletSpec { sb.setLength(0); sb.append(' ').append(name); if (value != null) { - sb.append("=\"").append(escapeHtml(value)).append("\""); + sb.append("=\"").append(escapeHtml4(value)).append("\""); } out.print(sb.toString()); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java index 46c76d92d22..dba19c9d31b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.webapp.view; -import static org.apache.commons.lang.StringEscapeUtils.escapeJavaScript; +import static org.apache.commons.lang3.StringEscapeUtils.escapeEcmaScript; import static org.apache.hadoop.yarn.util.StringHelper.djoin; import static org.apache.hadoop.yarn.util.StringHelper.join; import static org.apache.hadoop.yarn.util.StringHelper.split; @@ -146,7 +146,7 @@ public class JQueryUI extends HtmlBlock { } int pos = init.indexOf('{') + 1; init = new StringBuffer(init).insert(pos, stateSaveInit).toString(); - list.add(join(" $('", escapeJavaScript(selector), "').dataTable(", init, + list.add(join(" $('", escapeEcmaScript(selector), "').dataTable(", init, ").fnSetFilteringDelay(288);")); } @@ -174,7 +174,7 @@ public class JQueryUI extends HtmlBlock { if (init.isEmpty()) { init = defaultInit; } - list.add(join(" $('", escapeJavaScript(selector), + list.add(join(" $('", escapeEcmaScript(selector), "').click(function() { $(this).children('.dialog').dialog(", init, "); return false; });")); } diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/TextView.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/TextView.java index 7a36c5a8a89..e67f9605bfd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/TextView.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/TextView.java @@ -20,7 +20,7 @@ package org.apache.hadoop.yarn.webapp.view; import java.io.PrintWriter; -import org.apache.commons.lang.StringEscapeUtils; +import org.apache.commons.lang3.StringEscapeUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.yarn.webapp.View; @@ -46,8 +46,8 @@ public abstract class TextView extends View { public void echo(Object... args) { PrintWriter out = writer(); for (Object s : args) { - String escapedString = StringEscapeUtils.escapeJavaScript( - StringEscapeUtils.escapeHtml(s.toString())); + String escapedString = StringEscapeUtils.escapeEcmaScript( + StringEscapeUtils.escapeHtml4(s.toString())); out.print(escapedString); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/BasePBImplRecordsTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/BasePBImplRecordsTest.java index ebd66af2618..bbb784031a1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/BasePBImplRecordsTest.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/BasePBImplRecordsTest.java @@ -20,6 +20,7 @@ package org.apache.hadoop.yarn.api; import com.google.common.collect.Lists; import com.google.common.collect.Maps; import com.google.common.collect.Sets; +import org.apache.commons.lang3.Range; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.yarn.api.resource.PlacementConstraint; @@ -101,7 +102,7 @@ public class BasePBImplRecordsTest { ParameterizedType pt = (ParameterizedType)type; Type rawType = pt.getRawType(); Type [] params = pt.getActualTypeArguments(); - // only support EnumSet, List, Set, Map + // only support EnumSet, List, Set, Map, Range if (rawType.equals(EnumSet.class)) { if (params[0] instanceof Class) { Class c = (Class)(params[0]); @@ -115,6 +116,11 @@ public class BasePBImplRecordsTest { Map map = Maps.newHashMap(); map.put(genTypeValue(params[0]), genTypeValue(params[1])); ret = map; + } else if (rawType.equals(Range.class)) { + ret = typeValueCache.get(rawType); + if (ret != null) { + return ret; + } } } if (ret == null) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestGetApplicationsRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestGetApplicationsRequest.java index 3d95a0fb573..c46c2bc0a9b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestGetApplicationsRequest.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestGetApplicationsRequest.java @@ -21,7 +21,7 @@ import java.util.EnumSet; import java.util.HashSet; import java.util.Set; -import org.apache.commons.lang.math.LongRange; +import org.apache.commons.lang3.Range; import 
org.apache.hadoop.yarn.api.protocolrecords.ApplicationsRequestScope; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest; import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationsRequestPBImpl; @@ -90,11 +90,12 @@ public class TestGetApplicationsRequest { Assert.assertEquals( "StartRange from proto is not the same with original request", - requestFromProto.getStartRange(), new LongRange(startBegin, startEnd)); + requestFromProto.getStartRange(), Range.between(startBegin, startEnd)); Assert.assertEquals( "FinishRange from proto is not the same with original request", - requestFromProto.getFinishRange(), new LongRange(finishBegin, finishEnd)); + requestFromProto.getFinishRange(), + Range.between(finishBegin, finishEnd)); Assert.assertEquals( "Limit from proto is not the same with original request", diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java index dfa0c8826bd..4c660c002ff 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.api; import java.io.IOException; -import org.apache.commons.lang.math.LongRange; +import org.apache.commons.lang3.Range; import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto; import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenResponseProto; import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto; @@ -360,7 +360,7 @@ public class TestPBImplRecords extends BasePBImplRecordsTest { @BeforeClass public static void setup() throws Exception { - typeValueCache.put(LongRange.class, new LongRange(1000, 2000)); + typeValueCache.put(Range.class, Range.between(1000L, 2000L)); typeValueCache.put(URL.class, URL.newInstance( "http", "localhost", 8080, "file0")); typeValueCache.put(SerializedException.class, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestCommonNodeLabelsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestCommonNodeLabelsManager.java index 54e331b71b9..a9894ff7549 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestCommonNodeLabelsManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestCommonNodeLabelsManager.java @@ -27,7 +27,7 @@ import java.util.HashSet; import java.util.Map; import java.util.Set; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.NodeLabel; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/TestWebApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/TestWebApp.java index dea146da282..98b75054268 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/TestWebApp.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/TestWebApp.java @@ -34,7 +34,7 @@ import java.net.HttpURLConnection; import java.net.URL; import java.net.URLEncoder; -import org.apache.commons.lang.ArrayUtils; +import org.apache.commons.lang3.ArrayUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.net.ServerSocketUtil; import org.apache.hadoop.yarn.MockApps; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml index 132624b4cc8..4f09190d37d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml @@ -119,11 +119,6 @@ commons-io - - commons-lang - commons-lang - - commons-net commons-net diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryOperationsFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryOperationsFactory.java index e74ca81f659..5f9c5f37508 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryOperationsFactory.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryOperationsFactory.java @@ -19,7 +19,7 @@ package org.apache.hadoop.registry.client.api; import com.google.common.base.Preconditions; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.service.ServiceStateException; import org.apache.hadoop.registry.client.impl.RegistryOperationsClient; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/binding/JsonSerDeser.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/binding/JsonSerDeser.java index 491bcf9761d..04aabfc635b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/binding/JsonSerDeser.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/binding/JsonSerDeser.java @@ -19,7 +19,7 @@ package org.apache.hadoop.registry.client.binding; import com.fasterxml.jackson.core.JsonProcessingException; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.registry.client.exceptions.InvalidRecordException; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryUtils.java index fcfc5bf570f..1b839c253b1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryUtils.java @@ -20,7 +20,7 @@ package org.apache.hadoop.registry.client.binding; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; 
-import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.PathNotFoundException; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/FSRegistryOperationsService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/FSRegistryOperationsService.java index cfff1bdcbd2..d8cadbfee24 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/FSRegistryOperationsService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/FSRegistryOperationsService.java @@ -24,7 +24,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; -import org.apache.commons.lang.NotImplementedException; +import org.apache.commons.lang3.NotImplementedException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; @@ -238,12 +238,12 @@ public class FSRegistryOperationsService extends CompositeService @Override public boolean addWriteAccessor(String id, String pass) throws IOException { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override public void clearWriteAccessors() { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java index 5c6c983f5be..12a41337cb1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java @@ -21,7 +21,7 @@ package org.apache.hadoop.registry.client.impl.zk; import com.google.common.base.Preconditions; import com.google.common.base.Splitter; import com.google.common.collect.Lists; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.curator.framework.CuratorFrameworkFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/ReverseZoneUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/ReverseZoneUtils.java index cb04f9e5130..a62bef77c99 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/ReverseZoneUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/ReverseZoneUtils.java @@ -21,7 +21,7 @@ import java.net.Inet6Address; import java.net.InetAddress; import java.net.UnknownHostException; import java.util.ArrayList; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import 
org.apache.commons.net.util.SubnetUtils; import org.apache.hadoop.conf.Configuration; import static org.apache.hadoop.registry.client.api.RegistryConstants.KEY_DNS_SPLIT_REVERSE_ZONE_RANGE; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/integration/SelectByYarnPersistence.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/integration/SelectByYarnPersistence.java index 004be86064d..6a1993eafd9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/integration/SelectByYarnPersistence.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/integration/SelectByYarnPersistence.java @@ -19,7 +19,7 @@ package org.apache.hadoop.registry.server.integration; import com.google.common.base.Preconditions; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.registry.client.types.RegistryPathStatus; import org.apache.hadoop.registry.client.types.ServiceRecord; import org.apache.hadoop.registry.client.types.yarn.YarnRegistryAttributes; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/services/MicroZookeeperService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/services/MicroZookeeperService.java index 88e9d67b79d..b6cf9fc5191 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/services/MicroZookeeperService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/services/MicroZookeeperService.java @@ -19,7 +19,7 @@ package org.apache.hadoop.registry.server.services; import com.google.common.base.Preconditions; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.curator.ensemble.fixed.FixedEnsembleProvider; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/services/RegistryAdminService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/services/RegistryAdminService.java index 7a20c248db2..d60797e71ea 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/services/RegistryAdminService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/services/RegistryAdminService.java @@ -20,7 +20,7 @@ package org.apache.hadoop.registry.server.services; import com.google.common.annotations.VisibleForTesting; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.curator.framework.api.BackgroundCallback; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/RegistryTestHelper.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/RegistryTestHelper.java index 91602e1d3b3..a0f2ca11c0c 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/RegistryTestHelper.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/RegistryTestHelper.java @@ -18,7 +18,7 @@ package org.apache.hadoop.registry; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.registry.client.api.RegistryConstants; import org.apache.hadoop.registry.client.binding.RegistryUtils; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryClientService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryClientService.java index 7d57048de2f..ecaf0fa89a8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryClientService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryClientService.java @@ -186,10 +186,10 @@ public class ApplicationHistoryClientService extends AbstractService implements IOException { long startedBegin = request.getStartRange() == null ? 0L : request.getStartRange() - .getMinimumLong(); + .getMinimum(); long startedEnd = request.getStartRange() == null ? Long.MAX_VALUE : request - .getStartRange().getMaximumLong(); + .getStartRange().getMaximum(); GetApplicationsResponse response = GetApplicationsResponse.newInstance(new ArrayList( history.getApplications(request.getLimit(), startedBegin, startedEnd) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDB.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDB.java index 5c511a3aaac..9279eb93075 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDB.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDB.java @@ -32,7 +32,7 @@ import java.util.TreeMap; import java.util.Map.Entry; import org.apache.commons.io.FilenameUtils; -import org.apache.commons.lang.time.FastDateFormat; +import org.apache.commons.lang3.time.FastDateFormat; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java index 1ac170c2505..36b5ce84307 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java @@ -37,7 +37,7 @@ import java.util.SortedSet; import java.util.TreeMap; import org.apache.commons.collections.map.LRUMap; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebServices.java index 154c68a6a3f..a4f56ffe512 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebServices.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebServices.java @@ -35,7 +35,7 @@ import javax.servlet.FilterConfig; import javax.servlet.ServletException; import javax.servlet.http.HttpServletResponse; import javax.ws.rs.core.MediaType; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/SQLFederationStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/SQLFederationStateStore.java index 533f9c82ef1..e62dcaf2a7f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/SQLFederationStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/SQLFederationStateStore.java @@ -29,7 +29,7 @@ import java.util.Calendar; import java.util.List; import java.util.TimeZone; -import org.apache.commons.lang.NotImplementedException; +import org.apache.commons.lang3.NotImplementedException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.conf.YarnConfiguration; @@ -987,12 +987,12 @@ public class SQLFederationStateStore implements FederationStateStore { @Override public Version getCurrentVersion() { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override public Version loadVersion() { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationStateStoreFacade.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationStateStoreFacade.java index 682eb1457d9..1bcb0f4c5e5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationStateStoreFacade.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationStateStoreFacade.java @@ -36,7 +36,7 @@ import javax.cache.integration.CacheLoader; import javax.cache.integration.CacheLoaderException; import javax.cache.spi.CachingProvider; -import org.apache.commons.lang.NotImplementedException; +import org.apache.commons.lang3.NotImplementedException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.retry.RetryPolicies; import org.apache.hadoop.io.retry.RetryPolicy; @@ -538,7 +538,7 @@ public final class FederationStateStoreFacade { throws CacheLoaderException { // The FACADE does not use the Cache's getAll API. Hence this is not // required to be implemented - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/OpportunisticContainerAllocator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/OpportunisticContainerAllocator.java index ae1ba437a50..1cec3dac11b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/OpportunisticContainerAllocator.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/OpportunisticContainerAllocator.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.scheduler; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedApplicationManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedApplicationManager.java index 10985e0417b..73795dcbc77 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedApplicationManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedApplicationManager.java @@ -25,7 +25,7 @@ import java.util.Set; import java.util.concurrent.BlockingQueue; import java.util.concurrent.LinkedBlockingQueue; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Unstable; 
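A minimal illustrative sketch of the NotImplementedException pattern appearing in the hunks above (FSRegistryOperationsService, SQLFederationStateStore, FederationStateStoreFacade), assuming commons-lang3 is on the classpath; the class and method names Lang3StubExample and notSupportedYet are hypothetical and only demonstrate the pattern, they are not part of the patched sources:

import org.apache.commons.lang3.NotImplementedException;
import org.apache.commons.lang3.StringUtils;

public class Lang3StubExample {
  // Mirrors the stub pattern in the hunks above: the lang3 constructor
  // used here takes an explicit message.
  public void notSupportedYet() {
    throw new NotImplementedException("Code is not implemented");
  }

  public static void main(String[] args) {
    // StringUtils moves from org.apache.commons.lang to
    // org.apache.commons.lang3 with the same join/trim/repeat helpers.
    System.out.println(StringUtils.join(new String[] {"a", "b", "c"}, ","));
    try {
      new Lang3StubExample().notSupportedYet();
    } catch (NotImplementedException e) {
      System.out.println(e.getMessage()); // prints "Code is not implemented"
    }
  }
}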
import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/util/timeline/TimelineServerUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/util/timeline/TimelineServerUtils.java index 78bf20f8ccd..3021def7b20 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/util/timeline/TimelineServerUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/util/timeline/TimelineServerUtils.java @@ -62,7 +62,7 @@ public final class TimelineServerUtils { } target.addAll(defaultInitializers); String actualInitializers = - org.apache.commons.lang.StringUtils.join(target, ","); + org.apache.commons.lang3.StringUtils.join(target, ","); LOG.info("Filter initializers set for timeline service: " + actualInitializers); conf.set("hadoop.http.filter.initializers", actualInitializers); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java index d663dc9786b..38c79babcea 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java @@ -25,7 +25,7 @@ import java.security.PrivilegedExceptionAction; import java.util.Collection; import java.util.List; -import org.apache.commons.lang.StringEscapeUtils; +import org.apache.commons.lang3.StringEscapeUtils; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.yarn.api.ApplicationBaseProtocol; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest; @@ -173,8 +173,8 @@ public class AppAttemptBlock extends HtmlBlock { + container.getNodeHttpAddress()) .append("'>") .append(container.getNodeHttpAddress() == null ? "N/A" : - StringEscapeUtils.escapeJavaScript(StringEscapeUtils - .escapeHtml(container.getNodeHttpAddress()))) + StringEscapeUtils.escapeEcmaScript(StringEscapeUtils + .escapeHtml4(container.getNodeHttpAddress()))) .append("\",\"") .append(container.getContainerExitStatus()).append("\",\"") .append(nodeLink == null ? "N/A" : StringEscapeUtils - .escapeJavaScript(StringEscapeUtils.escapeHtml(nodeLink))) + .escapeEcmaScript(StringEscapeUtils.escapeHtml4(nodeLink))) .append("\",\"") .append(logsLink == null ? 
"N/A" : "Logs").append("\"],\n"); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppsBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppsBlock.java index cb15449d357..291a5726df6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppsBlock.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppsBlock.java @@ -32,8 +32,8 @@ import java.util.Collection; import java.util.EnumSet; import java.util.List; -import org.apache.commons.lang.StringEscapeUtils; -import org.apache.commons.lang.math.LongRange; +import org.apache.commons.lang3.StringEscapeUtils; +import org.apache.commons.lang3.Range; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.api.ApplicationBaseProtocol; @@ -108,7 +108,7 @@ public class AppsBlock extends HtmlBlock { "app.started-time.end must be greater than app.started-time.begin"); } request.setStartRange( - new LongRange(appStartedTimeBegain, appStartedTimeEnd)); + Range.between(appStartedTimeBegain, appStartedTimeEnd)); if (callerUGI == null) { appReports = getApplicationReport(request); @@ -174,19 +174,19 @@ public class AppsBlock extends HtmlBlock { .append(app.getAppId()) .append("\",\"") .append( - StringEscapeUtils.escapeJavaScript(StringEscapeUtils.escapeHtml(app + StringEscapeUtils.escapeEcmaScript(StringEscapeUtils.escapeHtml4(app .getUser()))) .append("\",\"") .append( - StringEscapeUtils.escapeJavaScript(StringEscapeUtils.escapeHtml(app + StringEscapeUtils.escapeEcmaScript(StringEscapeUtils.escapeHtml4(app .getName()))) .append("\",\"") .append( - StringEscapeUtils.escapeJavaScript(StringEscapeUtils.escapeHtml(app + StringEscapeUtils.escapeEcmaScript(StringEscapeUtils.escapeHtml4(app .getType()))) .append("\",\"") .append( - StringEscapeUtils.escapeJavaScript(StringEscapeUtils.escapeHtml(app + StringEscapeUtils.escapeEcmaScript(StringEscapeUtils.escapeHtml4(app .getQueue()))).append("\",\"").append(String .valueOf(app.getPriority())) .append("\",\"").append(app.getStartedTime()) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebServices.java index df4656f427f..03b1055e1d0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebServices.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebServices.java @@ -31,7 +31,7 @@ import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import javax.ws.rs.WebApplicationException; -import org.apache.commons.lang.math.LongRange; +import org.apache.commons.lang3.Range; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.AuthorizationException; import org.apache.hadoop.util.StringUtils; @@ -151,7 +151,7 @@ public class WebServices { final GetApplicationsRequest request = 
GetApplicationsRequest.newInstance(); request.setLimit(countNum); - request.setStartRange(new LongRange(sBegin, sEnd)); + request.setStartRange(Range.between(sBegin, sEnd)); try { if (callerUGI == null) { // TODO: the request should take the params like what RMWebServices does diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml index 26a5220ca74..74be4da8da4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml @@ -63,10 +63,6 @@ org.codehaus.jettison jettison - - commons-lang - commons-lang - javax.servlet javax.servlet-api diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java index 446cbe4d08a..27224a599e3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java @@ -35,7 +35,7 @@ import java.util.EnumSet; import java.util.List; import java.util.Map; -import org.apache.commons.lang.math.RandomUtils; +import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.FileUtil; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/WindowsSecureContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/WindowsSecureContainerExecutor.java index e9344727ccf..d453a155c0c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/WindowsSecureContainerExecutor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/WindowsSecureContainerExecutor.java @@ -35,7 +35,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java index 95ab37408a0..0541544ab50 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java @@ -34,7 +34,7 @@ import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.yarn.api.records.ContainerSubState; import org.apache.hadoop.yarn.server.nodemanager.containermanager.scheduler.UpdateContainerSchedulerEvent; import org.slf4j.Logger; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperationExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperationExecutor.java index 9f13a5ed266..76949ff7bcc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperationExecutor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperationExecutor.java @@ -21,7 +21,7 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged; import com.google.common.annotations.VisibleForTesting; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/DefaultOOMHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/DefaultOOMHandler.java index 202e7d0176e..86137b514ae 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/DefaultOOMHandler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/DefaultOOMHandler.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources; import com.google.common.annotations.VisibleForTesting; -import org.apache.commons.lang.builder.HashCodeBuilder; +import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/executor/ContainerSignalContext.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/executor/ContainerSignalContext.java index 5b911b8b0cf..6d5d5905da0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/executor/ContainerSignalContext.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/executor/ContainerSignalContext.java @@ -20,7 +20,7 @@ package org.apache.hadoop.yarn.server.nodemanager.executor; -import org.apache.commons.lang.builder.HashCodeBuilder; +import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor.Signal; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebServices.java index b675d5abdc7..bb0881b447b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebServices.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebServices.java @@ -51,7 +51,7 @@ import javax.ws.rs.core.Response.Status; import javax.ws.rs.core.StreamingOutput; import javax.ws.rs.core.UriInfo; import org.apache.commons.io.IOUtils; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.http.JettyUtils; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java index f8cf92dae0e..da9bc89419b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java @@ -1461,13 +1461,13 @@ public class TestContainerLaunch extends BaseContainerManagerTest { // Basic tests: less length, exact length, max+1 length builder.command(Arrays.asList( - org.apache.commons.lang.StringUtils.repeat("A", 1024))); + org.apache.commons.lang3.StringUtils.repeat("A", 1024))); builder.command(Arrays.asList( - org.apache.commons.lang.StringUtils.repeat( + org.apache.commons.lang3.StringUtils.repeat( "E", Shell.WINDOWS_MAX_SHELL_LENGTH - callCmd.length()))); try { builder.command(Arrays.asList( - org.apache.commons.lang.StringUtils.repeat( + 
org.apache.commons.lang3.StringUtils.repeat( "X", Shell.WINDOWS_MAX_SHELL_LENGTH -callCmd.length() + 1))); fail("longCommand was expected to throw"); } catch(IOException e) { @@ -1476,21 +1476,21 @@ public class TestContainerLaunch extends BaseContainerManagerTest { // Composite tests, from parts: less, exact and + builder.command(Arrays.asList( - org.apache.commons.lang.StringUtils.repeat("A", 1024), - org.apache.commons.lang.StringUtils.repeat("A", 1024), - org.apache.commons.lang.StringUtils.repeat("A", 1024))); + org.apache.commons.lang3.StringUtils.repeat("A", 1024), + org.apache.commons.lang3.StringUtils.repeat("A", 1024), + org.apache.commons.lang3.StringUtils.repeat("A", 1024))); // buildr.command joins the command parts with an extra space builder.command(Arrays.asList( - org.apache.commons.lang.StringUtils.repeat("E", 4095), - org.apache.commons.lang.StringUtils.repeat("E", 2047), - org.apache.commons.lang.StringUtils.repeat("E", 2047 - callCmd.length()))); + org.apache.commons.lang3.StringUtils.repeat("E", 4095), + org.apache.commons.lang3.StringUtils.repeat("E", 2047), + org.apache.commons.lang3.StringUtils.repeat("E", 2047 - callCmd.length()))); try { builder.command(Arrays.asList( - org.apache.commons.lang.StringUtils.repeat("X", 4095), - org.apache.commons.lang.StringUtils.repeat("X", 2047), - org.apache.commons.lang.StringUtils.repeat("X", 2048 - callCmd.length()))); + org.apache.commons.lang3.StringUtils.repeat("X", 4095), + org.apache.commons.lang3.StringUtils.repeat("X", 2047), + org.apache.commons.lang3.StringUtils.repeat("X", 2048 - callCmd.length()))); fail("long commands was expected to throw"); } catch(IOException e) { assertThat(e.getMessage(), CoreMatchers.containsString(expectedMessage)); @@ -1508,11 +1508,11 @@ public class TestContainerLaunch extends BaseContainerManagerTest { ShellScriptBuilder builder = ShellScriptBuilder.create(); // test env - builder.env("somekey", org.apache.commons.lang.StringUtils.repeat("A", 1024)); - builder.env("somekey", org.apache.commons.lang.StringUtils.repeat( + builder.env("somekey", org.apache.commons.lang3.StringUtils.repeat("A", 1024)); + builder.env("somekey", org.apache.commons.lang3.StringUtils.repeat( "A", Shell.WINDOWS_MAX_SHELL_LENGTH - ("@set somekey=").length())); try { - builder.env("somekey", org.apache.commons.lang.StringUtils.repeat( + builder.env("somekey", org.apache.commons.lang3.StringUtils.repeat( "A", Shell.WINDOWS_MAX_SHELL_LENGTH - ("@set somekey=").length()) + 1); fail("long env was expected to throw"); } catch(IOException e) { @@ -1533,11 +1533,11 @@ public class TestContainerLaunch extends BaseContainerManagerTest { ShellScriptBuilder builder = ShellScriptBuilder.create(); // test mkdir - builder.mkdir(new Path(org.apache.commons.lang.StringUtils.repeat("A", 1024))); - builder.mkdir(new Path(org.apache.commons.lang.StringUtils.repeat("E", + builder.mkdir(new Path(org.apache.commons.lang3.StringUtils.repeat("A", 1024))); + builder.mkdir(new Path(org.apache.commons.lang3.StringUtils.repeat("E", (Shell.WINDOWS_MAX_SHELL_LENGTH - mkDirCmd.length()) / 2))); try { - builder.mkdir(new Path(org.apache.commons.lang.StringUtils.repeat( + builder.mkdir(new Path(org.apache.commons.lang3.StringUtils.repeat( "X", (Shell.WINDOWS_MAX_SHELL_LENGTH - mkDirCmd.length())/2 +1))); fail("long mkdir was expected to throw"); } catch(IOException e) { @@ -1557,18 +1557,18 @@ public class TestContainerLaunch extends BaseContainerManagerTest { ShellScriptBuilder builder = ShellScriptBuilder.create(); // test link - 
builder.link(new Path(org.apache.commons.lang.StringUtils.repeat("A", 1024)), - new Path(org.apache.commons.lang.StringUtils.repeat("B", 1024))); + builder.link(new Path(org.apache.commons.lang3.StringUtils.repeat("A", 1024)), + new Path(org.apache.commons.lang3.StringUtils.repeat("B", 1024))); builder.link( - new Path(org.apache.commons.lang.StringUtils.repeat( + new Path(org.apache.commons.lang3.StringUtils.repeat( "E", (Shell.WINDOWS_MAX_SHELL_LENGTH - linkCmd.length())/2)), - new Path(org.apache.commons.lang.StringUtils.repeat( + new Path(org.apache.commons.lang3.StringUtils.repeat( "F", (Shell.WINDOWS_MAX_SHELL_LENGTH - linkCmd.length())/2))); try { builder.link( - new Path(org.apache.commons.lang.StringUtils.repeat( + new Path(org.apache.commons.lang3.StringUtils.repeat( "X", (Shell.WINDOWS_MAX_SHELL_LENGTH - linkCmd.length())/2 + 1)), - new Path(org.apache.commons.lang.StringUtils.repeat( + new Path(org.apache.commons.lang3.StringUtils.repeat( "Y", (Shell.WINDOWS_MAX_SHELL_LENGTH - linkCmd.length())/2) + 1)); fail("long link was expected to throw"); } catch(IOException e) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java index 05cdd49db3a..6268ad986c3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java @@ -59,7 +59,7 @@ import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.AbstractFileSystem; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/TestNonAggregatingLogHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/TestNonAggregatingLogHandler.java index c6fa16df100..dead6031203 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/TestNonAggregatingLogHandler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/TestNonAggregatingLogHandler.java @@ -46,8 +46,8 @@ import java.util.concurrent.ScheduledThreadPoolExecutor; import java.util.concurrent.TimeUnit; import org.apache.commons.io.FileUtils; -import org.apache.commons.lang.StringUtils; -import org.apache.commons.lang.builder.EqualsBuilder; +import org.apache.commons.lang3.StringUtils; +import 
org.apache.commons.lang3.builder.EqualsBuilder; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.AbstractFileSystem; import org.apache.hadoop.fs.FileContext; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml index cb651c79e75..72bcd91d650 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml @@ -138,10 +138,6 @@ org.codehaus.jettison jettison - - commons-lang - commons-lang - com.sun.jersey jersey-core diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java index e92a3c8e412..be997534dd2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java @@ -37,7 +37,7 @@ import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; import org.apache.commons.cli.UnrecognizedOptionException; -import org.apache.commons.lang.math.LongRange; +import org.apache.commons.lang3.Range; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; @@ -754,7 +754,7 @@ public class ClientRMService extends AbstractService implements message.append(" at ").append(remoteAddress.getHostAddress()); } - String diagnostics = org.apache.commons.lang.StringUtils + String diagnostics = org.apache.commons.lang3.StringUtils .trimToNull(request.getDiagnostics()); if (diagnostics != null) { message.append(" with diagnostic message: "); @@ -812,8 +812,8 @@ public class ClientRMService extends AbstractService implements Set queues = request.getQueues(); Set tags = request.getApplicationTags(); long limit = request.getLimit(); - LongRange start = request.getStartRange(); - LongRange finish = request.getFinishRange(); + Range start = request.getStartRange(); + Range finish = request.getFinishRange(); ApplicationsRequestScope scope = request.getScope(); final Map apps = rmContext.getRMApps(); @@ -888,11 +888,11 @@ public class ClientRMService extends AbstractService implements continue; } - if (start != null && !start.containsLong(application.getStartTime())) { + if (start != null && !start.contains(application.getStartTime())) { continue; } - if (finish != null && !finish.containsLong(application.getFinishTime())) { + if (finish != null && !finish.contains(application.getFinishTime())) { continue; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java index 
3f9fd171689..cc69fbae366 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java @@ -19,7 +19,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ImmutableSet; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceStability.Unstable; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/QueuePlacementRuleUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/QueuePlacementRuleUtils.java index e1996fa3db0..0b5fe2ebd8e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/QueuePlacementRuleUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/QueuePlacementRuleUtils.java @@ -125,7 +125,7 @@ public final class QueuePlacementRuleUtils { public static ApplicationPlacementContext getPlacementContext( QueueMappingEntity mapping, String leafQueueName) { - if (!org.apache.commons.lang.StringUtils.isEmpty(mapping.getParentQueue())) { + if (!org.apache.commons.lang3.StringUtils.isEmpty(mapping.getParentQueue())) { return new ApplicationPlacementContext(leafQueueName, mapping.getParentQueue()); } else{ diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/UserGroupMappingPlacementRule.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/UserGroupMappingPlacementRule.java index 906d5365bdf..99f37b1e7a5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/UserGroupMappingPlacementRule.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/UserGroupMappingPlacementRule.java @@ -22,7 +22,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java index 73191562c97..9f1ea4403f4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java @@ -36,7 +36,7 @@ import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock; import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptMetrics.java index 4e755054bcf..43e6e4d577c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptMetrics.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptMetrics.java @@ -28,7 +28,7 @@ import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock; import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock; -import org.apache.commons.lang.time.DateUtils; +import org.apache.commons.lang3.time.DateUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java index 005569cf252..1225af1a2a4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java @@ -33,9 +33,9 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.ReentrantReadWriteLock; -import org.apache.commons.lang.StringUtils; -import org.apache.commons.lang.time.DateUtils; -import org.apache.commons.lang.time.FastDateFormat; +import org.apache.commons.lang3.StringUtils; +import org.apache.commons.lang3.time.DateUtils; +import 
org.apache.commons.lang3.time.FastDateFormat; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java index 7de250d31d2..844057ea142 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java @@ -26,7 +26,7 @@ import java.util.Set; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.Lists; import com.google.common.collect.Maps; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java index 67b676bea57..9c3e98f0e71 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java @@ -28,7 +28,7 @@ import java.util.Set; import java.util.concurrent.locks.ReentrantReadWriteLock; import com.google.common.annotations.VisibleForTesting; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java index 1c9bf6bb6e9..50ab70d03ec 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java @@ -33,8 +33,8 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.LinkedBlockingQueue; import 
java.util.concurrent.atomic.AtomicBoolean; -import org.apache.commons.lang.StringUtils; -import org.apache.commons.lang.time.DateUtils; +import org.apache.commons.lang3.StringUtils; +import org.apache.commons.lang3.time.DateUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java index 1ae8f919dfb..366bad0a4f2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java @@ -23,7 +23,7 @@ import java.util.*; import java.util.Map.Entry; import java.util.concurrent.ConcurrentHashMap; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java index c198d135ed2..bb4823e1dd2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java @@ -27,7 +27,7 @@ import java.util.List; import java.util.Map; import java.util.Set; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/QueueCapacities.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/QueueCapacities.java index cc4af3dfb47..35c084bb4d3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/QueueCapacities.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/QueueCapacities.java @@ -26,7 +26,7 @@ import java.util.concurrent.locks.ReentrantReadWriteLock; import 
java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock; import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager; import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java index 99deb1abf3b..99a5b84b61f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java @@ -22,7 +22,7 @@ import java.util.ArrayList; import java.util.Iterator; import java.util.List; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.yarn.api.records.Container; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java index 4fc2bab7a3e..a6907676f9d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java @@ -22,7 +22,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.Maps; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.yarn.api.records.AllocationTagNamespaceType; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/LocalAllocationTagsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/LocalAllocationTagsManager.java index 1fce466d3ac..070a004b9dc 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/LocalAllocationTagsManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/LocalAllocationTagsManager.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.algorithm; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.AllocationTags; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerId; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerAppsBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerAppsBlock.java index 2bbbcc54a30..4bc318298d3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerAppsBlock.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerAppsBlock.java @@ -29,7 +29,7 @@ import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; -import org.apache.commons.lang.StringEscapeUtils; +import org.apache.commons.lang3.StringEscapeUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; @@ -126,13 +126,13 @@ public class FairSchedulerAppsBlock extends HtmlBlock { appsTableData.append("[\"") .append(appInfo.getAppId()).append("\",\"") - .append(StringEscapeUtils.escapeJavaScript(StringEscapeUtils.escapeHtml( + .append(StringEscapeUtils.escapeEcmaScript(StringEscapeUtils.escapeHtml4( appInfo.getUser()))).append("\",\"") - .append(StringEscapeUtils.escapeJavaScript(StringEscapeUtils.escapeHtml( + .append(StringEscapeUtils.escapeEcmaScript(StringEscapeUtils.escapeHtml4( appInfo.getName()))).append("\",\"") - .append(StringEscapeUtils.escapeJavaScript(StringEscapeUtils.escapeHtml( + .append(StringEscapeUtils.escapeEcmaScript(StringEscapeUtils.escapeHtml4( appInfo.getApplicationType()))).append("\",\"") - .append(StringEscapeUtils.escapeJavaScript(StringEscapeUtils.escapeHtml( + .append(StringEscapeUtils.escapeEcmaScript(StringEscapeUtils.escapeHtml4( appInfo.getQueue()))).append("\",\"") .append(fairShare).append("\",\"") .append(appInfo.getStartTime()).append("\",\"") diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppAttemptBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppAttemptBlock.java index b26eb0951ed..18595decf54 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppAttemptBlock.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppAttemptBlock.java @@ -27,8 +27,8 @@ import java.io.IOException; import java.util.Collection; import java.util.List; -import org.apache.commons.lang.StringEscapeUtils; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringEscapeUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetContainersRequest; @@ -100,8 +100,8 @@ public class RMAppAttemptBlock extends AppAttemptBlock{ resourceRequestTableData.append("[\"") .append(String.valueOf(resourceRequest.getPriority())).append("\",\"") .append(resourceRequest.getResourceName()).append("\",\"") - .append(StringEscapeUtils.escapeJavaScript(StringEscapeUtils - .escapeHtml(String.valueOf(resourceRequest.getCapability())))) + .append(StringEscapeUtils.escapeEcmaScript(StringEscapeUtils + .escapeHtml4(String.valueOf(resourceRequest.getCapability())))) .append("\",\"") .append(String.valueOf(resourceRequest.getNumContainers())) .append("\",\"") diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppBlock.java index 8553d8cde28..80d27f74507 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppBlock.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppBlock.java @@ -25,7 +25,7 @@ import java.util.Collection; import java.util.List; import java.util.Set; -import org.apache.commons.lang.StringEscapeUtils; +import org.apache.commons.lang3.StringEscapeUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsRequest; @@ -167,7 +167,7 @@ public class RMAppBlock extends AppBlock{ .append(nodeLink == null ? "#" : "href='" + nodeLink) .append("'>") .append(nodeLink == null ? "N/A" : StringEscapeUtils - .escapeJavaScript(StringEscapeUtils.escapeHtml(nodeLink))) + .escapeEcmaScript(StringEscapeUtils.escapeHtml4(nodeLink))) .append("\",\"") .append(logsLink == null ? 
"N/A" : "Logs").append("\",") diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppsBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppsBlock.java index a525cffdd0d..25b3a4dab3d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppsBlock.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppsBlock.java @@ -26,7 +26,7 @@ import java.io.IOException; import java.util.List; import java.util.Set; -import org.apache.commons.lang.StringEscapeUtils; +import org.apache.commons.lang3.StringEscapeUtils; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; @@ -119,19 +119,19 @@ public class RMAppsBlock extends AppsBlock { .append(app.getAppId()) .append("\",\"") .append( - StringEscapeUtils.escapeJavaScript( - StringEscapeUtils.escapeHtml(app.getUser()))) + StringEscapeUtils.escapeEcmaScript( + StringEscapeUtils.escapeHtml4(app.getUser()))) .append("\",\"") .append( - StringEscapeUtils.escapeJavaScript( - StringEscapeUtils.escapeHtml(app.getName()))) + StringEscapeUtils.escapeEcmaScript( + StringEscapeUtils.escapeHtml4(app.getName()))) .append("\",\"") .append( - StringEscapeUtils.escapeJavaScript(StringEscapeUtils.escapeHtml(app + StringEscapeUtils.escapeEcmaScript(StringEscapeUtils.escapeHtml4(app .getType()))) .append("\",\"") .append( - StringEscapeUtils.escapeJavaScript(StringEscapeUtils.escapeHtml(app + StringEscapeUtils.escapeEcmaScript(StringEscapeUtils.escapeHtml4(app .getQueue()))).append("\",\"").append(String .valueOf(app.getPriority())) .append("\",\"").append(app.getStartedTime()) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAttemptInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAttemptInfo.java index 82a946e4c9f..22a9c33477b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAttemptInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAttemptInfo.java @@ -21,7 +21,7 @@ import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlRootElement; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestContainerResourceUsage.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestContainerResourceUsage.java index 3508ab4760c..a6b30b3f6b3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestContainerResourceUsage.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestContainerResourceUsage.java @@ -25,7 +25,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; -import org.apache.commons.lang.time.DateUtils; +import org.apache.commons.lang3.time.DateUtils; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.yarn.api.records.ApplicationAccessType; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptImplDiagnostics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptImplDiagnostics.java index 295b59fc8c5..6c1b08269f5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptImplDiagnostics.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptImplDiagnostics.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt; -import org.apache.commons.lang.RandomStringUtils; +import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoCreatedQueueBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoCreatedQueueBase.java index addec662862..f313d70191b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoCreatedQueueBase.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoCreatedQueueBase.java @@ -19,7 +19,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; -import org.apache.commons.lang.math.RandomUtils; +import org.apache.commons.lang3.RandomUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.CommonConfigurationKeys; @@ -550,7 +550,7 @@ public class 
TestCapacitySchedulerAutoCreatedQueueBase { protected RMApp submitApp(String user, String queue, String nodeLabel) throws Exception { RMApp app = mockRM.submitApp(GB, - "test-auto-queue-creation" + RandomUtils.nextInt(100), user, null, + "test-auto-queue-creation" + RandomUtils.nextInt(0, 100), user, null, queue, nodeLabel); Assert.assertEquals(app.getAmNodeLabelExpression(), nodeLabel); // check preconditions diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/FederationClientInterceptor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/FederationClientInterceptor.java index 07eaf97fd73..4c4e371c961 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/FederationClientInterceptor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/FederationClientInterceptor.java @@ -25,7 +25,7 @@ import java.util.Map; import java.util.Random; import java.util.concurrent.ConcurrentHashMap; -import org.apache.commons.lang.NotImplementedException; +import org.apache.commons.lang3.NotImplementedException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.ApplicationClientProtocol; import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenRequest; @@ -564,172 +564,172 @@ public class FederationClientInterceptor @Override public GetApplicationsResponse getApplications(GetApplicationsRequest request) throws YarnException, IOException { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override public GetClusterMetricsResponse getClusterMetrics( GetClusterMetricsRequest request) throws YarnException, IOException { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override public GetClusterNodesResponse getClusterNodes(GetClusterNodesRequest request) throws YarnException, IOException { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override public GetQueueInfoResponse getQueueInfo(GetQueueInfoRequest request) throws YarnException, IOException { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override public GetQueueUserAclsInfoResponse getQueueUserAcls( GetQueueUserAclsInfoRequest request) throws YarnException, IOException { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override public MoveApplicationAcrossQueuesResponse moveApplicationAcrossQueues( MoveApplicationAcrossQueuesRequest request) throws YarnException, IOException { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override public GetNewReservationResponse getNewReservation( GetNewReservationRequest request) throws YarnException, IOException { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override public ReservationSubmissionResponse submitReservation( ReservationSubmissionRequest request) throws YarnException, IOException { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } 
@Override public ReservationListResponse listReservations( ReservationListRequest request) throws YarnException, IOException { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override public ReservationUpdateResponse updateReservation( ReservationUpdateRequest request) throws YarnException, IOException { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override public ReservationDeleteResponse deleteReservation( ReservationDeleteRequest request) throws YarnException, IOException { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override public GetNodesToLabelsResponse getNodeToLabels( GetNodesToLabelsRequest request) throws YarnException, IOException { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override public GetLabelsToNodesResponse getLabelsToNodes( GetLabelsToNodesRequest request) throws YarnException, IOException { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override public GetClusterNodeLabelsResponse getClusterNodeLabels( GetClusterNodeLabelsRequest request) throws YarnException, IOException { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override public GetApplicationAttemptReportResponse getApplicationAttemptReport( GetApplicationAttemptReportRequest request) throws YarnException, IOException { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override public GetApplicationAttemptsResponse getApplicationAttempts( GetApplicationAttemptsRequest request) throws YarnException, IOException { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override public GetContainerReportResponse getContainerReport( GetContainerReportRequest request) throws YarnException, IOException { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override public GetContainersResponse getContainers(GetContainersRequest request) throws YarnException, IOException { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override public GetDelegationTokenResponse getDelegationToken( GetDelegationTokenRequest request) throws YarnException, IOException { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override public RenewDelegationTokenResponse renewDelegationToken( RenewDelegationTokenRequest request) throws YarnException, IOException { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override public CancelDelegationTokenResponse cancelDelegationToken( CancelDelegationTokenRequest request) throws YarnException, IOException { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override public FailApplicationAttemptResponse failApplicationAttempt( FailApplicationAttemptRequest request) throws YarnException, IOException { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override public UpdateApplicationPriorityResponse updateApplicationPriority( UpdateApplicationPriorityRequest request) throws YarnException, IOException { - throw 
new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override public SignalContainerResponse signalToContainer( SignalContainerRequest request) throws YarnException, IOException { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override public UpdateApplicationTimeoutsResponse updateApplicationTimeouts( UpdateApplicationTimeoutsRequest request) throws YarnException, IOException { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override public GetAllResourceProfilesResponse getResourceProfiles( GetAllResourceProfilesRequest request) throws YarnException, IOException { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override public GetResourceProfileResponse getResourceProfile( GetResourceProfileRequest request) throws YarnException, IOException { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override public GetAllResourceTypeInfoResponse getResourceTypeInfo( GetAllResourceTypeInfoRequest request) throws YarnException, IOException { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/AppsBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/AppsBlock.java index 56fa73e2507..aafc5f6c364 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/AppsBlock.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/AppsBlock.java @@ -18,8 +18,8 @@ package org.apache.hadoop.yarn.server.router.webapp; -import static org.apache.commons.lang.StringEscapeUtils.escapeHtml; -import static org.apache.commons.lang.StringEscapeUtils.escapeJavaScript; +import static org.apache.commons.lang3.StringEscapeUtils.escapeHtml4; +import static org.apache.commons.lang3.StringEscapeUtils.escapeEcmaScript; import static org.apache.hadoop.yarn.util.StringHelper.join; import static org.apache.hadoop.yarn.webapp.view.JQueryUI.C_PROGRESSBAR; import static org.apache.hadoop.yarn.webapp.view.JQueryUI.C_PROGRESSBAR_VALUE; @@ -125,6 +125,6 @@ public class AppsBlock extends HtmlBlock { } private static String escape(String str) { - return escapeJavaScript(escapeHtml(str)); + return escapeEcmaScript(escapeHtml4(str)); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/FederationInterceptorREST.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/FederationInterceptorREST.java index d1d49ec4e5a..51dfb00f39f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/FederationInterceptorREST.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/FederationInterceptorREST.java @@ -39,7 +39,7 @@ import 
javax.ws.rs.core.HttpHeaders; import javax.ws.rs.core.Response; import javax.ws.rs.core.Response.Status; -import org.apache.commons.lang.NotImplementedException; +import org.apache.commons.lang3.NotImplementedException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.authorize.AuthorizationException; import org.apache.hadoop.util.ReflectionUtils; @@ -1040,213 +1040,213 @@ public class FederationInterceptorREST extends AbstractRESTRequestInterceptor { @Override public ClusterInfo getClusterInfo() { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override public SchedulerTypeInfo getSchedulerInfo() { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override public String dumpSchedulerLogs(String time, HttpServletRequest hsr) throws IOException { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override public ActivitiesInfo getActivities(HttpServletRequest hsr, String nodeId) { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override public AppActivitiesInfo getAppActivities(HttpServletRequest hsr, String appId, String time) { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override public ApplicationStatisticsInfo getAppStatistics(HttpServletRequest hsr, Set stateQueries, Set typeQueries) { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override public NodeToLabelsInfo getNodeToLabels(HttpServletRequest hsr) throws IOException { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override public LabelsToNodesInfo getLabelsToNodes(Set labels) throws IOException { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override public Response replaceLabelsOnNodes(NodeToLabelsEntryList newNodeToLabels, HttpServletRequest hsr) throws IOException { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override public Response replaceLabelsOnNode(Set newNodeLabelsName, HttpServletRequest hsr, String nodeId) throws Exception { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override public NodeLabelsInfo getClusterNodeLabels(HttpServletRequest hsr) throws IOException { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override public Response addToClusterNodeLabels(NodeLabelsInfo newNodeLabels, HttpServletRequest hsr) throws Exception { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override public Response removeFromCluserNodeLabels(Set oldNodeLabels, HttpServletRequest hsr) throws Exception { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override public NodeLabelsInfo getLabelsOnNode(HttpServletRequest hsr, String nodeId) throws IOException { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override public AppPriority getAppPriority(HttpServletRequest hsr, String appId) throws AuthorizationException { - throw new NotImplementedException(); + throw new NotImplementedException("Code is 
not implemented"); } @Override public Response updateApplicationPriority(AppPriority targetPriority, HttpServletRequest hsr, String appId) throws AuthorizationException, YarnException, InterruptedException, IOException { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override public AppQueue getAppQueue(HttpServletRequest hsr, String appId) throws AuthorizationException { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override public Response updateAppQueue(AppQueue targetQueue, HttpServletRequest hsr, String appId) throws AuthorizationException, YarnException, InterruptedException, IOException { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override public Response postDelegationToken(DelegationToken tokenData, HttpServletRequest hsr) throws AuthorizationException, IOException, InterruptedException, Exception { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override public Response postDelegationTokenExpiration(HttpServletRequest hsr) throws AuthorizationException, IOException, InterruptedException, Exception { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override public Response cancelDelegationToken(HttpServletRequest hsr) throws AuthorizationException, IOException, InterruptedException, Exception { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override public Response createNewReservation(HttpServletRequest hsr) throws AuthorizationException, IOException, InterruptedException { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override public Response submitReservation(ReservationSubmissionRequestInfo resContext, HttpServletRequest hsr) throws AuthorizationException, IOException, InterruptedException { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override public Response updateReservation(ReservationUpdateRequestInfo resContext, HttpServletRequest hsr) throws AuthorizationException, IOException, InterruptedException { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override public Response deleteReservation(ReservationDeleteRequestInfo resContext, HttpServletRequest hsr) throws AuthorizationException, IOException, InterruptedException { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override public Response listReservation(String queue, String reservationId, long startTime, long endTime, boolean includeResourceAllocations, HttpServletRequest hsr) throws Exception { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override public AppTimeoutInfo getAppTimeout(HttpServletRequest hsr, String appId, String type) throws AuthorizationException { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override public AppTimeoutsInfo getAppTimeouts(HttpServletRequest hsr, String appId) throws AuthorizationException { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override public Response updateApplicationTimeout(AppTimeoutInfo appTimeout, 
HttpServletRequest hsr, String appId) throws AuthorizationException, YarnException, InterruptedException, IOException { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override public AppAttemptsInfo getAppAttempts(HttpServletRequest hsr, String appId) { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override public RMQueueAclInfo checkUserAccessToQueue(String queue, String username, String queueAclType, HttpServletRequest hsr) { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override public AppAttemptInfo getAppAttempt(HttpServletRequest req, HttpServletResponse res, String appId, String appAttemptId) { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override public ContainersInfo getContainers(HttpServletRequest req, HttpServletResponse res, String appId, String appAttemptId) { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override public ContainerInfo getContainer(HttpServletRequest req, HttpServletResponse res, String appId, String appAttemptId, String containerId) { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/pom.xml index a4967b7b250..839b9a08b3e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/pom.xml @@ -44,11 +44,6 @@ slf4j-api - - commons-lang - commons-lang - - commons-cli commons-cli diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java index 37ed50c2602..af6f915132e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java @@ -28,7 +28,7 @@ import org.apache.commons.cli.Option; import org.apache.commons.cli.Options; import org.apache.commons.cli.ParseException; import org.apache.commons.cli.PosixParser; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import 
org.apache.hadoop.conf.Configuration; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/pom.xml index dd47fd5341c..35d5c944163 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/pom.xml @@ -106,11 +106,6 @@ jersey-client - - commons-lang - commons-lang - - commons-logging commons-logging diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderUtils.java index 4fd846857e8..fff06a00437 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderUtils.java @@ -22,7 +22,7 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import com.google.common.annotations.VisibleForTesting; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java index f83c1ac346f..efaecd22bb4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java @@ -24,7 +24,7 @@ import java.util.EnumSet; import javax.servlet.http.HttpServletRequest; import org.apache.commons.io.IOUtils; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList; import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field; From e9ea9022999f83aca000a10edbb76489e6b79d12 Mon Sep 17 00:00:00 2001 From: Xiaoyu Yao Date: Tue, 12 Jun 2018 09:03:42 -0700 Subject: [PATCH 093/113] HDDS-130. TestGenerateOzoneRequiredConfigurations should use GenericTestUtils#getTempPath to set output directory. Contributed by Sandeep Nemuri. 
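A minimal sketch of the per-test temporary-directory pattern this patch introduces, shown outside the test for clarity. It is an illustration only, not part of the patch: the class name and the fixed "target/test-dir" base path are assumptions (the real change derives the base from GenericTestUtils#getTestDir), and it assumes commons-io and commons-lang are on the classpath, as they are for the test below.

    import java.io.File;
    import java.io.IOException;
    import org.apache.commons.io.FileUtils;
    import org.apache.commons.lang.RandomStringUtils;

    public class PerTestDirSketch {
      public static void main(String[] args) throws IOException {
        // Shared base directory created once for the whole test class
        // (the patch obtains this from GenericTestUtils#getTestDir()).
        File outputBaseDir = new File("target/test-dir");
        FileUtils.forceMkdir(outputBaseDir);

        // Each test case works in its own randomly named subdirectory, so
        // generated files such as ozone-site.xml cannot collide across tests.
        File tempDir =
            new File(outputBaseDir, RandomStringUtils.randomAlphanumeric(5));
        FileUtils.forceMkdir(tempDir);
        System.out.println("test output directory: " + tempDir.getAbsolutePath());

        // Mirrors the @AfterClass cleanup in the patch.
        FileUtils.deleteDirectory(outputBaseDir);
      }
    }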
--- ...stGenerateOzoneRequiredConfigurations.java | 72 +++++++++++++------ 1 file changed, 50 insertions(+), 22 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java index 82582a676b0..cfd11594698 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java @@ -18,56 +18,57 @@ package org.apache.hadoop.ozone.genconf; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.MiniOzoneCluster; +import org.apache.commons.io.FileUtils; +import org.apache.commons.lang.RandomStringUtils; +import org.apache.hadoop.test.GenericTestUtils; +import org.hamcrest.CoreMatchers; import org.junit.AfterClass; import org.junit.Assert; import org.junit.BeforeClass; import org.junit.Test; +import java.io.ByteArrayOutputStream; +import java.io.File; import java.io.IOException; +import java.io.PrintStream; /** * Tests GenerateOzoneRequiredConfigurations. */ public class TestGenerateOzoneRequiredConfigurations { - private static MiniOzoneCluster cluster; - private static OzoneConfiguration conf; - + private static File outputBaseDir; /** - * Create a MiniDFSCluster for testing. - *

- * Ozone is made active by setting OZONE_ENABLED = true and - * OZONE_HANDLER_TYPE_KEY = "distributed" + * Creates output directory which will be used by the test-cases. + * If a test-case needs a separate directory, it has to create a random + * directory inside {@code outputBaseDir}. * - * @throws IOException + * @throws Exception In case of exception while creating output directory. */ @BeforeClass public static void init() throws Exception { - conf = new OzoneConfiguration(); - cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1).build(); - cluster.waitForClusterToBeReady(); + outputBaseDir = GenericTestUtils.getTestDir(); + FileUtils.forceMkdir(outputBaseDir); } /** - * Shutdown MiniDFSCluster. + * Cleans up the output base directory. */ @AfterClass - public static void shutdown() { - if (cluster != null) { - cluster.shutdown(); - } + public static void cleanup() throws IOException { + FileUtils.deleteDirectory(outputBaseDir); } /** - * Tests a valid path and generates ozone-site.xml. + * Tests a valid path and generates ozone-site.xml by calling + * {@code GenerateOzoneRequiredConfigurations#generateConfigurations}. + * * @throws Exception */ @Test - public void generateConfigurationsSuccess() throws Exception { - String[] args = new String[]{"-output", "."}; - GenerateOzoneRequiredConfigurations.main(args); + public void testGenerateConfigurations() throws Exception { + File tempPath = getRandomTempDir(); + String[] args = new String[]{"-output", tempPath.getAbsolutePath()}; Assert.assertEquals("Path is valid", true, GenerateOzoneRequiredConfigurations.isValidPath(args[1])); @@ -79,6 +80,27 @@ public class TestGenerateOzoneRequiredConfigurations { 0, GenerateOzoneRequiredConfigurations.generateConfigurations(args[1])); } + /** + * Tests ozone-site.xml generation by calling + * {@code GenerateOzoneRequiredConfigurations#main}. + * + * @throws Exception + */ + @Test + public void testGenerateConfigurationsThroughMainMethod() throws Exception { + File tempPath = getRandomTempDir(); + String[] args = new String[]{"-output", tempPath.getAbsolutePath()}; + ByteArrayOutputStream outContent = new ByteArrayOutputStream(); + PrintStream oldStream = System.out; + try (PrintStream ps = new PrintStream(outContent)) { + System.setOut(ps); + GenerateOzoneRequiredConfigurations.main(args); + Assert.assertThat(outContent.toString(), + CoreMatchers.containsString("ozone-site.xml has been generated at")); + System.setOut(oldStream); + } + } + /** * Test to avoid generating ozone-site.xml when invalid permission. * @throws Exception @@ -97,4 +119,10 @@ public class TestGenerateOzoneRequiredConfigurations { Assert.assertEquals("Config file not generated", 1, GenerateOzoneRequiredConfigurations.generateConfigurations(args[1])); } + + private File getRandomTempDir() throws IOException { + File tempDir = new File(outputBaseDir, RandomStringUtils.randomAlphanumeric(5)); + FileUtils.forceMkdir(tempDir); + return tempDir; + } } From 24a89825f0cbc92b0a462152bc765e2195edd5a1 Mon Sep 17 00:00:00 2001 From: Haibo Chen Date: Tue, 12 Jun 2018 10:03:07 -0700 Subject: [PATCH 094/113] YARN-6931. Make the aggregation interval in AppLevelTimelineCollector configurable. 
(Abhishek Modi via Haibo Chen) --- .../apache/hadoop/yarn/conf/YarnConfiguration.java | 9 +++++++++ .../src/main/resources/yarn-default.xml | 9 +++++++++ .../AppLevelTimelineCollectorWithAgg.java | 14 +++++++++----- 3 files changed, 27 insertions(+), 5 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java index f7f82f8adc7..5292a250533 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -2664,6 +2664,15 @@ public class YarnConfiguration extends Configuration { public static final String TIMELINE_SERVICE_READ_AUTH_ENABLED = TIMELINE_SERVICE_PREFIX + "read.authentication.enabled"; + /** + * The name for setting that controls how often in-memory app level + * aggregation is kicked off in timeline collector. + */ + public static final String TIMELINE_SERVICE_AGGREGATION_INTERVAL_SECS = + TIMELINE_SERVICE_PREFIX + "app-aggregation-interval-secs"; + + public static final int + DEFAULT_TIMELINE_SERVICE_AGGREGATION_INTERVAL_SECS = 15; /** * The default setting for authentication checks for reading timeline * service v2 data. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml index b0ffc48e1e6..2cc842fc73f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml @@ -2545,6 +2545,15 @@ 259200000 + + + The setting that controls how often in-memory app level + aggregation is kicked off in timeline collector. + + yarn.timeline-service.app-aggregation-interval-secs + 15 + + The default hdfs location for flowrun coprocessor jar. 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollectorWithAgg.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollectorWithAgg.java index d7f47c894e3..aa041a524dc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollectorWithAgg.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollectorWithAgg.java @@ -25,6 +25,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntities; import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity; import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntityType; +import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -50,7 +51,7 @@ public class AppLevelTimelineCollectorWithAgg LoggerFactory.getLogger(TimelineCollector.class); private final static int AGGREGATION_EXECUTOR_NUM_THREADS = 1; - private final static int AGGREGATION_EXECUTOR_EXEC_INTERVAL_SECS = 15; + private int aggregationExecutorIntervalSecs; private static Set entityTypesSkipAggregation = initializeSkipSet(); @@ -71,6 +72,11 @@ public class AppLevelTimelineCollectorWithAgg @Override protected void serviceInit(Configuration conf) throws Exception { + aggregationExecutorIntervalSecs = conf.getInt( + YarnConfiguration.TIMELINE_SERVICE_AGGREGATION_INTERVAL_SECS, + YarnConfiguration. + DEFAULT_TIMELINE_SERVICE_AGGREGATION_INTERVAL_SECS + ); super.serviceInit(conf); } @@ -84,10 +90,8 @@ public class AppLevelTimelineCollectorWithAgg .build()); appAggregator = new AppLevelAggregator(); appAggregationExecutor.scheduleAtFixedRate(appAggregator, - AppLevelTimelineCollectorWithAgg. - AGGREGATION_EXECUTOR_EXEC_INTERVAL_SECS, - AppLevelTimelineCollectorWithAgg. - AGGREGATION_EXECUTOR_EXEC_INTERVAL_SECS, + aggregationExecutorIntervalSecs, + aggregationExecutorIntervalSecs, TimeUnit.SECONDS); super.serviceStart(); } From 10d0e4be6eade7c1685b9c6962bc9b18e33122a8 Mon Sep 17 00:00:00 2001 From: Haibo Chen Date: Tue, 12 Jun 2018 10:11:30 -0700 Subject: [PATCH 095/113] YARN-8325. Miscellaneous QueueManager code clean up. 
(Szilard Nemeth via Haibo Chen) --- .../scheduler/fair/QueueManager.java | 41 +++++++++++-------- 1 file changed, 23 insertions(+), 18 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueueManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueueManager.java index 632a842a6cc..83717657c1e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueueManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueueManager.java @@ -53,8 +53,8 @@ import java.util.Set; @Private @Unstable public class QueueManager { - public static final Log LOG = LogFactory.getLog( - QueueManager.class.getName()); + private static final Log LOG = + LogFactory.getLog(QueueManager.class.getName()); private final class IncompatibleQueueRemovalTask { @@ -91,8 +91,8 @@ public class QueueManager { private final FairScheduler scheduler; private final Collection leafQueues = - new CopyOnWriteArrayList(); - private final Map queues = new HashMap(); + new CopyOnWriteArrayList<>(); + private final Map queues = new HashMap<>(); private Set incompatibleQueuesPendingRemoval = new HashSet<>(); private FSParentQueue rootQueue; @@ -123,7 +123,8 @@ public class QueueManager { } /** - * Get a leaf queue by name, creating it if the create param is true and is necessary. + * Get a leaf queue by name, creating it if the create param is + * true and is necessary. * If the queue is not or can not be a leaf queue, i.e. it already exists as a * parent queue, or one of the parents in its name is already a leaf queue, * null is returned. @@ -137,7 +138,7 @@ public class QueueManager { return getLeafQueue(name, create, true); } - public FSLeafQueue getLeafQueue( + private FSLeafQueue getLeafQueue( String name, boolean create, boolean recomputeSteadyShares) { @@ -154,7 +155,7 @@ public class QueueManager { } /** - * Remove a leaf queue if empty + * Remove a leaf queue if empty. * @param name name of the queue * @return true if queue was removed or false otherwise */ @@ -166,8 +167,10 @@ public class QueueManager { /** - * Get a parent queue by name, creating it if the create param is true and is necessary. - * If the queue is not or can not be a parent queue, i.e. it already exists as a + * Get a parent queue by name, creating it if the create param is + * true and is necessary. + * If the queue is not or can not be a parent queue, + * i.e. it already exists as a * leaf queue, or one of the parents in its name is already a leaf queue, * null is returned. * @@ -318,7 +321,8 @@ public class QueueManager { SchedulingPolicy childPolicy = scheduler.getAllocationConfiguration(). 
getSchedulingPolicy(queueName); if (!parent.getPolicy().isChildPolicyAllowed(childPolicy)) { - LOG.error("Can't create queue '" + queueName + "'."); + LOG.error("Can't create queue '" + queueName + "'," + + "the child scheduling policy is not allowed by parent queue!"); return null; } @@ -359,8 +363,8 @@ public class QueueManager { * @param child the child queue * @param queueConf the {@link AllocationConfiguration} */ - void setChildResourceLimits(FSParentQueue parent, FSQueue child, - AllocationConfiguration queueConf) { + private void setChildResourceLimits(FSParentQueue parent, FSQueue child, + AllocationConfiguration queueConf) { Map> configuredQueues = queueConf.getConfiguredQueues(); @@ -396,8 +400,8 @@ public class QueueManager { FSQueueType queueType) { queueToCreate = ensureRootPrefix(queueToCreate); - // Ensure queueToCreate is not root and doesn't have the default queue in its - // ancestry. + // Ensure queueToCreate is not root and doesn't + // have the default queue in its ancestry. if (queueToCreate.equals(ROOT_QUEUE) || queueToCreate.startsWith( ROOT_QUEUE + "." + YarnConfiguration.DEFAULT_QUEUE_NAME + ".")) { @@ -551,7 +555,7 @@ public class QueueManager { } /** - * Get a collection of all leaf queues + * Get a collection of all leaf queues. */ public Collection getLeafQueues() { synchronized (queues) { @@ -560,7 +564,7 @@ public class QueueManager { } /** - * Get a collection of all queues + * Get a collection of all queues. */ public Collection getQueues() { synchronized (queues) { @@ -568,7 +572,7 @@ public class QueueManager { } } - private String ensureRootPrefix(String name) { + private static String ensureRootPrefix(String name) { if (!name.startsWith(ROOT_QUEUE + ".") && !name.equals(ROOT_QUEUE)) { name = ROOT_QUEUE + "." + name; } @@ -576,7 +580,8 @@ public class QueueManager { } public void updateAllocationConfiguration(AllocationConfiguration queueConf) { - // Create leaf queues and the parent queues in a leaf's ancestry if they do not exist + // Create leaf queues and the parent queues in a leaf's + // ancestry if they do not exist synchronized (queues) { // Verify and set scheduling policies for existing queues before creating // any queue, since we need parent policies to determine if we can create From 6e756e8a620e4d6dc3192986679060c52063489b Mon Sep 17 00:00:00 2001 From: Inigo Goiri Date: Tue, 12 Jun 2018 10:24:34 -0700 Subject: [PATCH 096/113] HADOOP-15529. ContainerLaunch#testInvalidEnvVariableSubstitutionType is not supported in Windows. Contributed by Giovanni Matteo Fumarola. 
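The fix below chooses a deliberately malformed environment value whose syntax matches the platform the test runs on. A small sketch of that selection, using Hadoop's Shell.WINDOWS flag just as the patch does; the helper name and the sample strings here are invented for illustration, only the general ${...} versus %...% shape matters.

    import org.apache.hadoop.util.Shell;

    public class InvalidEnvValueSketch {

      // Returns a value with an unresolvable variable reference in the
      // current platform's syntax: ${...} on Unix shells, %...% on Windows.
      static String invalidEnvValue() {
        return Shell.WINDOWS
            ? "version%does.not.exist%"
            : "version${does.not.exist}";
      }

      public static void main(String[] args) {
        System.out.println("invalid test value: " + invalidEnvValue());
      }
    }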
--- .../launcher/TestContainerLaunch.java | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java index da9bc89419b..ebdceead35b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java @@ -1920,7 +1920,11 @@ public class TestContainerLaunch extends BaseContainerManagerTest { public void testInvalidEnvVariableSubstitutionType1() throws IOException { Map env = new HashMap(); // invalid env - env.put("testVar", "version${foo.version}"); + String invalidEnv = "version${foo.version}"; + if (Shell.WINDOWS) { + invalidEnv = "version%foo%<>^&|=:version%"; + } + env.put("testVar", invalidEnv); validateShellExecutorForDifferentEnvs(env); } @@ -1931,7 +1935,11 @@ public class TestContainerLaunch extends BaseContainerManagerTest { public void testInvalidEnvVariableSubstitutionType2() throws IOException { Map env = new HashMap(); // invalid env - env.put("testPath", "/abc:/${foo.path}:/$bar"); + String invalidEnv = "/abc:/${foo.path}:/$bar"; + if (Shell.WINDOWS) { + invalidEnv = "/abc:/%foo%<>^&|=:path%:/%bar%"; + } + env.put("testPath", invalidEnv); validateShellExecutorForDifferentEnvs(env); } From c35481594ffc372e3f846b0c8ebc2ff9e36ffdb0 Mon Sep 17 00:00:00 2001 From: Inigo Goiri Date: Tue, 12 Jun 2018 10:59:50 -0700 Subject: [PATCH 097/113] YARN-8422. TestAMSimulator failing with NPE. Contributed by Giovanni Matteo Fumarola. 
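Both NPEs fixed below come from assumptions that only hold under a full Maven run: the test.build.data system property may be unset, and the ResourceManager may never have been created if setUp() failed. A compilable sketch of the two guards; the fallback path and the property name follow the patch, everything else (class name, the Object standing in for MockRM) is illustrative.

    import java.nio.file.Path;
    import java.nio.file.Paths;

    public class NpeGuardSketch {
      public static void main(String[] args) {
        // Paths.get(System.getProperty("test.build.data")) throws an NPE when
        // the property is unset (e.g. when run from an IDE); supplying a
        // default keeps the lookup safe.
        Path testDir =
            Paths.get(System.getProperty("test.build.data", "target/test-dir"));
        System.out.println("metric output parent: " + testDir);

        // tearDown() must tolerate a setUp() that failed before the
        // ResourceManager was constructed; stop() is only called when the
        // reference is non-null.
        Object rm = null;               // stands in for the MockRM field
        if (rm != null) {
          // rm.stop() would go here; skipping it avoids the second NPE.
        }
      }
    }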
--- .../hadoop/yarn/sls/appmaster/TestAMSimulator.java | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/appmaster/TestAMSimulator.java b/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/appmaster/TestAMSimulator.java index bfc7d0c6c31..bc8ea70e46b 100644 --- a/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/appmaster/TestAMSimulator.java +++ b/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/appmaster/TestAMSimulator.java @@ -49,8 +49,8 @@ public class TestAMSimulator { private YarnConfiguration conf; private Path metricOutputDir; - private Class slsScheduler; - private Class scheduler; + private Class slsScheduler; + private Class scheduler; @Parameterized.Parameters public static Collection params() { @@ -60,7 +60,7 @@ public class TestAMSimulator { }); } - public TestAMSimulator(Class slsScheduler, Class scheduler) { + public TestAMSimulator(Class slsScheduler, Class scheduler) { this.slsScheduler = slsScheduler; this.scheduler = scheduler; } @@ -115,7 +115,8 @@ public class TestAMSimulator { } private void createMetricOutputDir() { - Path testDir = Paths.get(System.getProperty("test.build.data")); + Path testDir = + Paths.get(System.getProperty("test.build.data", "target/test-dir")); try { metricOutputDir = Files.createTempDirectory(testDir, "output"); } catch (IOException e) { @@ -153,7 +154,9 @@ public class TestAMSimulator { @After public void tearDown() { - rm.stop(); + if (rm != null) { + rm.stop(); + } deleteMetricOutputDir(); } From a16623df2148e59e1b7dcf98939b03cb7440d143 Mon Sep 17 00:00:00 2001 From: Xiao Chen Date: Tue, 12 Jun 2018 11:14:33 -0700 Subject: [PATCH 098/113] HADOOP-15307. NFS: flavor AUTH_SYS should use VerifierNone. Contributed by Gabor Bota. --- .../org/apache/hadoop/oncrpc/security/Verifier.java | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/Verifier.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/Verifier.java index f3202a1d33c..3c0e5fe36e2 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/Verifier.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/Verifier.java @@ -41,12 +41,18 @@ public abstract class Verifier extends RpcAuthInfo { public static Verifier readFlavorAndVerifier(XDR xdr) { AuthFlavor flavor = AuthFlavor.fromValue(xdr.readInt()); final Verifier verifer; - if(flavor == AuthFlavor.AUTH_NONE) { + if (flavor == AuthFlavor.AUTH_NONE) { verifer = new VerifierNone(); - } else if(flavor == AuthFlavor.RPCSEC_GSS) { + } else if (flavor == AuthFlavor.AUTH_SYS) { + // Added in HADOOP-15307 based on HDFS-5085: + // When the auth flavor is AUTH_SYS, the corresponding verifier is + // AUTH_NONE. I.e., it is impossible to have a verifier with auth + // flavor AUTH_SYS. + verifer = new VerifierNone(); + } else if (flavor == AuthFlavor.RPCSEC_GSS) { verifer = new VerifierGSS(); } else { - throw new UnsupportedOperationException("Unsupported verifier flavor" + throw new UnsupportedOperationException("Unsupported verifier flavor: " + flavor); } verifer.read(xdr); From 04b74eddc67888142879ed114d21348e8a4aad78 Mon Sep 17 00:00:00 2001 From: Gera Shegalov Date: Tue, 12 Jun 2018 11:21:51 -0700 Subject: [PATCH 099/113] MAPREDUCE-7108. TestFileOutputCommitter fails on Windows. 
(Zuoming Zhang via gera) --- .../lib/output/TestFileOutputCommitter.java | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestFileOutputCommitter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestFileOutputCommitter.java index cd9d44b936d..fc43dce1830 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestFileOutputCommitter.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestFileOutputCommitter.java @@ -40,6 +40,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.RawLocalFileSystem; +import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.MapFile; import org.apache.hadoop.io.NullWritable; @@ -526,16 +527,15 @@ public class TestFileOutputCommitter { // Ensure getReaders call works and also ignores // hidden filenames (_ or . prefixes) + MapFile.Reader[] readers = {}; try { - MapFileOutputFormat.getReaders(outDir, conf); - } catch (Exception e) { - fail("Fail to read from MapFileOutputFormat: " + e); - e.printStackTrace(); + readers = MapFileOutputFormat.getReaders(outDir, conf); + // validate output + validateMapFileOutputContent(FileSystem.get(job.getConfiguration()), outDir); + } finally { + IOUtils.cleanupWithLogger(null, readers); + FileUtil.fullyDelete(new File(outDir.toString())); } - - // validate output - validateMapFileOutputContent(FileSystem.get(job.getConfiguration()), outDir); - FileUtil.fullyDelete(new File(outDir.toString())); } @Test From aeaf9fec62f10699d1c809d66444520fe4533c2c Mon Sep 17 00:00:00 2001 From: Inigo Goiri Date: Tue, 12 Jun 2018 14:16:14 -0700 Subject: [PATCH 100/113] HADOOP-15532. TestBasicDiskValidator fails with NoSuchFileException. Contributed by Giovanni Matteo Fumarola. 
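The TestDiskChecker change below applies the same fallback idiom as YARN-8422 above: the java.nio temp-file helpers need an existing parent directory, so a test.build.data that is unset or points at a directory that does not exist makes them fail. A standalone sketch of the pattern; the explicit createDirectories call is an extra safeguard so the sketch runs on its own and is not part of the patch.

    import java.io.File;
    import java.nio.file.Files;

    public class TempUnderParentSketch {
      public static void main(String[] args) throws Exception {
        // Resolve the parent directory with a fallback for runs where the
        // property is not set by the build.
        File testDir =
            new File(System.getProperty("test.build.data", "target/test-dir"));

        // Not in the patch: make sure the parent exists so the sketch can run
        // anywhere; Files.createTempFile fails otherwise.
        Files.createDirectories(testDir.toPath());

        File tmpFile =
            Files.createTempFile(testDir.toPath(), "test", "tmp").toFile();
        File tmpDir =
            Files.createTempDirectory(testDir.toPath(), "test").toFile();
        System.out.println("created " + tmpFile + " and " + tmpDir);
      }
    }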
--- .../test/java/org/apache/hadoop/util/TestDiskChecker.java | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDiskChecker.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDiskChecker.java index 6b6c6c843dd..e92c9edb4fd 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDiskChecker.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDiskChecker.java @@ -137,7 +137,8 @@ public class TestDiskChecker { * @throws java.io.IOException if any */ protected File createTempFile() throws java.io.IOException { - File testDir = new File(System.getProperty("test.build.data")); + File testDir = + new File(System.getProperty("test.build.data", "target/test-dir")); return Files.createTempFile(testDir.toPath(), "test", "tmp").toFile(); } @@ -147,7 +148,8 @@ public class TestDiskChecker { * @throws java.io.IOException if any */ protected File createTempDir() throws java.io.IOException { - File testDir = new File(System.getProperty("test.build.data")); + File testDir = + new File(System.getProperty("test.build.data", "target/test-dir")); return Files.createTempDirectory(testDir.toPath(), "test").toFile(); } From 5670e89b2ec69ab71e32dcd5acbd3a57ca6abea5 Mon Sep 17 00:00:00 2001 From: Arun Suresh Date: Tue, 12 Jun 2018 15:36:52 -0700 Subject: [PATCH 101/113] MAPREDUCE-7101. Add config parameter to allow JHS to alway scan user dir irrespective of modTime. (Thomas Marquardt via asuresh) --- .../hadoop/mapreduce/v2/jobhistory/JHAdminConfig.java | 9 +++++++-- .../src/main/resources/mapred-default.xml | 9 +++++++++ .../hadoop/mapreduce/v2/hs/HistoryFileManager.java | 8 +++++++- 3 files changed, 23 insertions(+), 3 deletions(-) diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JHAdminConfig.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JHAdminConfig.java index 1cadf84bf67..9e964e1381f 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JHAdminConfig.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JHAdminConfig.java @@ -61,8 +61,13 @@ public class JHAdminConfig { MR_HISTORY_PREFIX + "cleaner.interval-ms"; public static final long DEFAULT_MR_HISTORY_CLEANER_INTERVAL_MS = 1 * 24 * 60 * 60 * 1000l; //1 day - - + + /** Always scan user dir, irrespective of dir modification time.*/ + public static final String MR_HISTORY_ALWAYS_SCAN_USER_DIR = + MR_HISTORY_PREFIX + "always-scan-user-dir"; + public static final boolean DEFAULT_MR_HISTORY_ALWAYS_SCAN_USER_DIR = + false; + /** The number of threads to handle client API requests.*/ public static final String MR_HISTORY_CLIENT_THREAD_COUNT = MR_HISTORY_PREFIX + "client.thread-count"; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml index dcb312ce3b0..9f33d6553c7 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml +++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml @@ -1774,6 +1774,15 @@ + + mapreduce.jobhistory.always-scan-user-dir + false + Some Cloud FileSystems do not currently update the + modification time of directories. To support these filesystems, this + configuration value should be set to 'true'. + + + mapreduce.jobhistory.done-dir ${yarn.app.mapreduce.am.staging-dir}/history/done diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java index a07ca26c1cd..7fe99a28b9e 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java @@ -324,7 +324,13 @@ public class HistoryFileManager extends AbstractService { // so we need to have additional check. // Note: modTime (X second Y millisecond) could be casted to X second or // X+1 second. - if (modTime != newModTime + // MAPREDUCE-7101: Some Cloud FileSystems do not currently update the + // modification time of directories. For these, we scan every time if + // the 'alwaysScan' is true. + boolean alwaysScan = conf.getBoolean( + JHAdminConfig.MR_HISTORY_ALWAYS_SCAN_USER_DIR, + JHAdminConfig.DEFAULT_MR_HISTORY_ALWAYS_SCAN_USER_DIR); + if (alwaysScan || modTime != newModTime || (scanTime/1000) == (modTime/1000) || (scanTime/1000 + 1) == (modTime/1000)) { // reset scanTime before scanning happens From 108da85320d65e37fe835de65866b818e5420587 Mon Sep 17 00:00:00 2001 From: Eric Yang Date: Tue, 12 Jun 2018 20:40:32 -0400 Subject: [PATCH 102/113] HADOOP-15527. Improve delay check for stopping processes. Contributed by Vinod Kumar Vavilapalli --- .../src/main/bin/hadoop-functions.sh | 34 ++++++++++++++++++- .../src/test/scripts/hadoop_stop_daemon.bats | 24 ++++++++++++- 2 files changed, 56 insertions(+), 2 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh index bee14305230..cbedd972188 100755 --- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh +++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh @@ -2040,6 +2040,35 @@ function hadoop_start_secure_daemon_wrapper return 0 } +## @description Wait till process dies or till timeout +## @audience private +## @stability evolving +## @param pid +## @param timeout +function wait_process_to_die_or_timeout +{ + local pid=$1 + local timeout=$2 + + # Normalize timeout + # Round up or down + timeout=$(printf "%.0f\n" "${timeout}") + if [[ ${timeout} -lt 1 ]]; then + # minimum 1 second + timeout=1 + fi + + # Wait to see if it's still alive + for (( i=0; i < "${timeout}"; i++ )) + do + if kill -0 "${pid}" > /dev/null 2>&1; then + sleep 1 + else + break + fi + done +} + ## @description Stop the non-privileged `command` daemon with that ## @description that is running at `pidfile`. 
## @audience public @@ -2060,11 +2089,14 @@ function hadoop_stop_daemon pid=$(cat "$pidfile") kill "${pid}" >/dev/null 2>&1 - sleep "${HADOOP_STOP_TIMEOUT}" + + wait_process_to_die_or_timeout "${pid}" "${HADOOP_STOP_TIMEOUT}" + if kill -0 "${pid}" > /dev/null 2>&1; then hadoop_error "WARNING: ${cmd} did not stop gracefully after ${HADOOP_STOP_TIMEOUT} seconds: Trying to kill with kill -9" kill -9 "${pid}" >/dev/null 2>&1 fi + wait_process_to_die_or_timeout "${pid}" "${HADOOP_STOP_TIMEOUT}" if ps -p "${pid}" > /dev/null 2>&1; then hadoop_error "ERROR: Unable to kill ${pid}" else diff --git a/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_stop_daemon.bats b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_stop_daemon.bats index 023d01c02c7..148380706db 100644 --- a/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_stop_daemon.bats +++ b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_stop_daemon.bats @@ -15,7 +15,7 @@ load hadoop-functions_test_helper -@test "hadoop_stop_daemon" { +@test "hadoop_stop_daemon_changing_pid" { old_pid=12345 new_pid=54321 HADOOP_STOP_TIMEOUT=3 @@ -29,3 +29,25 @@ load hadoop-functions_test_helper [ -f pidfile ] [ "$(cat pidfile)" = "${new_pid}" ] } + +@test "hadoop_stop_daemon_force_kill" { + + HADOOP_STOP_TIMEOUT=4 + + # Run the following in a sub-shell so that its termination doesn't affect the test + (sh ${TESTBINDIR}/process_with_sigterm_trap.sh ${TMP}/pidfile &) + + # Wait for the process to go into tight loop + sleep 1 + + [ -f ${TMP}/pidfile ] + pid=$(cat "${TMP}/pidfile") + + run hadoop_stop_daemon my_command ${TMP}/pidfile 2>&1 + + # The process should no longer be alive + ! kill -0 ${pid} > /dev/null 2>&1 + + # The PID file should be gone + [ ! -f ${TMP}/pidfile ] +} From 29024a62038c297f11e8992601f2522ffffc7da7 Mon Sep 17 00:00:00 2001 From: Weiwei Yang Date: Wed, 13 Jun 2018 09:28:05 +0800 Subject: [PATCH 103/113] YARN-8394. Improve data locality documentation for Capacity Scheduler. Contributed by Weiwei Yang. --- .../conf/capacity-scheduler.xml | 2 ++ .../hadoop-yarn-site/src/site/markdown/CapacityScheduler.md | 5 +++++ 2 files changed, 7 insertions(+) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/conf/capacity-scheduler.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/conf/capacity-scheduler.xml index aca6c7cf529..62654cacc48 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/conf/capacity-scheduler.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/conf/capacity-scheduler.xml @@ -149,6 +149,8 @@ attempts to schedule rack-local containers. When setting this parameter, the size of the cluster should be taken into account. We use 40 as the default value, which is approximately the number of nodes in one rack. + Note, if this value is -1, the locality constraint in the container request + will be ignored, which disables the delay scheduling. 
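The property described above and its companion yarn.scheduler.capacity.rack-locality-additional-delay (documented in the markdown change that follows) are ordinary Capacity Scheduler settings, so they can be exercised programmatically as well as through capacity-scheduler.xml. A small sketch using the generic Configuration API with the keys quoted from the patch; the numeric values are simply the defaults described here, and real deployments would set them in capacity-scheduler.xml rather than in code.

    import org.apache.hadoop.conf.Configuration;

    public class LocalityDelaySketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration(false);

        // Roughly one rack's worth of missed scheduling opportunities before
        // relaxing node-local requests to rack-local; -1 would ignore the
        // request's locality constraint and disable delay scheduling.
        conf.setInt("yarn.scheduler.capacity.node-locality-delay", 40);

        // -1 keeps the default behaviour: the extra off-switch delay is
        // derived from the request (see the L * C / N formula documented in
        // the markdown change that follows).
        conf.setInt("yarn.scheduler.capacity.rack-locality-additional-delay", -1);

        System.out.println("node-locality-delay = "
            + conf.getInt("yarn.scheduler.capacity.node-locality-delay", 40));
      }
    }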
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md index ef6381a43a1..5be32d42ea3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md @@ -400,9 +400,14 @@ list of current scheduling edit policies as a comma separated string in `yarn.re * Data Locality +Capacity Scheduler leverages `Delay Scheduling` to honor task locality constraints. There are 3 levels of locality constraint: node-local, rack-local and off-switch. The scheduler counts the number of missed opportunities when the locality cannot be satisfied, and waits this count to reach a threshold before relaxing the locality constraint to next level. The threshold can be configured in following properties: + | Property | Description | |:---- |:---- | | `yarn.scheduler.capacity.node-locality-delay` | Number of missed scheduling opportunities after which the CapacityScheduler attempts to schedule rack-local containers. Typically, this should be set to number of nodes in the cluster. By default is setting approximately number of nodes in one rack which is 40. Positive integer value is expected. | +| `yarn.scheduler.capacity.rack-locality-additional-delay` | Number of additional missed scheduling opportunities over the node-locality-delay ones, after which the CapacityScheduler attempts to schedule off-switch containers. By default this value is set to -1, in this case, the number of missed opportunities for assigning off-switch containers is calculated based on the formula `L * C / N`, where `L` is number of locations (nodes or racks) specified in the resource request, `C` is the number of requested containers, and `N` is the size of the cluster. | + +Note, this feature should be disabled if YARN is deployed separately with the file system, as locality is meaningless. This can be done by setting `yarn.scheduler.capacity.node-locality-delay` to `-1`, in this case, request's locality constraint is ignored. * Container Allocation per NodeManager Heartbeat From f4c7c91123b1dbb12bcc0070479999963b04ad46 Mon Sep 17 00:00:00 2001 From: Jitendra Pandey Date: Wed, 13 Jun 2018 00:36:02 -0700 Subject: [PATCH 104/113] HADOOP-15483. Upgrade jquery to version 3.3.1. Contributed by Lokesh Jain, Mukul Kumar Singh and Sunil Govindan. 
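A quick worked example for the L * C / N rule documented in the YARN-8394 change above, with rack-locality-additional-delay left at its default of -1. The numbers are made up purely for illustration and are unrelated to the jQuery patch that follows.

    public class OffSwitchDelaySketch {
      public static void main(String[] args) {
        int locations = 10;     // L: nodes/racks named in the resource request
        int containers = 20;    // C: containers requested
        int clusterSize = 200;  // N: nodes in the cluster

        // With rack-locality-additional-delay = -1 the scheduler tolerates
        // L * C / N missed opportunities before assigning off-switch:
        int missed = locations * containers / clusterSize;  // 10 * 20 / 200 = 1
        System.out.println("relax to off-switch after " + missed
            + " missed scheduling opportunity(ies)");
      }
    }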
--- LICENSE.txt | 4 +- .../src/main/webapps/scm/index.html | 6 +- .../main/webapps/router/federationhealth.html | 6 +- hadoop-hdfs-project/hadoop-hdfs/pom.xml | 4 +- .../src/main/webapps/datanode/datanode.html | 6 +- .../src/main/webapps/hdfs/dfshealth.html | 6 +- .../src/main/webapps/hdfs/dfshealth.js | 8 +- .../src/main/webapps/hdfs/explorer.html | 10 +- .../src/main/webapps/hdfs/explorer.js | 34 +- .../src/main/webapps/journal/index.html | 6 +- .../src/main/webapps/secondary/status.html | 6 +- .../bootstrap-3.0.2/css/bootstrap.min.css | 9 - .../fonts/glyphicons-halflings-regular.eot | Bin 20290 -> 0 bytes .../fonts/glyphicons-halflings-regular.svg | 229 - .../fonts/glyphicons-halflings-regular.ttf | Bin 41236 -> 0 bytes .../fonts/glyphicons-halflings-regular.woff | Bin 23292 -> 0 bytes .../static/bootstrap-3.0.2/img/clear.png | Bin 509 -> 0 bytes .../static/bootstrap-3.0.2/img/loading.gif | Bin 1849 -> 0 bytes .../bootstrap-3.0.2/js/bootstrap.min.js | 9 - .../css/bootstrap-editable.css | 0 .../bootstrap-3.3.7/css/bootstrap-theme.css | 587 + .../css/bootstrap-theme.css.map | 1 + .../css/bootstrap-theme.min.css | 6 + .../css/bootstrap-theme.min.css.map | 1 + .../static/bootstrap-3.3.7/css/bootstrap.css | 6757 ++++++++ .../bootstrap-3.3.7/css/bootstrap.css.map | 1 + .../bootstrap-3.3.7/css/bootstrap.min.css | 6 + .../bootstrap-3.3.7/css/bootstrap.min.css.map | 1 + .../fonts/glyphicons-halflings-regular.eot | Bin 0 -> 20127 bytes .../fonts/glyphicons-halflings-regular.svg | 288 + .../fonts/glyphicons-halflings-regular.ttf | Bin 0 -> 45404 bytes .../fonts/glyphicons-halflings-regular.woff | Bin 0 -> 23424 bytes .../fonts/glyphicons-halflings-regular.woff2 | Bin 0 -> 18028 bytes .../js/bootstrap-editable.min.js | 0 .../static/bootstrap-3.3.7/js/bootstrap.js | 2377 +++ .../bootstrap-3.3.7/js/bootstrap.min.js | 7 + .../webapps/static/bootstrap-3.3.7/js/npm.js | 13 + .../src/main/webapps/static/dfs-dust.js | 2 +- .../main/webapps/static/jquery-1.10.2.min.js | 6 - .../main/webapps/static/jquery-3.3.1.min.js | 2 + .../src/main/webapps/static/moment.min.js | 6 +- .../robotframework/acceptance/ozone.robot | 4 +- .../src/main/webapps/ksm/index.html | 6 +- hadoop-ozone/pom.xml | 4 +- .../src/main/html/js/thirdparty/jquery.js | 14075 ++++++++-------- .../hadoop-yarn/hadoop-yarn-common/pom.xml | 2 +- .../hadoop/yarn/webapp/view/JQueryUI.java | 2 +- .../webapps/static/jquery/jquery-1.8.2.min.js | 2 - .../webapps/static/jquery/jquery-3.3.1.min.js | 2 + .../webapps/static/jt/jquery.jstree.js | 42 +- 50 files changed, 17455 insertions(+), 7088 deletions(-) delete mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.0.2/css/bootstrap.min.css delete mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.0.2/fonts/glyphicons-halflings-regular.eot delete mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.0.2/fonts/glyphicons-halflings-regular.svg delete mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.0.2/fonts/glyphicons-halflings-regular.ttf delete mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.0.2/fonts/glyphicons-halflings-regular.woff delete mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.0.2/img/clear.png delete mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.0.2/img/loading.gif delete mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.0.2/js/bootstrap.min.js rename 
hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/{bootstrap-3.0.2 => bootstrap-3.3.7}/css/bootstrap-editable.css (100%) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/css/bootstrap-theme.css create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/css/bootstrap-theme.css.map create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/css/bootstrap-theme.min.css create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/css/bootstrap-theme.min.css.map create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/css/bootstrap.css create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/css/bootstrap.css.map create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/css/bootstrap.min.css create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/css/bootstrap.min.css.map create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/fonts/glyphicons-halflings-regular.eot create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/fonts/glyphicons-halflings-regular.svg create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/fonts/glyphicons-halflings-regular.ttf create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/fonts/glyphicons-halflings-regular.woff create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/fonts/glyphicons-halflings-regular.woff2 rename hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/{bootstrap-3.0.2 => bootstrap-3.3.7}/js/bootstrap-editable.min.js (100%) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/js/bootstrap.js create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/js/bootstrap.min.js create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7/js/npm.js delete mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/jquery-1.10.2.min.js create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/jquery-3.3.1.min.js delete mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery/jquery-1.8.2.min.js create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery/jquery-3.3.1.min.js diff --git a/LICENSE.txt b/LICENSE.txt index 75c55620643..f8de86a1053 100644 --- a/LICENSE.txt +++ b/LICENSE.txt @@ -617,7 +617,7 @@ OTHER DEALINGS IN THE SOFTWARE. The binary distribution of this product bundles these dependencies under the following license: -hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.0.2 +hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7 hadoop-tools/hadoop-sls/src/main/html/js/thirdparty/bootstrap.min.js hadoop-tools/hadoop-sls/src/main/html/css/bootstrap.min.css hadoop-tools/hadoop-sls/src/main/html/css/bootstrap-responsive.min.css @@ -761,7 +761,7 @@ THE SOFTWARE. 
For: -hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/jquery-1.10.2.min.js +hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/jquery-3.3.1.min.js hadoop-tools/hadoop-sls/src/main/html/js/thirdparty/jquery.js hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery Apache HBase - Server which contains JQuery minified javascript library version 1.8.3 diff --git a/hadoop-hdds/server-scm/src/main/webapps/scm/index.html b/hadoop-hdds/server-scm/src/main/webapps/scm/index.html index 3407f51be73..2c943b626ff 100644 --- a/hadoop-hdds/server-scm/src/main/webapps/scm/index.html +++ b/hadoop-hdds/server-scm/src/main/webapps/scm/index.html @@ -26,7 +26,7 @@ HDFS Storage Container Manager - + @@ -63,7 +63,7 @@

- + @@ -71,6 +71,6 @@ - + diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html index 37fcb928121..7359087d63c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html @@ -19,7 +19,7 @@ - + Router Information @@ -425,9 +425,9 @@ - + - + diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml index f8b17223e3e..eaf9361e9f9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml @@ -392,11 +392,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> src/main/webapps/secondary/robots.txt src/contrib/** src/site/resources/images/* - src/main/webapps/static/bootstrap-3.0.2/** + src/main/webapps/static/bootstrap-3.3.7/** src/main/webapps/static/moment.min.js src/main/webapps/static/dust-full-2.0.0.min.js src/main/webapps/static/dust-helpers-1.1.1.min.js - src/main/webapps/static/jquery-1.10.2.min.js + src/main/webapps/static/jquery-3.3.1.min.js src/main/webapps/static/jquery.dataTables.min.js src/main/webapps/static/json-bignum.js src/main/webapps/static/dataTables.bootstrap.css diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/datanode.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/datanode.html index ed8f8fc9b4a..1d66f5591f9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/datanode.html +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/datanode.html @@ -19,7 +19,7 @@ - + DataNode Information @@ -167,8 +167,8 @@ {/dn.VolumeInfo} - - + + diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html index a9284252156..4495b99b1fa 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html @@ -19,7 +19,7 @@ - + Namenode information @@ -467,9 +467,9 @@ There are no reported volume failures. - - - + @@ -65,6 +65,6 @@ - + diff --git a/hadoop-ozone/pom.xml b/hadoop-ozone/pom.xml index 5d57e10de78..cffef14e6e5 100644 --- a/hadoop-ozone/pom.xml +++ b/hadoop-ozone/pom.xml @@ -143,8 +143,8 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> src/main/webapps/router/robots.txt src/contrib/** src/site/resources/images/* - webapps/static/bootstrap-3.0.2/** - webapps/static/jquery-1.10.2.min.js + webapps/static/bootstrap-3.3.7/** + webapps/static/jquery-3.3.1.min.js webapps/static/jquery.dataTables.min.js webapps/static/nvd3-1.8.5.min.css.map webapps/static/nvd3-1.8.5.min.js diff --git a/hadoop-tools/hadoop-sls/src/main/html/js/thirdparty/jquery.js b/hadoop-tools/hadoop-sls/src/main/html/js/thirdparty/jquery.js index 38f046c739e..9b5206bcc60 100644 --- a/hadoop-tools/hadoop-sls/src/main/html/js/thirdparty/jquery.js +++ b/hadoop-tools/hadoop-sls/src/main/html/js/thirdparty/jquery.js @@ -1,247 +1,173 @@ /*! - * jQuery JavaScript Library v1.10.2 - * http://jquery.com/ + * jQuery JavaScript Library v3.3.1 + * https://jquery.com/ * * Includes Sizzle.js - * http://sizzlejs.com/ + * https://sizzlejs.com/ * - * Copyright 2005, 2013 jQuery Foundation, Inc. 
and other contributors + * Copyright JS Foundation and other contributors * Released under the MIT license - * http://jquery.org/license + * https://jquery.org/license * - * Date: 2013-07-03T13:48Z + * Date: 2018-01-20T17:24Z */ -(function( window, undefined ) { +( function( global, factory ) { + + "use strict"; + + if ( typeof module === "object" && typeof module.exports === "object" ) { + + // For CommonJS and CommonJS-like environments where a proper `window` + // is present, execute the factory and get jQuery. + // For environments that do not have a `window` with a `document` + // (such as Node.js), expose a factory as module.exports. + // This accentuates the need for the creation of a real `window`. + // e.g. var jQuery = require("jquery")(window); + // See ticket #14549 for more info. + module.exports = global.document ? + factory( global, true ) : + function( w ) { + if ( !w.document ) { + throw new Error( "jQuery requires a window with a document" ); + } + return factory( w ); + }; + } else { + factory( global ); + } + +// Pass this if window is not defined yet +} )( typeof window !== "undefined" ? window : this, function( window, noGlobal ) { + +// Edge <= 12 - 13+, Firefox <=18 - 45+, IE 10 - 11, Safari 5.1 - 9+, iOS 6 - 9.1 +// throw exceptions when non-strict code (e.g., ASP.NET 4.5) accesses strict mode +// arguments.callee.caller (trac-13335). But as of jQuery 3.0 (2016), strict mode should be common +// enough that all such attempts are guarded in a try block. +"use strict"; + +var arr = []; + +var document = window.document; + +var getProto = Object.getPrototypeOf; + +var slice = arr.slice; + +var concat = arr.concat; + +var push = arr.push; + +var indexOf = arr.indexOf; + +var class2type = {}; + +var toString = class2type.toString; + +var hasOwn = class2type.hasOwnProperty; + +var fnToString = hasOwn.toString; + +var ObjectFunctionString = fnToString.call( Object ); + +var support = {}; + +var isFunction = function isFunction( obj ) { + + // Support: Chrome <=57, Firefox <=52 + // In some browsers, typeof returns "function" for HTML elements + // (i.e., `typeof document.createElement( "object" ) === "function"`). + // We don't want to classify *any* DOM node as a function. + return typeof obj === "function" && typeof obj.nodeType !== "number"; + }; + + +var isWindow = function isWindow( obj ) { + return obj != null && obj === obj.window; + }; + + + + + var preservedScriptAttributes = { + type: true, + src: true, + noModule: true + }; + + function DOMEval( code, doc, node ) { + doc = doc || document; + + var i, + script = doc.createElement( "script" ); + + script.text = code; + if ( node ) { + for ( i in preservedScriptAttributes ) { + if ( node[ i ] ) { + script[ i ] = node[ i ]; + } + } + } + doc.head.appendChild( script ).parentNode.removeChild( script ); + } + + +function toType( obj ) { + if ( obj == null ) { + return obj + ""; + } + + // Support: Android <=2.3 only (functionish RegExp) + return typeof obj === "object" || typeof obj === "function" ? + class2type[ toString.call( obj ) ] || "object" : + typeof obj; +} +/* global Symbol */ +// Defining this global in .eslintrc.json would create a danger of using the global +// unguarded in another place, it seems safer to define global only for this module + + -// Can't do this because several apps including ASP.NET trace -// the stack via arguments.caller.callee and Firefox dies if -// you try to trace through "use strict" call chains. 
(#13335) -// Support: Firefox 18+ -//"use strict"; var - // The deferred used on DOM ready - readyList, - - // A central reference to the root jQuery(document) - rootjQuery, - - // Support: IE<10 - // For `typeof xmlNode.method` instead of `xmlNode.method !== undefined` - core_strundefined = typeof undefined, - - // Use the correct document accordingly with window argument (sandbox) - location = window.location, - document = window.document, - docElem = document.documentElement, - - // Map over jQuery in case of overwrite - _jQuery = window.jQuery, - - // Map over the $ in case of overwrite - _$ = window.$, - - // [[Class]] -> type pairs - class2type = {}, - - // List of deleted data cache ids, so we can reuse them - core_deletedIds = [], - - core_version = "1.10.2", - - // Save a reference to some core methods - core_concat = core_deletedIds.concat, - core_push = core_deletedIds.push, - core_slice = core_deletedIds.slice, - core_indexOf = core_deletedIds.indexOf, - core_toString = class2type.toString, - core_hasOwn = class2type.hasOwnProperty, - core_trim = core_version.trim, + version = "3.3.1", // Define a local copy of jQuery jQuery = function( selector, context ) { + // The jQuery object is actually just the init constructor 'enhanced' - return new jQuery.fn.init( selector, context, rootjQuery ); + // Need init if jQuery is called (just allow error to be thrown if not included) + return new jQuery.fn.init( selector, context ); }, - // Used for matching numbers - core_pnum = /[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/.source, - - // Used for splitting on whitespace - core_rnotwhite = /\S+/g, - - // Make sure we trim BOM and NBSP (here's looking at you, Safari 5.0 and IE) - rtrim = /^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g, - - // A simple way to check for HTML strings - // Prioritize #id over to avoid XSS via location.hash (#9521) - // Strict HTML recognition (#11290: must start with <) - rquickExpr = /^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]*))$/, - - // Match a standalone tag - rsingleTag = /^<(\w+)\s*\/?>(?:<\/\1>|)$/, - - // JSON RegExp - rvalidchars = /^[\],:{}\s]*$/, - rvalidbraces = /(?:^|:|,)(?:\s*\[)+/g, - rvalidescape = /\\(?:["\\\/bfnrt]|u[\da-fA-F]{4})/g, - rvalidtokens = /"[^"\\\r\n]*"|true|false|null|-?(?:\d+\.|)\d+(?:[eE][+-]?\d+|)/g, - - // Matches dashed string for camelizing - rmsPrefix = /^-ms-/, - rdashAlpha = /-([\da-z])/gi, - - // Used by jQuery.camelCase as callback to replace() - fcamelCase = function( all, letter ) { - return letter.toUpperCase(); - }, - - // The ready event handler - completed = function( event ) { - - // readyState === "complete" is good enough for us to call the dom ready in oldIE - if ( document.addEventListener || event.type === "load" || document.readyState === "complete" ) { - detach(); - jQuery.ready(); - } - }, - // Clean-up method for dom ready events - detach = function() { - if ( document.addEventListener ) { - document.removeEventListener( "DOMContentLoaded", completed, false ); - window.removeEventListener( "load", completed, false ); - - } else { - document.detachEvent( "onreadystatechange", completed ); - window.detachEvent( "onload", completed ); - } - }; + // Support: Android <=4.0 only + // Make sure we trim BOM and NBSP + rtrim = /^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g; jQuery.fn = jQuery.prototype = { + // The current version of jQuery being used - jquery: core_version, + jquery: version, constructor: jQuery, - init: function( selector, context, rootjQuery ) { - var match, elem; - - // HANDLE: $(""), $(null), $(undefined), $(false) - if ( 
!selector ) { - return this; - } - - // Handle HTML strings - if ( typeof selector === "string" ) { - if ( selector.charAt(0) === "<" && selector.charAt( selector.length - 1 ) === ">" && selector.length >= 3 ) { - // Assume that strings that start and end with <> are HTML and skip the regex check - match = [ null, selector, null ]; - - } else { - match = rquickExpr.exec( selector ); - } - - // Match html or make sure no context is specified for #id - if ( match && (match[1] || !context) ) { - - // HANDLE: $(html) -> $(array) - if ( match[1] ) { - context = context instanceof jQuery ? context[0] : context; - - // scripts is true for back-compat - jQuery.merge( this, jQuery.parseHTML( - match[1], - context && context.nodeType ? context.ownerDocument || context : document, - true - ) ); - - // HANDLE: $(html, props) - if ( rsingleTag.test( match[1] ) && jQuery.isPlainObject( context ) ) { - for ( match in context ) { - // Properties of context are called as methods if possible - if ( jQuery.isFunction( this[ match ] ) ) { - this[ match ]( context[ match ] ); - - // ...and otherwise set as attributes - } else { - this.attr( match, context[ match ] ); - } - } - } - - return this; - - // HANDLE: $(#id) - } else { - elem = document.getElementById( match[2] ); - - // Check parentNode to catch when Blackberry 4.6 returns - // nodes that are no longer in the document #6963 - if ( elem && elem.parentNode ) { - // Handle the case where IE and Opera return items - // by name instead of ID - if ( elem.id !== match[2] ) { - return rootjQuery.find( selector ); - } - - // Otherwise, we inject the element directly into the jQuery object - this.length = 1; - this[0] = elem; - } - - this.context = document; - this.selector = selector; - return this; - } - - // HANDLE: $(expr, $(...)) - } else if ( !context || context.jquery ) { - return ( context || rootjQuery ).find( selector ); - - // HANDLE: $(expr, context) - // (which is just equivalent to: $(context).find(expr) - } else { - return this.constructor( context ).find( selector ); - } - - // HANDLE: $(DOMElement) - } else if ( selector.nodeType ) { - this.context = this[0] = selector; - this.length = 1; - return this; - - // HANDLE: $(function) - // Shortcut for document ready - } else if ( jQuery.isFunction( selector ) ) { - return rootjQuery.ready( selector ); - } - - if ( selector.selector !== undefined ) { - this.selector = selector.selector; - this.context = selector.context; - } - - return jQuery.makeArray( selector, this ); - }, - - // Start with an empty selector - selector: "", // The default length of a jQuery object is 0 length: 0, toArray: function() { - return core_slice.call( this ); + return slice.call( this ); }, // Get the Nth element in the matched element set OR // Get the whole matched element set as a clean array get: function( num ) { - return num == null ? - // Return a 'clean' array - this.toArray() : + // Return all the elements in a clean array + if ( num == null ) { + return slice.call( this ); + } - // Return just the object - ( num < 0 ? this[ this.length + num ] : this[ num ] ); + // Return just the one element from the set + return num < 0 ? this[ num + this.length ] : this[ num ]; }, // Take an array of elements and push it onto the stack @@ -253,28 +179,24 @@ jQuery.fn = jQuery.prototype = { // Add the old object onto the stack (as a reference) ret.prevObject = this; - ret.context = this.context; // Return the newly-formed element set return ret; }, // Execute a callback for every element in the matched set. 
- // (You can seed the arguments with an array of args, but this is - // only used internally.) - each: function( callback, args ) { - return jQuery.each( this, callback, args ); + each: function( callback ) { + return jQuery.each( this, callback ); }, - ready: function( fn ) { - // Add the callback - jQuery.ready.promise().done( fn ); - - return this; + map: function( callback ) { + return this.pushStack( jQuery.map( this, function( elem, i ) { + return callback.call( elem, i, elem ); + } ) ); }, slice: function() { - return this.pushStack( core_slice.apply( this, arguments ) ); + return this.pushStack( slice.apply( this, arguments ) ); }, first: function() { @@ -288,32 +210,23 @@ jQuery.fn = jQuery.prototype = { eq: function( i ) { var len = this.length, j = +i + ( i < 0 ? len : 0 ); - return this.pushStack( j >= 0 && j < len ? [ this[j] ] : [] ); - }, - - map: function( callback ) { - return this.pushStack( jQuery.map(this, function( elem, i ) { - return callback.call( elem, i, elem ); - })); + return this.pushStack( j >= 0 && j < len ? [ this[ j ] ] : [] ); }, end: function() { - return this.prevObject || this.constructor(null); + return this.prevObject || this.constructor(); }, // For internal use only. // Behaves like an Array's method, not like a jQuery method. - push: core_push, - sort: [].sort, - splice: [].splice + push: push, + sort: arr.sort, + splice: arr.splice }; -// Give the init function the jQuery prototype for later instantiation -jQuery.fn.init.prototype = jQuery.fn; - jQuery.extend = jQuery.fn.extend = function() { - var src, copyIsArray, copy, name, options, clone, - target = arguments[0] || {}, + var options, name, src, copy, copyIsArray, clone, + target = arguments[ 0 ] || {}, i = 1, length = arguments.length, deep = false; @@ -321,25 +234,28 @@ jQuery.extend = jQuery.fn.extend = function() { // Handle a deep copy situation if ( typeof target === "boolean" ) { deep = target; - target = arguments[1] || {}; - // skip the boolean and the target - i = 2; + + // Skip the boolean and the target + target = arguments[ i ] || {}; + i++; } // Handle case when target is a string or something (possible in deep copy) - if ( typeof target !== "object" && !jQuery.isFunction(target) ) { + if ( typeof target !== "object" && !isFunction( target ) ) { target = {}; } - // extend jQuery itself if only one argument is passed - if ( length === i ) { + // Extend jQuery itself if only one argument is passed + if ( i === length ) { target = this; - --i; + i--; } for ( ; i < length; i++ ) { + // Only deal with non-null/undefined values - if ( (options = arguments[ i ]) != null ) { + if ( ( options = arguments[ i ] ) != null ) { + // Extend the base object for ( name in options ) { src = target[ name ]; @@ -351,13 +267,15 @@ jQuery.extend = jQuery.fn.extend = function() { } // Recurse if we're merging plain objects or arrays - if ( deep && copy && ( jQuery.isPlainObject(copy) || (copyIsArray = jQuery.isArray(copy)) ) ) { + if ( deep && copy && ( jQuery.isPlainObject( copy ) || + ( copyIsArray = Array.isArray( copy ) ) ) ) { + if ( copyIsArray ) { copyIsArray = false; - clone = src && jQuery.isArray(src) ? src : []; + clone = src && Array.isArray( src ) ? src : []; } else { - clone = src && jQuery.isPlainObject(src) ? src : {}; + clone = src && jQuery.isPlainObject( src ) ? 
src : {}; } // Never move original objects, clone them @@ -375,298 +293,72 @@ jQuery.extend = jQuery.fn.extend = function() { return target; }; -jQuery.extend({ +jQuery.extend( { + // Unique for each copy of jQuery on the page - // Non-digits removed to match rinlinejQuery - expando: "jQuery" + ( core_version + Math.random() ).replace( /\D/g, "" ), + expando: "jQuery" + ( version + Math.random() ).replace( /\D/g, "" ), - noConflict: function( deep ) { - if ( window.$ === jQuery ) { - window.$ = _$; - } + // Assume jQuery is ready without the ready module + isReady: true, - if ( deep && window.jQuery === jQuery ) { - window.jQuery = _jQuery; - } - - return jQuery; + error: function( msg ) { + throw new Error( msg ); }, - // Is the DOM ready to be used? Set to true once it occurs. - isReady: false, - - // A counter to track how many items to wait for before - // the ready event fires. See #6781 - readyWait: 1, - - // Hold (or release) the ready event - holdReady: function( hold ) { - if ( hold ) { - jQuery.readyWait++; - } else { - jQuery.ready( true ); - } - }, - - // Handle when the DOM is ready - ready: function( wait ) { - - // Abort if there are pending holds or we're already ready - if ( wait === true ? --jQuery.readyWait : jQuery.isReady ) { - return; - } - - // Make sure body exists, at least, in case IE gets a little overzealous (ticket #5443). - if ( !document.body ) { - return setTimeout( jQuery.ready ); - } - - // Remember that the DOM is ready - jQuery.isReady = true; - - // If a normal DOM Ready event fired, decrement, and wait if need be - if ( wait !== true && --jQuery.readyWait > 0 ) { - return; - } - - // If there are functions bound, to execute - readyList.resolveWith( document, [ jQuery ] ); - - // Trigger any bound ready events - if ( jQuery.fn.trigger ) { - jQuery( document ).trigger("ready").off("ready"); - } - }, - - // See test/unit/core.js for details concerning isFunction. - // Since version 1.3, DOM methods and functions like alert - // aren't supported. They return false on IE (#2968). - isFunction: function( obj ) { - return jQuery.type(obj) === "function"; - }, - - isArray: Array.isArray || function( obj ) { - return jQuery.type(obj) === "array"; - }, - - isWindow: function( obj ) { - /* jshint eqeqeq: false */ - return obj != null && obj == obj.window; - }, - - isNumeric: function( obj ) { - return !isNaN( parseFloat(obj) ) && isFinite( obj ); - }, - - type: function( obj ) { - if ( obj == null ) { - return String( obj ); - } - return typeof obj === "object" || typeof obj === "function" ? - class2type[ core_toString.call(obj) ] || "object" : - typeof obj; - }, + noop: function() {}, isPlainObject: function( obj ) { - var key; + var proto, Ctor; - // Must be an Object. - // Because of IE, we also have to check the presence of the constructor property. 
- // Make sure that DOM nodes and window objects don't pass through, as well - if ( !obj || jQuery.type(obj) !== "object" || obj.nodeType || jQuery.isWindow( obj ) ) { + // Detect obvious negatives + // Use toString instead of jQuery.type to catch host objects + if ( !obj || toString.call( obj ) !== "[object Object]" ) { return false; } - try { - // Not own constructor property must be Object - if ( obj.constructor && - !core_hasOwn.call(obj, "constructor") && - !core_hasOwn.call(obj.constructor.prototype, "isPrototypeOf") ) { - return false; - } - } catch ( e ) { - // IE8,9 Will throw exceptions on certain host objects #9897 - return false; + proto = getProto( obj ); + + // Objects with no prototype (e.g., `Object.create( null )`) are plain + if ( !proto ) { + return true; } - // Support: IE<9 - // Handle iteration over inherited properties before own properties. - if ( jQuery.support.ownLast ) { - for ( key in obj ) { - return core_hasOwn.call( obj, key ); - } - } - - // Own properties are enumerated firstly, so to speed up, - // if last one is own, then all properties are own. - for ( key in obj ) {} - - return key === undefined || core_hasOwn.call( obj, key ); + // Objects with prototype are plain iff they were constructed by a global Object function + Ctor = hasOwn.call( proto, "constructor" ) && proto.constructor; + return typeof Ctor === "function" && fnToString.call( Ctor ) === ObjectFunctionString; }, isEmptyObject: function( obj ) { + + /* eslint-disable no-unused-vars */ + // See https://github.com/eslint/eslint/issues/6125 var name; + for ( name in obj ) { return false; } return true; }, - error: function( msg ) { - throw new Error( msg ); - }, - - // data: string of html - // context (optional): If specified, the fragment will be created in this context, defaults to document - // keepScripts (optional): If true, will include scripts passed in the html string - parseHTML: function( data, context, keepScripts ) { - if ( !data || typeof data !== "string" ) { - return null; - } - if ( typeof context === "boolean" ) { - keepScripts = context; - context = false; - } - context = context || document; - - var parsed = rsingleTag.exec( data ), - scripts = !keepScripts && []; - - // Single tag - if ( parsed ) { - return [ context.createElement( parsed[1] ) ]; - } - - parsed = jQuery.buildFragment( [ data ], context, scripts ); - if ( scripts ) { - jQuery( scripts ).remove(); - } - return jQuery.merge( [], parsed.childNodes ); - }, - - parseJSON: function( data ) { - // Attempt to parse using the native JSON parser first - if ( window.JSON && window.JSON.parse ) { - return window.JSON.parse( data ); - } - - if ( data === null ) { - return data; - } - - if ( typeof data === "string" ) { - - // Make sure leading/trailing whitespace is removed (IE can't handle it) - data = jQuery.trim( data ); - - if ( data ) { - // Make sure the incoming data is actual JSON - // Logic borrowed from http://json.org/json2.js - if ( rvalidchars.test( data.replace( rvalidescape, "@" ) - .replace( rvalidtokens, "]" ) - .replace( rvalidbraces, "")) ) { - - return ( new Function( "return " + data ) )(); - } - } - } - - jQuery.error( "Invalid JSON: " + data ); - }, - - // Cross-browser xml parsing - parseXML: function( data ) { - var xml, tmp; - if ( !data || typeof data !== "string" ) { - return null; - } - try { - if ( window.DOMParser ) { // Standard - tmp = new DOMParser(); - xml = tmp.parseFromString( data , "text/xml" ); - } else { // IE - xml = new ActiveXObject( "Microsoft.XMLDOM" ); - xml.async = "false"; 
- xml.loadXML( data ); - } - } catch( e ) { - xml = undefined; - } - if ( !xml || !xml.documentElement || xml.getElementsByTagName( "parsererror" ).length ) { - jQuery.error( "Invalid XML: " + data ); - } - return xml; - }, - - noop: function() {}, - // Evaluates a script in a global context - // Workarounds based on findings by Jim Driscoll - // http://weblogs.java.net/blog/driscoll/archive/2009/09/08/eval-javascript-global-context - globalEval: function( data ) { - if ( data && jQuery.trim( data ) ) { - // We use execScript on Internet Explorer - // We use an anonymous function so that context is window - // rather than jQuery in Firefox - ( window.execScript || function( data ) { - window[ "eval" ].call( window, data ); - } )( data ); - } + globalEval: function( code ) { + DOMEval( code ); }, - // Convert dashed to camelCase; used by the css and data modules - // Microsoft forgot to hump their vendor prefix (#9572) - camelCase: function( string ) { - return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase ); - }, + each: function( obj, callback ) { + var length, i = 0; - nodeName: function( elem, name ) { - return elem.nodeName && elem.nodeName.toLowerCase() === name.toLowerCase(); - }, - - // args is for internal usage only - each: function( obj, callback, args ) { - var value, - i = 0, - length = obj.length, - isArray = isArraylike( obj ); - - if ( args ) { - if ( isArray ) { - for ( ; i < length; i++ ) { - value = callback.apply( obj[ i ], args ); - - if ( value === false ) { - break; - } - } - } else { - for ( i in obj ) { - value = callback.apply( obj[ i ], args ); - - if ( value === false ) { - break; - } + if ( isArrayLike( obj ) ) { + length = obj.length; + for ( ; i < length; i++ ) { + if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { + break; } } - - // A special, fast, case for the most common use of each } else { - if ( isArray ) { - for ( ; i < length; i++ ) { - value = callback.call( obj[ i ], i, obj[ i ] ); - - if ( value === false ) { - break; - } - } - } else { - for ( i in obj ) { - value = callback.call( obj[ i ], i, obj[ i ] ); - - if ( value === false ) { - break; - } + for ( i in obj ) { + if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { + break; } } } @@ -674,33 +366,25 @@ jQuery.extend({ return obj; }, - // Use native String.trim function wherever possible - trim: core_trim && !core_trim.call("\uFEFF\xA0") ? - function( text ) { - return text == null ? - "" : - core_trim.call( text ); - } : - - // Otherwise use our own trimming functionality - function( text ) { - return text == null ? - "" : - ( text + "" ).replace( rtrim, "" ); - }, + // Support: Android <=4.0 only + trim: function( text ) { + return text == null ? + "" : + ( text + "" ).replace( rtrim, "" ); + }, // results is for internal usage only makeArray: function( arr, results ) { var ret = results || []; if ( arr != null ) { - if ( isArraylike( Object(arr) ) ) { + if ( isArrayLike( Object( arr ) ) ) { jQuery.merge( ret, typeof arr === "string" ? [ arr ] : arr ); } else { - core_push.call( ret, arr ); + push.call( ret, arr ); } } @@ -708,40 +392,18 @@ jQuery.extend({ }, inArray: function( elem, arr, i ) { - var len; - - if ( arr ) { - if ( core_indexOf ) { - return core_indexOf.call( arr, elem, i ); - } - - len = arr.length; - i = i ? i < 0 ? Math.max( 0, len + i ) : i : 0; - - for ( ; i < len; i++ ) { - // Skip accessing in sparse arrays - if ( i in arr && arr[ i ] === elem ) { - return i; - } - } - } - - return -1; + return arr == null ? 
-1 : indexOf.call( arr, elem, i ); }, + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit merge: function( first, second ) { - var l = second.length, - i = first.length, - j = 0; + var len = +second.length, + j = 0, + i = first.length; - if ( typeof l === "number" ) { - for ( ; j < l; j++ ) { - first[ i++ ] = second[ j ]; - } - } else { - while ( second[j] !== undefined ) { - first[ i++ ] = second[ j++ ]; - } + for ( ; j < len; j++ ) { + first[ i++ ] = second[ j ]; } first.length = i; @@ -749,40 +411,39 @@ jQuery.extend({ return first; }, - grep: function( elems, callback, inv ) { - var retVal, - ret = [], + grep: function( elems, callback, invert ) { + var callbackInverse, + matches = [], i = 0, - length = elems.length; - inv = !!inv; + length = elems.length, + callbackExpect = !invert; // Go through the array, only saving the items // that pass the validator function for ( ; i < length; i++ ) { - retVal = !!callback( elems[ i ], i ); - if ( inv !== retVal ) { - ret.push( elems[ i ] ); + callbackInverse = !callback( elems[ i ], i ); + if ( callbackInverse !== callbackExpect ) { + matches.push( elems[ i ] ); } } - return ret; + return matches; }, // arg is for internal usage only map: function( elems, callback, arg ) { - var value, + var length, value, i = 0, - length = elems.length, - isArray = isArraylike( elems ), ret = []; - // Go through the array, translating each of the items to their - if ( isArray ) { + // Go through the array, translating each of the items to their new values + if ( isArrayLike( elems ) ) { + length = elems.length; for ( ; i < length; i++ ) { value = callback( elems[ i ], i, arg ); if ( value != null ) { - ret[ ret.length ] = value; + ret.push( value ); } } @@ -792,234 +453,73 @@ jQuery.extend({ value = callback( elems[ i ], i, arg ); if ( value != null ) { - ret[ ret.length ] = value; + ret.push( value ); } } } // Flatten any nested arrays - return core_concat.apply( [], ret ); + return concat.apply( [], ret ); }, // A global GUID counter for objects guid: 1, - // Bind a function to a context, optionally partially applying any - // arguments. - proxy: function( fn, context ) { - var args, proxy, tmp; + // jQuery.support is not used in Core but other projects attach their + // properties to it so it needs to exist. + support: support +} ); - if ( typeof context === "string" ) { - tmp = fn[ context ]; - context = fn; - fn = tmp; - } - - // Quick check to determine if target is callable, in the spec - // this throws a TypeError, but we will just return undefined. 
- if ( !jQuery.isFunction( fn ) ) { - return undefined; - } - - // Simulated bind - args = core_slice.call( arguments, 2 ); - proxy = function() { - return fn.apply( context || this, args.concat( core_slice.call( arguments ) ) ); - }; - - // Set the guid of unique handler to the same of original handler, so it can be removed - proxy.guid = fn.guid = fn.guid || jQuery.guid++; - - return proxy; - }, - - // Multifunctional method to get and set values of a collection - // The value/s can optionally be executed if it's a function - access: function( elems, fn, key, value, chainable, emptyGet, raw ) { - var i = 0, - length = elems.length, - bulk = key == null; - - // Sets many values - if ( jQuery.type( key ) === "object" ) { - chainable = true; - for ( i in key ) { - jQuery.access( elems, fn, i, key[i], true, emptyGet, raw ); - } - - // Sets one value - } else if ( value !== undefined ) { - chainable = true; - - if ( !jQuery.isFunction( value ) ) { - raw = true; - } - - if ( bulk ) { - // Bulk operations run against the entire set - if ( raw ) { - fn.call( elems, value ); - fn = null; - - // ...except when executing function values - } else { - bulk = fn; - fn = function( elem, key, value ) { - return bulk.call( jQuery( elem ), value ); - }; - } - } - - if ( fn ) { - for ( ; i < length; i++ ) { - fn( elems[i], key, raw ? value : value.call( elems[i], i, fn( elems[i], key ) ) ); - } - } - } - - return chainable ? - elems : - - // Gets - bulk ? - fn.call( elems ) : - length ? fn( elems[0], key ) : emptyGet; - }, - - now: function() { - return ( new Date() ).getTime(); - }, - - // A method for quickly swapping in/out CSS properties to get correct calculations. - // Note: this method belongs to the css module but it's needed here for the support module. - // If support gets modularized, this method should be moved back to the css module. - swap: function( elem, options, callback, args ) { - var ret, name, - old = {}; - - // Remember the old values, and insert the new ones - for ( name in options ) { - old[ name ] = elem.style[ name ]; - elem.style[ name ] = options[ name ]; - } - - ret = callback.apply( elem, args || [] ); - - // Revert the old values - for ( name in options ) { - elem.style[ name ] = old[ name ]; - } - - return ret; - } -}); - -jQuery.ready.promise = function( obj ) { - if ( !readyList ) { - - readyList = jQuery.Deferred(); - - // Catch cases where $(document).ready() is called after the browser event has already occurred. 
- // we once tried to use readyState "interactive" here, but it caused issues like the one - // discovered by ChrisS here: http://bugs.jquery.com/ticket/12282#comment:15 - if ( document.readyState === "complete" ) { - // Handle it asynchronously to allow scripts the opportunity to delay ready - setTimeout( jQuery.ready ); - - // Standards-based browsers support DOMContentLoaded - } else if ( document.addEventListener ) { - // Use the handy event callback - document.addEventListener( "DOMContentLoaded", completed, false ); - - // A fallback to window.onload, that will always work - window.addEventListener( "load", completed, false ); - - // If IE event model is used - } else { - // Ensure firing before onload, maybe late but safe also for iframes - document.attachEvent( "onreadystatechange", completed ); - - // A fallback to window.onload, that will always work - window.attachEvent( "onload", completed ); - - // If IE and not a frame - // continually check to see if the document is ready - var top = false; - - try { - top = window.frameElement == null && document.documentElement; - } catch(e) {} - - if ( top && top.doScroll ) { - (function doScrollCheck() { - if ( !jQuery.isReady ) { - - try { - // Use the trick by Diego Perini - // http://javascript.nwbox.com/IEContentLoaded/ - top.doScroll("left"); - } catch(e) { - return setTimeout( doScrollCheck, 50 ); - } - - // detach all dom ready events - detach(); - - // and execute any waiting functions - jQuery.ready(); - } - })(); - } - } - } - return readyList.promise( obj ); -}; +if ( typeof Symbol === "function" ) { + jQuery.fn[ Symbol.iterator ] = arr[ Symbol.iterator ]; +} // Populate the class2type map -jQuery.each("Boolean Number String Function Array Date RegExp Object Error".split(" "), function(i, name) { +jQuery.each( "Boolean Number String Function Array Date RegExp Object Error Symbol".split( " " ), +function( i, name ) { class2type[ "[object " + name + "]" ] = name.toLowerCase(); -}); +} ); -function isArraylike( obj ) { - var length = obj.length, - type = jQuery.type( obj ); +function isArrayLike( obj ) { - if ( jQuery.isWindow( obj ) ) { + // Support: real iOS 8.2 only (not reproducible in simulator) + // `in` check used to prevent JIT error (gh-2145) + // hasOwn isn't used here due to false negatives + // regarding Nodelist length in IE + var length = !!obj && "length" in obj && obj.length, + type = toType( obj ); + + if ( isFunction( obj ) || isWindow( obj ) ) { return false; } - if ( obj.nodeType === 1 && length ) { - return true; - } - - return type === "array" || type !== "function" && - ( length === 0 || - typeof length === "number" && length > 0 && ( length - 1 ) in obj ); + return type === "array" || length === 0 || + typeof length === "number" && length > 0 && ( length - 1 ) in obj; } - -// All jQuery objects should point back to these -rootjQuery = jQuery(document); +var Sizzle = /*! - * Sizzle CSS Selector Engine v1.10.2 - * http://sizzlejs.com/ + * Sizzle CSS Selector Engine v2.3.3 + * https://sizzlejs.com/ * - * Copyright 2013 jQuery Foundation, Inc. 
and other contributors + * Copyright jQuery Foundation and other contributors * Released under the MIT license * http://jquery.org/license * - * Date: 2013-07-03 + * Date: 2016-08-08 */ -(function( window, undefined ) { +(function( window ) { var i, support, - cachedruns, Expr, getText, isXML, + tokenize, compile, + select, outermostContext, sortInput, + hasDuplicate, // Local document vars setDocument, @@ -1032,26 +532,20 @@ var i, contains, // Instance-specific data - expando = "sizzle" + -(new Date()), + expando = "sizzle" + 1 * new Date(), preferredDoc = window.document, dirruns = 0, done = 0, classCache = createCache(), tokenCache = createCache(), compilerCache = createCache(), - hasDuplicate = false, sortOrder = function( a, b ) { if ( a === b ) { hasDuplicate = true; - return 0; } return 0; }, - // General-purpose constants - strundefined = typeof undefined, - MAX_NEGATIVE = 1 << 31, - // Instance methods hasOwn = ({}).hasOwnProperty, arr = [], @@ -1059,12 +553,13 @@ var i, push_native = arr.push, push = arr.push, slice = arr.slice, - // Use a stripped-down indexOf if we can't use a native one - indexOf = arr.indexOf || function( elem ) { + // Use a stripped-down indexOf as it's faster than native + // https://jsperf.com/thor-indexof-vs-for/5 + indexOf = function( list, elem ) { var i = 0, - len = this.length; + len = list.length; for ( ; i < len; i++ ) { - if ( this[i] === elem ) { + if ( list[i] === elem ) { return i; } } @@ -1075,44 +570,46 @@ var i, // Regular expressions - // Whitespace characters http://www.w3.org/TR/css3-selectors/#whitespace + // http://www.w3.org/TR/css3-selectors/#whitespace whitespace = "[\\x20\\t\\r\\n\\f]", - // http://www.w3.org/TR/css3-syntax/#characters - characterEncoding = "(?:\\\\.|[\\w-]|[^\\x00-\\xa0])+", - // Loosely modeled on CSS identifier characters - // An unquoted value should be a CSS identifier http://www.w3.org/TR/css3-selectors/#attribute-selectors - // Proper syntax: http://www.w3.org/TR/CSS21/syndata.html#value-def-identifier - identifier = characterEncoding.replace( "w", "w#" ), + // http://www.w3.org/TR/CSS21/syndata.html#value-def-identifier + identifier = "(?:\\\\.|[\\w-]|[^\0-\\xa0])+", - // Acceptable operators http://www.w3.org/TR/selectors/#attribute-selectors - attributes = "\\[" + whitespace + "*(" + characterEncoding + ")" + whitespace + - "*(?:([*^$|!~]?=)" + whitespace + "*(?:(['\"])((?:\\\\.|[^\\\\])*?)\\3|(" + identifier + ")|)|)" + whitespace + "*\\]", + // Attribute selectors: http://www.w3.org/TR/selectors/#attribute-selectors + attributes = "\\[" + whitespace + "*(" + identifier + ")(?:" + whitespace + + // Operator (capture 2) + "*([*^$|!~]?=)" + whitespace + + // "Attribute values must be CSS identifiers [capture 5] or strings [capture 3 or capture 4]" + "*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|(" + identifier + "))|)" + whitespace + + "*\\]", - // Prefer arguments quoted, - // then not containing pseudos/brackets, - // then attribute selectors/non-parenthetical expressions, - // then anything else - // These preferences are here to reduce the number of selectors - // needing tokenize in the PSEUDO preFilter - pseudos = ":(" + characterEncoding + ")(?:\\(((['\"])((?:\\\\.|[^\\\\])*?)\\3|((?:\\\\.|[^\\\\()[\\]]|" + attributes.replace( 3, 8 ) + ")*)|.*)\\)|)", + pseudos = ":(" + identifier + ")(?:\\((" + + // To reduce the number of selectors needing tokenize in the preFilter, prefer arguments: + // 1. 
quoted (capture 3; capture 4 or capture 5) + "('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|" + + // 2. simple (capture 6) + "((?:\\\\.|[^\\\\()[\\]]|" + attributes + ")*)|" + + // 3. anything else (capture 2) + ".*" + + ")\\)|)", // Leading and non-escaped trailing whitespace, capturing some non-whitespace characters preceding the latter + rwhitespace = new RegExp( whitespace + "+", "g" ), rtrim = new RegExp( "^" + whitespace + "+|((?:^|[^\\\\])(?:\\\\.)*)" + whitespace + "+$", "g" ), rcomma = new RegExp( "^" + whitespace + "*," + whitespace + "*" ), rcombinators = new RegExp( "^" + whitespace + "*([>+~]|" + whitespace + ")" + whitespace + "*" ), - rsibling = new RegExp( whitespace + "*[+~]" ), - rattributeQuotes = new RegExp( "=" + whitespace + "*([^\\]'\"]*)" + whitespace + "*\\]", "g" ), + rattributeQuotes = new RegExp( "=" + whitespace + "*([^\\]'\"]*?)" + whitespace + "*\\]", "g" ), rpseudo = new RegExp( pseudos ), ridentifier = new RegExp( "^" + identifier + "$" ), matchExpr = { - "ID": new RegExp( "^#(" + characterEncoding + ")" ), - "CLASS": new RegExp( "^\\.(" + characterEncoding + ")" ), - "TAG": new RegExp( "^(" + characterEncoding.replace( "w", "w*" ) + ")" ), + "ID": new RegExp( "^#(" + identifier + ")" ), + "CLASS": new RegExp( "^\\.(" + identifier + ")" ), + "TAG": new RegExp( "^(" + identifier + "|[*])" ), "ATTR": new RegExp( "^" + attributes ), "PSEUDO": new RegExp( "^" + pseudos ), "CHILD": new RegExp( "^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\(" + whitespace + @@ -1125,31 +622,66 @@ var i, whitespace + "*((?:-\\d)?\\d*)" + whitespace + "*\\)|)(?=[^-]|$)", "i" ) }, + rinputs = /^(?:input|select|textarea|button)$/i, + rheader = /^h\d$/i, + rnative = /^[^{]+\{\s*\[native \w/, // Easily-parseable/retrievable ID or TAG or CLASS selectors rquickExpr = /^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/, - rinputs = /^(?:input|select|textarea|button)$/i, - rheader = /^h\d$/i, + rsibling = /[+~]/, - rescape = /'|\\/g, - - // CSS escapes http://www.w3.org/TR/CSS21/syndata.html#escaped-characters + // CSS escapes + // http://www.w3.org/TR/CSS21/syndata.html#escaped-characters runescape = new RegExp( "\\\\([\\da-f]{1,6}" + whitespace + "?|(" + whitespace + ")|.)", "ig" ), funescape = function( _, escaped, escapedWhitespace ) { var high = "0x" + escaped - 0x10000; // NaN means non-codepoint - // Support: Firefox + // Support: Firefox<24 // Workaround erroneous numeric interpretation of +"0x" return high !== high || escapedWhitespace ? escaped : - // BMP codepoint high < 0 ? 
+ // BMP codepoint String.fromCharCode( high + 0x10000 ) : // Supplemental Plane codepoint (surrogate pair) String.fromCharCode( high >> 10 | 0xD800, high & 0x3FF | 0xDC00 ); - }; + }, + + // CSS string/identifier serialization + // https://drafts.csswg.org/cssom/#common-serializing-idioms + rcssescape = /([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g, + fcssescape = function( ch, asCodePoint ) { + if ( asCodePoint ) { + + // U+0000 NULL becomes U+FFFD REPLACEMENT CHARACTER + if ( ch === "\0" ) { + return "\uFFFD"; + } + + // Control characters and (dependent upon position) numbers get escaped as code points + return ch.slice( 0, -1 ) + "\\" + ch.charCodeAt( ch.length - 1 ).toString( 16 ) + " "; + } + + // Other potentially-special ASCII characters get backslash-escaped + return "\\" + ch; + }, + + // Used for iframes + // See setDocument() + // Removing the function wrapper causes a "Permission Denied" + // error in IE + unloadHandler = function() { + setDocument(); + }, + + disabledAncestor = addCombinator( + function( elem ) { + return elem.disabled === true && ("form" in elem || "label" in elem); + }, + { dir: "parentNode", next: "legend" } + ); // Optimize for push.apply( _, NodeList ) try { @@ -1181,104 +713,128 @@ try { } function Sizzle( selector, context, results, seed ) { - var match, elem, m, nodeType, - // QSA vars - i, groups, old, nid, newContext, newSelector; + var m, i, elem, nid, match, groups, newSelector, + newContext = context && context.ownerDocument, - if ( ( context ? context.ownerDocument || context : preferredDoc ) !== document ) { - setDocument( context ); - } + // nodeType defaults to 9, since context defaults to document + nodeType = context ? context.nodeType : 9; - context = context || document; results = results || []; - if ( !selector || typeof selector !== "string" ) { + // Return early from calls with invalid selector or context + if ( typeof selector !== "string" || !selector || + nodeType !== 1 && nodeType !== 9 && nodeType !== 11 ) { + return results; } - if ( (nodeType = context.nodeType) !== 1 && nodeType !== 9 ) { - return []; - } + // Try to shortcut find operations (as opposed to filters) in HTML documents + if ( !seed ) { - if ( documentIsHTML && !seed ) { + if ( ( context ? 
context.ownerDocument || context : preferredDoc ) !== document ) { + setDocument( context ); + } + context = context || document; + + if ( documentIsHTML ) { + + // If the selector is sufficiently simple, try using a "get*By*" DOM method + // (excepting DocumentFragment context, where the methods don't exist) + if ( nodeType !== 11 && (match = rquickExpr.exec( selector )) ) { + + // ID selector + if ( (m = match[1]) ) { + + // Document context + if ( nodeType === 9 ) { + if ( (elem = context.getElementById( m )) ) { + + // Support: IE, Opera, Webkit + // TODO: identify versions + // getElementById can match elements by name instead of ID + if ( elem.id === m ) { + results.push( elem ); + return results; + } + } else { + return results; + } + + // Element context + } else { + + // Support: IE, Opera, Webkit + // TODO: identify versions + // getElementById can match elements by name instead of ID + if ( newContext && (elem = newContext.getElementById( m )) && + contains( context, elem ) && + elem.id === m ) { - // Shortcuts - if ( (match = rquickExpr.exec( selector )) ) { - // Speed-up: Sizzle("#ID") - if ( (m = match[1]) ) { - if ( nodeType === 9 ) { - elem = context.getElementById( m ); - // Check parentNode to catch when Blackberry 4.6 returns - // nodes that are no longer in the document #6963 - if ( elem && elem.parentNode ) { - // Handle the case where IE, Opera, and Webkit return items - // by name instead of ID - if ( elem.id === m ) { results.push( elem ); return results; } - } else { - return results; } - } else { - // Context is not a document - if ( context.ownerDocument && (elem = context.ownerDocument.getElementById( m )) && - contains( context, elem ) && elem.id === m ) { - results.push( elem ); - return results; - } - } - // Speed-up: Sizzle("TAG") - } else if ( match[2] ) { - push.apply( results, context.getElementsByTagName( selector ) ); - return results; - - // Speed-up: Sizzle(".CLASS") - } else if ( (m = match[3]) && support.getElementsByClassName && context.getElementsByClassName ) { - push.apply( results, context.getElementsByClassName( m ) ); - return results; - } - } - - // QSA path - if ( support.qsa && (!rbuggyQSA || !rbuggyQSA.test( selector )) ) { - nid = old = expando; - newContext = context; - newSelector = nodeType === 9 && selector; - - // qSA works strangely on Element-rooted queries - // We can work around this by specifying an extra ID on the root - // and working up from there (Thanks to Andrew Dupont for the technique) - // IE 8 doesn't work on object elements - if ( nodeType === 1 && context.nodeName.toLowerCase() !== "object" ) { - groups = tokenize( selector ); - - if ( (old = context.getAttribute("id")) ) { - nid = old.replace( rescape, "\\$&" ); - } else { - context.setAttribute( "id", nid ); - } - nid = "[id='" + nid + "'] "; - - i = groups.length; - while ( i-- ) { - groups[i] = nid + toSelector( groups[i] ); - } - newContext = rsibling.test( selector ) && context.parentNode || context; - newSelector = groups.join(","); - } - - if ( newSelector ) { - try { - push.apply( results, - newContext.querySelectorAll( newSelector ) - ); + // Type selector + } else if ( match[2] ) { + push.apply( results, context.getElementsByTagName( selector ) ); return results; - } catch(qsaError) { - } finally { - if ( !old ) { - context.removeAttribute("id"); + + // Class selector + } else if ( (m = match[3]) && support.getElementsByClassName && + context.getElementsByClassName ) { + + push.apply( results, context.getElementsByClassName( m ) ); + return results; + } + 
} + + // Take advantage of querySelectorAll + if ( support.qsa && + !compilerCache[ selector + " " ] && + (!rbuggyQSA || !rbuggyQSA.test( selector )) ) { + + if ( nodeType !== 1 ) { + newContext = context; + newSelector = selector; + + // qSA looks outside Element context, which is not what we want + // Thanks to Andrew Dupont for this workaround technique + // Support: IE <=8 + // Exclude object elements + } else if ( context.nodeName.toLowerCase() !== "object" ) { + + // Capture the context ID, setting it first if necessary + if ( (nid = context.getAttribute( "id" )) ) { + nid = nid.replace( rcssescape, fcssescape ); + } else { + context.setAttribute( "id", (nid = expando) ); + } + + // Prefix every selector in the list + groups = tokenize( selector ); + i = groups.length; + while ( i-- ) { + groups[i] = "#" + nid + " " + toSelector( groups[i] ); + } + newSelector = groups.join( "," ); + + // Expand context for sibling selectors + newContext = rsibling.test( selector ) && testContext( context.parentNode ) || + context; + } + + if ( newSelector ) { + try { + push.apply( results, + newContext.querySelectorAll( newSelector ) + ); + return results; + } catch ( qsaError ) { + } finally { + if ( nid === expando ) { + context.removeAttribute( "id" ); + } } } } @@ -1291,7 +847,7 @@ function Sizzle( selector, context, results, seed ) { /** * Create key-value caches of limited size - * @returns {Function(string, Object)} Returns the Object data after storing it on itself with + * @returns {function(string, object)} Returns the Object data after storing it on itself with * property name the (space-suffixed) string and (if the cache is larger than Expr.cacheLength) * deleting the oldest entry */ @@ -1300,11 +856,11 @@ function createCache() { function cache( key, value ) { // Use (key + " ") to avoid collision with native prototype properties (see Issue #157) - if ( keys.push( key += " " ) > Expr.cacheLength ) { + if ( keys.push( key + " " ) > Expr.cacheLength ) { // Only keep the most recent entries delete cache[ keys.shift() ]; } - return (cache[ key ] = value); + return (cache[ key + " " ] = value); } return cache; } @@ -1320,22 +876,22 @@ function markFunction( fn ) { /** * Support testing using an element - * @param {Function} fn Passed the created div and expects a boolean result + * @param {Function} fn Passed the created element and returns a boolean result */ function assert( fn ) { - var div = document.createElement("div"); + var el = document.createElement("fieldset"); try { - return !!fn( div ); + return !!fn( el ); } catch (e) { return false; } finally { // Remove from its parent by default - if ( div.parentNode ) { - div.parentNode.removeChild( div ); + if ( el.parentNode ) { + el.parentNode.removeChild( el ); } // release memory in IE - div = null; + el = null; } } @@ -1346,7 +902,7 @@ function assert( fn ) { */ function addHandle( attrs, handler ) { var arr = attrs.split("|"), - i = attrs.length; + i = arr.length; while ( i-- ) { Expr.attrHandle[ arr[i] ] = handler; @@ -1362,8 +918,7 @@ function addHandle( attrs, handler ) { function siblingCheck( a, b ) { var cur = b && a, diff = cur && a.nodeType === 1 && b.nodeType === 1 && - ( ~b.sourceIndex || MAX_NEGATIVE ) - - ( ~a.sourceIndex || MAX_NEGATIVE ); + a.sourceIndex - b.sourceIndex; // Use IE sourceIndex if available on both nodes if ( diff ) { @@ -1404,6 +959,62 @@ function createButtonPseudo( type ) { }; } +/** + * Returns a function to use in pseudos for :enabled/:disabled + * @param {Boolean} disabled true for :disabled; false 
for :enabled + */ +function createDisabledPseudo( disabled ) { + + // Known :disabled false positives: fieldset[disabled] > legend:nth-of-type(n+2) :can-disable + return function( elem ) { + + // Only certain elements can match :enabled or :disabled + // https://html.spec.whatwg.org/multipage/scripting.html#selector-enabled + // https://html.spec.whatwg.org/multipage/scripting.html#selector-disabled + if ( "form" in elem ) { + + // Check for inherited disabledness on relevant non-disabled elements: + // * listed form-associated elements in a disabled fieldset + // https://html.spec.whatwg.org/multipage/forms.html#category-listed + // https://html.spec.whatwg.org/multipage/forms.html#concept-fe-disabled + // * option elements in a disabled optgroup + // https://html.spec.whatwg.org/multipage/forms.html#concept-option-disabled + // All such elements have a "form" property. + if ( elem.parentNode && elem.disabled === false ) { + + // Option elements defer to a parent optgroup if present + if ( "label" in elem ) { + if ( "label" in elem.parentNode ) { + return elem.parentNode.disabled === disabled; + } else { + return elem.disabled === disabled; + } + } + + // Support: IE 6 - 11 + // Use the isDisabled shortcut property to check for disabled fieldset ancestors + return elem.isDisabled === disabled || + + // Where there is no isDisabled, check manually + /* jshint -W018 */ + elem.isDisabled !== !disabled && + disabledAncestor( elem ) === disabled; + } + + return elem.disabled === disabled; + + // Try to winnow out elements that can't be disabled before trusting the disabled property. + // Some victims get caught in our net (label, legend, menu, track), but it shouldn't + // even exist on them, let alone have a boolean value. + } else if ( "label" in elem ) { + return elem.disabled === disabled; + } + + // Remaining elements are neither :enabled nor :disabled + return false; + }; +} + /** * Returns a function to use in pseudos for positionals * @param {Function} fn @@ -1427,8 +1038,21 @@ function createPositionalPseudo( fn ) { } /** - * Detect xml + * Checks a node for validity as a Sizzle context + * @param {Element|Object=} context + * @returns {Element|Object|Boolean} The input node if acceptable, otherwise a falsy value + */ +function testContext( context ) { + return context && typeof context.getElementsByTagName !== "undefined" && context; +} + +// Expose support vars for convenience +support = Sizzle.support = {}; + +/** + * Detects XML nodes * @param {Element|Object} elem An element or a document + * @returns {Boolean} True iff elem is a non-HTML XML node */ isXML = Sizzle.isXML = function( elem ) { // documentElement is verified for cases where it doesn't yet exist @@ -1437,121 +1061,144 @@ isXML = Sizzle.isXML = function( elem ) { return documentElement ? documentElement.nodeName !== "HTML" : false; }; -// Expose support vars for convenience -support = Sizzle.support = {}; - /** * Sets document-related variables once based on the current document * @param {Element|Object} [doc] An element or document object to use to set the document * @returns {Object} Returns the current document */ setDocument = Sizzle.setDocument = function( node ) { - var doc = node ? node.ownerDocument || node : preferredDoc, - parent = doc.defaultView; + var hasCompare, subWindow, + doc = node ? 
node.ownerDocument || node : preferredDoc; - // If no document and documentElement is available, return + // Return early if doc is invalid or already selected if ( doc === document || doc.nodeType !== 9 || !doc.documentElement ) { return document; } - // Set our document + // Update global variables document = doc; - docElem = doc.documentElement; + docElem = document.documentElement; + documentIsHTML = !isXML( document ); - // Support tests - documentIsHTML = !isXML( doc ); + // Support: IE 9-11, Edge + // Accessing iframe documents after unload throws "permission denied" errors (jQuery #13936) + if ( preferredDoc !== document && + (subWindow = document.defaultView) && subWindow.top !== subWindow ) { - // Support: IE>8 - // If iframe document is assigned to "document" variable and if iframe has been reloaded, - // IE will throw "permission denied" error when accessing "document" variable, see jQuery #13936 - // IE6-8 do not support the defaultView property so parent will be undefined - if ( parent && parent.attachEvent && parent !== parent.top ) { - parent.attachEvent( "onbeforeunload", function() { - setDocument(); - }); + // Support: IE 11, Edge + if ( subWindow.addEventListener ) { + subWindow.addEventListener( "unload", unloadHandler, false ); + + // Support: IE 9 - 10 only + } else if ( subWindow.attachEvent ) { + subWindow.attachEvent( "onunload", unloadHandler ); + } } /* Attributes ---------------------------------------------------------------------- */ // Support: IE<8 - // Verify that getAttribute really returns attributes and not properties (excepting IE8 booleans) - support.attributes = assert(function( div ) { - div.className = "i"; - return !div.getAttribute("className"); + // Verify that getAttribute really returns attributes and not properties + // (excepting IE8 booleans) + support.attributes = assert(function( el ) { + el.className = "i"; + return !el.getAttribute("className"); }); /* getElement(s)By* ---------------------------------------------------------------------- */ // Check if getElementsByTagName("*") returns only elements - support.getElementsByTagName = assert(function( div ) { - div.appendChild( doc.createComment("") ); - return !div.getElementsByTagName("*").length; + support.getElementsByTagName = assert(function( el ) { + el.appendChild( document.createComment("") ); + return !el.getElementsByTagName("*").length; }); - // Check if getElementsByClassName can be trusted - support.getElementsByClassName = assert(function( div ) { - div.innerHTML = "
"; - - // Support: Safari<4 - // Catch class over-caching - div.firstChild.className = "i"; - // Support: Opera<10 - // Catch gEBCN failure to find non-leading classes - return div.getElementsByClassName("i").length === 2; - }); + // Support: IE<9 + support.getElementsByClassName = rnative.test( document.getElementsByClassName ); // Support: IE<10 // Check if getElementById returns elements by name // The broken getElementById methods don't pick up programmatically-set names, // so use a roundabout getElementsByName test - support.getById = assert(function( div ) { - docElem.appendChild( div ).id = expando; - return !doc.getElementsByName || !doc.getElementsByName( expando ).length; + support.getById = assert(function( el ) { + docElem.appendChild( el ).id = expando; + return !document.getElementsByName || !document.getElementsByName( expando ).length; }); - // ID find and filter + // ID filter and find if ( support.getById ) { - Expr.find["ID"] = function( id, context ) { - if ( typeof context.getElementById !== strundefined && documentIsHTML ) { - var m = context.getElementById( id ); - // Check parentNode to catch when Blackberry 4.6 returns - // nodes that are no longer in the document #6963 - return m && m.parentNode ? [m] : []; - } - }; Expr.filter["ID"] = function( id ) { var attrId = id.replace( runescape, funescape ); return function( elem ) { return elem.getAttribute("id") === attrId; }; }; + Expr.find["ID"] = function( id, context ) { + if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { + var elem = context.getElementById( id ); + return elem ? [ elem ] : []; + } + }; } else { - // Support: IE6/7 - // getElementById is not reliable as a find shortcut - delete Expr.find["ID"]; - Expr.filter["ID"] = function( id ) { var attrId = id.replace( runescape, funescape ); return function( elem ) { - var node = typeof elem.getAttributeNode !== strundefined && elem.getAttributeNode("id"); + var node = typeof elem.getAttributeNode !== "undefined" && + elem.getAttributeNode("id"); return node && node.value === attrId; }; }; + + // Support: IE 6 - 7 only + // getElementById is not reliable as a find shortcut + Expr.find["ID"] = function( id, context ) { + if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { + var node, i, elems, + elem = context.getElementById( id ); + + if ( elem ) { + + // Verify the id attribute + node = elem.getAttributeNode("id"); + if ( node && node.value === id ) { + return [ elem ]; + } + + // Fall back on getElementsByName + elems = context.getElementsByName( id ); + i = 0; + while ( (elem = elems[i++]) ) { + node = elem.getAttributeNode("id"); + if ( node && node.value === id ) { + return [ elem ]; + } + } + } + + return []; + } + }; } // Tag Expr.find["TAG"] = support.getElementsByTagName ? 
function( tag, context ) { - if ( typeof context.getElementsByTagName !== strundefined ) { + if ( typeof context.getElementsByTagName !== "undefined" ) { return context.getElementsByTagName( tag ); + + // DocumentFragment nodes don't have gEBTN + } else if ( support.qsa ) { + return context.querySelectorAll( tag ); } } : + function( tag, context ) { var elem, tmp = [], i = 0, + // By happy coincidence, a (broken) gEBTN appears on DocumentFragment nodes too results = context.getElementsByTagName( tag ); // Filter out possible comments @@ -1569,7 +1216,7 @@ setDocument = Sizzle.setDocument = function( node ) { // Class Expr.find["CLASS"] = support.getElementsByClassName && function( className, context ) { - if ( typeof context.getElementsByClassName !== strundefined && documentIsHTML ) { + if ( typeof context.getElementsByClassName !== "undefined" && documentIsHTML ) { return context.getElementsByClassName( className ); } }; @@ -1586,74 +1233,105 @@ setDocument = Sizzle.setDocument = function( node ) { // We allow this because of a bug in IE8/9 that throws an error // whenever `document.activeElement` is accessed on an iframe // So, we allow :focus to pass through QSA all the time to avoid the IE error - // See http://bugs.jquery.com/ticket/13378 + // See https://bugs.jquery.com/ticket/13378 rbuggyQSA = []; - if ( (support.qsa = rnative.test( doc.querySelectorAll )) ) { + if ( (support.qsa = rnative.test( document.querySelectorAll )) ) { // Build QSA regex // Regex strategy adopted from Diego Perini - assert(function( div ) { + assert(function( el ) { // Select is set to empty string on purpose // This is to test IE's treatment of not explicitly // setting a boolean content attribute, // since its presence should be enough - // http://bugs.jquery.com/ticket/12359 - div.innerHTML = ""; + // https://bugs.jquery.com/ticket/12359 + docElem.appendChild( el ).innerHTML = "" + + ""; + + // Support: IE8, Opera 11-12.16 + // Nothing should be selected when empty strings follow ^= or $= or *= + // The test attribute must be unknown in Opera but "safe" for WinRT + // https://msdn.microsoft.com/en-us/library/ie/hh465388.aspx#attribute_section + if ( el.querySelectorAll("[msallowcapture^='']").length ) { + rbuggyQSA.push( "[*^$]=" + whitespace + "*(?:''|\"\")" ); + } // Support: IE8 // Boolean attributes and "value" are not treated correctly - if ( !div.querySelectorAll("[selected]").length ) { + if ( !el.querySelectorAll("[selected]").length ) { rbuggyQSA.push( "\\[" + whitespace + "*(?:value|" + booleans + ")" ); } + // Support: Chrome<29, Android<4.4, Safari<7.0+, iOS<7.0+, PhantomJS<1.9.8+ + if ( !el.querySelectorAll( "[id~=" + expando + "-]" ).length ) { + rbuggyQSA.push("~="); + } + // Webkit/Opera - :checked should return selected option elements // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked // IE8 throws error here and will not see later tests - if ( !div.querySelectorAll(":checked").length ) { + if ( !el.querySelectorAll(":checked").length ) { rbuggyQSA.push(":checked"); } + + // Support: Safari 8+, iOS 8+ + // https://bugs.webkit.org/show_bug.cgi?id=136851 + // In-page `selector#id sibling-combinator selector` fails + if ( !el.querySelectorAll( "a#" + expando + "+*" ).length ) { + rbuggyQSA.push(".#.+[+~]"); + } }); - assert(function( div ) { + assert(function( el ) { + el.innerHTML = "" + + ""; - // Support: Opera 10-12/IE8 - // ^= $= *= and empty values - // Should not select anything // Support: Windows 8 Native Apps - // The type attribute is restricted during .innerHTML 
assignment - var input = doc.createElement("input"); + // The type and name attributes are restricted during .innerHTML assignment + var input = document.createElement("input"); input.setAttribute( "type", "hidden" ); - div.appendChild( input ).setAttribute( "t", "" ); + el.appendChild( input ).setAttribute( "name", "D" ); - if ( div.querySelectorAll("[t^='']").length ) { - rbuggyQSA.push( "[*^$]=" + whitespace + "*(?:''|\"\")" ); + // Support: IE8 + // Enforce case-sensitivity of name attribute + if ( el.querySelectorAll("[name=d]").length ) { + rbuggyQSA.push( "name" + whitespace + "*[*^$|!~]?=" ); } // FF 3.5 - :enabled/:disabled and hidden elements (hidden elements are still enabled) // IE8 throws error here and will not see later tests - if ( !div.querySelectorAll(":enabled").length ) { + if ( el.querySelectorAll(":enabled").length !== 2 ) { + rbuggyQSA.push( ":enabled", ":disabled" ); + } + + // Support: IE9-11+ + // IE's :disabled selector does not pick up the children of disabled fieldsets + docElem.appendChild( el ).disabled = true; + if ( el.querySelectorAll(":disabled").length !== 2 ) { rbuggyQSA.push( ":enabled", ":disabled" ); } // Opera 10-11 does not throw on post-comma invalid pseudos - div.querySelectorAll("*,:x"); + el.querySelectorAll("*,:x"); rbuggyQSA.push(",.*:"); }); } - if ( (support.matchesSelector = rnative.test( (matches = docElem.webkitMatchesSelector || + if ( (support.matchesSelector = rnative.test( (matches = docElem.matches || + docElem.webkitMatchesSelector || docElem.mozMatchesSelector || docElem.oMatchesSelector || docElem.msMatchesSelector) )) ) { - assert(function( div ) { + assert(function( el ) { // Check to see if it's possible to do matchesSelector // on a disconnected node (IE 9) - support.disconnectedMatch = matches.call( div, "div" ); + support.disconnectedMatch = matches.call( el, "*" ); // This should fail with an exception // Gecko does not error, returns false instead - matches.call( div, "[s!='']:x" ); + matches.call( el, "[s!='']:x" ); rbuggyMatches.push( "!=", pseudos ); }); } @@ -1663,11 +1341,12 @@ setDocument = Sizzle.setDocument = function( node ) { /* Contains ---------------------------------------------------------------------- */ + hasCompare = rnative.test( docElem.compareDocumentPosition ); // Element contains another - // Purposefully does not implement inclusive descendent + // Purposefully self-exclusive // As in, an element does not contain itself - contains = rnative.test( docElem.contains ) || docElem.compareDocumentPosition ? + contains = hasCompare || rnative.test( docElem.contains ) ? function( a, b ) { var adown = a.nodeType === 9 ? a.documentElement : a, bup = b && b.parentNode; @@ -1692,7 +1371,7 @@ setDocument = Sizzle.setDocument = function( node ) { ---------------------------------------------------------------------- */ // Document order sorting - sortOrder = docElem.compareDocumentPosition ? + sortOrder = hasCompare ? 
function( a, b ) { // Flag for duplicate removal @@ -1701,34 +1380,46 @@ setDocument = Sizzle.setDocument = function( node ) { return 0; } - var compare = b.compareDocumentPosition && a.compareDocumentPosition && a.compareDocumentPosition( b ); - + // Sort on method existence if only one input has compareDocumentPosition + var compare = !a.compareDocumentPosition - !b.compareDocumentPosition; if ( compare ) { - // Disconnected nodes - if ( compare & 1 || - (!support.sortDetached && b.compareDocumentPosition( a ) === compare) ) { - - // Choose the first element that is related to our preferred document - if ( a === doc || contains(preferredDoc, a) ) { - return -1; - } - if ( b === doc || contains(preferredDoc, b) ) { - return 1; - } - - // Maintain original order - return sortInput ? - ( indexOf.call( sortInput, a ) - indexOf.call( sortInput, b ) ) : - 0; - } - - return compare & 4 ? -1 : 1; + return compare; } - // Not directly comparable, sort on existence of method - return a.compareDocumentPosition ? -1 : 1; + // Calculate position if both inputs belong to the same document + compare = ( a.ownerDocument || a ) === ( b.ownerDocument || b ) ? + a.compareDocumentPosition( b ) : + + // Otherwise we know they are disconnected + 1; + + // Disconnected nodes + if ( compare & 1 || + (!support.sortDetached && b.compareDocumentPosition( a ) === compare) ) { + + // Choose the first element that is related to our preferred document + if ( a === document || a.ownerDocument === preferredDoc && contains(preferredDoc, a) ) { + return -1; + } + if ( b === document || b.ownerDocument === preferredDoc && contains(preferredDoc, b) ) { + return 1; + } + + // Maintain original order + return sortInput ? + ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : + 0; + } + + return compare & 4 ? -1 : 1; } : function( a, b ) { + // Exit early if the nodes are identical + if ( a === b ) { + hasDuplicate = true; + return 0; + } + var cur, i = 0, aup = a.parentNode, @@ -1736,19 +1427,14 @@ setDocument = Sizzle.setDocument = function( node ) { ap = [ a ], bp = [ b ]; - // Exit early if the nodes are identical - if ( a === b ) { - hasDuplicate = true; - return 0; - // Parentless nodes are either documents or disconnected - } else if ( !aup || !bup ) { - return a === doc ? -1 : - b === doc ? 1 : + if ( !aup || !bup ) { + return a === document ? -1 : + b === document ? 1 : aup ? -1 : bup ? 1 : sortInput ? 
- ( indexOf.call( sortInput, a ) - indexOf.call( sortInput, b ) ) : + ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : 0; // If the nodes are siblings, we can do a quick check @@ -1781,7 +1467,7 @@ setDocument = Sizzle.setDocument = function( node ) { 0; }; - return doc; + return document; }; Sizzle.matches = function( expr, elements ) { @@ -1798,6 +1484,7 @@ Sizzle.matchesSelector = function( elem, expr ) { expr = expr.replace( rattributeQuotes, "='$1']" ); if ( support.matchesSelector && documentIsHTML && + !compilerCache[ expr + " " ] && ( !rbuggyMatches || !rbuggyMatches.test( expr ) ) && ( !rbuggyQSA || !rbuggyQSA.test( expr ) ) ) { @@ -1811,10 +1498,10 @@ Sizzle.matchesSelector = function( elem, expr ) { elem.document && elem.document.nodeType !== 11 ) { return ret; } - } catch(e) {} + } catch (e) {} } - return Sizzle( expr, document, null, [elem] ).length > 0; + return Sizzle( expr, document, null, [ elem ] ).length > 0; }; Sizzle.contains = function( context, elem ) { @@ -1837,13 +1524,17 @@ Sizzle.attr = function( elem, name ) { fn( elem, name, !documentIsHTML ) : undefined; - return val === undefined ? + return val !== undefined ? + val : support.attributes || !documentIsHTML ? elem.getAttribute( name ) : (val = elem.getAttributeNode(name)) && val.specified ? val.value : - null : - val; + null; +}; + +Sizzle.escape = function( sel ) { + return (sel + "").replace( rcssescape, fcssescape ); }; Sizzle.error = function( msg ) { @@ -1876,6 +1567,10 @@ Sizzle.uniqueSort = function( results ) { } } + // Clear input after sorting to release objects + // See https://github.com/jquery/sizzle/pull/225 + sortInput = null; + return results; }; @@ -1891,13 +1586,13 @@ getText = Sizzle.getText = function( elem ) { if ( !nodeType ) { // If no nodeType, this is expected to be an array - for ( ; (node = elem[i]); i++ ) { + while ( (node = elem[i++]) ) { // Do not traverse comment nodes ret += getText( node ); } } else if ( nodeType === 1 || nodeType === 9 || nodeType === 11 ) { // Use textContent for elements - // innerText usage removed for consistency of new lines (see #11153) + // innerText usage removed for consistency of new lines (jQuery #11153) if ( typeof elem.textContent === "string" ) { return elem.textContent; } else { @@ -1939,7 +1634,7 @@ Expr = Sizzle.selectors = { match[1] = match[1].replace( runescape, funescape ); // Move the given value to match[3] whether quoted or unquoted - match[3] = ( match[4] || match[5] || "" ).replace( runescape, funescape ); + match[3] = ( match[3] || match[4] || match[5] || "" ).replace( runescape, funescape ); if ( match[2] === "~=" ) { match[3] = " " + match[3] + " "; @@ -1982,15 +1677,15 @@ Expr = Sizzle.selectors = { "PSEUDO": function( match ) { var excess, - unquoted = !match[5] && match[2]; + unquoted = !match[6] && match[2]; if ( matchExpr["CHILD"].test( match[0] ) ) { return null; } // Accept quoted arguments as-is - if ( match[3] && match[4] !== undefined ) { - match[2] = match[4]; + if ( match[3] ) { + match[2] = match[4] || match[5] || ""; // Strip excess characters from unquoted arguments } else if ( unquoted && rpseudo.test( unquoted ) && @@ -2026,7 +1721,7 @@ Expr = Sizzle.selectors = { return pattern || (pattern = new RegExp( "(^|" + whitespace + ")" + className + "(" + whitespace + "|$)" )) && classCache( className, function( elem ) { - return pattern.test( typeof elem.className === "string" && elem.className || typeof elem.getAttribute !== strundefined && elem.getAttribute("class") || "" ); + return pattern.test( typeof 
elem.className === "string" && elem.className || typeof elem.getAttribute !== "undefined" && elem.getAttribute("class") || "" ); }); }, @@ -2048,7 +1743,7 @@ Expr = Sizzle.selectors = { operator === "^=" ? check && result.indexOf( check ) === 0 : operator === "*=" ? check && result.indexOf( check ) > -1 : operator === "$=" ? check && result.slice( -check.length ) === check : - operator === "~=" ? ( " " + result + " " ).indexOf( check ) > -1 : + operator === "~=" ? ( " " + result.replace( rwhitespace, " " ) + " " ).indexOf( check ) > -1 : operator === "|=" ? result === check || result.slice( 0, check.length + 1 ) === check + "-" : false; }; @@ -2067,11 +1762,12 @@ Expr = Sizzle.selectors = { } : function( elem, context, xml ) { - var cache, outerCache, node, diff, nodeIndex, start, + var cache, uniqueCache, outerCache, node, nodeIndex, start, dir = simple !== forward ? "nextSibling" : "previousSibling", parent = elem.parentNode, name = ofType && elem.nodeName.toLowerCase(), - useCache = !xml && !ofType; + useCache = !xml && !ofType, + diff = false; if ( parent ) { @@ -2080,7 +1776,10 @@ Expr = Sizzle.selectors = { while ( dir ) { node = elem; while ( (node = node[ dir ]) ) { - if ( ofType ? node.nodeName.toLowerCase() === name : node.nodeType === 1 ) { + if ( ofType ? + node.nodeName.toLowerCase() === name : + node.nodeType === 1 ) { + return false; } } @@ -2094,11 +1793,21 @@ Expr = Sizzle.selectors = { // non-xml :nth-child(...) stores cache data on `parent` if ( forward && useCache ) { + // Seek `elem` from a previously-cached index - outerCache = parent[ expando ] || (parent[ expando ] = {}); - cache = outerCache[ type ] || []; - nodeIndex = cache[0] === dirruns && cache[1]; - diff = cache[0] === dirruns && cache[2]; + + // ...in a gzip-friendly way + node = parent; + outerCache = node[ expando ] || (node[ expando ] = {}); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ node.uniqueID ] || + (outerCache[ node.uniqueID ] = {}); + + cache = uniqueCache[ type ] || []; + nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; + diff = nodeIndex && cache[ 2 ]; node = nodeIndex && parent.childNodes[ nodeIndex ]; while ( (node = ++nodeIndex && node && node[ dir ] || @@ -2108,29 +1817,55 @@ Expr = Sizzle.selectors = { // When found, cache indexes on `parent` and break if ( node.nodeType === 1 && ++diff && node === elem ) { - outerCache[ type ] = [ dirruns, nodeIndex, diff ]; + uniqueCache[ type ] = [ dirruns, nodeIndex, diff ]; break; } } - // Use previously-cached element index if available - } else if ( useCache && (cache = (elem[ expando ] || (elem[ expando ] = {}))[ type ]) && cache[0] === dirruns ) { - diff = cache[1]; - - // xml :nth-child(...) or :nth-last-child(...) or :nth(-last)?-of-type(...) } else { - // Use the same loop as above to seek `elem` from the start - while ( (node = ++nodeIndex && node && node[ dir ] || - (diff = nodeIndex = 0) || start.pop()) ) { + // Use previously-cached element index if available + if ( useCache ) { + // ...in a gzip-friendly way + node = elem; + outerCache = node[ expando ] || (node[ expando ] = {}); - if ( ( ofType ? 
node.nodeName.toLowerCase() === name : node.nodeType === 1 ) && ++diff ) { - // Cache the index of each encountered element - if ( useCache ) { - (node[ expando ] || (node[ expando ] = {}))[ type ] = [ dirruns, diff ]; - } + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ node.uniqueID ] || + (outerCache[ node.uniqueID ] = {}); - if ( node === elem ) { - break; + cache = uniqueCache[ type ] || []; + nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; + diff = nodeIndex; + } + + // xml :nth-child(...) + // or :nth-last-child(...) or :nth(-last)?-of-type(...) + if ( diff === false ) { + // Use the same loop as above to seek `elem` from the start + while ( (node = ++nodeIndex && node && node[ dir ] || + (diff = nodeIndex = 0) || start.pop()) ) { + + if ( ( ofType ? + node.nodeName.toLowerCase() === name : + node.nodeType === 1 ) && + ++diff ) { + + // Cache the index of each encountered element + if ( useCache ) { + outerCache = node[ expando ] || (node[ expando ] = {}); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ node.uniqueID ] || + (outerCache[ node.uniqueID ] = {}); + + uniqueCache[ type ] = [ dirruns, diff ]; + } + + if ( node === elem ) { + break; + } } } } @@ -2168,7 +1903,7 @@ Expr = Sizzle.selectors = { matched = fn( seed, argument ), i = matched.length; while ( i-- ) { - idx = indexOf.call( seed, matched[i] ); + idx = indexOf( seed, matched[i] ); seed[ idx ] = !( matches[ idx ] = matched[i] ); } }) : @@ -2207,6 +1942,8 @@ Expr = Sizzle.selectors = { function( elem, context, xml ) { input[0] = elem; matcher( input, null, xml, results ); + // Don't keep the element (issue #299) + input[0] = null; return !results.pop(); }; }), @@ -2218,6 +1955,7 @@ Expr = Sizzle.selectors = { }), "contains": markFunction(function( text ) { + text = text.replace( runescape, funescape ); return function( elem ) { return ( elem.textContent || elem.innerText || getText( elem ) ).indexOf( text ) > -1; }; @@ -2266,13 +2004,8 @@ Expr = Sizzle.selectors = { }, // Boolean properties - "enabled": function( elem ) { - return elem.disabled === false; - }, - - "disabled": function( elem ) { - return elem.disabled === true; - }, + "enabled": createDisabledPseudo( false ), + "disabled": createDisabledPseudo( true ), "checked": function( elem ) { // In CSS3, :checked should return both checked and selected elements @@ -2294,12 +2027,11 @@ Expr = Sizzle.selectors = { // Contents "empty": function( elem ) { // http://www.w3.org/TR/selectors/#empty-pseudo - // :empty is only affected by element nodes and content nodes(including text(3), cdata(4)), - // not comment, processing instructions, or others - // Thanks to Diego Perini for the nodeName shortcut - // Greater than "@" means alpha characters (specifically not starting with "#" or "?") + // :empty is negated by element (1) or content nodes (text: 3; cdata: 4; entity ref: 5), + // but not by others (comment: 8; processing instruction: 7; etc.) 
+ // nodeType < 6 works because attributes (2) do not appear as children for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { - if ( elem.nodeName > "@" || elem.nodeType === 3 || elem.nodeType === 4 ) { + if ( elem.nodeType < 6 ) { return false; } } @@ -2326,11 +2058,12 @@ Expr = Sizzle.selectors = { "text": function( elem ) { var attr; - // IE6 and 7 will map elem.type to 'text' for new HTML5 types (search, etc) - // use getAttribute instead to test this case return elem.nodeName.toLowerCase() === "input" && elem.type === "text" && - ( (attr = elem.getAttribute("type")) == null || attr.toLowerCase() === elem.type ); + + // Support: IE<8 + // New HTML5 attribute values (e.g., "search") appear with elem.type === "text" + ( (attr = elem.getAttribute("type")) == null || attr.toLowerCase() === "text" ); }, // Position-in-collection @@ -2395,7 +2128,7 @@ function setFilters() {} setFilters.prototype = Expr.filters = Expr.pseudos; Expr.setFilters = new setFilters(); -function tokenize( selector, parseOnly ) { +tokenize = Sizzle.tokenize = function( selector, parseOnly ) { var matched, match, tokens, type, soFar, groups, preFilters, cached = tokenCache[ selector + " " ]; @@ -2416,7 +2149,7 @@ function tokenize( selector, parseOnly ) { // Don't consume trailing commas as valid soFar = soFar.slice( match[0].length ) || soFar; } - groups.push( tokens = [] ); + groups.push( (tokens = []) ); } matched = false; @@ -2460,7 +2193,7 @@ function tokenize( selector, parseOnly ) { Sizzle.error( selector ) : // Cache the tokens tokenCache( selector, groups ).slice( 0 ); -} +}; function toSelector( tokens ) { var i = 0, @@ -2474,7 +2207,9 @@ function toSelector( tokens ) { function addCombinator( matcher, combinator, base ) { var dir = combinator.dir, - checkNonElements = base && dir === "parentNode", + skip = combinator.next, + key = skip || dir, + checkNonElements = base && key === "parentNode", doneName = done++; return combinator.first ? 
@@ -2485,14 +2220,15 @@ function addCombinator( matcher, combinator, base ) { return matcher( elem, context, xml ); } } + return false; } : // Check against all ancestor/preceding elements function( elem, context, xml ) { - var data, cache, outerCache, - dirkey = dirruns + " " + doneName; + var oldCache, uniqueCache, outerCache, + newCache = [ dirruns, doneName ]; - // We can't set arbitrary data on XML nodes, so they don't benefit from dir caching + // We can't set arbitrary data on XML nodes, so they don't benefit from combinator caching if ( xml ) { while ( (elem = elem[ dir ]) ) { if ( elem.nodeType === 1 || checkNonElements ) { @@ -2505,20 +2241,31 @@ function addCombinator( matcher, combinator, base ) { while ( (elem = elem[ dir ]) ) { if ( elem.nodeType === 1 || checkNonElements ) { outerCache = elem[ expando ] || (elem[ expando ] = {}); - if ( (cache = outerCache[ dir ]) && cache[0] === dirkey ) { - if ( (data = cache[1]) === true || data === cachedruns ) { - return data === true; - } + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ elem.uniqueID ] || (outerCache[ elem.uniqueID ] = {}); + + if ( skip && skip === elem.nodeName.toLowerCase() ) { + elem = elem[ dir ] || elem; + } else if ( (oldCache = uniqueCache[ key ]) && + oldCache[ 0 ] === dirruns && oldCache[ 1 ] === doneName ) { + + // Assign to newCache so results back-propagate to previous elements + return (newCache[ 2 ] = oldCache[ 2 ]); } else { - cache = outerCache[ dir ] = [ dirkey ]; - cache[1] = matcher( elem, context, xml ) || cachedruns; - if ( cache[1] === true ) { + // Reuse newcache so results back-propagate to previous elements + uniqueCache[ key ] = newCache; + + // A match means we're done; a fail means we have to keep checking + if ( (newCache[ 2 ] = matcher( elem, context, xml )) ) { return true; } } } } } + return false; }; } @@ -2536,6 +2283,15 @@ function elementMatcher( matchers ) { matchers[0]; } +function multipleContexts( selector, contexts, results ) { + var i = 0, + len = contexts.length; + for ( ; i < len; i++ ) { + Sizzle( selector, contexts[i], results ); + } + return results; +} + function condense( unmatched, map, filter, context, xml ) { var elem, newUnmatched = [], @@ -2627,7 +2383,7 @@ function setMatcher( preFilter, selector, matcher, postFilter, postFinder, postS i = matcherOut.length; while ( i-- ) { if ( (elem = matcherOut[i]) && - (temp = postFinder ? indexOf.call( seed, elem ) : preMap[i]) > -1 ) { + (temp = postFinder ? indexOf( seed, elem ) : preMap[i]) > -1 ) { seed[temp] = !(results[temp] = elem); } @@ -2662,13 +2418,16 @@ function matcherFromTokens( tokens ) { return elem === checkContext; }, implicitRelative, true ), matchAnyContext = addCombinator( function( elem ) { - return indexOf.call( checkContext, elem ) > -1; + return indexOf( checkContext, elem ) > -1; }, implicitRelative, true ), matchers = [ function( elem, context, xml ) { - return ( !leadingRelative && ( xml || context !== outermostContext ) ) || ( + var ret = ( !leadingRelative && ( xml || context !== outermostContext ) ) || ( (checkContext = context).nodeType ? 
matchContext( elem, context, xml ) : matchAnyContext( elem, context, xml ) ); + // Avoid hanging onto element (issue #299) + checkContext = null; + return ret; } ]; for ( ; i < len; i++ ) { @@ -2706,42 +2465,43 @@ function matcherFromTokens( tokens ) { } function matcherFromGroupMatchers( elementMatchers, setMatchers ) { - // A counter to specify which element is currently being matched - var matcherCachedRuns = 0, - bySet = setMatchers.length > 0, + var bySet = setMatchers.length > 0, byElement = elementMatchers.length > 0, - superMatcher = function( seed, context, xml, results, expandContext ) { + superMatcher = function( seed, context, xml, results, outermost ) { var elem, j, matcher, - setMatched = [], matchedCount = 0, i = "0", unmatched = seed && [], - outermost = expandContext != null, + setMatched = [], contextBackup = outermostContext, - // We must always have either seed elements or context - elems = seed || byElement && Expr.find["TAG"]( "*", expandContext && context.parentNode || context ), + // We must always have either seed elements or outermost context + elems = seed || byElement && Expr.find["TAG"]( "*", outermost ), // Use integer dirruns iff this is the outermost matcher - dirrunsUnique = (dirruns += contextBackup == null ? 1 : Math.random() || 0.1); + dirrunsUnique = (dirruns += contextBackup == null ? 1 : Math.random() || 0.1), + len = elems.length; if ( outermost ) { - outermostContext = context !== document && context; - cachedruns = matcherCachedRuns; + outermostContext = context === document || context || outermost; } // Add elements passing elementMatchers directly to results - // Keep `i` a string if there are no elements so `matchedCount` will be "00" below - for ( ; (elem = elems[i]) != null; i++ ) { + // Support: IE<9, Safari + // Tolerate NodeList properties (IE: "length"; Safari: ) matching elements by id + for ( ; i !== len && (elem = elems[i]) != null; i++ ) { if ( byElement && elem ) { j = 0; + if ( !context && elem.ownerDocument !== document ) { + setDocument( elem ); + xml = !documentIsHTML; + } while ( (matcher = elementMatchers[j++]) ) { - if ( matcher( elem, context, xml ) ) { + if ( matcher( elem, context || document, xml) ) { results.push( elem ); break; } } if ( outermost ) { dirruns = dirrunsUnique; - cachedruns = ++matcherCachedRuns; } } @@ -2759,8 +2519,17 @@ function matcherFromGroupMatchers( elementMatchers, setMatchers ) { } } - // Apply set filters to unmatched elements + // `i` is now the count of elements visited above, and adding it to `matchedCount` + // makes the latter nonnegative. matchedCount += i; + + // Apply set filters to unmatched elements + // NOTE: This can be skipped if there are no unmatched elements (i.e., `matchedCount` + // equals `i`), unless we didn't visit _any_ elements in the above loop because we have + // no element matchers and no seed. + // Incrementing an initially-string "0" `i` allows `i` to remain a string only in that + // case, which will result in a "00" `matchedCount` that differs from `i` but is also + // numerically zero. 
if ( bySet && i !== matchedCount ) { j = 0; while ( (matcher = setMatchers[j++]) ) { @@ -2806,7 +2575,7 @@ function matcherFromGroupMatchers( elementMatchers, setMatchers ) { superMatcher; } -compile = Sizzle.compile = function( selector, group /* Internal Use Only */ ) { +compile = Sizzle.compile = function( selector, match /* Internal Use Only */ ) { var i, setMatchers = [], elementMatchers = [], @@ -2814,12 +2583,12 @@ compile = Sizzle.compile = function( selector, group /* Internal Use Only */ ) { if ( !cached ) { // Generate a function of recursive functions that can be used to check each element - if ( !group ) { - group = tokenize( selector ); + if ( !match ) { + match = tokenize( selector ); } - i = group.length; + i = match.length; while ( i-- ) { - cached = matcherFromTokens( group[i] ); + cached = matcherFromTokens( match[i] ); if ( cached[ expando ] ) { setMatchers.push( cached ); } else { @@ -2829,108 +2598,117 @@ compile = Sizzle.compile = function( selector, group /* Internal Use Only */ ) { // Cache the compiled function cached = compilerCache( selector, matcherFromGroupMatchers( elementMatchers, setMatchers ) ); + + // Save selector and tokenization + cached.selector = selector; } return cached; }; -function multipleContexts( selector, contexts, results ) { - var i = 0, - len = contexts.length; - for ( ; i < len; i++ ) { - Sizzle( selector, contexts[i], results ); - } - return results; -} - -function select( selector, context, results, seed ) { +/** + * A low-level selection function that works with Sizzle's compiled + * selector functions + * @param {String|Function} selector A selector or a pre-compiled + * selector function built with Sizzle.compile + * @param {Element} context + * @param {Array} [results] + * @param {Array} [seed] A set of elements to match against + */ +select = Sizzle.select = function( selector, context, results, seed ) { var i, tokens, token, type, find, - match = tokenize( selector ); + compiled = typeof selector === "function" && selector, + match = !seed && tokenize( (selector = compiled.selector || selector) ); - if ( !seed ) { - // Try to minimize operations if there is only one group - if ( match.length === 1 ) { + results = results || []; - // Take a shortcut and set the context if the root selector is an ID - tokens = match[0] = match[0].slice( 0 ); - if ( tokens.length > 2 && (token = tokens[0]).type === "ID" && - support.getById && context.nodeType === 9 && documentIsHTML && - Expr.relative[ tokens[1].type ] ) { + // Try to minimize operations if there is only one selector in the list and no seed + // (the latter of which guarantees us context) + if ( match.length === 1 ) { - context = ( Expr.find["ID"]( token.matches[0].replace(runescape, funescape), context ) || [] )[0]; - if ( !context ) { - return results; - } - selector = selector.slice( tokens.shift().value.length ); + // Reduce context if the leading compound selector is an ID + tokens = match[0] = match[0].slice( 0 ); + if ( tokens.length > 2 && (token = tokens[0]).type === "ID" && + context.nodeType === 9 && documentIsHTML && Expr.relative[ tokens[1].type ] ) { + + context = ( Expr.find["ID"]( token.matches[0].replace(runescape, funescape), context ) || [] )[0]; + if ( !context ) { + return results; + + // Precompiled matchers will still verify ancestry, so step up a level + } else if ( compiled ) { + context = context.parentNode; } - // Fetch a seed set for right-to-left matching - i = matchExpr["needsContext"].test( selector ) ? 
0 : tokens.length; - while ( i-- ) { - token = tokens[i]; + selector = selector.slice( tokens.shift().value.length ); + } - // Abort if we hit a combinator - if ( Expr.relative[ (type = token.type) ] ) { - break; - } - if ( (find = Expr.find[ type ]) ) { - // Search, expanding context for leading sibling combinators - if ( (seed = find( - token.matches[0].replace( runescape, funescape ), - rsibling.test( tokens[0].type ) && context.parentNode || context - )) ) { + // Fetch a seed set for right-to-left matching + i = matchExpr["needsContext"].test( selector ) ? 0 : tokens.length; + while ( i-- ) { + token = tokens[i]; - // If seed is empty or no tokens remain, we can return early - tokens.splice( i, 1 ); - selector = seed.length && toSelector( tokens ); - if ( !selector ) { - push.apply( results, seed ); - return results; - } + // Abort if we hit a combinator + if ( Expr.relative[ (type = token.type) ] ) { + break; + } + if ( (find = Expr.find[ type ]) ) { + // Search, expanding context for leading sibling combinators + if ( (seed = find( + token.matches[0].replace( runescape, funescape ), + rsibling.test( tokens[0].type ) && testContext( context.parentNode ) || context + )) ) { - break; + // If seed is empty or no tokens remain, we can return early + tokens.splice( i, 1 ); + selector = seed.length && toSelector( tokens ); + if ( !selector ) { + push.apply( results, seed ); + return results; } + + break; } } } } - // Compile and execute a filtering function + // Compile and execute a filtering function if one is not provided // Provide `match` to avoid retokenization if we modified the selector above - compile( selector, match )( + ( compiled || compile( selector, match ) )( seed, context, !documentIsHTML, results, - rsibling.test( selector ) + !context || rsibling.test( selector ) && testContext( context.parentNode ) || context ); return results; -} +}; // One-time assignments // Sort stability support.sortStable = expando.split("").sort( sortOrder ).join("") === expando; -// Support: Chrome<14 +// Support: Chrome 14-35+ // Always assume duplicates if they aren't passed to the comparison function -support.detectDuplicates = hasDuplicate; +support.detectDuplicates = !!hasDuplicate; // Initialize against the default document setDocument(); // Support: Webkit<537.32 - Safari 6.0.3/Chrome 25 (fixed in Chrome 27) // Detached nodes confoundingly follow *each other* -support.sortDetached = assert(function( div1 ) { +support.sortDetached = assert(function( el ) { // Should return 1, but returns 4 (following) - return div1.compareDocumentPosition( document.createElement("div") ) & 1; + return el.compareDocumentPosition( document.createElement("fieldset") ) & 1; }); // Support: IE<8 // Prevent attribute/property "interpolation" -// http://msdn.microsoft.com/en-us/library/ms536429%28VS.85%29.aspx -if ( !assert(function( div ) { - div.innerHTML = ""; - return div.firstChild.getAttribute("href") === "#" ; +// https://msdn.microsoft.com/en-us/library/ms536429%28VS.85%29.aspx +if ( !assert(function( el ) { + el.innerHTML = ""; + return el.firstChild.getAttribute("href") === "#" ; }) ) { addHandle( "type|href|height|width", function( elem, name, isXML ) { if ( !isXML ) { @@ -2941,10 +2719,10 @@ if ( !assert(function( div ) { // Support: IE<9 // Use defaultValue in place of getAttribute("value") -if ( !support.attributes || !assert(function( div ) { - div.innerHTML = ""; - div.firstChild.setAttribute( "value", "" ); - return div.firstChild.getAttribute( "value" ) === ""; +if ( !support.attributes || 
!assert(function( el ) { + el.innerHTML = ""; + el.firstChild.setAttribute( "value", "" ); + return el.firstChild.getAttribute( "value" ) === ""; }) ) { addHandle( "value", function( elem, name, isXML ) { if ( !isXML && elem.nodeName.toLowerCase() === "input" ) { @@ -2955,38 +2733,470 @@ if ( !support.attributes || !assert(function( div ) { // Support: IE<9 // Use getAttributeNode to fetch booleans when getAttribute lies -if ( !assert(function( div ) { - return div.getAttribute("disabled") == null; +if ( !assert(function( el ) { + return el.getAttribute("disabled") == null; }) ) { addHandle( booleans, function( elem, name, isXML ) { var val; if ( !isXML ) { - return (val = elem.getAttributeNode( name )) && val.specified ? - val.value : - elem[ name ] === true ? name.toLowerCase() : null; + return elem[ name ] === true ? name.toLowerCase() : + (val = elem.getAttributeNode( name )) && val.specified ? + val.value : + null; } }); } +return Sizzle; + +})( window ); + + + jQuery.find = Sizzle; jQuery.expr = Sizzle.selectors; -jQuery.expr[":"] = jQuery.expr.pseudos; -jQuery.unique = Sizzle.uniqueSort; + +// Deprecated +jQuery.expr[ ":" ] = jQuery.expr.pseudos; +jQuery.uniqueSort = jQuery.unique = Sizzle.uniqueSort; jQuery.text = Sizzle.getText; jQuery.isXMLDoc = Sizzle.isXML; jQuery.contains = Sizzle.contains; +jQuery.escapeSelector = Sizzle.escape; -})( window ); -// String to Object options format cache -var optionsCache = {}; -// Convert String-formatted options into Object-formatted ones and store in cache + +var dir = function( elem, dir, until ) { + var matched = [], + truncate = until !== undefined; + + while ( ( elem = elem[ dir ] ) && elem.nodeType !== 9 ) { + if ( elem.nodeType === 1 ) { + if ( truncate && jQuery( elem ).is( until ) ) { + break; + } + matched.push( elem ); + } + } + return matched; +}; + + +var siblings = function( n, elem ) { + var matched = []; + + for ( ; n; n = n.nextSibling ) { + if ( n.nodeType === 1 && n !== elem ) { + matched.push( n ); + } + } + + return matched; +}; + + +var rneedsContext = jQuery.expr.match.needsContext; + + + +function nodeName( elem, name ) { + + return elem.nodeName && elem.nodeName.toLowerCase() === name.toLowerCase(); + +}; +var rsingleTag = ( /^<([a-z][^\/\0>:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i ); + + + +// Implement the identical functionality for filter and not +function winnow( elements, qualifier, not ) { + if ( isFunction( qualifier ) ) { + return jQuery.grep( elements, function( elem, i ) { + return !!qualifier.call( elem, i, elem ) !== not; + } ); + } + + // Single element + if ( qualifier.nodeType ) { + return jQuery.grep( elements, function( elem ) { + return ( elem === qualifier ) !== not; + } ); + } + + // Arraylike of elements (jQuery, arguments, Array) + if ( typeof qualifier !== "string" ) { + return jQuery.grep( elements, function( elem ) { + return ( indexOf.call( qualifier, elem ) > -1 ) !== not; + } ); + } + + // Filtered directly for both simple and complex selectors + return jQuery.filter( qualifier, elements, not ); +} + +jQuery.filter = function( expr, elems, not ) { + var elem = elems[ 0 ]; + + if ( not ) { + expr = ":not(" + expr + ")"; + } + + if ( elems.length === 1 && elem.nodeType === 1 ) { + return jQuery.find.matchesSelector( elem, expr ) ? 
[ elem ] : []; + } + + return jQuery.find.matches( expr, jQuery.grep( elems, function( elem ) { + return elem.nodeType === 1; + } ) ); +}; + +jQuery.fn.extend( { + find: function( selector ) { + var i, ret, + len = this.length, + self = this; + + if ( typeof selector !== "string" ) { + return this.pushStack( jQuery( selector ).filter( function() { + for ( i = 0; i < len; i++ ) { + if ( jQuery.contains( self[ i ], this ) ) { + return true; + } + } + } ) ); + } + + ret = this.pushStack( [] ); + + for ( i = 0; i < len; i++ ) { + jQuery.find( selector, self[ i ], ret ); + } + + return len > 1 ? jQuery.uniqueSort( ret ) : ret; + }, + filter: function( selector ) { + return this.pushStack( winnow( this, selector || [], false ) ); + }, + not: function( selector ) { + return this.pushStack( winnow( this, selector || [], true ) ); + }, + is: function( selector ) { + return !!winnow( + this, + + // If this is a positional/relative selector, check membership in the returned set + // so $("p:first").is("p:last") won't return true for a doc with two "p". + typeof selector === "string" && rneedsContext.test( selector ) ? + jQuery( selector ) : + selector || [], + false + ).length; + } +} ); + + +// Initialize a jQuery object + + +// A central reference to the root jQuery(document) +var rootjQuery, + + // A simple way to check for HTML strings + // Prioritize #id over to avoid XSS via location.hash (#9521) + // Strict HTML recognition (#11290: must start with <) + // Shortcut simple #id case for speed + rquickExpr = /^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]+))$/, + + init = jQuery.fn.init = function( selector, context, root ) { + var match, elem; + + // HANDLE: $(""), $(null), $(undefined), $(false) + if ( !selector ) { + return this; + } + + // Method init() accepts an alternate rootjQuery + // so migrate can support jQuery.sub (gh-2101) + root = root || rootjQuery; + + // Handle HTML strings + if ( typeof selector === "string" ) { + if ( selector[ 0 ] === "<" && + selector[ selector.length - 1 ] === ">" && + selector.length >= 3 ) { + + // Assume that strings that start and end with <> are HTML and skip the regex check + match = [ null, selector, null ]; + + } else { + match = rquickExpr.exec( selector ); + } + + // Match html or make sure no context is specified for #id + if ( match && ( match[ 1 ] || !context ) ) { + + // HANDLE: $(html) -> $(array) + if ( match[ 1 ] ) { + context = context instanceof jQuery ? context[ 0 ] : context; + + // Option to run scripts is true for back-compat + // Intentionally let the error be thrown if parseHTML is not present + jQuery.merge( this, jQuery.parseHTML( + match[ 1 ], + context && context.nodeType ? 
context.ownerDocument || context : document, + true + ) ); + + // HANDLE: $(html, props) + if ( rsingleTag.test( match[ 1 ] ) && jQuery.isPlainObject( context ) ) { + for ( match in context ) { + + // Properties of context are called as methods if possible + if ( isFunction( this[ match ] ) ) { + this[ match ]( context[ match ] ); + + // ...and otherwise set as attributes + } else { + this.attr( match, context[ match ] ); + } + } + } + + return this; + + // HANDLE: $(#id) + } else { + elem = document.getElementById( match[ 2 ] ); + + if ( elem ) { + + // Inject the element directly into the jQuery object + this[ 0 ] = elem; + this.length = 1; + } + return this; + } + + // HANDLE: $(expr, $(...)) + } else if ( !context || context.jquery ) { + return ( context || root ).find( selector ); + + // HANDLE: $(expr, context) + // (which is just equivalent to: $(context).find(expr) + } else { + return this.constructor( context ).find( selector ); + } + + // HANDLE: $(DOMElement) + } else if ( selector.nodeType ) { + this[ 0 ] = selector; + this.length = 1; + return this; + + // HANDLE: $(function) + // Shortcut for document ready + } else if ( isFunction( selector ) ) { + return root.ready !== undefined ? + root.ready( selector ) : + + // Execute immediately if ready is not present + selector( jQuery ); + } + + return jQuery.makeArray( selector, this ); + }; + +// Give the init function the jQuery prototype for later instantiation +init.prototype = jQuery.fn; + +// Initialize central reference +rootjQuery = jQuery( document ); + + +var rparentsprev = /^(?:parents|prev(?:Until|All))/, + + // Methods guaranteed to produce a unique set when starting from a unique set + guaranteedUnique = { + children: true, + contents: true, + next: true, + prev: true + }; + +jQuery.fn.extend( { + has: function( target ) { + var targets = jQuery( target, this ), + l = targets.length; + + return this.filter( function() { + var i = 0; + for ( ; i < l; i++ ) { + if ( jQuery.contains( this, targets[ i ] ) ) { + return true; + } + } + } ); + }, + + closest: function( selectors, context ) { + var cur, + i = 0, + l = this.length, + matched = [], + targets = typeof selectors !== "string" && jQuery( selectors ); + + // Positional selectors never match, since there's no _selection_ context + if ( !rneedsContext.test( selectors ) ) { + for ( ; i < l; i++ ) { + for ( cur = this[ i ]; cur && cur !== context; cur = cur.parentNode ) { + + // Always skip document fragments + if ( cur.nodeType < 11 && ( targets ? + targets.index( cur ) > -1 : + + // Don't pass non-elements to Sizzle + cur.nodeType === 1 && + jQuery.find.matchesSelector( cur, selectors ) ) ) { + + matched.push( cur ); + break; + } + } + } + } + + return this.pushStack( matched.length > 1 ? jQuery.uniqueSort( matched ) : matched ); + }, + + // Determine the position of an element within the set + index: function( elem ) { + + // No argument, return index in parent + if ( !elem ) { + return ( this[ 0 ] && this[ 0 ].parentNode ) ? this.first().prevAll().length : -1; + } + + // Index in selector + if ( typeof elem === "string" ) { + return indexOf.call( jQuery( elem ), this[ 0 ] ); + } + + // Locate the position of the desired element + return indexOf.call( this, + + // If it receives a jQuery object, the first element is used + elem.jquery ? 
elem[ 0 ] : elem + ); + }, + + add: function( selector, context ) { + return this.pushStack( + jQuery.uniqueSort( + jQuery.merge( this.get(), jQuery( selector, context ) ) + ) + ); + }, + + addBack: function( selector ) { + return this.add( selector == null ? + this.prevObject : this.prevObject.filter( selector ) + ); + } +} ); + +function sibling( cur, dir ) { + while ( ( cur = cur[ dir ] ) && cur.nodeType !== 1 ) {} + return cur; +} + +jQuery.each( { + parent: function( elem ) { + var parent = elem.parentNode; + return parent && parent.nodeType !== 11 ? parent : null; + }, + parents: function( elem ) { + return dir( elem, "parentNode" ); + }, + parentsUntil: function( elem, i, until ) { + return dir( elem, "parentNode", until ); + }, + next: function( elem ) { + return sibling( elem, "nextSibling" ); + }, + prev: function( elem ) { + return sibling( elem, "previousSibling" ); + }, + nextAll: function( elem ) { + return dir( elem, "nextSibling" ); + }, + prevAll: function( elem ) { + return dir( elem, "previousSibling" ); + }, + nextUntil: function( elem, i, until ) { + return dir( elem, "nextSibling", until ); + }, + prevUntil: function( elem, i, until ) { + return dir( elem, "previousSibling", until ); + }, + siblings: function( elem ) { + return siblings( ( elem.parentNode || {} ).firstChild, elem ); + }, + children: function( elem ) { + return siblings( elem.firstChild ); + }, + contents: function( elem ) { + if ( nodeName( elem, "iframe" ) ) { + return elem.contentDocument; + } + + // Support: IE 9 - 11 only, iOS 7 only, Android Browser <=4.3 only + // Treat the template element as a regular one in browsers that + // don't support it. + if ( nodeName( elem, "template" ) ) { + elem = elem.content || elem; + } + + return jQuery.merge( [], elem.childNodes ); + } +}, function( name, fn ) { + jQuery.fn[ name ] = function( until, selector ) { + var matched = jQuery.map( this, fn, until ); + + if ( name.slice( -5 ) !== "Until" ) { + selector = until; + } + + if ( selector && typeof selector === "string" ) { + matched = jQuery.filter( selector, matched ); + } + + if ( this.length > 1 ) { + + // Remove duplicates + if ( !guaranteedUnique[ name ] ) { + jQuery.uniqueSort( matched ); + } + + // Reverse order for parents* and prev-derivatives + if ( rparentsprev.test( name ) ) { + matched.reverse(); + } + } + + return this.pushStack( matched ); + }; +} ); +var rnothtmlwhite = ( /[^\x20\t\r\n\f]+/g ); + + + +// Convert String-formatted options into Object-formatted ones function createOptions( options ) { - var object = optionsCache[ options ] = {}; - jQuery.each( options.match( core_rnotwhite ) || [], function( _, flag ) { + var object = {}; + jQuery.each( options.match( rnothtmlwhite ) || [], function( _, flag ) { object[ flag ] = true; - }); + } ); return object; } @@ -3017,156 +3227,186 @@ jQuery.Callbacks = function( options ) { // Convert options from String-formatted to Object-formatted if needed // (we check in cache first) options = typeof options === "string" ? 
- ( optionsCache[ options ] || createOptions( options ) ) : + createOptions( options ) : jQuery.extend( {}, options ); var // Flag to know if list is currently firing firing, - // Last fire value (for non-forgettable lists) + + // Last fire value for non-forgettable lists memory, + // Flag to know if list was already fired fired, - // End of the loop when firing - firingLength, - // Index of currently firing callback (modified by remove if needed) - firingIndex, - // First callback to fire (used internally by add and fireWith) - firingStart, + + // Flag to prevent firing + locked, + // Actual callback list list = [], - // Stack of fire calls for repeatable lists - stack = !options.once && [], + + // Queue of execution data for repeatable lists + queue = [], + + // Index of currently firing callback (modified by add/remove as needed) + firingIndex = -1, + // Fire callbacks - fire = function( data ) { - memory = options.memory && data; - fired = true; - firingIndex = firingStart || 0; - firingStart = 0; - firingLength = list.length; - firing = true; - for ( ; list && firingIndex < firingLength; firingIndex++ ) { - if ( list[ firingIndex ].apply( data[ 0 ], data[ 1 ] ) === false && options.stopOnFalse ) { - memory = false; // To prevent further calls using add - break; + fire = function() { + + // Enforce single-firing + locked = locked || options.once; + + // Execute callbacks for all pending executions, + // respecting firingIndex overrides and runtime changes + fired = firing = true; + for ( ; queue.length; firingIndex = -1 ) { + memory = queue.shift(); + while ( ++firingIndex < list.length ) { + + // Run callback and check for early termination + if ( list[ firingIndex ].apply( memory[ 0 ], memory[ 1 ] ) === false && + options.stopOnFalse ) { + + // Jump to end and forget the data so .add doesn't re-fire + firingIndex = list.length; + memory = false; + } } } + + // Forget the data if we're done with it + if ( !options.memory ) { + memory = false; + } + firing = false; - if ( list ) { - if ( stack ) { - if ( stack.length ) { - fire( stack.shift() ); - } - } else if ( memory ) { + + // Clean up if we're done firing for good + if ( locked ) { + + // Keep an empty list if we have data for future add calls + if ( memory ) { list = []; + + // Otherwise, this object is spent } else { - self.disable(); + list = ""; } } }, + // Actual Callbacks object self = { + // Add a callback or a collection of callbacks to the list add: function() { if ( list ) { - // First, we save the current length - var start = list.length; - (function add( args ) { + + // If we have memory from a past run, we should fire after adding + if ( memory && !firing ) { + firingIndex = list.length - 1; + queue.push( memory ); + } + + ( function add( args ) { jQuery.each( args, function( _, arg ) { - var type = jQuery.type( arg ); - if ( type === "function" ) { + if ( isFunction( arg ) ) { if ( !options.unique || !self.has( arg ) ) { list.push( arg ); } - } else if ( arg && arg.length && type !== "string" ) { + } else if ( arg && arg.length && toType( arg ) !== "string" ) { + // Inspect recursively add( arg ); } - }); - })( arguments ); - // Do we need to add the callbacks to the - // current firing batch? 
- if ( firing ) { - firingLength = list.length; - // With memory, if we're not firing then - // we should call right away - } else if ( memory ) { - firingStart = start; - fire( memory ); + } ); + } )( arguments ); + + if ( memory && !firing ) { + fire(); } } return this; }, + // Remove a callback from the list remove: function() { - if ( list ) { - jQuery.each( arguments, function( _, arg ) { - var index; - while( ( index = jQuery.inArray( arg, list, index ) ) > -1 ) { - list.splice( index, 1 ); - // Handle firing indexes - if ( firing ) { - if ( index <= firingLength ) { - firingLength--; - } - if ( index <= firingIndex ) { - firingIndex--; - } - } + jQuery.each( arguments, function( _, arg ) { + var index; + while ( ( index = jQuery.inArray( arg, list, index ) ) > -1 ) { + list.splice( index, 1 ); + + // Handle firing indexes + if ( index <= firingIndex ) { + firingIndex--; } - }); - } + } + } ); return this; }, + // Check if a given callback is in the list. // If no argument is given, return whether or not list has callbacks attached. has: function( fn ) { - return fn ? jQuery.inArray( fn, list ) > -1 : !!( list && list.length ); + return fn ? + jQuery.inArray( fn, list ) > -1 : + list.length > 0; }, + // Remove all callbacks from the list empty: function() { - list = []; - firingLength = 0; - return this; - }, - // Have the list do nothing anymore - disable: function() { - list = stack = memory = undefined; - return this; - }, - // Is it disabled? - disabled: function() { - return !list; - }, - // Lock the list in its current state - lock: function() { - stack = undefined; - if ( !memory ) { - self.disable(); + if ( list ) { + list = []; } return this; }, - // Is it locked? - locked: function() { - return !stack; + + // Disable .fire and .add + // Abort any current/pending executions + // Clear all callbacks and values + disable: function() { + locked = queue = []; + list = memory = ""; + return this; }, + disabled: function() { + return !list; + }, + + // Disable .fire + // Also disable .add unless we have memory (since it would have no effect) + // Abort any pending executions + lock: function() { + locked = queue = []; + if ( !memory && !firing ) { + list = memory = ""; + } + return this; + }, + locked: function() { + return !!locked; + }, + // Call all callbacks with the given context and arguments fireWith: function( context, args ) { - if ( list && ( !fired || stack ) ) { + if ( !locked ) { args = args || []; args = [ context, args.slice ? 
args.slice() : args ]; - if ( firing ) { - stack.push( args ); - } else { - fire( args ); + queue.push( args ); + if ( !firing ) { + fire(); } } return this; }, + // Call all the callbacks with the given arguments fire: function() { self.fireWith( this, arguments ); return this; }, + // To know if the callbacks have already been called at least once fired: function() { return !!fired; @@ -3175,14 +3415,61 @@ jQuery.Callbacks = function( options ) { return self; }; -jQuery.extend({ + + +function Identity( v ) { + return v; +} +function Thrower( ex ) { + throw ex; +} + +function adoptValue( value, resolve, reject, noValue ) { + var method; + + try { + + // Check for promise aspect first to privilege synchronous behavior + if ( value && isFunction( ( method = value.promise ) ) ) { + method.call( value ).done( resolve ).fail( reject ); + + // Other thenables + } else if ( value && isFunction( ( method = value.then ) ) ) { + method.call( value, resolve, reject ); + + // Other non-thenables + } else { + + // Control `resolve` arguments by letting Array#slice cast boolean `noValue` to integer: + // * false: [ value ].slice( 0 ) => resolve( value ) + // * true: [ value ].slice( 1 ) => resolve() + resolve.apply( undefined, [ value ].slice( noValue ) ); + } + + // For Promises/A+, convert exceptions into rejections + // Since jQuery.when doesn't unwrap thenables, we can skip the extra checks appearing in + // Deferred#then to conditionally suppress rejection. + } catch ( value ) { + + // Support: Android 4.0 only + // Strict mode functions invoked without .call/.apply get global-object context + reject.apply( undefined, [ value ] ); + } +} + +jQuery.extend( { Deferred: function( func ) { var tuples = [ - // action, add listener, listener list, final state - [ "resolve", "done", jQuery.Callbacks("once memory"), "resolved" ], - [ "reject", "fail", jQuery.Callbacks("once memory"), "rejected" ], - [ "notify", "progress", jQuery.Callbacks("memory") ] + + // action, add listener, callbacks, + // ... .then handlers, argument index, [final state] + [ "notify", "progress", jQuery.Callbacks( "memory" ), + jQuery.Callbacks( "memory" ), 2 ], + [ "resolve", "done", jQuery.Callbacks( "once memory" ), + jQuery.Callbacks( "once memory" ), 0, "resolved" ], + [ "reject", "fail", jQuery.Callbacks( "once memory" ), + jQuery.Callbacks( "once memory" ), 1, "rejected" ] ], state = "pending", promise = { @@ -3193,28 +3480,206 @@ jQuery.extend({ deferred.done( arguments ).fail( arguments ); return this; }, - then: function( /* fnDone, fnFail, fnProgress */ ) { - var fns = arguments; - return jQuery.Deferred(function( newDefer ) { - jQuery.each( tuples, function( i, tuple ) { - var action = tuple[ 0 ], - fn = jQuery.isFunction( fns[ i ] ) && fns[ i ]; - // deferred[ done | fail | progress ] for forwarding actions to newDefer - deferred[ tuple[1] ](function() { - var returned = fn && fn.apply( this, arguments ); - if ( returned && jQuery.isFunction( returned.promise ) ) { - returned.promise() - .done( newDefer.resolve ) - .fail( newDefer.reject ) - .progress( newDefer.notify ); - } else { - newDefer[ action + "With" ]( this === promise ? newDefer.promise() : this, fn ? 
[ returned ] : arguments ); - } - }); - }); - fns = null; - }).promise(); + "catch": function( fn ) { + return promise.then( null, fn ); }, + + // Keep pipe for back-compat + pipe: function( /* fnDone, fnFail, fnProgress */ ) { + var fns = arguments; + + return jQuery.Deferred( function( newDefer ) { + jQuery.each( tuples, function( i, tuple ) { + + // Map tuples (progress, done, fail) to arguments (done, fail, progress) + var fn = isFunction( fns[ tuple[ 4 ] ] ) && fns[ tuple[ 4 ] ]; + + // deferred.progress(function() { bind to newDefer or newDefer.notify }) + // deferred.done(function() { bind to newDefer or newDefer.resolve }) + // deferred.fail(function() { bind to newDefer or newDefer.reject }) + deferred[ tuple[ 1 ] ]( function() { + var returned = fn && fn.apply( this, arguments ); + if ( returned && isFunction( returned.promise ) ) { + returned.promise() + .progress( newDefer.notify ) + .done( newDefer.resolve ) + .fail( newDefer.reject ); + } else { + newDefer[ tuple[ 0 ] + "With" ]( + this, + fn ? [ returned ] : arguments + ); + } + } ); + } ); + fns = null; + } ).promise(); + }, + then: function( onFulfilled, onRejected, onProgress ) { + var maxDepth = 0; + function resolve( depth, deferred, handler, special ) { + return function() { + var that = this, + args = arguments, + mightThrow = function() { + var returned, then; + + // Support: Promises/A+ section 2.3.3.3.3 + // https://promisesaplus.com/#point-59 + // Ignore double-resolution attempts + if ( depth < maxDepth ) { + return; + } + + returned = handler.apply( that, args ); + + // Support: Promises/A+ section 2.3.1 + // https://promisesaplus.com/#point-48 + if ( returned === deferred.promise() ) { + throw new TypeError( "Thenable self-resolution" ); + } + + // Support: Promises/A+ sections 2.3.3.1, 3.5 + // https://promisesaplus.com/#point-54 + // https://promisesaplus.com/#point-75 + // Retrieve `then` only once + then = returned && + + // Support: Promises/A+ section 2.3.4 + // https://promisesaplus.com/#point-64 + // Only check objects and functions for thenability + ( typeof returned === "object" || + typeof returned === "function" ) && + returned.then; + + // Handle a returned thenable + if ( isFunction( then ) ) { + + // Special processors (notify) just wait for resolution + if ( special ) { + then.call( + returned, + resolve( maxDepth, deferred, Identity, special ), + resolve( maxDepth, deferred, Thrower, special ) + ); + + // Normal processors (resolve) also hook into progress + } else { + + // ...and disregard older resolution values + maxDepth++; + + then.call( + returned, + resolve( maxDepth, deferred, Identity, special ), + resolve( maxDepth, deferred, Thrower, special ), + resolve( maxDepth, deferred, Identity, + deferred.notifyWith ) + ); + } + + // Handle all other returned values + } else { + + // Only substitute handlers pass on context + // and multiple values (non-spec behavior) + if ( handler !== Identity ) { + that = undefined; + args = [ returned ]; + } + + // Process the value(s) + // Default process is resolve + ( special || deferred.resolveWith )( that, args ); + } + }, + + // Only normal processors (resolve) catch and reject exceptions + process = special ? 
+ mightThrow : + function() { + try { + mightThrow(); + } catch ( e ) { + + if ( jQuery.Deferred.exceptionHook ) { + jQuery.Deferred.exceptionHook( e, + process.stackTrace ); + } + + // Support: Promises/A+ section 2.3.3.3.4.1 + // https://promisesaplus.com/#point-61 + // Ignore post-resolution exceptions + if ( depth + 1 >= maxDepth ) { + + // Only substitute handlers pass on context + // and multiple values (non-spec behavior) + if ( handler !== Thrower ) { + that = undefined; + args = [ e ]; + } + + deferred.rejectWith( that, args ); + } + } + }; + + // Support: Promises/A+ section 2.3.3.3.1 + // https://promisesaplus.com/#point-57 + // Re-resolve promises immediately to dodge false rejection from + // subsequent errors + if ( depth ) { + process(); + } else { + + // Call an optional hook to record the stack, in case of exception + // since it's otherwise lost when execution goes async + if ( jQuery.Deferred.getStackHook ) { + process.stackTrace = jQuery.Deferred.getStackHook(); + } + window.setTimeout( process ); + } + }; + } + + return jQuery.Deferred( function( newDefer ) { + + // progress_handlers.add( ... ) + tuples[ 0 ][ 3 ].add( + resolve( + 0, + newDefer, + isFunction( onProgress ) ? + onProgress : + Identity, + newDefer.notifyWith + ) + ); + + // fulfilled_handlers.add( ... ) + tuples[ 1 ][ 3 ].add( + resolve( + 0, + newDefer, + isFunction( onFulfilled ) ? + onFulfilled : + Identity + ) + ); + + // rejected_handlers.add( ... ) + tuples[ 2 ][ 3 ].add( + resolve( + 0, + newDefer, + isFunction( onRejected ) ? + onRejected : + Thrower + ) + ); + } ).promise(); + }, + // Get a promise for this deferred // If obj is provided, the promise aspect is added to the object promise: function( obj ) { @@ -3223,34 +3688,60 @@ jQuery.extend({ }, deferred = {}; - // Keep pipe for back-compat - promise.pipe = promise.then; - // Add list-specific methods jQuery.each( tuples, function( i, tuple ) { var list = tuple[ 2 ], - stateString = tuple[ 3 ]; + stateString = tuple[ 5 ]; - // promise[ done | fail | progress ] = list.add - promise[ tuple[1] ] = list.add; + // promise.progress = list.add + // promise.done = list.add + // promise.fail = list.add + promise[ tuple[ 1 ] ] = list.add; // Handle state if ( stateString ) { - list.add(function() { - // state = [ resolved | rejected ] - state = stateString; + list.add( + function() { - // [ reject_list | resolve_list ].disable; progress_list.lock - }, tuples[ i ^ 1 ][ 2 ].disable, tuples[ 2 ][ 2 ].lock ); + // state = "resolved" (i.e., fulfilled) + // state = "rejected" + state = stateString; + }, + + // rejected_callbacks.disable + // fulfilled_callbacks.disable + tuples[ 3 - i ][ 2 ].disable, + + // rejected_handlers.disable + // fulfilled_handlers.disable + tuples[ 3 - i ][ 3 ].disable, + + // progress_callbacks.lock + tuples[ 0 ][ 2 ].lock, + + // progress_handlers.lock + tuples[ 0 ][ 3 ].lock + ); } - // deferred[ resolve | reject | notify ] - deferred[ tuple[0] ] = function() { - deferred[ tuple[0] + "With" ]( this === deferred ? promise : this, arguments ); + // progress_handlers.fire + // fulfilled_handlers.fire + // rejected_handlers.fire + list.add( tuple[ 3 ].fire ); + + // deferred.notify = function() { deferred.notifyWith(...) } + // deferred.resolve = function() { deferred.resolveWith(...) } + // deferred.reject = function() { deferred.rejectWith(...) } + deferred[ tuple[ 0 ] ] = function() { + deferred[ tuple[ 0 ] + "With" ]( this === deferred ? 
undefined : this, arguments ); return this; }; - deferred[ tuple[0] + "With" ] = list.fireWith; - }); + + // deferred.notifyWith = list.fireWith + // deferred.resolveWith = list.fireWith + // deferred.rejectWith = list.fireWith + deferred[ tuple[ 0 ] + "With" ] = list.fireWith; + } ); // Make the deferred a promise promise.promise( deferred ); @@ -3265,560 +3756,518 @@ jQuery.extend({ }, // Deferred helper - when: function( subordinate /* , ..., subordinateN */ ) { - var i = 0, - resolveValues = core_slice.call( arguments ), - length = resolveValues.length, + when: function( singleValue ) { + var - // the count of uncompleted subordinates - remaining = length !== 1 || ( subordinate && jQuery.isFunction( subordinate.promise ) ) ? length : 0, + // count of uncompleted subordinates + remaining = arguments.length, - // the master Deferred. If resolveValues consist of only a single Deferred, just use that. - deferred = remaining === 1 ? subordinate : jQuery.Deferred(), + // count of unprocessed arguments + i = remaining, - // Update function for both resolve and progress values - updateFunc = function( i, contexts, values ) { + // subordinate fulfillment data + resolveContexts = Array( i ), + resolveValues = slice.call( arguments ), + + // the master Deferred + master = jQuery.Deferred(), + + // subordinate callback factory + updateFunc = function( i ) { return function( value ) { - contexts[ i ] = this; - values[ i ] = arguments.length > 1 ? core_slice.call( arguments ) : value; - if( values === progressValues ) { - deferred.notifyWith( contexts, values ); - } else if ( !( --remaining ) ) { - deferred.resolveWith( contexts, values ); + resolveContexts[ i ] = this; + resolveValues[ i ] = arguments.length > 1 ? slice.call( arguments ) : value; + if ( !( --remaining ) ) { + master.resolveWith( resolveContexts, resolveValues ); } }; - }, + }; - progressValues, progressContexts, resolveContexts; + // Single- and empty arguments are adopted like Promise.resolve + if ( remaining <= 1 ) { + adoptValue( singleValue, master.done( updateFunc( i ) ).resolve, master.reject, + !remaining ); - // add listeners to Deferred subordinates; treat others as resolved - if ( length > 1 ) { - progressValues = new Array( length ); - progressContexts = new Array( length ); - resolveContexts = new Array( length ); - for ( ; i < length; i++ ) { - if ( resolveValues[ i ] && jQuery.isFunction( resolveValues[ i ].promise ) ) { - resolveValues[ i ].promise() - .done( updateFunc( i, resolveContexts, resolveValues ) ) - .fail( deferred.reject ) - .progress( updateFunc( i, progressContexts, progressValues ) ); - } else { - --remaining; - } + // Use .then() to unwrap secondary thenables (cf. gh-3000) + if ( master.state() === "pending" || + isFunction( resolveValues[ i ] && resolveValues[ i ].then ) ) { + + return master.then(); } } - // if we're not waiting on anything, resolve the master - if ( !remaining ) { - deferred.resolveWith( resolveContexts, resolveValues ); + // Multiple arguments are aggregated like Promise.all array elements + while ( i-- ) { + adoptValue( resolveValues[ i ], updateFunc( i ), master.reject ); } - return deferred.promise(); + return master.promise(); } -}); -jQuery.support = (function( support ) { +} ); - var all, a, input, select, fragment, opt, eventName, isSupported, i, - div = document.createElement("div"); - // Setup - div.setAttribute( "className", "t" ); - div.innerHTML = "
a"; +// These usually indicate a programmer mistake during development, +// warn about them ASAP rather than swallowing them by default. +var rerrorNames = /^(Eval|Internal|Range|Reference|Syntax|Type|URI)Error$/; - // Finish early in limited (non-browser) environments - all = div.getElementsByTagName("*") || []; - a = div.getElementsByTagName("a")[ 0 ]; - if ( !a || !a.style || !all.length ) { - return support; +jQuery.Deferred.exceptionHook = function( error, stack ) { + + // Support: IE 8 - 9 only + // Console exists when dev tools are open, which can happen at any time + if ( window.console && window.console.warn && error && rerrorNames.test( error.name ) ) { + window.console.warn( "jQuery.Deferred exception: " + error.message, error.stack, stack ); } +}; - // First batch of tests - select = document.createElement("select"); - opt = select.appendChild( document.createElement("option") ); - input = div.getElementsByTagName("input")[ 0 ]; - a.style.cssText = "top:1px;float:left;opacity:.5"; - // Test setAttribute on camelCase class. If it works, we need attrFixes when doing get/setAttribute (ie6/7) - support.getSetAttribute = div.className !== "t"; - // IE strips leading whitespace when .innerHTML is used - support.leadingWhitespace = div.firstChild.nodeType === 3; +jQuery.readyException = function( error ) { + window.setTimeout( function() { + throw error; + } ); +}; - // Make sure that tbody elements aren't automatically inserted - // IE will insert them into empty tables - support.tbody = !div.getElementsByTagName("tbody").length; - // Make sure that link elements get serialized correctly by innerHTML - // This requires a wrapper element in IE - support.htmlSerialize = !!div.getElementsByTagName("link").length; - // Get the style information from getAttribute - // (IE uses .cssText instead) - support.style = /top/.test( a.getAttribute("style") ); - // Make sure that URLs aren't manipulated - // (IE normalizes it by default) - support.hrefNormalized = a.getAttribute("href") === "/a"; +// The deferred used on DOM ready +var readyList = jQuery.Deferred(); - // Make sure that element opacity exists - // (IE uses filter instead) - // Use a regex to work around a WebKit issue. See #5145 - support.opacity = /^0.5/.test( a.style.opacity ); +jQuery.fn.ready = function( fn ) { - // Verify style float existence - // (IE uses styleFloat instead of cssFloat) - support.cssFloat = !!a.style.cssFloat; + readyList + .then( fn ) - // Check the default checkbox/radio value ("" on WebKit; "on" elsewhere) - support.checkOn = !!input.value; + // Wrap jQuery.readyException in a function so that the lookup + // happens at the time of error handling instead of callback + // registration. + .catch( function( error ) { + jQuery.readyException( error ); + } ); - // Make sure that a selected-by-default option has a working selected property. - // (WebKit defaults to false instead of true, IE too, if it's in an optgroup) - support.optSelected = opt.selected; + return this; +}; - // Tests for enctype support on a form (#6743) - support.enctype = !!document.createElement("form").enctype; +jQuery.extend( { - // Makes sure cloning an html5 element does not cause problems - // Where outerHTML is undefined, this still works - support.html5Clone = document.createElement("nav").cloneNode( true ).outerHTML !== "<:nav>"; + // Is the DOM ready to be used? Set to true once it occurs. 
+ isReady: false, - // Will be defined later - support.inlineBlockNeedsLayout = false; - support.shrinkWrapBlocks = false; - support.pixelPosition = false; - support.deleteExpando = true; - support.noCloneEvent = true; - support.reliableMarginRight = true; - support.boxSizingReliable = true; + // A counter to track how many items to wait for before + // the ready event fires. See #6781 + readyWait: 1, - // Make sure checked status is properly cloned - input.checked = true; - support.noCloneChecked = input.cloneNode( true ).checked; + // Handle when the DOM is ready + ready: function( wait ) { - // Make sure that the options inside disabled selects aren't marked as disabled - // (WebKit marks them as disabled) - select.disabled = true; - support.optDisabled = !opt.disabled; - - // Support: IE<9 - try { - delete div.test; - } catch( e ) { - support.deleteExpando = false; - } - - // Check if we can trust getAttribute("value") - input = document.createElement("input"); - input.setAttribute( "value", "" ); - support.input = input.getAttribute( "value" ) === ""; - - // Check if an input maintains its value after becoming a radio - input.value = "t"; - input.setAttribute( "type", "radio" ); - support.radioValue = input.value === "t"; - - // #11217 - WebKit loses check when the name is after the checked attribute - input.setAttribute( "checked", "t" ); - input.setAttribute( "name", "t" ); - - fragment = document.createDocumentFragment(); - fragment.appendChild( input ); - - // Check if a disconnected checkbox will retain its checked - // value of true after appended to the DOM (IE6/7) - support.appendChecked = input.checked; - - // WebKit doesn't clone checked state correctly in fragments - support.checkClone = fragment.cloneNode( true ).cloneNode( true ).lastChild.checked; - - // Support: IE<9 - // Opera does not clone events (and typeof div.attachEvent === undefined). - // IE9-10 clones events bound via attachEvent, but they don't trigger with .click() - if ( div.attachEvent ) { - div.attachEvent( "onclick", function() { - support.noCloneEvent = false; - }); - - div.cloneNode( true ).click(); - } - - // Support: IE<9 (lack submit/change bubble), Firefox 17+ (lack focusin event) - // Beware of CSP restrictions (https://developer.mozilla.org/en/Security/CSP) - for ( i in { submit: true, change: true, focusin: true }) { - div.setAttribute( eventName = "on" + i, "t" ); - - support[ i + "Bubbles" ] = eventName in window || div.attributes[ eventName ].expando === false; - } - - div.style.backgroundClip = "content-box"; - div.cloneNode( true ).style.backgroundClip = ""; - support.clearCloneStyle = div.style.backgroundClip === "content-box"; - - // Support: IE<9 - // Iteration over object's inherited properties before its own. - for ( i in jQuery( support ) ) { - break; - } - support.ownLast = i !== "0"; - - // Run tests that need a body at doc ready - jQuery(function() { - var container, marginDiv, tds, - divReset = "padding:0;margin:0;border:0;display:block;box-sizing:content-box;-moz-box-sizing:content-box;-webkit-box-sizing:content-box;", - body = document.getElementsByTagName("body")[0]; - - if ( !body ) { - // Return for frameset docs that don't have a body + // Abort if there are pending holds or we're already ready + if ( wait === true ? 
--jQuery.readyWait : jQuery.isReady ) {
 			return;
 		}

-		container = document.createElement("div");
-		container.style.cssText = "border:0;width:0;height:0;position:absolute;top:0;left:-9999px;margin-top:1px";
+		// Remember that the DOM is ready
+		jQuery.isReady = true;

-		body.appendChild( container ).appendChild( div );
-
-		// Support: IE8
-		// Check if table cells still have offsetWidth/Height when they are set
-		// to display:none and there are still other visible table cells in a
-		// table row; if so, offsetWidth/Height are not reliable for use when
-		// determining if an element has been hidden directly using
-		// display:none (it is still safe to use offsets if a parent element is
-		// hidden; don safety goggles and see bug #4512 for more information).
-		div.innerHTML = "<table><tr><td></td><td>t</td></tr></table>";
-		tds = div.getElementsByTagName("td");
-		tds[ 0 ].style.cssText = "padding:0;margin:0;border:0;display:none";
-		isSupported = ( tds[ 0 ].offsetHeight === 0 );
-
-		tds[ 0 ].style.display = "";
-		tds[ 1 ].style.display = "none";
-
-		// Support: IE8
-		// Check if empty table cells still have offsetWidth/Height
-		support.reliableHiddenOffsets = isSupported && ( tds[ 0 ].offsetHeight === 0 );
-
-		// Check box-sizing and margin behavior.
-		div.innerHTML = "";
-		div.style.cssText = "box-sizing:border-box;-moz-box-sizing:border-box;-webkit-box-sizing:border-box;padding:1px;border:1px;display:block;width:4px;margin-top:1%;position:absolute;top:1%;";
-
-		// Workaround failing boxSizing test due to offsetWidth returning wrong value
-		// with some non-1 values of body zoom, ticket #13543
-		jQuery.swap( body, body.style.zoom != null ? { zoom: 1 } : {}, function() {
-			support.boxSizing = div.offsetWidth === 4;
-		});
-
-		// Use window.getComputedStyle because jsdom on node.js will break without it.
-		if ( window.getComputedStyle ) {
-			support.pixelPosition = ( window.getComputedStyle( div, null ) || {} ).top !== "1%";
-			support.boxSizingReliable = ( window.getComputedStyle( div, null ) || { width: "4px" } ).width === "4px";
-
-			// Check if div with explicit width and no margin-right incorrectly
-			// gets computed margin-right based on width of container. (#3333)
-			// Fails in WebKit before Feb 2011 nightlies
-			// WebKit Bug 13343 - getComputedStyle returns wrong value for margin-right
-			marginDiv = div.appendChild( document.createElement("div") );
-			marginDiv.style.cssText = div.style.cssText = divReset;
-			marginDiv.style.marginRight = marginDiv.style.width = "0";
-			div.style.width = "1px";
-
-			support.reliableMarginRight =
-				!parseFloat( ( window.getComputedStyle( marginDiv, null ) || {} ).marginRight );
+		// If a normal DOM Ready event fired, decrement, and wait if need be
+		if ( wait !== true && --jQuery.readyWait > 0 ) {
+			return;
 		}

-		if ( typeof div.style.zoom !== core_strundefined ) {
-			// Support: IE<8
-			// Check if natively block-level elements act like inline-block
-			// elements when setting their display to 'inline' and giving
-			// them layout
-			div.innerHTML = "";
-			div.style.cssText = divReset + "width:1px;padding:1px;display:inline;zoom:1";
-			support.inlineBlockNeedsLayout = ( div.offsetWidth === 3 );
-
-			// Support: IE6
-			// Check if elements with layout shrink-wrap their children
-			div.style.display = "block";
-			div.innerHTML = "<div></div>
"; - div.firstChild.style.width = "5px"; - support.shrinkWrapBlocks = ( div.offsetWidth !== 3 ); - - if ( support.inlineBlockNeedsLayout ) { - // Prevent IE 6 from affecting layout for positioned elements #11048 - // Prevent IE from shrinking the body in IE 7 mode #12869 - // Support: IE<8 - body.style.zoom = 1; - } - } - - body.removeChild( container ); - - // Null elements to avoid leaks in IE - container = div = tds = marginDiv = null; - }); - - // Null elements to avoid leaks in IE - all = select = fragment = opt = a = input = null; - - return support; -})({}); - -var rbrace = /(?:\{[\s\S]*\}|\[[\s\S]*\])$/, - rmultiDash = /([A-Z])/g; - -function internalData( elem, name, data, pvt /* Internal Use Only */ ){ - if ( !jQuery.acceptData( elem ) ) { - return; + // If there are functions bound, to execute + readyList.resolveWith( document, [ jQuery ] ); } +} ); - var ret, thisCache, - internalKey = jQuery.expando, +jQuery.ready.then = readyList.then; - // We have to handle DOM nodes and JS objects differently because IE6-7 - // can't GC object references properly across the DOM-JS boundary - isNode = elem.nodeType, - - // Only DOM nodes need the global jQuery cache; JS object data is - // attached directly to the object so GC can occur automatically - cache = isNode ? jQuery.cache : elem, - - // Only defining an ID for JS objects if its cache already exists allows - // the code to shortcut on the same path as a DOM node with no cache - id = isNode ? elem[ internalKey ] : elem[ internalKey ] && internalKey; - - // Avoid doing any more work than we need to when trying to get data on an - // object that has no data at all - if ( (!id || !cache[id] || (!pvt && !cache[id].data)) && data === undefined && typeof name === "string" ) { - return; - } - - if ( !id ) { - // Only DOM nodes need a new unique ID for each element since their data - // ends up in the global cache - if ( isNode ) { - id = elem[ internalKey ] = core_deletedIds.pop() || jQuery.guid++; - } else { - id = internalKey; - } - } - - if ( !cache[ id ] ) { - // Avoid exposing jQuery metadata on plain JS objects when the object - // is serialized using JSON.stringify - cache[ id ] = isNode ? {} : { toJSON: jQuery.noop }; - } - - // An object can be passed to jQuery.data instead of a key/value pair; this gets - // shallow copied over onto the existing cache - if ( typeof name === "object" || typeof name === "function" ) { - if ( pvt ) { - cache[ id ] = jQuery.extend( cache[ id ], name ); - } else { - cache[ id ].data = jQuery.extend( cache[ id ].data, name ); - } - } - - thisCache = cache[ id ]; - - // jQuery data() is stored in a separate object inside the object's internal data - // cache in order to avoid key collisions between internal data and user-defined - // data. 
- if ( !pvt ) { - if ( !thisCache.data ) { - thisCache.data = {}; - } - - thisCache = thisCache.data; - } - - if ( data !== undefined ) { - thisCache[ jQuery.camelCase( name ) ] = data; - } - - // Check for both converted-to-camel and non-converted data property names - // If a data property was specified - if ( typeof name === "string" ) { - - // First Try to find as-is property data - ret = thisCache[ name ]; - - // Test for null|undefined property data - if ( ret == null ) { - - // Try to find the camelCased property - ret = thisCache[ jQuery.camelCase( name ) ]; - } - } else { - ret = thisCache; - } - - return ret; +// The ready event handler and self cleanup method +function completed() { + document.removeEventListener( "DOMContentLoaded", completed ); + window.removeEventListener( "load", completed ); + jQuery.ready(); } -function internalRemoveData( elem, name, pvt ) { - if ( !jQuery.acceptData( elem ) ) { - return; - } +// Catch cases where $(document).ready() is called +// after the browser event has already occurred. +// Support: IE <=9 - 10 only +// Older IE sometimes signals "interactive" too soon +if ( document.readyState === "complete" || + ( document.readyState !== "loading" && !document.documentElement.doScroll ) ) { - var thisCache, i, - isNode = elem.nodeType, + // Handle it asynchronously to allow scripts the opportunity to delay ready + window.setTimeout( jQuery.ready ); - // See jQuery.data for more information - cache = isNode ? jQuery.cache : elem, - id = isNode ? elem[ jQuery.expando ] : jQuery.expando; +} else { - // If there is already no cache entry for this object, there is no - // purpose in continuing - if ( !cache[ id ] ) { - return; - } + // Use the handy event callback + document.addEventListener( "DOMContentLoaded", completed ); - if ( name ) { + // A fallback to window.onload, that will always work + window.addEventListener( "load", completed ); +} - thisCache = pvt ? cache[ id ] : cache[ id ].data; - if ( thisCache ) { - // Support array or space separated string names for data keys - if ( !jQuery.isArray( name ) ) { - // try the string as a key before any manipulation - if ( name in thisCache ) { - name = [ name ]; - } else { +// Multifunctional method to get and set values of a collection +// The value/s can optionally be executed if it's a function +var access = function( elems, fn, key, value, chainable, emptyGet, raw ) { + var i = 0, + len = elems.length, + bulk = key == null; - // split the camel cased version by spaces unless a key with the spaces exists - name = jQuery.camelCase( name ); - if ( name in thisCache ) { - name = [ name ]; - } else { - name = name.split(" "); - } - } + // Sets many values + if ( toType( key ) === "object" ) { + chainable = true; + for ( i in key ) { + access( elems, fn, i, key[ i ], true, emptyGet, raw ); + } + + // Sets one value + } else if ( value !== undefined ) { + chainable = true; + + if ( !isFunction( value ) ) { + raw = true; + } + + if ( bulk ) { + + // Bulk operations run against the entire set + if ( raw ) { + fn.call( elems, value ); + fn = null; + + // ...except when executing function values } else { - // If "name" is an array of keys... - // When data is initially created, via ("key", "val") signature, - // keys will be converted to camelCase. - // Since there is no way to tell _how_ a key was added, remove - // both plain key and camelCase key. #12786 - // This will only penalize the array argument path. 
- name = name.concat( jQuery.map( name, jQuery.camelCase ) ); + bulk = fn; + fn = function( elem, key, value ) { + return bulk.call( jQuery( elem ), value ); + }; } + } - i = name.length; - while ( i-- ) { - delete thisCache[ name[i] ]; - } - - // If there is no data left in the cache, we want to continue - // and let the cache object itself get destroyed - if ( pvt ? !isEmptyDataObject(thisCache) : !jQuery.isEmptyObject(thisCache) ) { - return; + if ( fn ) { + for ( ; i < len; i++ ) { + fn( + elems[ i ], key, raw ? + value : + value.call( elems[ i ], i, fn( elems[ i ], key ) ) + ); } } } - // See jQuery.data for more information - if ( !pvt ) { - delete cache[ id ].data; - - // Don't destroy the parent cache unless the internal data object - // had been the only thing left in it - if ( !isEmptyDataObject( cache[ id ] ) ) { - return; - } + if ( chainable ) { + return elems; } - // Destroy the cache - if ( isNode ) { - jQuery.cleanData( [ elem ], true ); - - // Use delete when supported for expandos or `cache` is not a window per isWindow (#10080) - /* jshint eqeqeq: false */ - } else if ( jQuery.support.deleteExpando || cache != cache.window ) { - /* jshint eqeqeq: true */ - delete cache[ id ]; - - // When all else fails, null - } else { - cache[ id ] = null; + // Gets + if ( bulk ) { + return fn.call( elems ); } + + return len ? fn( elems[ 0 ], key ) : emptyGet; +}; + + +// Matches dashed string for camelizing +var rmsPrefix = /^-ms-/, + rdashAlpha = /-([a-z])/g; + +// Used by camelCase as callback to replace() +function fcamelCase( all, letter ) { + return letter.toUpperCase(); } -jQuery.extend({ - cache: {}, +// Convert dashed to camelCase; used by the css and data modules +// Support: IE <=9 - 11, Edge 12 - 15 +// Microsoft forgot to hump their vendor prefix (#9572) +function camelCase( string ) { + return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase ); +} +var acceptData = function( owner ) { - // The following elements throw uncatchable exceptions if you - // attempt to add expando properties to them. - noData: { - "applet": true, - "embed": true, - // Ban all objects except for Flash (which handle expandos) - "object": "clsid:D27CDB6E-AE6D-11cf-96B8-444553540000" + // Accepts only: + // - Node + // - Node.ELEMENT_NODE + // - Node.DOCUMENT_NODE + // - Object + // - Any + return owner.nodeType === 1 || owner.nodeType === 9 || !( +owner.nodeType ); +}; + + + + +function Data() { + this.expando = jQuery.expando + Data.uid++; +} + +Data.uid = 1; + +Data.prototype = { + + cache: function( owner ) { + + // Check if the owner object already has a cache + var value = owner[ this.expando ]; + + // If not, create one + if ( !value ) { + value = {}; + + // We can accept data for non-element nodes in modern browsers, + // but we should not, see #8335. + // Always return an empty object. 
+ if ( acceptData( owner ) ) { + + // If it is a node unlikely to be stringify-ed or looped over + // use plain assignment + if ( owner.nodeType ) { + owner[ this.expando ] = value; + + // Otherwise secure it in a non-enumerable property + // configurable must be true to allow the property to be + // deleted when data is removed + } else { + Object.defineProperty( owner, this.expando, { + value: value, + configurable: true + } ); + } + } + } + + return value; }, + set: function( owner, data, value ) { + var prop, + cache = this.cache( owner ); + // Handle: [ owner, key, value ] args + // Always use camelCase key (gh-2257) + if ( typeof data === "string" ) { + cache[ camelCase( data ) ] = value; + + // Handle: [ owner, { properties } ] args + } else { + + // Copy the properties one-by-one to the cache object + for ( prop in data ) { + cache[ camelCase( prop ) ] = data[ prop ]; + } + } + return cache; + }, + get: function( owner, key ) { + return key === undefined ? + this.cache( owner ) : + + // Always use camelCase key (gh-2257) + owner[ this.expando ] && owner[ this.expando ][ camelCase( key ) ]; + }, + access: function( owner, key, value ) { + + // In cases where either: + // + // 1. No key was specified + // 2. A string key was specified, but no value provided + // + // Take the "read" path and allow the get method to determine + // which value to return, respectively either: + // + // 1. The entire cache object + // 2. The data stored at the key + // + if ( key === undefined || + ( ( key && typeof key === "string" ) && value === undefined ) ) { + + return this.get( owner, key ); + } + + // When the key is not a string, or both a key and value + // are specified, set or extend (existing objects) with either: + // + // 1. An object of properties + // 2. A key and value + // + this.set( owner, key, value ); + + // Since the "set" path can have two possible entry points + // return the expected data based on which path was taken[*] + return value !== undefined ? value : key; + }, + remove: function( owner, key ) { + var i, + cache = owner[ this.expando ]; + + if ( cache === undefined ) { + return; + } + + if ( key !== undefined ) { + + // Support array or space separated string of keys + if ( Array.isArray( key ) ) { + + // If key is an array of keys... + // We always set camelCase keys, so remove that. + key = key.map( camelCase ); + } else { + key = camelCase( key ); + + // If a key with the spaces exists, use it. + // Otherwise, create an array by matching non-whitespace + key = key in cache ? + [ key ] : + ( key.match( rnothtmlwhite ) || [] ); + } + + i = key.length; + + while ( i-- ) { + delete cache[ key[ i ] ]; + } + } + + // Remove the expando if there's no more data + if ( key === undefined || jQuery.isEmptyObject( cache ) ) { + + // Support: Chrome <=35 - 45 + // Webkit & Blink performance suffers when deleting properties + // from DOM nodes, so set to undefined instead + // https://bugs.chromium.org/p/chromium/issues/detail?id=378607 (bug restricted) + if ( owner.nodeType ) { + owner[ this.expando ] = undefined; + } else { + delete owner[ this.expando ]; + } + } + }, + hasData: function( owner ) { + var cache = owner[ this.expando ]; + return cache !== undefined && !jQuery.isEmptyObject( cache ); + } +}; +var dataPriv = new Data(); + +var dataUser = new Data(); + + + +// Implementation Summary +// +// 1. Enforce API surface and semantic compatibility with 1.9.x branch +// 2. Improve the module's maintainability by reducing the storage +// paths to a single mechanism. +// 3. 
Use the same single mechanism to support "private" and "user" data. +// 4. _Never_ expose "private" data to user code (TODO: Drop _data, _removeData) +// 5. Avoid exposing implementation details on user objects (eg. expando properties) +// 6. Provide a clear path for implementation upgrade to WeakMap in 2014 + +var rbrace = /^(?:\{[\w\W]*\}|\[[\w\W]*\])$/, + rmultiDash = /[A-Z]/g; + +function getData( data ) { + if ( data === "true" ) { + return true; + } + + if ( data === "false" ) { + return false; + } + + if ( data === "null" ) { + return null; + } + + // Only convert to a number if it doesn't change the string + if ( data === +data + "" ) { + return +data; + } + + if ( rbrace.test( data ) ) { + return JSON.parse( data ); + } + + return data; +} + +function dataAttr( elem, key, data ) { + var name; + + // If nothing was found internally, try to fetch any + // data from the HTML5 data-* attribute + if ( data === undefined && elem.nodeType === 1 ) { + name = "data-" + key.replace( rmultiDash, "-$&" ).toLowerCase(); + data = elem.getAttribute( name ); + + if ( typeof data === "string" ) { + try { + data = getData( data ); + } catch ( e ) {} + + // Make sure we set the data so it isn't changed later + dataUser.set( elem, key, data ); + } else { + data = undefined; + } + } + return data; +} + +jQuery.extend( { hasData: function( elem ) { - elem = elem.nodeType ? jQuery.cache[ elem[jQuery.expando] ] : elem[ jQuery.expando ]; - return !!elem && !isEmptyDataObject( elem ); + return dataUser.hasData( elem ) || dataPriv.hasData( elem ); }, data: function( elem, name, data ) { - return internalData( elem, name, data ); + return dataUser.access( elem, name, data ); }, removeData: function( elem, name ) { - return internalRemoveData( elem, name ); + dataUser.remove( elem, name ); }, - // For internal use only. + // TODO: Now that all calls to _data and _removeData have been replaced + // with direct calls to dataPriv methods, these can be deprecated. _data: function( elem, name, data ) { - return internalData( elem, name, data, true ); + return dataPriv.access( elem, name, data ); }, _removeData: function( elem, name ) { - return internalRemoveData( elem, name, true ); - }, - - // A method for determining if a DOM node can handle the data expando - acceptData: function( elem ) { - // Do not set data on non-element because it will not be cleared (#8335). 
- if ( elem.nodeType && elem.nodeType !== 1 && elem.nodeType !== 9 ) { - return false; - } - - var noData = elem.nodeName && jQuery.noData[ elem.nodeName.toLowerCase() ]; - - // nodes accept data unless otherwise specified; rejection can be conditional - return !noData || noData !== true && elem.getAttribute("classid") === noData; + dataPriv.remove( elem, name ); } -}); +} ); -jQuery.fn.extend({ +jQuery.fn.extend( { data: function( key, value ) { - var attrs, name, - data = null, - i = 0, - elem = this[0]; - - // Special expections of .data basically thwart jQuery.access, - // so implement the relevant behavior ourselves + var i, name, data, + elem = this[ 0 ], + attrs = elem && elem.attributes; // Gets all values if ( key === undefined ) { if ( this.length ) { - data = jQuery.data( elem ); + data = dataUser.get( elem ); - if ( elem.nodeType === 1 && !jQuery._data( elem, "parsedAttrs" ) ) { - attrs = elem.attributes; - for ( ; i < attrs.length; i++ ) { - name = attrs[i].name; + if ( elem.nodeType === 1 && !dataPriv.get( elem, "hasDataAttrs" ) ) { + i = attrs.length; + while ( i-- ) { - if ( name.indexOf("data-") === 0 ) { - name = jQuery.camelCase( name.slice(5) ); - - dataAttr( elem, name, data[ name ] ); + // Support: IE 11 only + // The attrs elements can be null (#14894) + if ( attrs[ i ] ) { + name = attrs[ i ].name; + if ( name.indexOf( "data-" ) === 0 ) { + name = camelCase( name.slice( 5 ) ); + dataAttr( elem, name, data[ name ] ); + } } } - jQuery._data( elem, "parsedAttrs", true ); + dataPriv.set( elem, "hasDataAttrs", true ); } } @@ -3827,89 +4276,68 @@ jQuery.fn.extend({ // Sets multiple values if ( typeof key === "object" ) { - return this.each(function() { - jQuery.data( this, key ); - }); + return this.each( function() { + dataUser.set( this, key ); + } ); } - return arguments.length > 1 ? + return access( this, function( value ) { + var data; - // Sets one value - this.each(function() { - jQuery.data( this, key, value ); - }) : + // The calling jQuery object (element matches) is not empty + // (and therefore has an element appears at this[ 0 ]) and the + // `value` parameter was not undefined. An empty jQuery object + // will result in `undefined` for elem = this[ 0 ] which will + // throw an exception if an attempt to read a data cache is made. + if ( elem && value === undefined ) { - // Gets one value - // Try to fetch any internally stored data first - elem ? dataAttr( elem, key, jQuery.data( elem, key ) ) : null; + // Attempt to get data from the cache + // The key will always be camelCased in Data + data = dataUser.get( elem, key ); + if ( data !== undefined ) { + return data; + } + + // Attempt to "discover" the data in + // HTML5 custom data-* attrs + data = dataAttr( elem, key ); + if ( data !== undefined ) { + return data; + } + + // We tried really hard, but the data doesn't exist. + return; + } + + // Set the data... 
+ this.each( function() { + + // We always store the camelCased key + dataUser.set( this, key, value ); + } ); + }, null, value, arguments.length > 1, null, true ); }, removeData: function( key ) { - return this.each(function() { - jQuery.removeData( this, key ); - }); + return this.each( function() { + dataUser.remove( this, key ); + } ); } -}); +} ); -function dataAttr( elem, key, data ) { - // If nothing was found internally, try to fetch any - // data from the HTML5 data-* attribute - if ( data === undefined && elem.nodeType === 1 ) { - var name = "data-" + key.replace( rmultiDash, "-$1" ).toLowerCase(); - - data = elem.getAttribute( name ); - - if ( typeof data === "string" ) { - try { - data = data === "true" ? true : - data === "false" ? false : - data === "null" ? null : - // Only convert to a number if it doesn't change the string - +data + "" === data ? +data : - rbrace.test( data ) ? jQuery.parseJSON( data ) : - data; - } catch( e ) {} - - // Make sure we set the data so it isn't changed later - jQuery.data( elem, key, data ); - - } else { - data = undefined; - } - } - - return data; -} - -// checks a cache object for emptiness -function isEmptyDataObject( obj ) { - var name; - for ( name in obj ) { - - // if the public data object is empty, the private is still empty - if ( name === "data" && jQuery.isEmptyObject( obj[name] ) ) { - continue; - } - if ( name !== "toJSON" ) { - return false; - } - } - - return true; -} -jQuery.extend({ +jQuery.extend( { queue: function( elem, type, data ) { var queue; if ( elem ) { type = ( type || "fx" ) + "queue"; - queue = jQuery._data( elem, type ); + queue = dataPriv.get( elem, type ); // Speed up dequeue by getting out quickly if this is just a lookup if ( data ) { - if ( !queue || jQuery.isArray(data) ) { - queue = jQuery._data( elem, type, jQuery.makeArray(data) ); + if ( !queue || Array.isArray( data ) ) { + queue = dataPriv.access( elem, type, jQuery.makeArray( data ) ); } else { queue.push( data ); } @@ -3943,7 +4371,7 @@ jQuery.extend({ queue.unshift( "inprogress" ); } - // clear up the last queue stop function + // Clear up the last queue stop function delete hooks.stop; fn.call( elem, next, hooks ); } @@ -3953,19 +4381,18 @@ jQuery.extend({ } }, - // not intended for public consumption - generates a queueHooks object, or returns the current one + // Not public - generate a queueHooks object, or return the current one _queueHooks: function( elem, type ) { var key = type + "queueHooks"; - return jQuery._data( elem, key ) || jQuery._data( elem, key, { - empty: jQuery.Callbacks("once memory").add(function() { - jQuery._removeData( elem, type + "queue" ); - jQuery._removeData( elem, key ); - }) - }); + return dataPriv.get( elem, key ) || dataPriv.access( elem, key, { + empty: jQuery.Callbacks( "once memory" ).add( function() { + dataPriv.remove( elem, [ type + "queue", key ] ); + } ) + } ); } -}); +} ); -jQuery.fn.extend({ +jQuery.fn.extend( { queue: function( type, data ) { var setter = 2; @@ -3976,43 +4403,31 @@ jQuery.fn.extend({ } if ( arguments.length < setter ) { - return jQuery.queue( this[0], type ); + return jQuery.queue( this[ 0 ], type ); } return data === undefined ? 
this : - this.each(function() { + this.each( function() { var queue = jQuery.queue( this, type, data ); - // ensure a hooks for this queue + // Ensure a hooks for this queue jQuery._queueHooks( this, type ); - if ( type === "fx" && queue[0] !== "inprogress" ) { + if ( type === "fx" && queue[ 0 ] !== "inprogress" ) { jQuery.dequeue( this, type ); } - }); + } ); }, dequeue: function( type ) { - return this.each(function() { + return this.each( function() { jQuery.dequeue( this, type ); - }); - }, - // Based off of the plugin by Clint Helfers, with permission. - // http://blindsignals.com/index.php/2009/07/jquery-delay/ - delay: function( time, type ) { - time = jQuery.fx ? jQuery.fx.speeds[ time ] || time : time; - type = type || "fx"; - - return this.queue( type, function( next, hooks ) { - var timeout = setTimeout( next, time ); - hooks.stop = function() { - clearTimeout( timeout ); - }; - }); + } ); }, clearQueue: function( type ) { return this.queue( type || "fx", [] ); }, + // Get a promise resolved when queues of a certain type // are emptied (fx is the type by default) promise: function( type, obj ) { @@ -4033,8 +4448,8 @@ jQuery.fn.extend({ } type = type || "fx"; - while( i-- ) { - tmp = jQuery._data( elements[ i ], type + "queueHooks" ); + while ( i-- ) { + tmp = dataPriv.get( elements[ i ], type + "queueHooks" ); if ( tmp && tmp.empty ) { count++; tmp.empty.add( resolve ); @@ -4043,74 +4458,3380 @@ jQuery.fn.extend({ resolve(); return defer.promise( obj ); } -}); -var nodeHook, boolHook, - rclass = /[\t\r\n\f]/g, - rreturn = /\r/g, - rfocusable = /^(?:input|select|textarea|button|object)$/i, - rclickable = /^(?:a|area)$/i, - ruseDefault = /^(?:checked|selected)$/i, - getSetAttribute = jQuery.support.getSetAttribute, - getSetInput = jQuery.support.input; +} ); +var pnum = ( /[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/ ).source; -jQuery.fn.extend({ +var rcssNum = new RegExp( "^(?:([+-])=|)(" + pnum + ")([a-z%]*)$", "i" ); + + +var cssExpand = [ "Top", "Right", "Bottom", "Left" ]; + +var isHiddenWithinTree = function( elem, el ) { + + // isHiddenWithinTree might be called from jQuery#filter function; + // in that case, element will be second argument + elem = el || elem; + + // Inline style trumps all + return elem.style.display === "none" || + elem.style.display === "" && + + // Otherwise, check computed style + // Support: Firefox <=43 - 45 + // Disconnected elements can have computed display: none, so first confirm that elem is + // in the document. + jQuery.contains( elem.ownerDocument, elem ) && + + jQuery.css( elem, "display" ) === "none"; + }; + +var swap = function( elem, options, callback, args ) { + var ret, name, + old = {}; + + // Remember the old values, and insert the new ones + for ( name in options ) { + old[ name ] = elem.style[ name ]; + elem.style[ name ] = options[ name ]; + } + + ret = callback.apply( elem, args || [] ); + + // Revert the old values + for ( name in options ) { + elem.style[ name ] = old[ name ]; + } + + return ret; +}; + + + + +function adjustCSS( elem, prop, valueParts, tween ) { + var adjusted, scale, + maxIterations = 20, + currentValue = tween ? + function() { + return tween.cur(); + } : + function() { + return jQuery.css( elem, prop, "" ); + }, + initial = currentValue(), + unit = valueParts && valueParts[ 3 ] || ( jQuery.cssNumber[ prop ] ? 
"" : "px" ), + + // Starting value computation is required for potential unit mismatches + initialInUnit = ( jQuery.cssNumber[ prop ] || unit !== "px" && +initial ) && + rcssNum.exec( jQuery.css( elem, prop ) ); + + if ( initialInUnit && initialInUnit[ 3 ] !== unit ) { + + // Support: Firefox <=54 + // Halve the iteration target value to prevent interference from CSS upper bounds (gh-2144) + initial = initial / 2; + + // Trust units reported by jQuery.css + unit = unit || initialInUnit[ 3 ]; + + // Iteratively approximate from a nonzero starting point + initialInUnit = +initial || 1; + + while ( maxIterations-- ) { + + // Evaluate and update our best guess (doubling guesses that zero out). + // Finish if the scale equals or crosses 1 (making the old*new product non-positive). + jQuery.style( elem, prop, initialInUnit + unit ); + if ( ( 1 - scale ) * ( 1 - ( scale = currentValue() / initial || 0.5 ) ) <= 0 ) { + maxIterations = 0; + } + initialInUnit = initialInUnit / scale; + + } + + initialInUnit = initialInUnit * 2; + jQuery.style( elem, prop, initialInUnit + unit ); + + // Make sure we update the tween properties later on + valueParts = valueParts || []; + } + + if ( valueParts ) { + initialInUnit = +initialInUnit || +initial || 0; + + // Apply relative offset (+=/-=) if specified + adjusted = valueParts[ 1 ] ? + initialInUnit + ( valueParts[ 1 ] + 1 ) * valueParts[ 2 ] : + +valueParts[ 2 ]; + if ( tween ) { + tween.unit = unit; + tween.start = initialInUnit; + tween.end = adjusted; + } + } + return adjusted; +} + + +var defaultDisplayMap = {}; + +function getDefaultDisplay( elem ) { + var temp, + doc = elem.ownerDocument, + nodeName = elem.nodeName, + display = defaultDisplayMap[ nodeName ]; + + if ( display ) { + return display; + } + + temp = doc.body.appendChild( doc.createElement( nodeName ) ); + display = jQuery.css( temp, "display" ); + + temp.parentNode.removeChild( temp ); + + if ( display === "none" ) { + display = "block"; + } + defaultDisplayMap[ nodeName ] = display; + + return display; +} + +function showHide( elements, show ) { + var display, elem, + values = [], + index = 0, + length = elements.length; + + // Determine new display value for elements that need to change + for ( ; index < length; index++ ) { + elem = elements[ index ]; + if ( !elem.style ) { + continue; + } + + display = elem.style.display; + if ( show ) { + + // Since we force visibility upon cascade-hidden elements, an immediate (and slow) + // check is required in this first loop unless we have a nonempty display value (either + // inline or about-to-be-restored) + if ( display === "none" ) { + values[ index ] = dataPriv.get( elem, "display" ) || null; + if ( !values[ index ] ) { + elem.style.display = ""; + } + } + if ( elem.style.display === "" && isHiddenWithinTree( elem ) ) { + values[ index ] = getDefaultDisplay( elem ); + } + } else { + if ( display !== "none" ) { + values[ index ] = "none"; + + // Remember what we're overwriting + dataPriv.set( elem, "display", display ); + } + } + } + + // Set the display of the elements in a second loop to avoid constant reflow + for ( index = 0; index < length; index++ ) { + if ( values[ index ] != null ) { + elements[ index ].style.display = values[ index ]; + } + } + + return elements; +} + +jQuery.fn.extend( { + show: function() { + return showHide( this, true ); + }, + hide: function() { + return showHide( this ); + }, + toggle: function( state ) { + if ( typeof state === "boolean" ) { + return state ? 
this.show() : this.hide();
+		}
+
+		return this.each( function() {
+			if ( isHiddenWithinTree( this ) ) {
+				jQuery( this ).show();
+			} else {
+				jQuery( this ).hide();
+			}
+		} );
+	}
+} );
+var rcheckableType = ( /^(?:checkbox|radio)$/i );
+
+var rtagName = ( /<([a-z][^\/\0>\x20\t\r\n\f]+)/i );
+
+var rscriptType = ( /^$|^module$|\/(?:java|ecma)script/i );
+
+
+
+// We have to close these tags to support XHTML (#13200)
+var wrapMap = {
+
+	// Support: IE <=9 only
+	option: [ 1, "<select multiple='multiple'>", "</select>" ],
+
+	// XHTML parsers do not magically insert elements in the
+	// same way that tag soup parsers do. So we cannot shorten
+	// this by omitting <tbody> or other required elements.
+	thead: [ 1, "<table>", "</table>" ],
+	col: [ 2, "<table><colgroup>", "</colgroup></table>" ],
+	tr: [ 2, "<table><tbody>", "</tbody></table>" ],
+	td: [ 3, "<table><tbody><tr>", "</tr></tbody></table>
" ], + + _default: [ 0, "", "" ] +}; + +// Support: IE <=9 only +wrapMap.optgroup = wrapMap.option; + +wrapMap.tbody = wrapMap.tfoot = wrapMap.colgroup = wrapMap.caption = wrapMap.thead; +wrapMap.th = wrapMap.td; + + +function getAll( context, tag ) { + + // Support: IE <=9 - 11 only + // Use typeof to avoid zero-argument method invocation on host objects (#15151) + var ret; + + if ( typeof context.getElementsByTagName !== "undefined" ) { + ret = context.getElementsByTagName( tag || "*" ); + + } else if ( typeof context.querySelectorAll !== "undefined" ) { + ret = context.querySelectorAll( tag || "*" ); + + } else { + ret = []; + } + + if ( tag === undefined || tag && nodeName( context, tag ) ) { + return jQuery.merge( [ context ], ret ); + } + + return ret; +} + + +// Mark scripts as having already been evaluated +function setGlobalEval( elems, refElements ) { + var i = 0, + l = elems.length; + + for ( ; i < l; i++ ) { + dataPriv.set( + elems[ i ], + "globalEval", + !refElements || dataPriv.get( refElements[ i ], "globalEval" ) + ); + } +} + + +var rhtml = /<|&#?\w+;/; + +function buildFragment( elems, context, scripts, selection, ignored ) { + var elem, tmp, tag, wrap, contains, j, + fragment = context.createDocumentFragment(), + nodes = [], + i = 0, + l = elems.length; + + for ( ; i < l; i++ ) { + elem = elems[ i ]; + + if ( elem || elem === 0 ) { + + // Add nodes directly + if ( toType( elem ) === "object" ) { + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + jQuery.merge( nodes, elem.nodeType ? [ elem ] : elem ); + + // Convert non-html into a text node + } else if ( !rhtml.test( elem ) ) { + nodes.push( context.createTextNode( elem ) ); + + // Convert html into DOM nodes + } else { + tmp = tmp || fragment.appendChild( context.createElement( "div" ) ); + + // Deserialize a standard representation + tag = ( rtagName.exec( elem ) || [ "", "" ] )[ 1 ].toLowerCase(); + wrap = wrapMap[ tag ] || wrapMap._default; + tmp.innerHTML = wrap[ 1 ] + jQuery.htmlPrefilter( elem ) + wrap[ 2 ]; + + // Descend through wrappers to the right content + j = wrap[ 0 ]; + while ( j-- ) { + tmp = tmp.lastChild; + } + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + jQuery.merge( nodes, tmp.childNodes ); + + // Remember the top-level container + tmp = fragment.firstChild; + + // Ensure the created nodes are orphaned (#12392) + tmp.textContent = ""; + } + } + } + + // Remove wrapper from fragment + fragment.textContent = ""; + + i = 0; + while ( ( elem = nodes[ i++ ] ) ) { + + // Skip elements already in the context collection (trac-4087) + if ( selection && jQuery.inArray( elem, selection ) > -1 ) { + if ( ignored ) { + ignored.push( elem ); + } + continue; + } + + contains = jQuery.contains( elem.ownerDocument, elem ); + + // Append to fragment + tmp = getAll( fragment.appendChild( elem ), "script" ); + + // Preserve script evaluation history + if ( contains ) { + setGlobalEval( tmp ); + } + + // Capture executables + if ( scripts ) { + j = 0; + while ( ( elem = tmp[ j++ ] ) ) { + if ( rscriptType.test( elem.type || "" ) ) { + scripts.push( elem ); + } + } + } + } + + return fragment; +} + + +( function() { + var fragment = document.createDocumentFragment(), + div = fragment.appendChild( document.createElement( "div" ) ), + input = document.createElement( "input" ); + + // Support: Android 4.0 - 4.3 only + // Check state lost if the name is set (#11217) + // Support: Windows Web Apps (WWA) + 
// `name` and `type` must use .setAttribute for WWA (#14901) + input.setAttribute( "type", "radio" ); + input.setAttribute( "checked", "checked" ); + input.setAttribute( "name", "t" ); + + div.appendChild( input ); + + // Support: Android <=4.1 only + // Older WebKit doesn't clone checked state correctly in fragments + support.checkClone = div.cloneNode( true ).cloneNode( true ).lastChild.checked; + + // Support: IE <=11 only + // Make sure textarea (and checkbox) defaultValue is properly cloned + div.innerHTML = ""; + support.noCloneChecked = !!div.cloneNode( true ).lastChild.defaultValue; +} )(); +var documentElement = document.documentElement; + + + +var + rkeyEvent = /^key/, + rmouseEvent = /^(?:mouse|pointer|contextmenu|drag|drop)|click/, + rtypenamespace = /^([^.]*)(?:\.(.+)|)/; + +function returnTrue() { + return true; +} + +function returnFalse() { + return false; +} + +// Support: IE <=9 only +// See #13393 for more info +function safeActiveElement() { + try { + return document.activeElement; + } catch ( err ) { } +} + +function on( elem, types, selector, data, fn, one ) { + var origFn, type; + + // Types can be a map of types/handlers + if ( typeof types === "object" ) { + + // ( types-Object, selector, data ) + if ( typeof selector !== "string" ) { + + // ( types-Object, data ) + data = data || selector; + selector = undefined; + } + for ( type in types ) { + on( elem, type, selector, data, types[ type ], one ); + } + return elem; + } + + if ( data == null && fn == null ) { + + // ( types, fn ) + fn = selector; + data = selector = undefined; + } else if ( fn == null ) { + if ( typeof selector === "string" ) { + + // ( types, selector, fn ) + fn = data; + data = undefined; + } else { + + // ( types, data, fn ) + fn = data; + data = selector; + selector = undefined; + } + } + if ( fn === false ) { + fn = returnFalse; + } else if ( !fn ) { + return elem; + } + + if ( one === 1 ) { + origFn = fn; + fn = function( event ) { + + // Can use an empty set, since event contains the info + jQuery().off( event ); + return origFn.apply( this, arguments ); + }; + + // Use same guid so caller can remove using origFn + fn.guid = origFn.guid || ( origFn.guid = jQuery.guid++ ); + } + return elem.each( function() { + jQuery.event.add( this, types, fn, data, selector ); + } ); +} + +/* + * Helper functions for managing events -- not part of the public interface. + * Props to Dean Edwards' addEvent library for many of the ideas. 
+ */ +jQuery.event = { + + global: {}, + + add: function( elem, types, handler, data, selector ) { + + var handleObjIn, eventHandle, tmp, + events, t, handleObj, + special, handlers, type, namespaces, origType, + elemData = dataPriv.get( elem ); + + // Don't attach events to noData or text/comment nodes (but allow plain objects) + if ( !elemData ) { + return; + } + + // Caller can pass in an object of custom data in lieu of the handler + if ( handler.handler ) { + handleObjIn = handler; + handler = handleObjIn.handler; + selector = handleObjIn.selector; + } + + // Ensure that invalid selectors throw exceptions at attach time + // Evaluate against documentElement in case elem is a non-element node (e.g., document) + if ( selector ) { + jQuery.find.matchesSelector( documentElement, selector ); + } + + // Make sure that the handler has a unique ID, used to find/remove it later + if ( !handler.guid ) { + handler.guid = jQuery.guid++; + } + + // Init the element's event structure and main handler, if this is the first + if ( !( events = elemData.events ) ) { + events = elemData.events = {}; + } + if ( !( eventHandle = elemData.handle ) ) { + eventHandle = elemData.handle = function( e ) { + + // Discard the second event of a jQuery.event.trigger() and + // when an event is called after a page has unloaded + return typeof jQuery !== "undefined" && jQuery.event.triggered !== e.type ? + jQuery.event.dispatch.apply( elem, arguments ) : undefined; + }; + } + + // Handle multiple events separated by a space + types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; + t = types.length; + while ( t-- ) { + tmp = rtypenamespace.exec( types[ t ] ) || []; + type = origType = tmp[ 1 ]; + namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); + + // There *must* be a type, no attaching namespace-only handlers + if ( !type ) { + continue; + } + + // If event changes its type, use the special event handlers for the changed type + special = jQuery.event.special[ type ] || {}; + + // If selector defined, determine special event api type, otherwise given type + type = ( selector ? special.delegateType : special.bindType ) || type; + + // Update special based on newly reset type + special = jQuery.event.special[ type ] || {}; + + // handleObj is passed to all event handlers + handleObj = jQuery.extend( { + type: type, + origType: origType, + data: data, + handler: handler, + guid: handler.guid, + selector: selector, + needsContext: selector && jQuery.expr.match.needsContext.test( selector ), + namespace: namespaces.join( "." 
) + }, handleObjIn ); + + // Init the event handler queue if we're the first + if ( !( handlers = events[ type ] ) ) { + handlers = events[ type ] = []; + handlers.delegateCount = 0; + + // Only use addEventListener if the special events handler returns false + if ( !special.setup || + special.setup.call( elem, data, namespaces, eventHandle ) === false ) { + + if ( elem.addEventListener ) { + elem.addEventListener( type, eventHandle ); + } + } + } + + if ( special.add ) { + special.add.call( elem, handleObj ); + + if ( !handleObj.handler.guid ) { + handleObj.handler.guid = handler.guid; + } + } + + // Add to the element's handler list, delegates in front + if ( selector ) { + handlers.splice( handlers.delegateCount++, 0, handleObj ); + } else { + handlers.push( handleObj ); + } + + // Keep track of which events have ever been used, for event optimization + jQuery.event.global[ type ] = true; + } + + }, + + // Detach an event or set of events from an element + remove: function( elem, types, handler, selector, mappedTypes ) { + + var j, origCount, tmp, + events, t, handleObj, + special, handlers, type, namespaces, origType, + elemData = dataPriv.hasData( elem ) && dataPriv.get( elem ); + + if ( !elemData || !( events = elemData.events ) ) { + return; + } + + // Once for each type.namespace in types; type may be omitted + types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; + t = types.length; + while ( t-- ) { + tmp = rtypenamespace.exec( types[ t ] ) || []; + type = origType = tmp[ 1 ]; + namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); + + // Unbind all events (on this namespace, if provided) for the element + if ( !type ) { + for ( type in events ) { + jQuery.event.remove( elem, type + types[ t ], handler, selector, true ); + } + continue; + } + + special = jQuery.event.special[ type ] || {}; + type = ( selector ? 
special.delegateType : special.bindType ) || type; + handlers = events[ type ] || []; + tmp = tmp[ 2 ] && + new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ); + + // Remove matching events + origCount = j = handlers.length; + while ( j-- ) { + handleObj = handlers[ j ]; + + if ( ( mappedTypes || origType === handleObj.origType ) && + ( !handler || handler.guid === handleObj.guid ) && + ( !tmp || tmp.test( handleObj.namespace ) ) && + ( !selector || selector === handleObj.selector || + selector === "**" && handleObj.selector ) ) { + handlers.splice( j, 1 ); + + if ( handleObj.selector ) { + handlers.delegateCount--; + } + if ( special.remove ) { + special.remove.call( elem, handleObj ); + } + } + } + + // Remove generic event handler if we removed something and no more handlers exist + // (avoids potential for endless recursion during removal of special event handlers) + if ( origCount && !handlers.length ) { + if ( !special.teardown || + special.teardown.call( elem, namespaces, elemData.handle ) === false ) { + + jQuery.removeEvent( elem, type, elemData.handle ); + } + + delete events[ type ]; + } + } + + // Remove data and the expando if it's no longer used + if ( jQuery.isEmptyObject( events ) ) { + dataPriv.remove( elem, "handle events" ); + } + }, + + dispatch: function( nativeEvent ) { + + // Make a writable jQuery.Event from the native event object + var event = jQuery.event.fix( nativeEvent ); + + var i, j, ret, matched, handleObj, handlerQueue, + args = new Array( arguments.length ), + handlers = ( dataPriv.get( this, "events" ) || {} )[ event.type ] || [], + special = jQuery.event.special[ event.type ] || {}; + + // Use the fix-ed jQuery.Event rather than the (read-only) native event + args[ 0 ] = event; + + for ( i = 1; i < arguments.length; i++ ) { + args[ i ] = arguments[ i ]; + } + + event.delegateTarget = this; + + // Call the preDispatch hook for the mapped type, and let it bail if desired + if ( special.preDispatch && special.preDispatch.call( this, event ) === false ) { + return; + } + + // Determine handlers + handlerQueue = jQuery.event.handlers.call( this, event, handlers ); + + // Run delegates first; they may want to stop propagation beneath us + i = 0; + while ( ( matched = handlerQueue[ i++ ] ) && !event.isPropagationStopped() ) { + event.currentTarget = matched.elem; + + j = 0; + while ( ( handleObj = matched.handlers[ j++ ] ) && + !event.isImmediatePropagationStopped() ) { + + // Triggered event must either 1) have no namespace, or 2) have namespace(s) + // a subset or equal to those in the bound event (both can have no namespace). 
+ if ( !event.rnamespace || event.rnamespace.test( handleObj.namespace ) ) { + + event.handleObj = handleObj; + event.data = handleObj.data; + + ret = ( ( jQuery.event.special[ handleObj.origType ] || {} ).handle || + handleObj.handler ).apply( matched.elem, args ); + + if ( ret !== undefined ) { + if ( ( event.result = ret ) === false ) { + event.preventDefault(); + event.stopPropagation(); + } + } + } + } + } + + // Call the postDispatch hook for the mapped type + if ( special.postDispatch ) { + special.postDispatch.call( this, event ); + } + + return event.result; + }, + + handlers: function( event, handlers ) { + var i, handleObj, sel, matchedHandlers, matchedSelectors, + handlerQueue = [], + delegateCount = handlers.delegateCount, + cur = event.target; + + // Find delegate handlers + if ( delegateCount && + + // Support: IE <=9 + // Black-hole SVG instance trees (trac-13180) + cur.nodeType && + + // Support: Firefox <=42 + // Suppress spec-violating clicks indicating a non-primary pointer button (trac-3861) + // https://www.w3.org/TR/DOM-Level-3-Events/#event-type-click + // Support: IE 11 only + // ...but not arrow key "clicks" of radio inputs, which can have `button` -1 (gh-2343) + !( event.type === "click" && event.button >= 1 ) ) { + + for ( ; cur !== this; cur = cur.parentNode || this ) { + + // Don't check non-elements (#13208) + // Don't process clicks on disabled elements (#6911, #8165, #11382, #11764) + if ( cur.nodeType === 1 && !( event.type === "click" && cur.disabled === true ) ) { + matchedHandlers = []; + matchedSelectors = {}; + for ( i = 0; i < delegateCount; i++ ) { + handleObj = handlers[ i ]; + + // Don't conflict with Object.prototype properties (#13203) + sel = handleObj.selector + " "; + + if ( matchedSelectors[ sel ] === undefined ) { + matchedSelectors[ sel ] = handleObj.needsContext ? + jQuery( sel, this ).index( cur ) > -1 : + jQuery.find( sel, this, null, [ cur ] ).length; + } + if ( matchedSelectors[ sel ] ) { + matchedHandlers.push( handleObj ); + } + } + if ( matchedHandlers.length ) { + handlerQueue.push( { elem: cur, handlers: matchedHandlers } ); + } + } + } + } + + // Add the remaining (directly-bound) handlers + cur = this; + if ( delegateCount < handlers.length ) { + handlerQueue.push( { elem: cur, handlers: handlers.slice( delegateCount ) } ); + } + + return handlerQueue; + }, + + addProp: function( name, hook ) { + Object.defineProperty( jQuery.Event.prototype, name, { + enumerable: true, + configurable: true, + + get: isFunction( hook ) ? + function() { + if ( this.originalEvent ) { + return hook( this.originalEvent ); + } + } : + function() { + if ( this.originalEvent ) { + return this.originalEvent[ name ]; + } + }, + + set: function( value ) { + Object.defineProperty( this, name, { + enumerable: true, + configurable: true, + writable: true, + value: value + } ); + } + } ); + }, + + fix: function( originalEvent ) { + return originalEvent[ jQuery.expando ] ? 
+ originalEvent : + new jQuery.Event( originalEvent ); + }, + + special: { + load: { + + // Prevent triggered image.load events from bubbling to window.load + noBubble: true + }, + focus: { + + // Fire native event if possible so blur/focus sequence is correct + trigger: function() { + if ( this !== safeActiveElement() && this.focus ) { + this.focus(); + return false; + } + }, + delegateType: "focusin" + }, + blur: { + trigger: function() { + if ( this === safeActiveElement() && this.blur ) { + this.blur(); + return false; + } + }, + delegateType: "focusout" + }, + click: { + + // For checkbox, fire native event so checked state will be right + trigger: function() { + if ( this.type === "checkbox" && this.click && nodeName( this, "input" ) ) { + this.click(); + return false; + } + }, + + // For cross-browser consistency, don't fire native .click() on links + _default: function( event ) { + return nodeName( event.target, "a" ); + } + }, + + beforeunload: { + postDispatch: function( event ) { + + // Support: Firefox 20+ + // Firefox doesn't alert if the returnValue field is not set. + if ( event.result !== undefined && event.originalEvent ) { + event.originalEvent.returnValue = event.result; + } + } + } + } +}; + +jQuery.removeEvent = function( elem, type, handle ) { + + // This "if" is needed for plain objects + if ( elem.removeEventListener ) { + elem.removeEventListener( type, handle ); + } +}; + +jQuery.Event = function( src, props ) { + + // Allow instantiation without the 'new' keyword + if ( !( this instanceof jQuery.Event ) ) { + return new jQuery.Event( src, props ); + } + + // Event object + if ( src && src.type ) { + this.originalEvent = src; + this.type = src.type; + + // Events bubbling up the document may have been marked as prevented + // by a handler lower down the tree; reflect the correct value. + this.isDefaultPrevented = src.defaultPrevented || + src.defaultPrevented === undefined && + + // Support: Android <=2.3 only + src.returnValue === false ? + returnTrue : + returnFalse; + + // Create target properties + // Support: Safari <=6 - 7 only + // Target should not be a text node (#504, #13143) + this.target = ( src.target && src.target.nodeType === 3 ) ? 
+ src.target.parentNode : + src.target; + + this.currentTarget = src.currentTarget; + this.relatedTarget = src.relatedTarget; + + // Event type + } else { + this.type = src; + } + + // Put explicitly provided properties onto the event object + if ( props ) { + jQuery.extend( this, props ); + } + + // Create a timestamp if incoming event doesn't have one + this.timeStamp = src && src.timeStamp || Date.now(); + + // Mark it as fixed + this[ jQuery.expando ] = true; +}; + +// jQuery.Event is based on DOM3 Events as specified by the ECMAScript Language Binding +// https://www.w3.org/TR/2003/WD-DOM-Level-3-Events-20030331/ecma-script-binding.html +jQuery.Event.prototype = { + constructor: jQuery.Event, + isDefaultPrevented: returnFalse, + isPropagationStopped: returnFalse, + isImmediatePropagationStopped: returnFalse, + isSimulated: false, + + preventDefault: function() { + var e = this.originalEvent; + + this.isDefaultPrevented = returnTrue; + + if ( e && !this.isSimulated ) { + e.preventDefault(); + } + }, + stopPropagation: function() { + var e = this.originalEvent; + + this.isPropagationStopped = returnTrue; + + if ( e && !this.isSimulated ) { + e.stopPropagation(); + } + }, + stopImmediatePropagation: function() { + var e = this.originalEvent; + + this.isImmediatePropagationStopped = returnTrue; + + if ( e && !this.isSimulated ) { + e.stopImmediatePropagation(); + } + + this.stopPropagation(); + } +}; + +// Includes all common event props including KeyEvent and MouseEvent specific props +jQuery.each( { + altKey: true, + bubbles: true, + cancelable: true, + changedTouches: true, + ctrlKey: true, + detail: true, + eventPhase: true, + metaKey: true, + pageX: true, + pageY: true, + shiftKey: true, + view: true, + "char": true, + charCode: true, + key: true, + keyCode: true, + button: true, + buttons: true, + clientX: true, + clientY: true, + offsetX: true, + offsetY: true, + pointerId: true, + pointerType: true, + screenX: true, + screenY: true, + targetTouches: true, + toElement: true, + touches: true, + + which: function( event ) { + var button = event.button; + + // Add which for key events + if ( event.which == null && rkeyEvent.test( event.type ) ) { + return event.charCode != null ? event.charCode : event.keyCode; + } + + // Add which for click: 1 === left; 2 === middle; 3 === right + if ( !event.which && button !== undefined && rmouseEvent.test( event.type ) ) { + if ( button & 1 ) { + return 1; + } + + if ( button & 2 ) { + return 3; + } + + if ( button & 4 ) { + return 2; + } + + return 0; + } + + return event.which; + } +}, jQuery.event.addProp ); + +// Create mouseenter/leave events using mouseover/out and event-time checks +// so that event delegation works in jQuery. +// Do the same for pointerenter/pointerleave and pointerover/pointerout +// +// Support: Safari 7 only +// Safari sends mouseenter too often; see: +// https://bugs.chromium.org/p/chromium/issues/detail?id=470258 +// for the description of the bug (it existed in older Chrome versions as well). +jQuery.each( { + mouseenter: "mouseover", + mouseleave: "mouseout", + pointerenter: "pointerover", + pointerleave: "pointerout" +}, function( orig, fix ) { + jQuery.event.special[ orig ] = { + delegateType: fix, + bindType: fix, + + handle: function( event ) { + var ret, + target = this, + related = event.relatedTarget, + handleObj = event.handleObj; + + // For mouseenter/leave call the handler if related is outside the target. 
+ // NB: No relatedTarget if the mouse left/entered the browser window + if ( !related || ( related !== target && !jQuery.contains( target, related ) ) ) { + event.type = handleObj.origType; + ret = handleObj.handler.apply( this, arguments ); + event.type = fix; + } + return ret; + } + }; +} ); + +jQuery.fn.extend( { + + on: function( types, selector, data, fn ) { + return on( this, types, selector, data, fn ); + }, + one: function( types, selector, data, fn ) { + return on( this, types, selector, data, fn, 1 ); + }, + off: function( types, selector, fn ) { + var handleObj, type; + if ( types && types.preventDefault && types.handleObj ) { + + // ( event ) dispatched jQuery.Event + handleObj = types.handleObj; + jQuery( types.delegateTarget ).off( + handleObj.namespace ? + handleObj.origType + "." + handleObj.namespace : + handleObj.origType, + handleObj.selector, + handleObj.handler + ); + return this; + } + if ( typeof types === "object" ) { + + // ( types-object [, selector] ) + for ( type in types ) { + this.off( type, selector, types[ type ] ); + } + return this; + } + if ( selector === false || typeof selector === "function" ) { + + // ( types [, fn] ) + fn = selector; + selector = undefined; + } + if ( fn === false ) { + fn = returnFalse; + } + return this.each( function() { + jQuery.event.remove( this, types, fn, selector ); + } ); + } +} ); + + +var + + /* eslint-disable max-len */ + + // See https://github.com/eslint/eslint/issues/3229 + rxhtmlTag = /<(?!area|br|col|embed|hr|img|input|link|meta|param)(([a-z][^\/\0>\x20\t\r\n\f]*)[^>]*)\/>/gi, + + /* eslint-enable */ + + // Support: IE <=10 - 11, Edge 12 - 13 only + // In IE/Edge using regex groups here causes severe slowdowns. + // See https://connect.microsoft.com/IE/feedback/details/1736512/ + rnoInnerhtml = /\s*$/g; + +// Prefer a tbody over its parent table for containing new rows +function manipulationTarget( elem, content ) { + if ( nodeName( elem, "table" ) && + nodeName( content.nodeType !== 11 ? content : content.firstChild, "tr" ) ) { + + return jQuery( elem ).children( "tbody" )[ 0 ] || elem; + } + + return elem; +} + +// Replace/restore the type attribute of script elements for safe DOM manipulation +function disableScript( elem ) { + elem.type = ( elem.getAttribute( "type" ) !== null ) + "/" + elem.type; + return elem; +} +function restoreScript( elem ) { + if ( ( elem.type || "" ).slice( 0, 5 ) === "true/" ) { + elem.type = elem.type.slice( 5 ); + } else { + elem.removeAttribute( "type" ); + } + + return elem; +} + +function cloneCopyEvent( src, dest ) { + var i, l, type, pdataOld, pdataCur, udataOld, udataCur, events; + + if ( dest.nodeType !== 1 ) { + return; + } + + // 1. Copy private data: events, handlers, etc. + if ( dataPriv.hasData( src ) ) { + pdataOld = dataPriv.access( src ); + pdataCur = dataPriv.set( dest, pdataOld ); + events = pdataOld.events; + + if ( events ) { + delete pdataCur.handle; + pdataCur.events = {}; + + for ( type in events ) { + for ( i = 0, l = events[ type ].length; i < l; i++ ) { + jQuery.event.add( dest, type, events[ type ][ i ] ); + } + } + } + } + + // 2. Copy user data + if ( dataUser.hasData( src ) ) { + udataOld = dataUser.access( src ); + udataCur = jQuery.extend( {}, udataOld ); + + dataUser.set( dest, udataCur ); + } +} + +// Fix IE bugs, see support tests +function fixInput( src, dest ) { + var nodeName = dest.nodeName.toLowerCase(); + + // Fails to persist the checked state of a cloned checkbox or radio button. 
+ if ( nodeName === "input" && rcheckableType.test( src.type ) ) { + dest.checked = src.checked; + + // Fails to return the selected option to the default selected state when cloning options + } else if ( nodeName === "input" || nodeName === "textarea" ) { + dest.defaultValue = src.defaultValue; + } +} + +function domManip( collection, args, callback, ignored ) { + + // Flatten any nested arrays + args = concat.apply( [], args ); + + var fragment, first, scripts, hasScripts, node, doc, + i = 0, + l = collection.length, + iNoClone = l - 1, + value = args[ 0 ], + valueIsFunction = isFunction( value ); + + // We can't cloneNode fragments that contain checked, in WebKit + if ( valueIsFunction || + ( l > 1 && typeof value === "string" && + !support.checkClone && rchecked.test( value ) ) ) { + return collection.each( function( index ) { + var self = collection.eq( index ); + if ( valueIsFunction ) { + args[ 0 ] = value.call( this, index, self.html() ); + } + domManip( self, args, callback, ignored ); + } ); + } + + if ( l ) { + fragment = buildFragment( args, collection[ 0 ].ownerDocument, false, collection, ignored ); + first = fragment.firstChild; + + if ( fragment.childNodes.length === 1 ) { + fragment = first; + } + + // Require either new content or an interest in ignored elements to invoke the callback + if ( first || ignored ) { + scripts = jQuery.map( getAll( fragment, "script" ), disableScript ); + hasScripts = scripts.length; + + // Use the original fragment for the last item + // instead of the first because it can end up + // being emptied incorrectly in certain situations (#8070). + for ( ; i < l; i++ ) { + node = fragment; + + if ( i !== iNoClone ) { + node = jQuery.clone( node, true, true ); + + // Keep references to cloned scripts for later restoration + if ( hasScripts ) { + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + jQuery.merge( scripts, getAll( node, "script" ) ); + } + } + + callback.call( collection[ i ], node, i ); + } + + if ( hasScripts ) { + doc = scripts[ scripts.length - 1 ].ownerDocument; + + // Reenable scripts + jQuery.map( scripts, restoreScript ); + + // Evaluate executable scripts on first document insertion + for ( i = 0; i < hasScripts; i++ ) { + node = scripts[ i ]; + if ( rscriptType.test( node.type || "" ) && + !dataPriv.access( node, "globalEval" ) && + jQuery.contains( doc, node ) ) { + + if ( node.src && ( node.type || "" ).toLowerCase() !== "module" ) { + + // Optional AJAX dependency, but won't run scripts if not present + if ( jQuery._evalUrl ) { + jQuery._evalUrl( node.src ); + } + } else { + DOMEval( node.textContent.replace( rcleanScript, "" ), doc, node ); + } + } + } + } + } + } + + return collection; +} + +function remove( elem, selector, keepData ) { + var node, + nodes = selector ? 
jQuery.filter( selector, elem ) : elem,
+ i = 0;
+
+ for ( ; ( node = nodes[ i ] ) != null; i++ ) {
+ if ( !keepData && node.nodeType === 1 ) {
+ jQuery.cleanData( getAll( node ) );
+ }
+
+ if ( node.parentNode ) {
+ if ( keepData && jQuery.contains( node.ownerDocument, node ) ) {
+ setGlobalEval( getAll( node, "script" ) );
+ }
+ node.parentNode.removeChild( node );
+ }
+ }
+
+ return elem;
+}
+
+jQuery.extend( {
+ htmlPrefilter: function( html ) {
+ return html.replace( rxhtmlTag, "<$1></$2>" );
+ },
+
+ clone: function( elem, dataAndEvents, deepDataAndEvents ) {
+ var i, l, srcElements, destElements,
+ clone = elem.cloneNode( true ),
+ inPage = jQuery.contains( elem.ownerDocument, elem );
+
+ // Fix IE cloning issues
+ if ( !support.noCloneChecked && ( elem.nodeType === 1 || elem.nodeType === 11 ) &&
+ !jQuery.isXMLDoc( elem ) ) {
+
+ // We eschew Sizzle here for performance reasons: https://jsperf.com/getall-vs-sizzle/2
+ destElements = getAll( clone );
+ srcElements = getAll( elem );
+
+ for ( i = 0, l = srcElements.length; i < l; i++ ) {
+ fixInput( srcElements[ i ], destElements[ i ] );
+ }
+ }
+
+ // Copy the events from the original to the clone
+ if ( dataAndEvents ) {
+ if ( deepDataAndEvents ) {
+ srcElements = srcElements || getAll( elem );
+ destElements = destElements || getAll( clone );
+
+ for ( i = 0, l = srcElements.length; i < l; i++ ) {
+ cloneCopyEvent( srcElements[ i ], destElements[ i ] );
+ }
+ } else {
+ cloneCopyEvent( elem, clone );
+ }
+ }
+
+ // Preserve script evaluation history
+ destElements = getAll( clone, "script" );
+ if ( destElements.length > 0 ) {
+ setGlobalEval( destElements, !inPage && getAll( elem, "script" ) );
+ }
+
+ // Return the cloned set
+ return clone;
+ },
+
+ cleanData: function( elems ) {
+ var data, elem, type,
+ special = jQuery.event.special,
+ i = 0;
+
+ for ( ; ( elem = elems[ i ] ) !== undefined; i++ ) {
+ if ( acceptData( elem ) ) {
+ if ( ( data = elem[ dataPriv.expando ] ) ) {
+ if ( data.events ) {
+ for ( type in data.events ) {
+ if ( special[ type ] ) {
+ jQuery.event.remove( elem, type );
+
+ // This is a shortcut to avoid jQuery.event.remove's overhead
+ } else {
+ jQuery.removeEvent( elem, type, data.handle );
+ }
+ }
+ }
+
+ // Support: Chrome <=35 - 45+
+ // Assign undefined instead of using delete, see Data#remove
+ elem[ dataPriv.expando ] = undefined;
+ }
+ if ( elem[ dataUser.expando ] ) {
+
+ // Support: Chrome <=35 - 45+
+ // Assign undefined instead of using delete, see Data#remove
+ elem[ dataUser.expando ] = undefined;
+ }
+ }
+ }
+ }
+} );
+
+jQuery.fn.extend( {
+ detach: function( selector ) {
+ return remove( this, selector, true );
+ },
+
+ remove: function( selector ) {
+ return remove( this, selector );
+ },
+
+ text: function( value ) {
+ return access( this, function( value ) {
+ return value === undefined ?
+ jQuery.text( this ) : + this.empty().each( function() { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + this.textContent = value; + } + } ); + }, null, value, arguments.length ); + }, + + append: function() { + return domManip( this, arguments, function( elem ) { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + var target = manipulationTarget( this, elem ); + target.appendChild( elem ); + } + } ); + }, + + prepend: function() { + return domManip( this, arguments, function( elem ) { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + var target = manipulationTarget( this, elem ); + target.insertBefore( elem, target.firstChild ); + } + } ); + }, + + before: function() { + return domManip( this, arguments, function( elem ) { + if ( this.parentNode ) { + this.parentNode.insertBefore( elem, this ); + } + } ); + }, + + after: function() { + return domManip( this, arguments, function( elem ) { + if ( this.parentNode ) { + this.parentNode.insertBefore( elem, this.nextSibling ); + } + } ); + }, + + empty: function() { + var elem, + i = 0; + + for ( ; ( elem = this[ i ] ) != null; i++ ) { + if ( elem.nodeType === 1 ) { + + // Prevent memory leaks + jQuery.cleanData( getAll( elem, false ) ); + + // Remove any remaining nodes + elem.textContent = ""; + } + } + + return this; + }, + + clone: function( dataAndEvents, deepDataAndEvents ) { + dataAndEvents = dataAndEvents == null ? false : dataAndEvents; + deepDataAndEvents = deepDataAndEvents == null ? dataAndEvents : deepDataAndEvents; + + return this.map( function() { + return jQuery.clone( this, dataAndEvents, deepDataAndEvents ); + } ); + }, + + html: function( value ) { + return access( this, function( value ) { + var elem = this[ 0 ] || {}, + i = 0, + l = this.length; + + if ( value === undefined && elem.nodeType === 1 ) { + return elem.innerHTML; + } + + // See if we can take a shortcut and just use innerHTML + if ( typeof value === "string" && !rnoInnerhtml.test( value ) && + !wrapMap[ ( rtagName.exec( value ) || [ "", "" ] )[ 1 ].toLowerCase() ] ) { + + value = jQuery.htmlPrefilter( value ); + + try { + for ( ; i < l; i++ ) { + elem = this[ i ] || {}; + + // Remove element nodes and prevent memory leaks + if ( elem.nodeType === 1 ) { + jQuery.cleanData( getAll( elem, false ) ); + elem.innerHTML = value; + } + } + + elem = 0; + + // If using innerHTML throws an exception, use the fallback method + } catch ( e ) {} + } + + if ( elem ) { + this.empty().append( value ); + } + }, null, value, arguments.length ); + }, + + replaceWith: function() { + var ignored = []; + + // Make the changes, replacing each non-ignored context element with the new content + return domManip( this, arguments, function( elem ) { + var parent = this.parentNode; + + if ( jQuery.inArray( this, ignored ) < 0 ) { + jQuery.cleanData( getAll( this ) ); + if ( parent ) { + parent.replaceChild( elem, this ); + } + } + + // Force callback invocation + }, ignored ); + } +} ); + +jQuery.each( { + appendTo: "append", + prependTo: "prepend", + insertBefore: "before", + insertAfter: "after", + replaceAll: "replaceWith" +}, function( name, original ) { + jQuery.fn[ name ] = function( selector ) { + var elems, + ret = [], + insert = jQuery( selector ), + last = insert.length - 1, + i = 0; + + for ( ; i <= last; i++ ) { + elems = i === last ? 
this : this.clone( true ); + jQuery( insert[ i ] )[ original ]( elems ); + + // Support: Android <=4.0 only, PhantomJS 1 only + // .get() because push.apply(_, arraylike) throws on ancient WebKit + push.apply( ret, elems.get() ); + } + + return this.pushStack( ret ); + }; +} ); +var rnumnonpx = new RegExp( "^(" + pnum + ")(?!px)[a-z%]+$", "i" ); + +var getStyles = function( elem ) { + + // Support: IE <=11 only, Firefox <=30 (#15098, #14150) + // IE throws on elements created in popups + // FF meanwhile throws on frame elements through "defaultView.getComputedStyle" + var view = elem.ownerDocument.defaultView; + + if ( !view || !view.opener ) { + view = window; + } + + return view.getComputedStyle( elem ); + }; + +var rboxStyle = new RegExp( cssExpand.join( "|" ), "i" ); + + + +( function() { + + // Executing both pixelPosition & boxSizingReliable tests require only one layout + // so they're executed at the same time to save the second computation. + function computeStyleTests() { + + // This is a singleton, we need to execute it only once + if ( !div ) { + return; + } + + container.style.cssText = "position:absolute;left:-11111px;width:60px;" + + "margin-top:1px;padding:0;border:0"; + div.style.cssText = + "position:relative;display:block;box-sizing:border-box;overflow:scroll;" + + "margin:auto;border:1px;padding:1px;" + + "width:60%;top:1%"; + documentElement.appendChild( container ).appendChild( div ); + + var divStyle = window.getComputedStyle( div ); + pixelPositionVal = divStyle.top !== "1%"; + + // Support: Android 4.0 - 4.3 only, Firefox <=3 - 44 + reliableMarginLeftVal = roundPixelMeasures( divStyle.marginLeft ) === 12; + + // Support: Android 4.0 - 4.3 only, Safari <=9.1 - 10.1, iOS <=7.0 - 9.3 + // Some styles come back with percentage values, even though they shouldn't + div.style.right = "60%"; + pixelBoxStylesVal = roundPixelMeasures( divStyle.right ) === 36; + + // Support: IE 9 - 11 only + // Detect misreporting of content dimensions for box-sizing:border-box elements + boxSizingReliableVal = roundPixelMeasures( divStyle.width ) === 36; + + // Support: IE 9 only + // Detect overflow:scroll screwiness (gh-3699) + div.style.position = "absolute"; + scrollboxSizeVal = div.offsetWidth === 36 || "absolute"; + + documentElement.removeChild( container ); + + // Nullify the div so it wouldn't be stored in the memory and + // it will also be a sign that checks already performed + div = null; + } + + function roundPixelMeasures( measure ) { + return Math.round( parseFloat( measure ) ); + } + + var pixelPositionVal, boxSizingReliableVal, scrollboxSizeVal, pixelBoxStylesVal, + reliableMarginLeftVal, + container = document.createElement( "div" ), + div = document.createElement( "div" ); + + // Finish early in limited (non-browser) environments + if ( !div.style ) { + return; + } + + // Support: IE <=9 - 11 only + // Style of cloned element affects source element cloned (#8908) + div.style.backgroundClip = "content-box"; + div.cloneNode( true ).style.backgroundClip = ""; + support.clearCloneStyle = div.style.backgroundClip === "content-box"; + + jQuery.extend( support, { + boxSizingReliable: function() { + computeStyleTests(); + return boxSizingReliableVal; + }, + pixelBoxStyles: function() { + computeStyleTests(); + return pixelBoxStylesVal; + }, + pixelPosition: function() { + computeStyleTests(); + return pixelPositionVal; + }, + reliableMarginLeft: function() { + computeStyleTests(); + return reliableMarginLeftVal; + }, + scrollboxSize: function() { + computeStyleTests(); + return 
scrollboxSizeVal; + } + } ); +} )(); + + +function curCSS( elem, name, computed ) { + var width, minWidth, maxWidth, ret, + + // Support: Firefox 51+ + // Retrieving style before computed somehow + // fixes an issue with getting wrong values + // on detached elements + style = elem.style; + + computed = computed || getStyles( elem ); + + // getPropertyValue is needed for: + // .css('filter') (IE 9 only, #12537) + // .css('--customProperty) (#3144) + if ( computed ) { + ret = computed.getPropertyValue( name ) || computed[ name ]; + + if ( ret === "" && !jQuery.contains( elem.ownerDocument, elem ) ) { + ret = jQuery.style( elem, name ); + } + + // A tribute to the "awesome hack by Dean Edwards" + // Android Browser returns percentage for some values, + // but width seems to be reliably pixels. + // This is against the CSSOM draft spec: + // https://drafts.csswg.org/cssom/#resolved-values + if ( !support.pixelBoxStyles() && rnumnonpx.test( ret ) && rboxStyle.test( name ) ) { + + // Remember the original values + width = style.width; + minWidth = style.minWidth; + maxWidth = style.maxWidth; + + // Put in the new values to get a computed value out + style.minWidth = style.maxWidth = style.width = ret; + ret = computed.width; + + // Revert the changed values + style.width = width; + style.minWidth = minWidth; + style.maxWidth = maxWidth; + } + } + + return ret !== undefined ? + + // Support: IE <=9 - 11 only + // IE returns zIndex value as an integer. + ret + "" : + ret; +} + + +function addGetHookIf( conditionFn, hookFn ) { + + // Define the hook, we'll check on the first run if it's really needed. + return { + get: function() { + if ( conditionFn() ) { + + // Hook not needed (or it's not possible to use it due + // to missing dependency), remove it. + delete this.get; + return; + } + + // Hook needed; redefine it so that the support test is not executed again. + return ( this.get = hookFn ).apply( this, arguments ); + } + }; +} + + +var + + // Swappable if display is none or starts with table + // except "table", "table-cell", or "table-caption" + // See here for display values: https://developer.mozilla.org/en-US/docs/CSS/display + rdisplayswap = /^(none|table(?!-c[ea]).+)/, + rcustomProp = /^--/, + cssShow = { position: "absolute", visibility: "hidden", display: "block" }, + cssNormalTransform = { + letterSpacing: "0", + fontWeight: "400" + }, + + cssPrefixes = [ "Webkit", "Moz", "ms" ], + emptyStyle = document.createElement( "div" ).style; + +// Return a css property mapped to a potentially vendor prefixed property +function vendorPropName( name ) { + + // Shortcut for names that are not vendor prefixed + if ( name in emptyStyle ) { + return name; + } + + // Check for vendor prefixed names + var capName = name[ 0 ].toUpperCase() + name.slice( 1 ), + i = cssPrefixes.length; + + while ( i-- ) { + name = cssPrefixes[ i ] + capName; + if ( name in emptyStyle ) { + return name; + } + } +} + +// Return a property mapped along what jQuery.cssProps suggests or to +// a vendor prefixed property. +function finalPropName( name ) { + var ret = jQuery.cssProps[ name ]; + if ( !ret ) { + ret = jQuery.cssProps[ name ] = vendorPropName( name ) || name; + } + return ret; +} + +function setPositiveNumber( elem, value, subtract ) { + + // Any relative (+/-) values have already been + // normalized at this point + var matches = rcssNum.exec( value ); + return matches ? 
+ + // Guard against undefined "subtract", e.g., when used as in cssHooks + Math.max( 0, matches[ 2 ] - ( subtract || 0 ) ) + ( matches[ 3 ] || "px" ) : + value; +} + +function boxModelAdjustment( elem, dimension, box, isBorderBox, styles, computedVal ) { + var i = dimension === "width" ? 1 : 0, + extra = 0, + delta = 0; + + // Adjustment may not be necessary + if ( box === ( isBorderBox ? "border" : "content" ) ) { + return 0; + } + + for ( ; i < 4; i += 2 ) { + + // Both box models exclude margin + if ( box === "margin" ) { + delta += jQuery.css( elem, box + cssExpand[ i ], true, styles ); + } + + // If we get here with a content-box, we're seeking "padding" or "border" or "margin" + if ( !isBorderBox ) { + + // Add padding + delta += jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); + + // For "border" or "margin", add border + if ( box !== "padding" ) { + delta += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); + + // But still keep track of it otherwise + } else { + extra += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); + } + + // If we get here with a border-box (content + padding + border), we're seeking "content" or + // "padding" or "margin" + } else { + + // For "content", subtract padding + if ( box === "content" ) { + delta -= jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); + } + + // For "content" or "padding", subtract border + if ( box !== "margin" ) { + delta -= jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); + } + } + } + + // Account for positive content-box scroll gutter when requested by providing computedVal + if ( !isBorderBox && computedVal >= 0 ) { + + // offsetWidth/offsetHeight is a rounded sum of content, padding, scroll gutter, and border + // Assuming integer scroll gutter, subtract the rest and round down + delta += Math.max( 0, Math.ceil( + elem[ "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ) ] - + computedVal - + delta - + extra - + 0.5 + ) ); + } + + return delta; +} + +function getWidthOrHeight( elem, dimension, extra ) { + + // Start with computed style + var styles = getStyles( elem ), + val = curCSS( elem, dimension, styles ), + isBorderBox = jQuery.css( elem, "boxSizing", false, styles ) === "border-box", + valueIsBorderBox = isBorderBox; + + // Support: Firefox <=54 + // Return a confounding non-pixel value or feign ignorance, as appropriate. + if ( rnumnonpx.test( val ) ) { + if ( !extra ) { + return val; + } + val = "auto"; + } + + // Check for style in case a browser which returns unreliable values + // for getComputedStyle silently falls back to the reliable elem.style + valueIsBorderBox = valueIsBorderBox && + ( support.boxSizingReliable() || val === elem.style[ dimension ] ); + + // Fall back to offsetWidth/offsetHeight when value is "auto" + // This happens for inline elements with no explicit setting (gh-3571) + // Support: Android <=4.1 - 4.3 only + // Also use offsetWidth/offsetHeight for misreported inline dimensions (gh-3602) + if ( val === "auto" || + !parseFloat( val ) && jQuery.css( elem, "display", false, styles ) === "inline" ) { + + val = elem[ "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ) ]; + + // offsetWidth/offsetHeight provide border-box values + valueIsBorderBox = true; + } + + // Normalize "" and auto + val = parseFloat( val ) || 0; + + // Adjust for the element's box model + return ( val + + boxModelAdjustment( + elem, + dimension, + extra || ( isBorderBox ? 
"border" : "content" ), + valueIsBorderBox, + styles, + + // Provide the current computed size to request scroll gutter calculation (gh-3589) + val + ) + ) + "px"; +} + +jQuery.extend( { + + // Add in style property hooks for overriding the default + // behavior of getting and setting a style property + cssHooks: { + opacity: { + get: function( elem, computed ) { + if ( computed ) { + + // We should always get a number back from opacity + var ret = curCSS( elem, "opacity" ); + return ret === "" ? "1" : ret; + } + } + } + }, + + // Don't automatically add "px" to these possibly-unitless properties + cssNumber: { + "animationIterationCount": true, + "columnCount": true, + "fillOpacity": true, + "flexGrow": true, + "flexShrink": true, + "fontWeight": true, + "lineHeight": true, + "opacity": true, + "order": true, + "orphans": true, + "widows": true, + "zIndex": true, + "zoom": true + }, + + // Add in properties whose names you wish to fix before + // setting or getting the value + cssProps: {}, + + // Get and set the style property on a DOM Node + style: function( elem, name, value, extra ) { + + // Don't set styles on text and comment nodes + if ( !elem || elem.nodeType === 3 || elem.nodeType === 8 || !elem.style ) { + return; + } + + // Make sure that we're working with the right name + var ret, type, hooks, + origName = camelCase( name ), + isCustomProp = rcustomProp.test( name ), + style = elem.style; + + // Make sure that we're working with the right name. We don't + // want to query the value if it is a CSS custom property + // since they are user-defined. + if ( !isCustomProp ) { + name = finalPropName( origName ); + } + + // Gets hook for the prefixed version, then unprefixed version + hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; + + // Check if we're setting a value + if ( value !== undefined ) { + type = typeof value; + + // Convert "+=" or "-=" to relative numbers (#7345) + if ( type === "string" && ( ret = rcssNum.exec( value ) ) && ret[ 1 ] ) { + value = adjustCSS( elem, name, ret ); + + // Fixes bug #9237 + type = "number"; + } + + // Make sure that null and NaN values aren't set (#7116) + if ( value == null || value !== value ) { + return; + } + + // If a number was passed in, add the unit (except for certain CSS properties) + if ( type === "number" ) { + value += ret && ret[ 3 ] || ( jQuery.cssNumber[ origName ] ? "" : "px" ); + } + + // background-* props affect original clone's values + if ( !support.clearCloneStyle && value === "" && name.indexOf( "background" ) === 0 ) { + style[ name ] = "inherit"; + } + + // If a hook was provided, use that value, otherwise just set the specified value + if ( !hooks || !( "set" in hooks ) || + ( value = hooks.set( elem, value, extra ) ) !== undefined ) { + + if ( isCustomProp ) { + style.setProperty( name, value ); + } else { + style[ name ] = value; + } + } + + } else { + + // If a hook was provided get the non-computed value from there + if ( hooks && "get" in hooks && + ( ret = hooks.get( elem, false, extra ) ) !== undefined ) { + + return ret; + } + + // Otherwise just get the value from the style object + return style[ name ]; + } + }, + + css: function( elem, name, extra, styles ) { + var val, num, hooks, + origName = camelCase( name ), + isCustomProp = rcustomProp.test( name ); + + // Make sure that we're working with the right name. We don't + // want to modify the value if it is a CSS custom property + // since they are user-defined. 
+ if ( !isCustomProp ) { + name = finalPropName( origName ); + } + + // Try prefixed name followed by the unprefixed name + hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; + + // If a hook was provided get the computed value from there + if ( hooks && "get" in hooks ) { + val = hooks.get( elem, true, extra ); + } + + // Otherwise, if a way to get the computed value exists, use that + if ( val === undefined ) { + val = curCSS( elem, name, styles ); + } + + // Convert "normal" to computed value + if ( val === "normal" && name in cssNormalTransform ) { + val = cssNormalTransform[ name ]; + } + + // Make numeric if forced or a qualifier was provided and val looks numeric + if ( extra === "" || extra ) { + num = parseFloat( val ); + return extra === true || isFinite( num ) ? num || 0 : val; + } + + return val; + } +} ); + +jQuery.each( [ "height", "width" ], function( i, dimension ) { + jQuery.cssHooks[ dimension ] = { + get: function( elem, computed, extra ) { + if ( computed ) { + + // Certain elements can have dimension info if we invisibly show them + // but it must have a current display style that would benefit + return rdisplayswap.test( jQuery.css( elem, "display" ) ) && + + // Support: Safari 8+ + // Table columns in Safari have non-zero offsetWidth & zero + // getBoundingClientRect().width unless display is changed. + // Support: IE <=11 only + // Running getBoundingClientRect on a disconnected node + // in IE throws an error. + ( !elem.getClientRects().length || !elem.getBoundingClientRect().width ) ? + swap( elem, cssShow, function() { + return getWidthOrHeight( elem, dimension, extra ); + } ) : + getWidthOrHeight( elem, dimension, extra ); + } + }, + + set: function( elem, value, extra ) { + var matches, + styles = getStyles( elem ), + isBorderBox = jQuery.css( elem, "boxSizing", false, styles ) === "border-box", + subtract = extra && boxModelAdjustment( + elem, + dimension, + extra, + isBorderBox, + styles + ); + + // Account for unreliable border-box dimensions by comparing offset* to computed and + // faking a content-box to get border and padding (gh-3699) + if ( isBorderBox && support.scrollboxSize() === styles.position ) { + subtract -= Math.ceil( + elem[ "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ) ] - + parseFloat( styles[ dimension ] ) - + boxModelAdjustment( elem, dimension, "border", false, styles ) - + 0.5 + ); + } + + // Convert to pixels if value adjustment is needed + if ( subtract && ( matches = rcssNum.exec( value ) ) && + ( matches[ 3 ] || "px" ) !== "px" ) { + + elem.style[ dimension ] = value; + value = jQuery.css( elem, dimension ); + } + + return setPositiveNumber( elem, value, subtract ); + } + }; +} ); + +jQuery.cssHooks.marginLeft = addGetHookIf( support.reliableMarginLeft, + function( elem, computed ) { + if ( computed ) { + return ( parseFloat( curCSS( elem, "marginLeft" ) ) || + elem.getBoundingClientRect().left - + swap( elem, { marginLeft: 0 }, function() { + return elem.getBoundingClientRect().left; + } ) + ) + "px"; + } + } +); + +// These hooks are used by animate to expand properties +jQuery.each( { + margin: "", + padding: "", + border: "Width" +}, function( prefix, suffix ) { + jQuery.cssHooks[ prefix + suffix ] = { + expand: function( value ) { + var i = 0, + expanded = {}, + + // Assumes a single number if not a string + parts = typeof value === "string" ? 
value.split( " " ) : [ value ]; + + for ( ; i < 4; i++ ) { + expanded[ prefix + cssExpand[ i ] + suffix ] = + parts[ i ] || parts[ i - 2 ] || parts[ 0 ]; + } + + return expanded; + } + }; + + if ( prefix !== "margin" ) { + jQuery.cssHooks[ prefix + suffix ].set = setPositiveNumber; + } +} ); + +jQuery.fn.extend( { + css: function( name, value ) { + return access( this, function( elem, name, value ) { + var styles, len, + map = {}, + i = 0; + + if ( Array.isArray( name ) ) { + styles = getStyles( elem ); + len = name.length; + + for ( ; i < len; i++ ) { + map[ name[ i ] ] = jQuery.css( elem, name[ i ], false, styles ); + } + + return map; + } + + return value !== undefined ? + jQuery.style( elem, name, value ) : + jQuery.css( elem, name ); + }, name, value, arguments.length > 1 ); + } +} ); + + +function Tween( elem, options, prop, end, easing ) { + return new Tween.prototype.init( elem, options, prop, end, easing ); +} +jQuery.Tween = Tween; + +Tween.prototype = { + constructor: Tween, + init: function( elem, options, prop, end, easing, unit ) { + this.elem = elem; + this.prop = prop; + this.easing = easing || jQuery.easing._default; + this.options = options; + this.start = this.now = this.cur(); + this.end = end; + this.unit = unit || ( jQuery.cssNumber[ prop ] ? "" : "px" ); + }, + cur: function() { + var hooks = Tween.propHooks[ this.prop ]; + + return hooks && hooks.get ? + hooks.get( this ) : + Tween.propHooks._default.get( this ); + }, + run: function( percent ) { + var eased, + hooks = Tween.propHooks[ this.prop ]; + + if ( this.options.duration ) { + this.pos = eased = jQuery.easing[ this.easing ]( + percent, this.options.duration * percent, 0, 1, this.options.duration + ); + } else { + this.pos = eased = percent; + } + this.now = ( this.end - this.start ) * eased + this.start; + + if ( this.options.step ) { + this.options.step.call( this.elem, this.now, this ); + } + + if ( hooks && hooks.set ) { + hooks.set( this ); + } else { + Tween.propHooks._default.set( this ); + } + return this; + } +}; + +Tween.prototype.init.prototype = Tween.prototype; + +Tween.propHooks = { + _default: { + get: function( tween ) { + var result; + + // Use a property on the element directly when it is not a DOM element, + // or when there is no matching style property that exists. + if ( tween.elem.nodeType !== 1 || + tween.elem[ tween.prop ] != null && tween.elem.style[ tween.prop ] == null ) { + return tween.elem[ tween.prop ]; + } + + // Passing an empty string as a 3rd parameter to .css will automatically + // attempt a parseFloat and fallback to a string if the parse fails. + // Simple values such as "10px" are parsed to Float; + // complex values such as "rotate(1rad)" are returned as-is. + result = jQuery.css( tween.elem, tween.prop, "" ); + + // Empty strings, null, undefined and "auto" are converted to 0. + return !result || result === "auto" ? 0 : result; + }, + set: function( tween ) { + + // Use step hook for back compat. + // Use cssHook if its there. + // Use .style if available and use plain properties where available. 
+ if ( jQuery.fx.step[ tween.prop ] ) { + jQuery.fx.step[ tween.prop ]( tween ); + } else if ( tween.elem.nodeType === 1 && + ( tween.elem.style[ jQuery.cssProps[ tween.prop ] ] != null || + jQuery.cssHooks[ tween.prop ] ) ) { + jQuery.style( tween.elem, tween.prop, tween.now + tween.unit ); + } else { + tween.elem[ tween.prop ] = tween.now; + } + } + } +}; + +// Support: IE <=9 only +// Panic based approach to setting things on disconnected nodes +Tween.propHooks.scrollTop = Tween.propHooks.scrollLeft = { + set: function( tween ) { + if ( tween.elem.nodeType && tween.elem.parentNode ) { + tween.elem[ tween.prop ] = tween.now; + } + } +}; + +jQuery.easing = { + linear: function( p ) { + return p; + }, + swing: function( p ) { + return 0.5 - Math.cos( p * Math.PI ) / 2; + }, + _default: "swing" +}; + +jQuery.fx = Tween.prototype.init; + +// Back compat <1.8 extension point +jQuery.fx.step = {}; + + + + +var + fxNow, inProgress, + rfxtypes = /^(?:toggle|show|hide)$/, + rrun = /queueHooks$/; + +function schedule() { + if ( inProgress ) { + if ( document.hidden === false && window.requestAnimationFrame ) { + window.requestAnimationFrame( schedule ); + } else { + window.setTimeout( schedule, jQuery.fx.interval ); + } + + jQuery.fx.tick(); + } +} + +// Animations created synchronously will run synchronously +function createFxNow() { + window.setTimeout( function() { + fxNow = undefined; + } ); + return ( fxNow = Date.now() ); +} + +// Generate parameters to create a standard animation +function genFx( type, includeWidth ) { + var which, + i = 0, + attrs = { height: type }; + + // If we include width, step value is 1 to do all cssExpand values, + // otherwise step value is 2 to skip over Left and Right + includeWidth = includeWidth ? 1 : 0; + for ( ; i < 4; i += 2 - includeWidth ) { + which = cssExpand[ i ]; + attrs[ "margin" + which ] = attrs[ "padding" + which ] = type; + } + + if ( includeWidth ) { + attrs.opacity = attrs.width = type; + } + + return attrs; +} + +function createTween( value, prop, animation ) { + var tween, + collection = ( Animation.tweeners[ prop ] || [] ).concat( Animation.tweeners[ "*" ] ), + index = 0, + length = collection.length; + for ( ; index < length; index++ ) { + if ( ( tween = collection[ index ].call( animation, prop, value ) ) ) { + + // We're done with this property + return tween; + } + } +} + +function defaultPrefilter( elem, props, opts ) { + var prop, value, toggle, hooks, oldfire, propTween, restoreDisplay, display, + isBox = "width" in props || "height" in props, + anim = this, + orig = {}, + style = elem.style, + hidden = elem.nodeType && isHiddenWithinTree( elem ), + dataShow = dataPriv.get( elem, "fxshow" ); + + // Queue-skipping animations hijack the fx hooks + if ( !opts.queue ) { + hooks = jQuery._queueHooks( elem, "fx" ); + if ( hooks.unqueued == null ) { + hooks.unqueued = 0; + oldfire = hooks.empty.fire; + hooks.empty.fire = function() { + if ( !hooks.unqueued ) { + oldfire(); + } + }; + } + hooks.unqueued++; + + anim.always( function() { + + // Ensure the complete handler is called before this completes + anim.always( function() { + hooks.unqueued--; + if ( !jQuery.queue( elem, "fx" ).length ) { + hooks.empty.fire(); + } + } ); + } ); + } + + // Detect show/hide animations + for ( prop in props ) { + value = props[ prop ]; + if ( rfxtypes.test( value ) ) { + delete props[ prop ]; + toggle = toggle || value === "toggle"; + if ( value === ( hidden ? 
"hide" : "show" ) ) { + + // Pretend to be hidden if this is a "show" and + // there is still data from a stopped show/hide + if ( value === "show" && dataShow && dataShow[ prop ] !== undefined ) { + hidden = true; + + // Ignore all other no-op show/hide data + } else { + continue; + } + } + orig[ prop ] = dataShow && dataShow[ prop ] || jQuery.style( elem, prop ); + } + } + + // Bail out if this is a no-op like .hide().hide() + propTween = !jQuery.isEmptyObject( props ); + if ( !propTween && jQuery.isEmptyObject( orig ) ) { + return; + } + + // Restrict "overflow" and "display" styles during box animations + if ( isBox && elem.nodeType === 1 ) { + + // Support: IE <=9 - 11, Edge 12 - 15 + // Record all 3 overflow attributes because IE does not infer the shorthand + // from identically-valued overflowX and overflowY and Edge just mirrors + // the overflowX value there. + opts.overflow = [ style.overflow, style.overflowX, style.overflowY ]; + + // Identify a display type, preferring old show/hide data over the CSS cascade + restoreDisplay = dataShow && dataShow.display; + if ( restoreDisplay == null ) { + restoreDisplay = dataPriv.get( elem, "display" ); + } + display = jQuery.css( elem, "display" ); + if ( display === "none" ) { + if ( restoreDisplay ) { + display = restoreDisplay; + } else { + + // Get nonempty value(s) by temporarily forcing visibility + showHide( [ elem ], true ); + restoreDisplay = elem.style.display || restoreDisplay; + display = jQuery.css( elem, "display" ); + showHide( [ elem ] ); + } + } + + // Animate inline elements as inline-block + if ( display === "inline" || display === "inline-block" && restoreDisplay != null ) { + if ( jQuery.css( elem, "float" ) === "none" ) { + + // Restore the original display value at the end of pure show/hide animations + if ( !propTween ) { + anim.done( function() { + style.display = restoreDisplay; + } ); + if ( restoreDisplay == null ) { + display = style.display; + restoreDisplay = display === "none" ? "" : display; + } + } + style.display = "inline-block"; + } + } + } + + if ( opts.overflow ) { + style.overflow = "hidden"; + anim.always( function() { + style.overflow = opts.overflow[ 0 ]; + style.overflowX = opts.overflow[ 1 ]; + style.overflowY = opts.overflow[ 2 ]; + } ); + } + + // Implement show/hide animations + propTween = false; + for ( prop in orig ) { + + // General show/hide setup for this element animation + if ( !propTween ) { + if ( dataShow ) { + if ( "hidden" in dataShow ) { + hidden = dataShow.hidden; + } + } else { + dataShow = dataPriv.access( elem, "fxshow", { display: restoreDisplay } ); + } + + // Store hidden/visible for toggle so `.stop().toggle()` "reverses" + if ( toggle ) { + dataShow.hidden = !hidden; + } + + // Show elements before animating them + if ( hidden ) { + showHide( [ elem ], true ); + } + + /* eslint-disable no-loop-func */ + + anim.done( function() { + + /* eslint-enable no-loop-func */ + + // The final step of a "hide" animation is actually hiding the element + if ( !hidden ) { + showHide( [ elem ] ); + } + dataPriv.remove( elem, "fxshow" ); + for ( prop in orig ) { + jQuery.style( elem, prop, orig[ prop ] ); + } + } ); + } + + // Per-property setup + propTween = createTween( hidden ? 
dataShow[ prop ] : 0, prop, anim ); + if ( !( prop in dataShow ) ) { + dataShow[ prop ] = propTween.start; + if ( hidden ) { + propTween.end = propTween.start; + propTween.start = 0; + } + } + } +} + +function propFilter( props, specialEasing ) { + var index, name, easing, value, hooks; + + // camelCase, specialEasing and expand cssHook pass + for ( index in props ) { + name = camelCase( index ); + easing = specialEasing[ name ]; + value = props[ index ]; + if ( Array.isArray( value ) ) { + easing = value[ 1 ]; + value = props[ index ] = value[ 0 ]; + } + + if ( index !== name ) { + props[ name ] = value; + delete props[ index ]; + } + + hooks = jQuery.cssHooks[ name ]; + if ( hooks && "expand" in hooks ) { + value = hooks.expand( value ); + delete props[ name ]; + + // Not quite $.extend, this won't overwrite existing keys. + // Reusing 'index' because we have the correct "name" + for ( index in value ) { + if ( !( index in props ) ) { + props[ index ] = value[ index ]; + specialEasing[ index ] = easing; + } + } + } else { + specialEasing[ name ] = easing; + } + } +} + +function Animation( elem, properties, options ) { + var result, + stopped, + index = 0, + length = Animation.prefilters.length, + deferred = jQuery.Deferred().always( function() { + + // Don't match elem in the :animated selector + delete tick.elem; + } ), + tick = function() { + if ( stopped ) { + return false; + } + var currentTime = fxNow || createFxNow(), + remaining = Math.max( 0, animation.startTime + animation.duration - currentTime ), + + // Support: Android 2.3 only + // Archaic crash bug won't allow us to use `1 - ( 0.5 || 0 )` (#12497) + temp = remaining / animation.duration || 0, + percent = 1 - temp, + index = 0, + length = animation.tweens.length; + + for ( ; index < length; index++ ) { + animation.tweens[ index ].run( percent ); + } + + deferred.notifyWith( elem, [ animation, percent, remaining ] ); + + // If there's more to do, yield + if ( percent < 1 && length ) { + return remaining; + } + + // If this was an empty animation, synthesize a final progress notification + if ( !length ) { + deferred.notifyWith( elem, [ animation, 1, 0 ] ); + } + + // Resolve the animation and report its conclusion + deferred.resolveWith( elem, [ animation ] ); + return false; + }, + animation = deferred.promise( { + elem: elem, + props: jQuery.extend( {}, properties ), + opts: jQuery.extend( true, { + specialEasing: {}, + easing: jQuery.easing._default + }, options ), + originalProperties: properties, + originalOptions: options, + startTime: fxNow || createFxNow(), + duration: options.duration, + tweens: [], + createTween: function( prop, end ) { + var tween = jQuery.Tween( elem, animation.opts, prop, end, + animation.opts.specialEasing[ prop ] || animation.opts.easing ); + animation.tweens.push( tween ); + return tween; + }, + stop: function( gotoEnd ) { + var index = 0, + + // If we are going to the end, we want to run all the tweens + // otherwise we skip this part + length = gotoEnd ? 
animation.tweens.length : 0; + if ( stopped ) { + return this; + } + stopped = true; + for ( ; index < length; index++ ) { + animation.tweens[ index ].run( 1 ); + } + + // Resolve when we played the last frame; otherwise, reject + if ( gotoEnd ) { + deferred.notifyWith( elem, [ animation, 1, 0 ] ); + deferred.resolveWith( elem, [ animation, gotoEnd ] ); + } else { + deferred.rejectWith( elem, [ animation, gotoEnd ] ); + } + return this; + } + } ), + props = animation.props; + + propFilter( props, animation.opts.specialEasing ); + + for ( ; index < length; index++ ) { + result = Animation.prefilters[ index ].call( animation, elem, props, animation.opts ); + if ( result ) { + if ( isFunction( result.stop ) ) { + jQuery._queueHooks( animation.elem, animation.opts.queue ).stop = + result.stop.bind( result ); + } + return result; + } + } + + jQuery.map( props, createTween, animation ); + + if ( isFunction( animation.opts.start ) ) { + animation.opts.start.call( elem, animation ); + } + + // Attach callbacks from options + animation + .progress( animation.opts.progress ) + .done( animation.opts.done, animation.opts.complete ) + .fail( animation.opts.fail ) + .always( animation.opts.always ); + + jQuery.fx.timer( + jQuery.extend( tick, { + elem: elem, + anim: animation, + queue: animation.opts.queue + } ) + ); + + return animation; +} + +jQuery.Animation = jQuery.extend( Animation, { + + tweeners: { + "*": [ function( prop, value ) { + var tween = this.createTween( prop, value ); + adjustCSS( tween.elem, prop, rcssNum.exec( value ), tween ); + return tween; + } ] + }, + + tweener: function( props, callback ) { + if ( isFunction( props ) ) { + callback = props; + props = [ "*" ]; + } else { + props = props.match( rnothtmlwhite ); + } + + var prop, + index = 0, + length = props.length; + + for ( ; index < length; index++ ) { + prop = props[ index ]; + Animation.tweeners[ prop ] = Animation.tweeners[ prop ] || []; + Animation.tweeners[ prop ].unshift( callback ); + } + }, + + prefilters: [ defaultPrefilter ], + + prefilter: function( callback, prepend ) { + if ( prepend ) { + Animation.prefilters.unshift( callback ); + } else { + Animation.prefilters.push( callback ); + } + } +} ); + +jQuery.speed = function( speed, easing, fn ) { + var opt = speed && typeof speed === "object" ? 
jQuery.extend( {}, speed ) : { + complete: fn || !fn && easing || + isFunction( speed ) && speed, + duration: speed, + easing: fn && easing || easing && !isFunction( easing ) && easing + }; + + // Go to the end state if fx are off + if ( jQuery.fx.off ) { + opt.duration = 0; + + } else { + if ( typeof opt.duration !== "number" ) { + if ( opt.duration in jQuery.fx.speeds ) { + opt.duration = jQuery.fx.speeds[ opt.duration ]; + + } else { + opt.duration = jQuery.fx.speeds._default; + } + } + } + + // Normalize opt.queue - true/undefined/null -> "fx" + if ( opt.queue == null || opt.queue === true ) { + opt.queue = "fx"; + } + + // Queueing + opt.old = opt.complete; + + opt.complete = function() { + if ( isFunction( opt.old ) ) { + opt.old.call( this ); + } + + if ( opt.queue ) { + jQuery.dequeue( this, opt.queue ); + } + }; + + return opt; +}; + +jQuery.fn.extend( { + fadeTo: function( speed, to, easing, callback ) { + + // Show any hidden elements after setting opacity to 0 + return this.filter( isHiddenWithinTree ).css( "opacity", 0 ).show() + + // Animate to the value specified + .end().animate( { opacity: to }, speed, easing, callback ); + }, + animate: function( prop, speed, easing, callback ) { + var empty = jQuery.isEmptyObject( prop ), + optall = jQuery.speed( speed, easing, callback ), + doAnimation = function() { + + // Operate on a copy of prop so per-property easing won't be lost + var anim = Animation( this, jQuery.extend( {}, prop ), optall ); + + // Empty animations, or finishing resolves immediately + if ( empty || dataPriv.get( this, "finish" ) ) { + anim.stop( true ); + } + }; + doAnimation.finish = doAnimation; + + return empty || optall.queue === false ? + this.each( doAnimation ) : + this.queue( optall.queue, doAnimation ); + }, + stop: function( type, clearQueue, gotoEnd ) { + var stopQueue = function( hooks ) { + var stop = hooks.stop; + delete hooks.stop; + stop( gotoEnd ); + }; + + if ( typeof type !== "string" ) { + gotoEnd = clearQueue; + clearQueue = type; + type = undefined; + } + if ( clearQueue && type !== false ) { + this.queue( type || "fx", [] ); + } + + return this.each( function() { + var dequeue = true, + index = type != null && type + "queueHooks", + timers = jQuery.timers, + data = dataPriv.get( this ); + + if ( index ) { + if ( data[ index ] && data[ index ].stop ) { + stopQueue( data[ index ] ); + } + } else { + for ( index in data ) { + if ( data[ index ] && data[ index ].stop && rrun.test( index ) ) { + stopQueue( data[ index ] ); + } + } + } + + for ( index = timers.length; index--; ) { + if ( timers[ index ].elem === this && + ( type == null || timers[ index ].queue === type ) ) { + + timers[ index ].anim.stop( gotoEnd ); + dequeue = false; + timers.splice( index, 1 ); + } + } + + // Start the next in the queue if the last step wasn't forced. + // Timers currently will call their complete callbacks, which + // will dequeue but only if they were gotoEnd. + if ( dequeue || !gotoEnd ) { + jQuery.dequeue( this, type ); + } + } ); + }, + finish: function( type ) { + if ( type !== false ) { + type = type || "fx"; + } + return this.each( function() { + var index, + data = dataPriv.get( this ), + queue = data[ type + "queue" ], + hooks = data[ type + "queueHooks" ], + timers = jQuery.timers, + length = queue ? 
queue.length : 0; + + // Enable finishing flag on private data + data.finish = true; + + // Empty the queue first + jQuery.queue( this, type, [] ); + + if ( hooks && hooks.stop ) { + hooks.stop.call( this, true ); + } + + // Look for any active animations, and finish them + for ( index = timers.length; index--; ) { + if ( timers[ index ].elem === this && timers[ index ].queue === type ) { + timers[ index ].anim.stop( true ); + timers.splice( index, 1 ); + } + } + + // Look for any animations in the old queue and finish them + for ( index = 0; index < length; index++ ) { + if ( queue[ index ] && queue[ index ].finish ) { + queue[ index ].finish.call( this ); + } + } + + // Turn off finishing flag + delete data.finish; + } ); + } +} ); + +jQuery.each( [ "toggle", "show", "hide" ], function( i, name ) { + var cssFn = jQuery.fn[ name ]; + jQuery.fn[ name ] = function( speed, easing, callback ) { + return speed == null || typeof speed === "boolean" ? + cssFn.apply( this, arguments ) : + this.animate( genFx( name, true ), speed, easing, callback ); + }; +} ); + +// Generate shortcuts for custom animations +jQuery.each( { + slideDown: genFx( "show" ), + slideUp: genFx( "hide" ), + slideToggle: genFx( "toggle" ), + fadeIn: { opacity: "show" }, + fadeOut: { opacity: "hide" }, + fadeToggle: { opacity: "toggle" } +}, function( name, props ) { + jQuery.fn[ name ] = function( speed, easing, callback ) { + return this.animate( props, speed, easing, callback ); + }; +} ); + +jQuery.timers = []; +jQuery.fx.tick = function() { + var timer, + i = 0, + timers = jQuery.timers; + + fxNow = Date.now(); + + for ( ; i < timers.length; i++ ) { + timer = timers[ i ]; + + // Run the timer and safely remove it when done (allowing for external removal) + if ( !timer() && timers[ i ] === timer ) { + timers.splice( i--, 1 ); + } + } + + if ( !timers.length ) { + jQuery.fx.stop(); + } + fxNow = undefined; +}; + +jQuery.fx.timer = function( timer ) { + jQuery.timers.push( timer ); + jQuery.fx.start(); +}; + +jQuery.fx.interval = 13; +jQuery.fx.start = function() { + if ( inProgress ) { + return; + } + + inProgress = true; + schedule(); +}; + +jQuery.fx.stop = function() { + inProgress = null; +}; + +jQuery.fx.speeds = { + slow: 600, + fast: 200, + + // Default speed + _default: 400 +}; + + +// Based off of the plugin by Clint Helfers, with permission. +// https://web.archive.org/web/20100324014747/http://blindsignals.com/index.php/2009/07/jquery-delay/ +jQuery.fn.delay = function( time, type ) { + time = jQuery.fx ? 
jQuery.fx.speeds[ time ] || time : time; + type = type || "fx"; + + return this.queue( type, function( next, hooks ) { + var timeout = window.setTimeout( next, time ); + hooks.stop = function() { + window.clearTimeout( timeout ); + }; + } ); +}; + + +( function() { + var input = document.createElement( "input" ), + select = document.createElement( "select" ), + opt = select.appendChild( document.createElement( "option" ) ); + + input.type = "checkbox"; + + // Support: Android <=4.3 only + // Default value for a checkbox should be "on" + support.checkOn = input.value !== ""; + + // Support: IE <=11 only + // Must access selectedIndex to make default options select + support.optSelected = opt.selected; + + // Support: IE <=11 only + // An input loses its value after becoming a radio + input = document.createElement( "input" ); + input.value = "t"; + input.type = "radio"; + support.radioValue = input.value === "t"; +} )(); + + +var boolHook, + attrHandle = jQuery.expr.attrHandle; + +jQuery.fn.extend( { attr: function( name, value ) { - return jQuery.access( this, jQuery.attr, name, value, arguments.length > 1 ); + return access( this, jQuery.attr, name, value, arguments.length > 1 ); }, removeAttr: function( name ) { - return this.each(function() { + return this.each( function() { jQuery.removeAttr( this, name ); - }); + } ); + } +} ); + +jQuery.extend( { + attr: function( elem, name, value ) { + var ret, hooks, + nType = elem.nodeType; + + // Don't get/set attributes on text, comment and attribute nodes + if ( nType === 3 || nType === 8 || nType === 2 ) { + return; + } + + // Fallback to prop when attributes are not supported + if ( typeof elem.getAttribute === "undefined" ) { + return jQuery.prop( elem, name, value ); + } + + // Attribute hooks are determined by the lowercase version + // Grab necessary hook if one is defined + if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { + hooks = jQuery.attrHooks[ name.toLowerCase() ] || + ( jQuery.expr.match.bool.test( name ) ? boolHook : undefined ); + } + + if ( value !== undefined ) { + if ( value === null ) { + jQuery.removeAttr( elem, name ); + return; + } + + if ( hooks && "set" in hooks && + ( ret = hooks.set( elem, value, name ) ) !== undefined ) { + return ret; + } + + elem.setAttribute( name, value + "" ); + return value; + } + + if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { + return ret; + } + + ret = jQuery.find.attr( elem, name ); + + // Non-existent attributes return null, we normalize to undefined + return ret == null ? 
undefined : ret; }, + attrHooks: { + type: { + set: function( elem, value ) { + if ( !support.radioValue && value === "radio" && + nodeName( elem, "input" ) ) { + var val = elem.value; + elem.setAttribute( "type", value ); + if ( val ) { + elem.value = val; + } + return value; + } + } + } + }, + + removeAttr: function( elem, value ) { + var name, + i = 0, + + // Attribute names can contain non-HTML whitespace characters + // https://html.spec.whatwg.org/multipage/syntax.html#attributes-2 + attrNames = value && value.match( rnothtmlwhite ); + + if ( attrNames && elem.nodeType === 1 ) { + while ( ( name = attrNames[ i++ ] ) ) { + elem.removeAttribute( name ); + } + } + } +} ); + +// Hooks for boolean attributes +boolHook = { + set: function( elem, value, name ) { + if ( value === false ) { + + // Remove boolean attributes when set to false + jQuery.removeAttr( elem, name ); + } else { + elem.setAttribute( name, name ); + } + return name; + } +}; + +jQuery.each( jQuery.expr.match.bool.source.match( /\w+/g ), function( i, name ) { + var getter = attrHandle[ name ] || jQuery.find.attr; + + attrHandle[ name ] = function( elem, name, isXML ) { + var ret, handle, + lowercaseName = name.toLowerCase(); + + if ( !isXML ) { + + // Avoid an infinite loop by temporarily removing this function from the getter + handle = attrHandle[ lowercaseName ]; + attrHandle[ lowercaseName ] = ret; + ret = getter( elem, name, isXML ) != null ? + lowercaseName : + null; + attrHandle[ lowercaseName ] = handle; + } + return ret; + }; +} ); + + + + +var rfocusable = /^(?:input|select|textarea|button)$/i, + rclickable = /^(?:a|area)$/i; + +jQuery.fn.extend( { prop: function( name, value ) { - return jQuery.access( this, jQuery.prop, name, value, arguments.length > 1 ); + return access( this, jQuery.prop, name, value, arguments.length > 1 ); }, removeProp: function( name ) { - name = jQuery.propFix[ name ] || name; - return this.each(function() { - // try/catch handles cases where IE balks (such as removing a property on window) - try { - this[ name ] = undefined; - delete this[ name ]; - } catch( e ) {} - }); - }, + return this.each( function() { + delete this[ jQuery.propFix[ name ] || name ]; + } ); + } +} ); - addClass: function( value ) { - var classes, elem, cur, clazz, j, - i = 0, - len = this.length, - proceed = typeof value === "string" && value; +jQuery.extend( { + prop: function( elem, name, value ) { + var ret, hooks, + nType = elem.nodeType; - if ( jQuery.isFunction( value ) ) { - return this.each(function( j ) { - jQuery( this ).addClass( value.call( this, j, this.className ) ); - }); + // Don't get/set properties on text, comment and attribute nodes + if ( nType === 3 || nType === 8 || nType === 2 ) { + return; } - if ( proceed ) { - // The disjunction here is for better compressibility (see removeClass) - classes = ( value || "" ).match( core_rnotwhite ) || []; + if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { - for ( ; i < len; i++ ) { - elem = this[ i ]; - cur = elem.nodeType === 1 && ( elem.className ? 
- ( " " + elem.className + " " ).replace( rclass, " " ) : - " " - ); + // Fix name and attach hooks + name = jQuery.propFix[ name ] || name; + hooks = jQuery.propHooks[ name ]; + } + + if ( value !== undefined ) { + if ( hooks && "set" in hooks && + ( ret = hooks.set( elem, value, name ) ) !== undefined ) { + return ret; + } + + return ( elem[ name ] = value ); + } + + if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { + return ret; + } + + return elem[ name ]; + }, + + propHooks: { + tabIndex: { + get: function( elem ) { + + // Support: IE <=9 - 11 only + // elem.tabIndex doesn't always return the + // correct value when it hasn't been explicitly set + // https://web.archive.org/web/20141116233347/http://fluidproject.org/blog/2008/01/09/getting-setting-and-removing-tabindex-values-with-javascript/ + // Use proper attribute retrieval(#12072) + var tabindex = jQuery.find.attr( elem, "tabindex" ); + + if ( tabindex ) { + return parseInt( tabindex, 10 ); + } + + if ( + rfocusable.test( elem.nodeName ) || + rclickable.test( elem.nodeName ) && + elem.href + ) { + return 0; + } + + return -1; + } + } + }, + + propFix: { + "for": "htmlFor", + "class": "className" + } +} ); + +// Support: IE <=11 only +// Accessing the selectedIndex property +// forces the browser to respect setting selected +// on the option +// The getter ensures a default option is selected +// when in an optgroup +// eslint rule "no-unused-expressions" is disabled for this code +// since it considers such accessions noop +if ( !support.optSelected ) { + jQuery.propHooks.selected = { + get: function( elem ) { + + /* eslint no-unused-expressions: "off" */ + + var parent = elem.parentNode; + if ( parent && parent.parentNode ) { + parent.parentNode.selectedIndex; + } + return null; + }, + set: function( elem ) { + + /* eslint no-unused-expressions: "off" */ + + var parent = elem.parentNode; + if ( parent ) { + parent.selectedIndex; + + if ( parent.parentNode ) { + parent.parentNode.selectedIndex; + } + } + } + }; +} + +jQuery.each( [ + "tabIndex", + "readOnly", + "maxLength", + "cellSpacing", + "cellPadding", + "rowSpan", + "colSpan", + "useMap", + "frameBorder", + "contentEditable" +], function() { + jQuery.propFix[ this.toLowerCase() ] = this; +} ); + + + + + // Strip and collapse whitespace according to HTML spec + // https://infra.spec.whatwg.org/#strip-and-collapse-ascii-whitespace + function stripAndCollapse( value ) { + var tokens = value.match( rnothtmlwhite ) || []; + return tokens.join( " " ); + } + + +function getClass( elem ) { + return elem.getAttribute && elem.getAttribute( "class" ) || ""; +} + +function classesToArray( value ) { + if ( Array.isArray( value ) ) { + return value; + } + if ( typeof value === "string" ) { + return value.match( rnothtmlwhite ) || []; + } + return []; +} + +jQuery.fn.extend( { + addClass: function( value ) { + var classes, elem, cur, curValue, clazz, j, finalValue, + i = 0; + + if ( isFunction( value ) ) { + return this.each( function( j ) { + jQuery( this ).addClass( value.call( this, j, getClass( this ) ) ); + } ); + } + + classes = classesToArray( value ); + + if ( classes.length ) { + while ( ( elem = this[ i++ ] ) ) { + curValue = getClass( elem ); + cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); if ( cur ) { j = 0; - while ( (clazz = classes[j++]) ) { + while ( ( clazz = classes[ j++ ] ) ) { if ( cur.indexOf( " " + clazz + " " ) < 0 ) { cur += clazz + " "; } } - elem.className = jQuery.trim( cur ); + // Only assign if 
different to avoid unneeded rendering. + finalValue = stripAndCollapse( cur ); + if ( curValue !== finalValue ) { + elem.setAttribute( "class", finalValue ); + } } } } @@ -4119,36 +7840,43 @@ jQuery.fn.extend({ }, removeClass: function( value ) { - var classes, elem, cur, clazz, j, - i = 0, - len = this.length, - proceed = arguments.length === 0 || typeof value === "string" && value; + var classes, elem, cur, curValue, clazz, j, finalValue, + i = 0; - if ( jQuery.isFunction( value ) ) { - return this.each(function( j ) { - jQuery( this ).removeClass( value.call( this, j, this.className ) ); - }); + if ( isFunction( value ) ) { + return this.each( function( j ) { + jQuery( this ).removeClass( value.call( this, j, getClass( this ) ) ); + } ); } - if ( proceed ) { - classes = ( value || "" ).match( core_rnotwhite ) || []; - for ( ; i < len; i++ ) { - elem = this[ i ]; + if ( !arguments.length ) { + return this.attr( "class", "" ); + } + + classes = classesToArray( value ); + + if ( classes.length ) { + while ( ( elem = this[ i++ ] ) ) { + curValue = getClass( elem ); + // This expression is here for better compressibility (see addClass) - cur = elem.nodeType === 1 && ( elem.className ? - ( " " + elem.className + " " ).replace( rclass, " " ) : - "" - ); + cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); if ( cur ) { j = 0; - while ( (clazz = classes[j++]) ) { + while ( ( clazz = classes[ j++ ] ) ) { + // Remove *all* instances - while ( cur.indexOf( " " + clazz + " " ) >= 0 ) { + while ( cur.indexOf( " " + clazz + " " ) > -1 ) { cur = cur.replace( " " + clazz + " ", " " ); } } - elem.className = value ? jQuery.trim( cur ) : ""; + + // Only assign if different to avoid unneeded rendering. + finalValue = stripAndCollapse( cur ); + if ( curValue !== finalValue ) { + elem.setAttribute( "class", finalValue ); + } } } } @@ -4157,28 +7885,35 @@ jQuery.fn.extend({ }, toggleClass: function( value, stateVal ) { - var type = typeof value; + var type = typeof value, + isValidValue = type === "string" || Array.isArray( value ); - if ( typeof stateVal === "boolean" && type === "string" ) { + if ( typeof stateVal === "boolean" && isValidValue ) { return stateVal ? 
this.addClass( value ) : this.removeClass( value ); } - if ( jQuery.isFunction( value ) ) { - return this.each(function( i ) { - jQuery( this ).toggleClass( value.call(this, i, this.className, stateVal), stateVal ); - }); + if ( isFunction( value ) ) { + return this.each( function( i ) { + jQuery( this ).toggleClass( + value.call( this, i, getClass( this ), stateVal ), + stateVal + ); + } ); } - return this.each(function() { - if ( type === "string" ) { - // toggle individual class names - var className, - i = 0, - self = jQuery( this ), - classNames = value.match( core_rnotwhite ) || []; + return this.each( function() { + var className, i, self, classNames; - while ( (className = classNames[ i++ ]) ) { - // check each className given, space separated list + if ( isValidValue ) { + + // Toggle individual class names + i = 0; + self = jQuery( this ); + classNames = classesToArray( value ); + + while ( ( className = classNames[ i++ ] ) ) { + + // Check each className given, space separated list if ( self.hasClass( className ) ) { self.removeClass( className ); } else { @@ -4187,68 +7922,91 @@ jQuery.fn.extend({ } // Toggle whole class name - } else if ( type === core_strundefined || type === "boolean" ) { - if ( this.className ) { - // store className if set - jQuery._data( this, "__className__", this.className ); + } else if ( value === undefined || type === "boolean" ) { + className = getClass( this ); + if ( className ) { + + // Store className if set + dataPriv.set( this, "__className__", className ); } - // If the element has a class name or if we're passed "false", + // If the element has a class name or if we're passed `false`, // then remove the whole classname (if there was one, the above saved it). // Otherwise bring back whatever was previously saved (if anything), // falling back to the empty string if nothing was stored. - this.className = this.className || value === false ? "" : jQuery._data( this, "__className__" ) || ""; + if ( this.setAttribute ) { + this.setAttribute( "class", + className || value === false ? + "" : + dataPriv.get( this, "__className__" ) || "" + ); + } } - }); + } ); }, hasClass: function( selector ) { - var className = " " + selector + " ", - i = 0, - l = this.length; - for ( ; i < l; i++ ) { - if ( this[i].nodeType === 1 && (" " + this[i].className + " ").replace(rclass, " ").indexOf( className ) >= 0 ) { - return true; + var className, elem, + i = 0; + + className = " " + selector + " "; + while ( ( elem = this[ i++ ] ) ) { + if ( elem.nodeType === 1 && + ( " " + stripAndCollapse( getClass( elem ) ) + " " ).indexOf( className ) > -1 ) { + return true; } } return false; - }, + } +} ); + + + +var rreturn = /\r/g; + +jQuery.fn.extend( { val: function( value ) { - var ret, hooks, isFunction, - elem = this[0]; + var hooks, ret, valueIsFunction, + elem = this[ 0 ]; if ( !arguments.length ) { if ( elem ) { - hooks = jQuery.valHooks[ elem.type ] || jQuery.valHooks[ elem.nodeName.toLowerCase() ]; + hooks = jQuery.valHooks[ elem.type ] || + jQuery.valHooks[ elem.nodeName.toLowerCase() ]; - if ( hooks && "get" in hooks && (ret = hooks.get( elem, "value" )) !== undefined ) { + if ( hooks && + "get" in hooks && + ( ret = hooks.get( elem, "value" ) ) !== undefined + ) { return ret; } ret = elem.value; - return typeof ret === "string" ? - // handle most common string cases - ret.replace(rreturn, "") : - // handle cases where value is null/undef or number - ret == null ? 
"" : ret; + // Handle most common string cases + if ( typeof ret === "string" ) { + return ret.replace( rreturn, "" ); + } + + // Handle cases where value is null/undef or number + return ret == null ? "" : ret; } return; } - isFunction = jQuery.isFunction( value ); + valueIsFunction = isFunction( value ); - return this.each(function( i ) { + return this.each( function( i ) { var val; if ( this.nodeType !== 1 ) { return; } - if ( isFunction ) { + if ( valueIsFunction ) { val = value.call( this, i, jQuery( this ).val() ); } else { val = value; @@ -4257,56 +8015,70 @@ jQuery.fn.extend({ // Treat null/undefined as ""; convert numbers to string if ( val == null ) { val = ""; + } else if ( typeof val === "number" ) { val += ""; - } else if ( jQuery.isArray( val ) ) { - val = jQuery.map(val, function ( value ) { + + } else if ( Array.isArray( val ) ) { + val = jQuery.map( val, function( value ) { return value == null ? "" : value + ""; - }); + } ); } hooks = jQuery.valHooks[ this.type ] || jQuery.valHooks[ this.nodeName.toLowerCase() ]; // If set returns undefined, fall back to normal setting - if ( !hooks || !("set" in hooks) || hooks.set( this, val, "value" ) === undefined ) { + if ( !hooks || !( "set" in hooks ) || hooks.set( this, val, "value" ) === undefined ) { this.value = val; } - }); + } ); } -}); +} ); -jQuery.extend({ +jQuery.extend( { valHooks: { option: { get: function( elem ) { - // Use proper attribute retrieval(#6932, #12072) + var val = jQuery.find.attr( elem, "value" ); return val != null ? val : - elem.text; + + // Support: IE <=10 - 11 only + // option.text throws exceptions (#14686, #14858) + // Strip and collapse whitespace + // https://html.spec.whatwg.org/#strip-and-collapse-whitespace + stripAndCollapse( jQuery.text( elem ) ); } }, select: { get: function( elem ) { - var value, option, + var value, option, i, options = elem.options, index = elem.selectedIndex, - one = elem.type === "select-one" || index < 0, + one = elem.type === "select-one", values = one ? null : [], - max = one ? index + 1 : options.length, - i = index < 0 ? - max : - one ? index : 0; + max = one ? index + 1 : options.length; + + if ( index < 0 ) { + i = max; + + } else { + i = one ? index : 0; + } // Loop through all the selected options for ( ; i < max; i++ ) { option = options[ i ]; - // oldIE doesn't update selected after form reset (#2551) + // Support: IE <=9 only + // IE8-9 doesn't update selected after form reset (#2551) if ( ( option.selected || i === index ) && + // Don't return options that are disabled or in a disabled optgroup - ( jQuery.support.optDisabled ? 
!option.disabled : option.getAttribute("disabled") === null ) && - ( !option.parentNode.disabled || !jQuery.nodeName( option.parentNode, "optgroup" ) ) ) { + !option.disabled && + ( !option.parentNode.disabled || + !nodeName( option.parentNode, "optgroup" ) ) ) { // Get the specific value for the option value = jQuery( option ).val(); @@ -4332,600 +8104,68 @@ jQuery.extend({ while ( i-- ) { option = options[ i ]; - if ( (option.selected = jQuery.inArray( jQuery(option).val(), values ) >= 0) ) { + + /* eslint-disable no-cond-assign */ + + if ( option.selected = + jQuery.inArray( jQuery.valHooks.option.get( option ), values ) > -1 + ) { optionSet = true; } + + /* eslint-enable no-cond-assign */ } - // force browsers to behave consistently when non-matching value is set + // Force browsers to behave consistently when non-matching value is set if ( !optionSet ) { elem.selectedIndex = -1; } return values; } } - }, - - attr: function( elem, name, value ) { - var hooks, ret, - nType = elem.nodeType; - - // don't get/set attributes on text, comment and attribute nodes - if ( !elem || nType === 3 || nType === 8 || nType === 2 ) { - return; - } - - // Fallback to prop when attributes are not supported - if ( typeof elem.getAttribute === core_strundefined ) { - return jQuery.prop( elem, name, value ); - } - - // All attributes are lowercase - // Grab necessary hook if one is defined - if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { - name = name.toLowerCase(); - hooks = jQuery.attrHooks[ name ] || - ( jQuery.expr.match.bool.test( name ) ? boolHook : nodeHook ); - } - - if ( value !== undefined ) { - - if ( value === null ) { - jQuery.removeAttr( elem, name ); - - } else if ( hooks && "set" in hooks && (ret = hooks.set( elem, value, name )) !== undefined ) { - return ret; - - } else { - elem.setAttribute( name, value + "" ); - return value; - } - - } else if ( hooks && "get" in hooks && (ret = hooks.get( elem, name )) !== null ) { - return ret; - - } else { - ret = jQuery.find.attr( elem, name ); - - // Non-existent attributes return null, we normalize to undefined - return ret == null ? - undefined : - ret; - } - }, - - removeAttr: function( elem, value ) { - var name, propName, - i = 0, - attrNames = value && value.match( core_rnotwhite ); - - if ( attrNames && elem.nodeType === 1 ) { - while ( (name = attrNames[i++]) ) { - propName = jQuery.propFix[ name ] || name; - - // Boolean attributes get special treatment (#10870) - if ( jQuery.expr.match.bool.test( name ) ) { - // Set corresponding property to false - if ( getSetInput && getSetAttribute || !ruseDefault.test( name ) ) { - elem[ propName ] = false; - // Support: IE<9 - // Also clear defaultChecked/defaultSelected (if appropriate) - } else { - elem[ jQuery.camelCase( "default-" + name ) ] = - elem[ propName ] = false; - } - - // See #9699 for explanation of this approach (setting first, then removal) - } else { - jQuery.attr( elem, name, "" ); - } - - elem.removeAttribute( getSetAttribute ? 
name : propName ); - } - } - }, - - attrHooks: { - type: { - set: function( elem, value ) { - if ( !jQuery.support.radioValue && value === "radio" && jQuery.nodeName(elem, "input") ) { - // Setting the type on a radio button after the value resets the value in IE6-9 - // Reset value to default in case type is set after value during creation - var val = elem.value; - elem.setAttribute( "type", value ); - if ( val ) { - elem.value = val; - } - return value; - } - } - } - }, - - propFix: { - "for": "htmlFor", - "class": "className" - }, - - prop: function( elem, name, value ) { - var ret, hooks, notxml, - nType = elem.nodeType; - - // don't get/set properties on text, comment and attribute nodes - if ( !elem || nType === 3 || nType === 8 || nType === 2 ) { - return; - } - - notxml = nType !== 1 || !jQuery.isXMLDoc( elem ); - - if ( notxml ) { - // Fix name and attach hooks - name = jQuery.propFix[ name ] || name; - hooks = jQuery.propHooks[ name ]; - } - - if ( value !== undefined ) { - return hooks && "set" in hooks && (ret = hooks.set( elem, value, name )) !== undefined ? - ret : - ( elem[ name ] = value ); - - } else { - return hooks && "get" in hooks && (ret = hooks.get( elem, name )) !== null ? - ret : - elem[ name ]; - } - }, - - propHooks: { - tabIndex: { - get: function( elem ) { - // elem.tabIndex doesn't always return the correct value when it hasn't been explicitly set - // http://fluidproject.org/blog/2008/01/09/getting-setting-and-removing-tabindex-values-with-javascript/ - // Use proper attribute retrieval(#12072) - var tabindex = jQuery.find.attr( elem, "tabindex" ); - - return tabindex ? - parseInt( tabindex, 10 ) : - rfocusable.test( elem.nodeName ) || rclickable.test( elem.nodeName ) && elem.href ? - 0 : - -1; - } - } } -}); - -// Hooks for boolean attributes -boolHook = { - set: function( elem, value, name ) { - if ( value === false ) { - // Remove boolean attributes when set to false - jQuery.removeAttr( elem, name ); - } else if ( getSetInput && getSetAttribute || !ruseDefault.test( name ) ) { - // IE<8 needs the *property* name - elem.setAttribute( !getSetAttribute && jQuery.propFix[ name ] || name, name ); - - // Use defaultChecked and defaultSelected for oldIE - } else { - elem[ jQuery.camelCase( "default-" + name ) ] = elem[ name ] = true; - } - - return name; - } -}; -jQuery.each( jQuery.expr.match.bool.source.match( /\w+/g ), function( i, name ) { - var getter = jQuery.expr.attrHandle[ name ] || jQuery.find.attr; - - jQuery.expr.attrHandle[ name ] = getSetInput && getSetAttribute || !ruseDefault.test( name ) ? - function( elem, name, isXML ) { - var fn = jQuery.expr.attrHandle[ name ], - ret = isXML ? - undefined : - /* jshint eqeqeq: false */ - (jQuery.expr.attrHandle[ name ] = undefined) != - getter( elem, name, isXML ) ? - - name.toLowerCase() : - null; - jQuery.expr.attrHandle[ name ] = fn; - return ret; - } : - function( elem, name, isXML ) { - return isXML ? - undefined : - elem[ jQuery.camelCase( "default-" + name ) ] ? 
- name.toLowerCase() : - null; - }; -}); - -// fix oldIE attroperties -if ( !getSetInput || !getSetAttribute ) { - jQuery.attrHooks.value = { - set: function( elem, value, name ) { - if ( jQuery.nodeName( elem, "input" ) ) { - // Does not return so that setAttribute is also used - elem.defaultValue = value; - } else { - // Use nodeHook if defined (#1954); otherwise setAttribute is fine - return nodeHook && nodeHook.set( elem, value, name ); - } - } - }; -} - -// IE6/7 do not support getting/setting some attributes with get/setAttribute -if ( !getSetAttribute ) { - - // Use this for any attribute in IE6/7 - // This fixes almost every IE6/7 issue - nodeHook = { - set: function( elem, value, name ) { - // Set the existing or create a new attribute node - var ret = elem.getAttributeNode( name ); - if ( !ret ) { - elem.setAttributeNode( - (ret = elem.ownerDocument.createAttribute( name )) - ); - } - - ret.value = value += ""; - - // Break association with cloned elements by also using setAttribute (#9646) - return name === "value" || value === elem.getAttribute( name ) ? - value : - undefined; - } - }; - jQuery.expr.attrHandle.id = jQuery.expr.attrHandle.name = jQuery.expr.attrHandle.coords = - // Some attributes are constructed with empty-string values when not defined - function( elem, name, isXML ) { - var ret; - return isXML ? - undefined : - (ret = elem.getAttributeNode( name )) && ret.value !== "" ? - ret.value : - null; - }; - jQuery.valHooks.button = { - get: function( elem, name ) { - var ret = elem.getAttributeNode( name ); - return ret && ret.specified ? - ret.value : - undefined; - }, - set: nodeHook.set - }; - - // Set contenteditable to false on removals(#10429) - // Setting to empty string throws an error as an invalid value - jQuery.attrHooks.contenteditable = { - set: function( elem, value, name ) { - nodeHook.set( elem, value === "" ? 
false : value, name ); - } - }; - - // Set width and height to auto instead of 0 on empty string( Bug #8150 ) - // This is for removals - jQuery.each([ "width", "height" ], function( i, name ) { - jQuery.attrHooks[ name ] = { - set: function( elem, value ) { - if ( value === "" ) { - elem.setAttribute( name, "auto" ); - return value; - } - } - }; - }); -} - - -// Some attributes require a special call on IE -// http://msdn.microsoft.com/en-us/library/ms536429%28VS.85%29.aspx -if ( !jQuery.support.hrefNormalized ) { - // href/src property should get the full normalized URL (#10299/#12915) - jQuery.each([ "href", "src" ], function( i, name ) { - jQuery.propHooks[ name ] = { - get: function( elem ) { - return elem.getAttribute( name, 4 ); - } - }; - }); -} - -if ( !jQuery.support.style ) { - jQuery.attrHooks.style = { - get: function( elem ) { - // Return undefined in the case of empty string - // Note: IE uppercases css property names, but if we were to .toLowerCase() - // .cssText, that would destroy case senstitivity in URL's, like in "background" - return elem.style.cssText || undefined; - }, - set: function( elem, value ) { - return ( elem.style.cssText = value + "" ); - } - }; -} - -// Safari mis-reports the default selected property of an option -// Accessing the parent's selectedIndex property fixes it -if ( !jQuery.support.optSelected ) { - jQuery.propHooks.selected = { - get: function( elem ) { - var parent = elem.parentNode; - - if ( parent ) { - parent.selectedIndex; - - // Make sure that it also works with optgroups, see #5701 - if ( parent.parentNode ) { - parent.parentNode.selectedIndex; - } - } - return null; - } - }; -} - -jQuery.each([ - "tabIndex", - "readOnly", - "maxLength", - "cellSpacing", - "cellPadding", - "rowSpan", - "colSpan", - "useMap", - "frameBorder", - "contentEditable" -], function() { - jQuery.propFix[ this.toLowerCase() ] = this; -}); - -// IE6/7 call enctype encoding -if ( !jQuery.support.enctype ) { - jQuery.propFix.enctype = "encoding"; -} +} ); // Radios and checkboxes getter/setter -jQuery.each([ "radio", "checkbox" ], function() { +jQuery.each( [ "radio", "checkbox" ], function() { jQuery.valHooks[ this ] = { set: function( elem, value ) { - if ( jQuery.isArray( value ) ) { - return ( elem.checked = jQuery.inArray( jQuery(elem).val(), value ) >= 0 ); + if ( Array.isArray( value ) ) { + return ( elem.checked = jQuery.inArray( jQuery( elem ).val(), value ) > -1 ); } } }; - if ( !jQuery.support.checkOn ) { + if ( !support.checkOn ) { jQuery.valHooks[ this ].get = function( elem ) { - // Support: Webkit - // "" is returned instead of "on" if a value isn't specified - return elem.getAttribute("value") === null ? "on" : elem.value; + return elem.getAttribute( "value" ) === null ? "on" : elem.value; }; } -}); -var rformElems = /^(?:input|select|textarea)$/i, - rkeyEvent = /^key/, - rmouseEvent = /^(?:mouse|contextmenu)|click/, - rfocusMorph = /^(?:focusinfocus|focusoutblur)$/, - rtypenamespace = /^([^.]*)(?:\.(.+)|)$/; +} ); -function returnTrue() { - return true; -} -function returnFalse() { - return false; -} -function safeActiveElement() { - try { - return document.activeElement; - } catch ( err ) { } -} -/* - * Helper functions for managing events -- not part of the public interface. - * Props to Dean Edwards' addEvent library for many of the ideas. 
- */ -jQuery.event = { +// Return jQuery for attributes-only inclusion - global: {}, - add: function( elem, types, handler, data, selector ) { - var tmp, events, t, handleObjIn, - special, eventHandle, handleObj, - handlers, type, namespaces, origType, - elemData = jQuery._data( elem ); +support.focusin = "onfocusin" in window; - // Don't attach events to noData or text/comment nodes (but allow plain objects) - if ( !elemData ) { - return; - } - // Caller can pass in an object of custom data in lieu of the handler - if ( handler.handler ) { - handleObjIn = handler; - handler = handleObjIn.handler; - selector = handleObjIn.selector; - } +var rfocusMorph = /^(?:focusinfocus|focusoutblur)$/, + stopPropagationCallback = function( e ) { + e.stopPropagation(); + }; - // Make sure that the handler has a unique ID, used to find/remove it later - if ( !handler.guid ) { - handler.guid = jQuery.guid++; - } - - // Init the element's event structure and main handler, if this is the first - if ( !(events = elemData.events) ) { - events = elemData.events = {}; - } - if ( !(eventHandle = elemData.handle) ) { - eventHandle = elemData.handle = function( e ) { - // Discard the second event of a jQuery.event.trigger() and - // when an event is called after a page has unloaded - return typeof jQuery !== core_strundefined && (!e || jQuery.event.triggered !== e.type) ? - jQuery.event.dispatch.apply( eventHandle.elem, arguments ) : - undefined; - }; - // Add elem as a property of the handle fn to prevent a memory leak with IE non-native events - eventHandle.elem = elem; - } - - // Handle multiple events separated by a space - types = ( types || "" ).match( core_rnotwhite ) || [""]; - t = types.length; - while ( t-- ) { - tmp = rtypenamespace.exec( types[t] ) || []; - type = origType = tmp[1]; - namespaces = ( tmp[2] || "" ).split( "." ).sort(); - - // There *must* be a type, no attaching namespace-only handlers - if ( !type ) { - continue; - } - - // If event changes its type, use the special event handlers for the changed type - special = jQuery.event.special[ type ] || {}; - - // If selector defined, determine special event api type, otherwise given type - type = ( selector ? 
special.delegateType : special.bindType ) || type; - - // Update special based on newly reset type - special = jQuery.event.special[ type ] || {}; - - // handleObj is passed to all event handlers - handleObj = jQuery.extend({ - type: type, - origType: origType, - data: data, - handler: handler, - guid: handler.guid, - selector: selector, - needsContext: selector && jQuery.expr.match.needsContext.test( selector ), - namespace: namespaces.join(".") - }, handleObjIn ); - - // Init the event handler queue if we're the first - if ( !(handlers = events[ type ]) ) { - handlers = events[ type ] = []; - handlers.delegateCount = 0; - - // Only use addEventListener/attachEvent if the special events handler returns false - if ( !special.setup || special.setup.call( elem, data, namespaces, eventHandle ) === false ) { - // Bind the global event handler to the element - if ( elem.addEventListener ) { - elem.addEventListener( type, eventHandle, false ); - - } else if ( elem.attachEvent ) { - elem.attachEvent( "on" + type, eventHandle ); - } - } - } - - if ( special.add ) { - special.add.call( elem, handleObj ); - - if ( !handleObj.handler.guid ) { - handleObj.handler.guid = handler.guid; - } - } - - // Add to the element's handler list, delegates in front - if ( selector ) { - handlers.splice( handlers.delegateCount++, 0, handleObj ); - } else { - handlers.push( handleObj ); - } - - // Keep track of which events have ever been used, for event optimization - jQuery.event.global[ type ] = true; - } - - // Nullify elem to prevent memory leaks in IE - elem = null; - }, - - // Detach an event or set of events from an element - remove: function( elem, types, handler, selector, mappedTypes ) { - var j, handleObj, tmp, - origCount, t, events, - special, handlers, type, - namespaces, origType, - elemData = jQuery.hasData( elem ) && jQuery._data( elem ); - - if ( !elemData || !(events = elemData.events) ) { - return; - } - - // Once for each type.namespace in types; type may be omitted - types = ( types || "" ).match( core_rnotwhite ) || [""]; - t = types.length; - while ( t-- ) { - tmp = rtypenamespace.exec( types[t] ) || []; - type = origType = tmp[1]; - namespaces = ( tmp[2] || "" ).split( "." ).sort(); - - // Unbind all events (on this namespace, if provided) for the element - if ( !type ) { - for ( type in events ) { - jQuery.event.remove( elem, type + types[ t ], handler, selector, true ); - } - continue; - } - - special = jQuery.event.special[ type ] || {}; - type = ( selector ? 
special.delegateType : special.bindType ) || type; - handlers = events[ type ] || []; - tmp = tmp[2] && new RegExp( "(^|\\.)" + namespaces.join("\\.(?:.*\\.|)") + "(\\.|$)" ); - - // Remove matching events - origCount = j = handlers.length; - while ( j-- ) { - handleObj = handlers[ j ]; - - if ( ( mappedTypes || origType === handleObj.origType ) && - ( !handler || handler.guid === handleObj.guid ) && - ( !tmp || tmp.test( handleObj.namespace ) ) && - ( !selector || selector === handleObj.selector || selector === "**" && handleObj.selector ) ) { - handlers.splice( j, 1 ); - - if ( handleObj.selector ) { - handlers.delegateCount--; - } - if ( special.remove ) { - special.remove.call( elem, handleObj ); - } - } - } - - // Remove generic event handler if we removed something and no more handlers exist - // (avoids potential for endless recursion during removal of special event handlers) - if ( origCount && !handlers.length ) { - if ( !special.teardown || special.teardown.call( elem, namespaces, elemData.handle ) === false ) { - jQuery.removeEvent( elem, type, elemData.handle ); - } - - delete events[ type ]; - } - } - - // Remove the expando if it's no longer used - if ( jQuery.isEmptyObject( events ) ) { - delete elemData.handle; - - // removeData also checks for emptiness and clears the expando if empty - // so use it instead of delete - jQuery._removeData( elem, "events" ); - } - }, +jQuery.extend( jQuery.event, { trigger: function( event, data, elem, onlyHandlers ) { - var handle, ontype, cur, - bubbleType, special, tmp, i, - eventPath = [ elem || document ], - type = core_hasOwn.call( event, "type" ) ? event.type : event, - namespaces = core_hasOwn.call( event, "namespace" ) ? event.namespace.split(".") : []; - cur = tmp = elem = elem || document; + var i, cur, tmp, bubbleType, ontype, handle, special, lastElement, + eventPath = [ elem || document ], + type = hasOwn.call( event, "type" ) ? event.type : event, + namespaces = hasOwn.call( event, "namespace" ) ? event.namespace.split( "." ) : []; + + cur = lastElement = tmp = elem = elem || document; // Don't do events on text and comment nodes if ( elem.nodeType === 3 || elem.nodeType === 8 ) { @@ -4937,13 +8177,14 @@ jQuery.event = { return; } - if ( type.indexOf(".") >= 0 ) { + if ( type.indexOf( "." ) > -1 ) { + // Namespaced trigger; create a regexp to match event type in handle() - namespaces = type.split("."); + namespaces = type.split( "." ); type = namespaces.shift(); namespaces.sort(); } - ontype = type.indexOf(":") < 0 && "on" + type; + ontype = type.indexOf( ":" ) < 0 && "on" + type; // Caller can pass in a jQuery.Event object, Object, or just an event type string event = event[ jQuery.expando ] ? @@ -4952,9 +8193,9 @@ jQuery.event = { // Trigger bitmask: & 1 for native handlers; & 2 for jQuery (always true) event.isTrigger = onlyHandlers ? 2 : 3; - event.namespace = namespaces.join("."); - event.namespace_re = event.namespace ? - new RegExp( "(^|\\.)" + namespaces.join("\\.(?:.*\\.|)") + "(\\.|$)" ) : + event.namespace = namespaces.join( "." ); + event.rnamespace = event.namespace ? 
+ new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ) : null; // Clean up the event in case it is being reused @@ -4976,7 +8217,7 @@ jQuery.event = { // Determine event propagation path in advance, per W3C events spec (#9951) // Bubble up to document, then to window; watch for a global ownerDocument var (#9724) - if ( !onlyHandlers && !special.noBubble && !jQuery.isWindow( elem ) ) { + if ( !onlyHandlers && !special.noBubble && !isWindow( elem ) ) { bubbleType = special.delegateType || type; if ( !rfocusMorph.test( bubbleType + type ) ) { @@ -4988,29 +8229,33 @@ jQuery.event = { } // Only add window if we got to document (e.g., not plain obj or detached DOM) - if ( tmp === (elem.ownerDocument || document) ) { + if ( tmp === ( elem.ownerDocument || document ) ) { eventPath.push( tmp.defaultView || tmp.parentWindow || window ); } } // Fire handlers on the event path i = 0; - while ( (cur = eventPath[i++]) && !event.isPropagationStopped() ) { - + while ( ( cur = eventPath[ i++ ] ) && !event.isPropagationStopped() ) { + lastElement = cur; event.type = i > 1 ? bubbleType : special.bindType || type; // jQuery handler - handle = ( jQuery._data( cur, "events" ) || {} )[ event.type ] && jQuery._data( cur, "handle" ); + handle = ( dataPriv.get( cur, "events" ) || {} )[ event.type ] && + dataPriv.get( cur, "handle" ); if ( handle ) { handle.apply( cur, data ); } // Native handler handle = ontype && cur[ ontype ]; - if ( handle && jQuery.acceptData( cur ) && handle.apply && handle.apply( cur, data ) === false ) { - event.preventDefault(); + if ( handle && handle.apply && acceptData( cur ) ) { + event.result = handle.apply( cur, data ); + if ( event.result === false ) { + event.preventDefault(); + } } } event.type = type; @@ -5018,13 +8263,13 @@ jQuery.event = { // If nobody prevented the default action, do it now if ( !onlyHandlers && !event.isDefaultPrevented() ) { - if ( (!special._default || special._default.apply( eventPath.pop(), data ) === false) && - jQuery.acceptData( elem ) ) { + if ( ( !special._default || + special._default.apply( eventPath.pop(), data ) === false ) && + acceptData( elem ) ) { - // Call a native DOM method on the target with the same name name as the event. - // Can't use an .isFunction() check here because IE6/7 fails that test. + // Call a native DOM method on the target with the same name as the event. 
// Don't do default actions on window, that's where global variables be (#6170) - if ( ontype && elem[ type ] && !jQuery.isWindow( elem ) ) { + if ( ontype && isFunction( elem[ type ] ) && !isWindow( elem ) ) { // Don't re-trigger an onFOO event when we call its FOO() method tmp = elem[ ontype ]; @@ -5035,12 +8280,17 @@ jQuery.event = { // Prevent re-triggering of the same event, since we already bubbled it above jQuery.event.triggered = type; - try { - elem[ type ](); - } catch ( e ) { - // IE<9 dies on focus/blur to hidden element (#1486,#12518) - // only reproducible on winXP IE8 native, not IE9 in IE8 mode + + if ( event.isPropagationStopped() ) { + lastElement.addEventListener( type, stopPropagationCallback ); } + + elem[ type ](); + + if ( event.isPropagationStopped() ) { + lastElement.removeEventListener( type, stopPropagationCallback ); + } + jQuery.event.triggered = undefined; if ( tmp ) { @@ -5053,2466 +8303,180 @@ jQuery.event = { return event.result; }, - dispatch: function( event ) { - - // Make a writable jQuery.Event from the native event object - event = jQuery.event.fix( event ); - - var i, ret, handleObj, matched, j, - handlerQueue = [], - args = core_slice.call( arguments ), - handlers = ( jQuery._data( this, "events" ) || {} )[ event.type ] || [], - special = jQuery.event.special[ event.type ] || {}; - - // Use the fix-ed jQuery.Event rather than the (read-only) native event - args[0] = event; - event.delegateTarget = this; - - // Call the preDispatch hook for the mapped type, and let it bail if desired - if ( special.preDispatch && special.preDispatch.call( this, event ) === false ) { - return; - } - - // Determine handlers - handlerQueue = jQuery.event.handlers.call( this, event, handlers ); - - // Run delegates first; they may want to stop propagation beneath us - i = 0; - while ( (matched = handlerQueue[ i++ ]) && !event.isPropagationStopped() ) { - event.currentTarget = matched.elem; - - j = 0; - while ( (handleObj = matched.handlers[ j++ ]) && !event.isImmediatePropagationStopped() ) { - - // Triggered event must either 1) have no namespace, or - // 2) have namespace(s) a subset or equal to those in the bound event (both can have no namespace). 
- if ( !event.namespace_re || event.namespace_re.test( handleObj.namespace ) ) { - - event.handleObj = handleObj; - event.data = handleObj.data; - - ret = ( (jQuery.event.special[ handleObj.origType ] || {}).handle || handleObj.handler ) - .apply( matched.elem, args ); - - if ( ret !== undefined ) { - if ( (event.result = ret) === false ) { - event.preventDefault(); - event.stopPropagation(); - } - } - } - } - } - - // Call the postDispatch hook for the mapped type - if ( special.postDispatch ) { - special.postDispatch.call( this, event ); - } - - return event.result; - }, - - handlers: function( event, handlers ) { - var sel, handleObj, matches, i, - handlerQueue = [], - delegateCount = handlers.delegateCount, - cur = event.target; - - // Find delegate handlers - // Black-hole SVG instance trees (#13180) - // Avoid non-left-click bubbling in Firefox (#3861) - if ( delegateCount && cur.nodeType && (!event.button || event.type !== "click") ) { - - /* jshint eqeqeq: false */ - for ( ; cur != this; cur = cur.parentNode || this ) { - /* jshint eqeqeq: true */ - - // Don't check non-elements (#13208) - // Don't process clicks on disabled elements (#6911, #8165, #11382, #11764) - if ( cur.nodeType === 1 && (cur.disabled !== true || event.type !== "click") ) { - matches = []; - for ( i = 0; i < delegateCount; i++ ) { - handleObj = handlers[ i ]; - - // Don't conflict with Object.prototype properties (#13203) - sel = handleObj.selector + " "; - - if ( matches[ sel ] === undefined ) { - matches[ sel ] = handleObj.needsContext ? - jQuery( sel, this ).index( cur ) >= 0 : - jQuery.find( sel, this, null, [ cur ] ).length; - } - if ( matches[ sel ] ) { - matches.push( handleObj ); - } - } - if ( matches.length ) { - handlerQueue.push({ elem: cur, handlers: matches }); - } - } - } - } - - // Add the remaining (directly-bound) handlers - if ( delegateCount < handlers.length ) { - handlerQueue.push({ elem: this, handlers: handlers.slice( delegateCount ) }); - } - - return handlerQueue; - }, - - fix: function( event ) { - if ( event[ jQuery.expando ] ) { - return event; - } - - // Create a writable copy of the event object and normalize some properties - var i, prop, copy, - type = event.type, - originalEvent = event, - fixHook = this.fixHooks[ type ]; - - if ( !fixHook ) { - this.fixHooks[ type ] = fixHook = - rmouseEvent.test( type ) ? this.mouseHooks : - rkeyEvent.test( type ) ? this.keyHooks : - {}; - } - copy = fixHook.props ? this.props.concat( fixHook.props ) : this.props; - - event = new jQuery.Event( originalEvent ); - - i = copy.length; - while ( i-- ) { - prop = copy[ i ]; - event[ prop ] = originalEvent[ prop ]; - } - - // Support: IE<9 - // Fix target property (#1925) - if ( !event.target ) { - event.target = originalEvent.srcElement || document; - } - - // Support: Chrome 23+, Safari? - // Target should not be a text node (#504, #13143) - if ( event.target.nodeType === 3 ) { - event.target = event.target.parentNode; - } - - // Support: IE<9 - // For mouse/key events, metaKey==false if it's undefined (#3368, #11328) - event.metaKey = !!event.metaKey; - - return fixHook.filter ? 
fixHook.filter( event, originalEvent ) : event; - }, - - // Includes some event props shared by KeyEvent and MouseEvent - props: "altKey bubbles cancelable ctrlKey currentTarget eventPhase metaKey relatedTarget shiftKey target timeStamp view which".split(" "), - - fixHooks: {}, - - keyHooks: { - props: "char charCode key keyCode".split(" "), - filter: function( event, original ) { - - // Add which for key events - if ( event.which == null ) { - event.which = original.charCode != null ? original.charCode : original.keyCode; - } - - return event; - } - }, - - mouseHooks: { - props: "button buttons clientX clientY fromElement offsetX offsetY pageX pageY screenX screenY toElement".split(" "), - filter: function( event, original ) { - var body, eventDoc, doc, - button = original.button, - fromElement = original.fromElement; - - // Calculate pageX/Y if missing and clientX/Y available - if ( event.pageX == null && original.clientX != null ) { - eventDoc = event.target.ownerDocument || document; - doc = eventDoc.documentElement; - body = eventDoc.body; - - event.pageX = original.clientX + ( doc && doc.scrollLeft || body && body.scrollLeft || 0 ) - ( doc && doc.clientLeft || body && body.clientLeft || 0 ); - event.pageY = original.clientY + ( doc && doc.scrollTop || body && body.scrollTop || 0 ) - ( doc && doc.clientTop || body && body.clientTop || 0 ); - } - - // Add relatedTarget, if necessary - if ( !event.relatedTarget && fromElement ) { - event.relatedTarget = fromElement === event.target ? original.toElement : fromElement; - } - - // Add which for click: 1 === left; 2 === middle; 3 === right - // Note: button is not normalized, so don't use it - if ( !event.which && button !== undefined ) { - event.which = ( button & 1 ? 1 : ( button & 2 ? 3 : ( button & 4 ? 2 : 0 ) ) ); - } - - return event; - } - }, - - special: { - load: { - // Prevent triggered image.load events from bubbling to window.load - noBubble: true - }, - focus: { - // Fire native event if possible so blur/focus sequence is correct - trigger: function() { - if ( this !== safeActiveElement() && this.focus ) { - try { - this.focus(); - return false; - } catch ( e ) { - // Support: IE<9 - // If we error on focus to hidden element (#1486, #12518), - // let .trigger() run the handlers - } - } - }, - delegateType: "focusin" - }, - blur: { - trigger: function() { - if ( this === safeActiveElement() && this.blur ) { - this.blur(); - return false; - } - }, - delegateType: "focusout" - }, - click: { - // For checkbox, fire native event so checked state will be right - trigger: function() { - if ( jQuery.nodeName( this, "input" ) && this.type === "checkbox" && this.click ) { - this.click(); - return false; - } - }, - - // For cross-browser consistency, don't fire native .click() on links - _default: function( event ) { - return jQuery.nodeName( event.target, "a" ); - } - }, - - beforeunload: { - postDispatch: function( event ) { - - // Even when returnValue equals to undefined Firefox will still show alert - if ( event.result !== undefined ) { - event.originalEvent.returnValue = event.result; - } - } - } - }, - - simulate: function( type, elem, event, bubble ) { - // Piggyback on a donor event to simulate a different one. - // Fake originalEvent to avoid donor's stopPropagation, but if the - // simulated event prevents default then we do the same on the donor. 
+ // Piggyback on a donor event to simulate a different one + // Used only for `focus(in | out)` events + simulate: function( type, elem, event ) { var e = jQuery.extend( new jQuery.Event(), event, { type: type, - isSimulated: true, - originalEvent: {} + isSimulated: true } ); - if ( bubble ) { - jQuery.event.trigger( e, null, elem ); - } else { - jQuery.event.dispatch.call( elem, e ); - } - if ( e.isDefaultPrevented() ) { - event.preventDefault(); - } - } -}; -jQuery.removeEvent = document.removeEventListener ? - function( elem, type, handle ) { - if ( elem.removeEventListener ) { - elem.removeEventListener( type, handle, false ); - } - } : - function( elem, type, handle ) { - var name = "on" + type; - - if ( elem.detachEvent ) { - - // #8545, #7054, preventing memory leaks for custom events in IE6-8 - // detachEvent needed property on element, by name of that event, to properly expose it to GC - if ( typeof elem[ name ] === core_strundefined ) { - elem[ name ] = null; - } - - elem.detachEvent( name, handle ); - } - }; - -jQuery.Event = function( src, props ) { - // Allow instantiation without the 'new' keyword - if ( !(this instanceof jQuery.Event) ) { - return new jQuery.Event( src, props ); + jQuery.event.trigger( e, null, elem ); } - // Event object - if ( src && src.type ) { - this.originalEvent = src; - this.type = src.type; +} ); - // Events bubbling up the document may have been marked as prevented - // by a handler lower down the tree; reflect the correct value. - this.isDefaultPrevented = ( src.defaultPrevented || src.returnValue === false || - src.getPreventDefault && src.getPreventDefault() ) ? returnTrue : returnFalse; - - // Event type - } else { - this.type = src; - } - - // Put explicitly provided properties onto the event object - if ( props ) { - jQuery.extend( this, props ); - } - - // Create a timestamp if incoming event doesn't have one - this.timeStamp = src && src.timeStamp || jQuery.now(); - - // Mark it as fixed - this[ jQuery.expando ] = true; -}; - -// jQuery.Event is based on DOM3 Events as specified by the ECMAScript Language Binding -// http://www.w3.org/TR/2003/WD-DOM-Level-3-Events-20030331/ecma-script-binding.html -jQuery.Event.prototype = { - isDefaultPrevented: returnFalse, - isPropagationStopped: returnFalse, - isImmediatePropagationStopped: returnFalse, - - preventDefault: function() { - var e = this.originalEvent; - - this.isDefaultPrevented = returnTrue; - if ( !e ) { - return; - } - - // If preventDefault exists, run it on the original event - if ( e.preventDefault ) { - e.preventDefault(); - - // Support: IE - // Otherwise set the returnValue property of the original event to false - } else { - e.returnValue = false; - } - }, - stopPropagation: function() { - var e = this.originalEvent; - - this.isPropagationStopped = returnTrue; - if ( !e ) { - return; - } - // If stopPropagation exists, run it on the original event - if ( e.stopPropagation ) { - e.stopPropagation(); - } - - // Support: IE - // Set the cancelBubble property of the original event to true - e.cancelBubble = true; - }, - stopImmediatePropagation: function() { - this.isImmediatePropagationStopped = returnTrue; - this.stopPropagation(); - } -}; - -// Create mouseenter/leave events using mouseover/out and event-time checks -jQuery.each({ - mouseenter: "mouseover", - mouseleave: "mouseout" -}, function( orig, fix ) { - jQuery.event.special[ orig ] = { - delegateType: fix, - bindType: fix, - - handle: function( event ) { - var ret, - target = this, - related = event.relatedTarget, - 
handleObj = event.handleObj; - - // For mousenter/leave call the handler if related is outside the target. - // NB: No relatedTarget if the mouse left/entered the browser window - if ( !related || (related !== target && !jQuery.contains( target, related )) ) { - event.type = handleObj.origType; - ret = handleObj.handler.apply( this, arguments ); - event.type = fix; - } - return ret; - } - }; -}); - -// IE submit delegation -if ( !jQuery.support.submitBubbles ) { - - jQuery.event.special.submit = { - setup: function() { - // Only need this for delegated form submit events - if ( jQuery.nodeName( this, "form" ) ) { - return false; - } - - // Lazy-add a submit handler when a descendant form may potentially be submitted - jQuery.event.add( this, "click._submit keypress._submit", function( e ) { - // Node name check avoids a VML-related crash in IE (#9807) - var elem = e.target, - form = jQuery.nodeName( elem, "input" ) || jQuery.nodeName( elem, "button" ) ? elem.form : undefined; - if ( form && !jQuery._data( form, "submitBubbles" ) ) { - jQuery.event.add( form, "submit._submit", function( event ) { - event._submit_bubble = true; - }); - jQuery._data( form, "submitBubbles", true ); - } - }); - // return undefined since we don't need an event listener - }, - - postDispatch: function( event ) { - // If form was submitted by the user, bubble the event up the tree - if ( event._submit_bubble ) { - delete event._submit_bubble; - if ( this.parentNode && !event.isTrigger ) { - jQuery.event.simulate( "submit", this.parentNode, event, true ); - } - } - }, - - teardown: function() { - // Only need this for delegated form submit events - if ( jQuery.nodeName( this, "form" ) ) { - return false; - } - - // Remove delegated handlers; cleanData eventually reaps submit handlers attached above - jQuery.event.remove( this, "._submit" ); - } - }; -} - -// IE change delegation and checkbox/radio fix -if ( !jQuery.support.changeBubbles ) { - - jQuery.event.special.change = { - - setup: function() { - - if ( rformElems.test( this.nodeName ) ) { - // IE doesn't fire change on a check/radio until blur; trigger it on click - // after a propertychange. Eat the blur-change in special.change.handle. - // This still fires onchange a second time for check/radio after blur. 
- if ( this.type === "checkbox" || this.type === "radio" ) { - jQuery.event.add( this, "propertychange._change", function( event ) { - if ( event.originalEvent.propertyName === "checked" ) { - this._just_changed = true; - } - }); - jQuery.event.add( this, "click._change", function( event ) { - if ( this._just_changed && !event.isTrigger ) { - this._just_changed = false; - } - // Allow triggered, simulated change events (#11500) - jQuery.event.simulate( "change", this, event, true ); - }); - } - return false; - } - // Delegated event; lazy-add a change handler on descendant inputs - jQuery.event.add( this, "beforeactivate._change", function( e ) { - var elem = e.target; - - if ( rformElems.test( elem.nodeName ) && !jQuery._data( elem, "changeBubbles" ) ) { - jQuery.event.add( elem, "change._change", function( event ) { - if ( this.parentNode && !event.isSimulated && !event.isTrigger ) { - jQuery.event.simulate( "change", this.parentNode, event, true ); - } - }); - jQuery._data( elem, "changeBubbles", true ); - } - }); - }, - - handle: function( event ) { - var elem = event.target; - - // Swallow native change events from checkbox/radio, we already triggered them above - if ( this !== elem || event.isSimulated || event.isTrigger || (elem.type !== "radio" && elem.type !== "checkbox") ) { - return event.handleObj.handler.apply( this, arguments ); - } - }, - - teardown: function() { - jQuery.event.remove( this, "._change" ); - - return !rformElems.test( this.nodeName ); - } - }; -} - -// Create "bubbling" focus and blur events -if ( !jQuery.support.focusinBubbles ) { - jQuery.each({ focus: "focusin", blur: "focusout" }, function( orig, fix ) { - - // Attach a single capturing handler while someone wants focusin/focusout - var attaches = 0, - handler = function( event ) { - jQuery.event.simulate( fix, event.target, jQuery.event.fix( event ), true ); - }; - - jQuery.event.special[ fix ] = { - setup: function() { - if ( attaches++ === 0 ) { - document.addEventListener( orig, handler, true ); - } - }, - teardown: function() { - if ( --attaches === 0 ) { - document.removeEventListener( orig, handler, true ); - } - } - }; - }); -} - -jQuery.fn.extend({ - - on: function( types, selector, data, fn, /*INTERNAL*/ one ) { - var type, origFn; - - // Types can be a map of types/handlers - if ( typeof types === "object" ) { - // ( types-Object, selector, data ) - if ( typeof selector !== "string" ) { - // ( types-Object, data ) - data = data || selector; - selector = undefined; - } - for ( type in types ) { - this.on( type, selector, data, types[ type ], one ); - } - return this; - } - - if ( data == null && fn == null ) { - // ( types, fn ) - fn = selector; - data = selector = undefined; - } else if ( fn == null ) { - if ( typeof selector === "string" ) { - // ( types, selector, fn ) - fn = data; - data = undefined; - } else { - // ( types, data, fn ) - fn = data; - data = selector; - selector = undefined; - } - } - if ( fn === false ) { - fn = returnFalse; - } else if ( !fn ) { - return this; - } - - if ( one === 1 ) { - origFn = fn; - fn = function( event ) { - // Can use an empty set, since event contains the info - jQuery().off( event ); - return origFn.apply( this, arguments ); - }; - // Use same guid so caller can remove using origFn - fn.guid = origFn.guid || ( origFn.guid = jQuery.guid++ ); - } - return this.each( function() { - jQuery.event.add( this, types, fn, data, selector ); - }); - }, - one: function( types, selector, data, fn ) { - return this.on( types, selector, data, fn, 1 ); - }, - off: 
function( types, selector, fn ) { - var handleObj, type; - if ( types && types.preventDefault && types.handleObj ) { - // ( event ) dispatched jQuery.Event - handleObj = types.handleObj; - jQuery( types.delegateTarget ).off( - handleObj.namespace ? handleObj.origType + "." + handleObj.namespace : handleObj.origType, - handleObj.selector, - handleObj.handler - ); - return this; - } - if ( typeof types === "object" ) { - // ( types-object [, selector] ) - for ( type in types ) { - this.off( type, selector, types[ type ] ); - } - return this; - } - if ( selector === false || typeof selector === "function" ) { - // ( types [, fn] ) - fn = selector; - selector = undefined; - } - if ( fn === false ) { - fn = returnFalse; - } - return this.each(function() { - jQuery.event.remove( this, types, fn, selector ); - }); - }, +jQuery.fn.extend( { trigger: function( type, data ) { - return this.each(function() { + return this.each( function() { jQuery.event.trigger( type, data, this ); - }); + } ); }, triggerHandler: function( type, data ) { - var elem = this[0]; + var elem = this[ 0 ]; if ( elem ) { return jQuery.event.trigger( type, data, elem, true ); } } -}); -var isSimple = /^.[^:#\[\.,]*$/, - rparentsprev = /^(?:parents|prev(?:Until|All))/, - rneedsContext = jQuery.expr.match.needsContext, - // methods guaranteed to produce a unique set when starting from a unique set - guaranteedUnique = { - children: true, - contents: true, - next: true, - prev: true - }; +} ); -jQuery.fn.extend({ - find: function( selector ) { - var i, - ret = [], - self = this, - len = self.length; - if ( typeof selector !== "string" ) { - return this.pushStack( jQuery( selector ).filter(function() { - for ( i = 0; i < len; i++ ) { - if ( jQuery.contains( self[ i ], this ) ) { - return true; - } +// Support: Firefox <=44 +// Firefox doesn't have focus(in | out) events +// Related ticket - https://bugzilla.mozilla.org/show_bug.cgi?id=687787 +// +// Support: Chrome <=48 - 49, Safari <=9.0 - 9.1 +// focus(in | out) events fire after focus & blur events, +// which is spec violation - http://www.w3.org/TR/DOM-Level-3-Events/#events-focusevent-event-order +// Related ticket - https://bugs.chromium.org/p/chromium/issues/detail?id=449857 +if ( !support.focusin ) { + jQuery.each( { focus: "focusin", blur: "focusout" }, function( orig, fix ) { + + // Attach a single capturing handler on the document while someone wants focusin/focusout + var handler = function( event ) { + jQuery.event.simulate( fix, event.target, jQuery.event.fix( event ) ); + }; + + jQuery.event.special[ fix ] = { + setup: function() { + var doc = this.ownerDocument || this, + attaches = dataPriv.access( doc, fix ); + + if ( !attaches ) { + doc.addEventListener( orig, handler, true ); } - }) ); - } + dataPriv.access( doc, fix, ( attaches || 0 ) + 1 ); + }, + teardown: function() { + var doc = this.ownerDocument || this, + attaches = dataPriv.access( doc, fix ) - 1; - for ( i = 0; i < len; i++ ) { - jQuery.find( selector, self[ i ], ret ); - } + if ( !attaches ) { + doc.removeEventListener( orig, handler, true ); + dataPriv.remove( doc, fix ); - // Needed because $( selector, context ) becomes $( context ).find( selector ) - ret = this.pushStack( len > 1 ? jQuery.unique( ret ) : ret ); - ret.selector = this.selector ? 
this.selector + " " + selector : selector; - return ret; - }, - - has: function( target ) { - var i, - targets = jQuery( target, this ), - len = targets.length; - - return this.filter(function() { - for ( i = 0; i < len; i++ ) { - if ( jQuery.contains( this, targets[i] ) ) { - return true; - } - } - }); - }, - - not: function( selector ) { - return this.pushStack( winnow(this, selector || [], true) ); - }, - - filter: function( selector ) { - return this.pushStack( winnow(this, selector || [], false) ); - }, - - is: function( selector ) { - return !!winnow( - this, - - // If this is a positional/relative selector, check membership in the returned set - // so $("p:first").is("p:last") won't return true for a doc with two "p". - typeof selector === "string" && rneedsContext.test( selector ) ? - jQuery( selector ) : - selector || [], - false - ).length; - }, - - closest: function( selectors, context ) { - var cur, - i = 0, - l = this.length, - ret = [], - pos = rneedsContext.test( selectors ) || typeof selectors !== "string" ? - jQuery( selectors, context || this.context ) : - 0; - - for ( ; i < l; i++ ) { - for ( cur = this[i]; cur && cur !== context; cur = cur.parentNode ) { - // Always skip document fragments - if ( cur.nodeType < 11 && (pos ? - pos.index(cur) > -1 : - - // Don't pass non-elements to Sizzle - cur.nodeType === 1 && - jQuery.find.matchesSelector(cur, selectors)) ) { - - cur = ret.push( cur ); - break; - } - } - } - - return this.pushStack( ret.length > 1 ? jQuery.unique( ret ) : ret ); - }, - - // Determine the position of an element within - // the matched set of elements - index: function( elem ) { - - // No argument, return index in parent - if ( !elem ) { - return ( this[0] && this[0].parentNode ) ? this.first().prevAll().length : -1; - } - - // index in selector - if ( typeof elem === "string" ) { - return jQuery.inArray( this[0], jQuery( elem ) ); - } - - // Locate the position of the desired element - return jQuery.inArray( - // If it receives a jQuery object, the first element is used - elem.jquery ? elem[0] : elem, this ); - }, - - add: function( selector, context ) { - var set = typeof selector === "string" ? - jQuery( selector, context ) : - jQuery.makeArray( selector && selector.nodeType ? [ selector ] : selector ), - all = jQuery.merge( this.get(), set ); - - return this.pushStack( jQuery.unique(all) ); - }, - - addBack: function( selector ) { - return this.add( selector == null ? - this.prevObject : this.prevObject.filter(selector) - ); - } -}); - -function sibling( cur, dir ) { - do { - cur = cur[ dir ]; - } while ( cur && cur.nodeType !== 1 ); - - return cur; -} - -jQuery.each({ - parent: function( elem ) { - var parent = elem.parentNode; - return parent && parent.nodeType !== 11 ? 
parent : null; - }, - parents: function( elem ) { - return jQuery.dir( elem, "parentNode" ); - }, - parentsUntil: function( elem, i, until ) { - return jQuery.dir( elem, "parentNode", until ); - }, - next: function( elem ) { - return sibling( elem, "nextSibling" ); - }, - prev: function( elem ) { - return sibling( elem, "previousSibling" ); - }, - nextAll: function( elem ) { - return jQuery.dir( elem, "nextSibling" ); - }, - prevAll: function( elem ) { - return jQuery.dir( elem, "previousSibling" ); - }, - nextUntil: function( elem, i, until ) { - return jQuery.dir( elem, "nextSibling", until ); - }, - prevUntil: function( elem, i, until ) { - return jQuery.dir( elem, "previousSibling", until ); - }, - siblings: function( elem ) { - return jQuery.sibling( ( elem.parentNode || {} ).firstChild, elem ); - }, - children: function( elem ) { - return jQuery.sibling( elem.firstChild ); - }, - contents: function( elem ) { - return jQuery.nodeName( elem, "iframe" ) ? - elem.contentDocument || elem.contentWindow.document : - jQuery.merge( [], elem.childNodes ); - } -}, function( name, fn ) { - jQuery.fn[ name ] = function( until, selector ) { - var ret = jQuery.map( this, fn, until ); - - if ( name.slice( -5 ) !== "Until" ) { - selector = until; - } - - if ( selector && typeof selector === "string" ) { - ret = jQuery.filter( selector, ret ); - } - - if ( this.length > 1 ) { - // Remove duplicates - if ( !guaranteedUnique[ name ] ) { - ret = jQuery.unique( ret ); - } - - // Reverse order for parents* and prev-derivatives - if ( rparentsprev.test( name ) ) { - ret = ret.reverse(); - } - } - - return this.pushStack( ret ); - }; -}); - -jQuery.extend({ - filter: function( expr, elems, not ) { - var elem = elems[ 0 ]; - - if ( not ) { - expr = ":not(" + expr + ")"; - } - - return elems.length === 1 && elem.nodeType === 1 ? - jQuery.find.matchesSelector( elem, expr ) ? 
[ elem ] : [] : - jQuery.find.matches( expr, jQuery.grep( elems, function( elem ) { - return elem.nodeType === 1; - })); - }, - - dir: function( elem, dir, until ) { - var matched = [], - cur = elem[ dir ]; - - while ( cur && cur.nodeType !== 9 && (until === undefined || cur.nodeType !== 1 || !jQuery( cur ).is( until )) ) { - if ( cur.nodeType === 1 ) { - matched.push( cur ); - } - cur = cur[dir]; - } - return matched; - }, - - sibling: function( n, elem ) { - var r = []; - - for ( ; n; n = n.nextSibling ) { - if ( n.nodeType === 1 && n !== elem ) { - r.push( n ); - } - } - - return r; - } -}); - -// Implement the identical functionality for filter and not -function winnow( elements, qualifier, not ) { - if ( jQuery.isFunction( qualifier ) ) { - return jQuery.grep( elements, function( elem, i ) { - /* jshint -W018 */ - return !!qualifier.call( elem, i, elem ) !== not; - }); - - } - - if ( qualifier.nodeType ) { - return jQuery.grep( elements, function( elem ) { - return ( elem === qualifier ) !== not; - }); - - } - - if ( typeof qualifier === "string" ) { - if ( isSimple.test( qualifier ) ) { - return jQuery.filter( qualifier, elements, not ); - } - - qualifier = jQuery.filter( qualifier, elements ); - } - - return jQuery.grep( elements, function( elem ) { - return ( jQuery.inArray( elem, qualifier ) >= 0 ) !== not; - }); -} -function createSafeFragment( document ) { - var list = nodeNames.split( "|" ), - safeFrag = document.createDocumentFragment(); - - if ( safeFrag.createElement ) { - while ( list.length ) { - safeFrag.createElement( - list.pop() - ); - } - } - return safeFrag; -} - -var nodeNames = "abbr|article|aside|audio|bdi|canvas|data|datalist|details|figcaption|figure|footer|" + - "header|hgroup|mark|meter|nav|output|progress|section|summary|time|video", - rinlinejQuery = / jQuery\d+="(?:null|\d+)"/g, - rnoshimcache = new RegExp("<(?:" + nodeNames + ")[\\s/>]", "i"), - rleadingWhitespace = /^\s+/, - rxhtmlTag = /<(?!area|br|col|embed|hr|img|input|link|meta|param)(([\w:]+)[^>]*)\/>/gi, - rtagName = /<([\w:]+)/, - rtbody = /\s*$/g, - - // We have to close these tags to support XHTML (#13200) - wrapMap = { - option: [ 1, "" ], - legend: [ 1, "
", "
" ], - area: [ 1, "", "" ], - param: [ 1, "", "" ], - thead: [ 1, "", "
" ], - tr: [ 2, "", "
" ], - col: [ 2, "", "
" ], - td: [ 3, "", "
" ], - - // IE6-8 can't serialize link, script, style, or any html5 (NoScope) tags, - // unless wrapped in a div with non-breaking characters in front of it. - _default: jQuery.support.htmlSerialize ? [ 0, "", "" ] : [ 1, "X
", "
" ] - }, - safeFragment = createSafeFragment( document ), - fragmentDiv = safeFragment.appendChild( document.createElement("div") ); - -wrapMap.optgroup = wrapMap.option; -wrapMap.tbody = wrapMap.tfoot = wrapMap.colgroup = wrapMap.caption = wrapMap.thead; -wrapMap.th = wrapMap.td; - -jQuery.fn.extend({ - text: function( value ) { - return jQuery.access( this, function( value ) { - return value === undefined ? - jQuery.text( this ) : - this.empty().append( ( this[0] && this[0].ownerDocument || document ).createTextNode( value ) ); - }, null, value, arguments.length ); - }, - - append: function() { - return this.domManip( arguments, function( elem ) { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - var target = manipulationTarget( this, elem ); - target.appendChild( elem ); - } - }); - }, - - prepend: function() { - return this.domManip( arguments, function( elem ) { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - var target = manipulationTarget( this, elem ); - target.insertBefore( elem, target.firstChild ); - } - }); - }, - - before: function() { - return this.domManip( arguments, function( elem ) { - if ( this.parentNode ) { - this.parentNode.insertBefore( elem, this ); - } - }); - }, - - after: function() { - return this.domManip( arguments, function( elem ) { - if ( this.parentNode ) { - this.parentNode.insertBefore( elem, this.nextSibling ); - } - }); - }, - - // keepData is for internal use only--do not document - remove: function( selector, keepData ) { - var elem, - elems = selector ? jQuery.filter( selector, this ) : this, - i = 0; - - for ( ; (elem = elems[i]) != null; i++ ) { - - if ( !keepData && elem.nodeType === 1 ) { - jQuery.cleanData( getAll( elem ) ); - } - - if ( elem.parentNode ) { - if ( keepData && jQuery.contains( elem.ownerDocument, elem ) ) { - setGlobalEval( getAll( elem, "script" ) ); - } - elem.parentNode.removeChild( elem ); - } - } - - return this; - }, - - empty: function() { - var elem, - i = 0; - - for ( ; (elem = this[i]) != null; i++ ) { - // Remove element nodes and prevent memory leaks - if ( elem.nodeType === 1 ) { - jQuery.cleanData( getAll( elem, false ) ); - } - - // Remove any remaining nodes - while ( elem.firstChild ) { - elem.removeChild( elem.firstChild ); - } - - // If this is a select, ensure that it displays empty (#12336) - // Support: IE<9 - if ( elem.options && jQuery.nodeName( elem, "select" ) ) { - elem.options.length = 0; - } - } - - return this; - }, - - clone: function( dataAndEvents, deepDataAndEvents ) { - dataAndEvents = dataAndEvents == null ? false : dataAndEvents; - deepDataAndEvents = deepDataAndEvents == null ? dataAndEvents : deepDataAndEvents; - - return this.map( function () { - return jQuery.clone( this, dataAndEvents, deepDataAndEvents ); - }); - }, - - html: function( value ) { - return jQuery.access( this, function( value ) { - var elem = this[0] || {}, - i = 0, - l = this.length; - - if ( value === undefined ) { - return elem.nodeType === 1 ? 
- elem.innerHTML.replace( rinlinejQuery, "" ) : - undefined; - } - - // See if we can take a shortcut and just use innerHTML - if ( typeof value === "string" && !rnoInnerhtml.test( value ) && - ( jQuery.support.htmlSerialize || !rnoshimcache.test( value ) ) && - ( jQuery.support.leadingWhitespace || !rleadingWhitespace.test( value ) ) && - !wrapMap[ ( rtagName.exec( value ) || ["", ""] )[1].toLowerCase() ] ) { - - value = value.replace( rxhtmlTag, "<$1>" ); - - try { - for (; i < l; i++ ) { - // Remove element nodes and prevent memory leaks - elem = this[i] || {}; - if ( elem.nodeType === 1 ) { - jQuery.cleanData( getAll( elem, false ) ); - elem.innerHTML = value; - } - } - - elem = 0; - - // If using innerHTML throws an exception, use the fallback method - } catch(e) {} - } - - if ( elem ) { - this.empty().append( value ); - } - }, null, value, arguments.length ); - }, - - replaceWith: function() { - var - // Snapshot the DOM in case .domManip sweeps something relevant into its fragment - args = jQuery.map( this, function( elem ) { - return [ elem.nextSibling, elem.parentNode ]; - }), - i = 0; - - // Make the changes, replacing each context element with the new content - this.domManip( arguments, function( elem ) { - var next = args[ i++ ], - parent = args[ i++ ]; - - if ( parent ) { - // Don't use the snapshot next if it has moved (#13810) - if ( next && next.parentNode !== parent ) { - next = this.nextSibling; - } - jQuery( this ).remove(); - parent.insertBefore( elem, next ); - } - // Allow new content to include elements from the context set - }, true ); - - // Force removal if there was no new content (e.g., from empty arguments) - return i ? this : this.remove(); - }, - - detach: function( selector ) { - return this.remove( selector, true ); - }, - - domManip: function( args, callback, allowIntersection ) { - - // Flatten any nested arrays - args = core_concat.apply( [], args ); - - var first, node, hasScripts, - scripts, doc, fragment, - i = 0, - l = this.length, - set = this, - iNoClone = l - 1, - value = args[0], - isFunction = jQuery.isFunction( value ); - - // We can't cloneNode fragments that contain checked, in WebKit - if ( isFunction || !( l <= 1 || typeof value !== "string" || jQuery.support.checkClone || !rchecked.test( value ) ) ) { - return this.each(function( index ) { - var self = set.eq( index ); - if ( isFunction ) { - args[0] = value.call( this, index, self.html() ); - } - self.domManip( args, callback, allowIntersection ); - }); - } - - if ( l ) { - fragment = jQuery.buildFragment( args, this[ 0 ].ownerDocument, false, !allowIntersection && this ); - first = fragment.firstChild; - - if ( fragment.childNodes.length === 1 ) { - fragment = first; - } - - if ( first ) { - scripts = jQuery.map( getAll( fragment, "script" ), disableScript ); - hasScripts = scripts.length; - - // Use the original fragment for the last item instead of the first because it can end up - // being emptied incorrectly in certain situations (#8070). 
- for ( ; i < l; i++ ) { - node = fragment; - - if ( i !== iNoClone ) { - node = jQuery.clone( node, true, true ); - - // Keep references to cloned scripts for later restoration - if ( hasScripts ) { - jQuery.merge( scripts, getAll( node, "script" ) ); - } - } - - callback.call( this[i], node, i ); - } - - if ( hasScripts ) { - doc = scripts[ scripts.length - 1 ].ownerDocument; - - // Reenable scripts - jQuery.map( scripts, restoreScript ); - - // Evaluate executable scripts on first document insertion - for ( i = 0; i < hasScripts; i++ ) { - node = scripts[ i ]; - if ( rscriptType.test( node.type || "" ) && - !jQuery._data( node, "globalEval" ) && jQuery.contains( doc, node ) ) { - - if ( node.src ) { - // Hope ajax is available... - jQuery._evalUrl( node.src ); - } else { - jQuery.globalEval( ( node.text || node.textContent || node.innerHTML || "" ).replace( rcleanScript, "" ) ); - } - } - } - } - - // Fix #11809: Avoid leaking memory - fragment = first = null; - } - } - - return this; - } -}); - -// Support: IE<8 -// Manipulating tables requires a tbody -function manipulationTarget( elem, content ) { - return jQuery.nodeName( elem, "table" ) && - jQuery.nodeName( content.nodeType === 1 ? content : content.firstChild, "tr" ) ? - - elem.getElementsByTagName("tbody")[0] || - elem.appendChild( elem.ownerDocument.createElement("tbody") ) : - elem; -} - -// Replace/restore the type attribute of script elements for safe DOM manipulation -function disableScript( elem ) { - elem.type = (jQuery.find.attr( elem, "type" ) !== null) + "/" + elem.type; - return elem; -} -function restoreScript( elem ) { - var match = rscriptTypeMasked.exec( elem.type ); - if ( match ) { - elem.type = match[1]; - } else { - elem.removeAttribute("type"); - } - return elem; -} - -// Mark scripts as having already been evaluated -function setGlobalEval( elems, refElements ) { - var elem, - i = 0; - for ( ; (elem = elems[i]) != null; i++ ) { - jQuery._data( elem, "globalEval", !refElements || jQuery._data( refElements[i], "globalEval" ) ); - } -} - -function cloneCopyEvent( src, dest ) { - - if ( dest.nodeType !== 1 || !jQuery.hasData( src ) ) { - return; - } - - var type, i, l, - oldData = jQuery._data( src ), - curData = jQuery._data( dest, oldData ), - events = oldData.events; - - if ( events ) { - delete curData.handle; - curData.events = {}; - - for ( type in events ) { - for ( i = 0, l = events[ type ].length; i < l; i++ ) { - jQuery.event.add( dest, type, events[ type ][ i ] ); - } - } - } - - // make the cloned public data object a copy from the original - if ( curData.data ) { - curData.data = jQuery.extend( {}, curData.data ); - } -} - -function fixCloneNodeIssues( src, dest ) { - var nodeName, e, data; - - // We do not need to do anything for non-Elements - if ( dest.nodeType !== 1 ) { - return; - } - - nodeName = dest.nodeName.toLowerCase(); - - // IE6-8 copies events bound via attachEvent when using cloneNode. - if ( !jQuery.support.noCloneEvent && dest[ jQuery.expando ] ) { - data = jQuery._data( dest ); - - for ( e in data.events ) { - jQuery.removeEvent( dest, e, data.handle ); - } - - // Event data gets referenced instead of copied if the expando gets copied too - dest.removeAttribute( jQuery.expando ); - } - - // IE blanks contents when cloning scripts, and tries to evaluate newly-set text - if ( nodeName === "script" && dest.text !== src.text ) { - disableScript( dest ).text = src.text; - restoreScript( dest ); - - // IE6-10 improperly clones children of object elements using classid. 
- // IE10 throws NoModificationAllowedError if parent is null, #12132. - } else if ( nodeName === "object" ) { - if ( dest.parentNode ) { - dest.outerHTML = src.outerHTML; - } - - // This path appears unavoidable for IE9. When cloning an object - // element in IE9, the outerHTML strategy above is not sufficient. - // If the src has innerHTML and the destination does not, - // copy the src.innerHTML into the dest.innerHTML. #10324 - if ( jQuery.support.html5Clone && ( src.innerHTML && !jQuery.trim(dest.innerHTML) ) ) { - dest.innerHTML = src.innerHTML; - } - - } else if ( nodeName === "input" && manipulation_rcheckableType.test( src.type ) ) { - // IE6-8 fails to persist the checked state of a cloned checkbox - // or radio button. Worse, IE6-7 fail to give the cloned element - // a checked appearance if the defaultChecked value isn't also set - - dest.defaultChecked = dest.checked = src.checked; - - // IE6-7 get confused and end up setting the value of a cloned - // checkbox/radio button to an empty string instead of "on" - if ( dest.value !== src.value ) { - dest.value = src.value; - } - - // IE6-8 fails to return the selected option to the default selected - // state when cloning options - } else if ( nodeName === "option" ) { - dest.defaultSelected = dest.selected = src.defaultSelected; - - // IE6-8 fails to set the defaultValue to the correct value when - // cloning other types of input fields - } else if ( nodeName === "input" || nodeName === "textarea" ) { - dest.defaultValue = src.defaultValue; - } -} - -jQuery.each({ - appendTo: "append", - prependTo: "prepend", - insertBefore: "before", - insertAfter: "after", - replaceAll: "replaceWith" -}, function( name, original ) { - jQuery.fn[ name ] = function( selector ) { - var elems, - i = 0, - ret = [], - insert = jQuery( selector ), - last = insert.length - 1; - - for ( ; i <= last; i++ ) { - elems = i === last ? this : this.clone(true); - jQuery( insert[i] )[ original ]( elems ); - - // Modern browsers can apply jQuery collections as arrays, but oldIE needs a .get() - core_push.apply( ret, elems.get() ); - } - - return this.pushStack( ret ); - }; -}); - -function getAll( context, tag ) { - var elems, elem, - i = 0, - found = typeof context.getElementsByTagName !== core_strundefined ? context.getElementsByTagName( tag || "*" ) : - typeof context.querySelectorAll !== core_strundefined ? context.querySelectorAll( tag || "*" ) : - undefined; - - if ( !found ) { - for ( found = [], elems = context.childNodes || context; (elem = elems[i]) != null; i++ ) { - if ( !tag || jQuery.nodeName( elem, tag ) ) { - found.push( elem ); - } else { - jQuery.merge( found, getAll( elem, tag ) ); - } - } - } - - return tag === undefined || tag && jQuery.nodeName( context, tag ) ? 
- jQuery.merge( [ context ], found ) : - found; -} - -// Used in buildFragment, fixes the defaultChecked property -function fixDefaultChecked( elem ) { - if ( manipulation_rcheckableType.test( elem.type ) ) { - elem.defaultChecked = elem.checked; - } -} - -jQuery.extend({ - clone: function( elem, dataAndEvents, deepDataAndEvents ) { - var destElements, node, clone, i, srcElements, - inPage = jQuery.contains( elem.ownerDocument, elem ); - - if ( jQuery.support.html5Clone || jQuery.isXMLDoc(elem) || !rnoshimcache.test( "<" + elem.nodeName + ">" ) ) { - clone = elem.cloneNode( true ); - - // IE<=8 does not properly clone detached, unknown element nodes - } else { - fragmentDiv.innerHTML = elem.outerHTML; - fragmentDiv.removeChild( clone = fragmentDiv.firstChild ); - } - - if ( (!jQuery.support.noCloneEvent || !jQuery.support.noCloneChecked) && - (elem.nodeType === 1 || elem.nodeType === 11) && !jQuery.isXMLDoc(elem) ) { - - // We eschew Sizzle here for performance reasons: http://jsperf.com/getall-vs-sizzle/2 - destElements = getAll( clone ); - srcElements = getAll( elem ); - - // Fix all IE cloning issues - for ( i = 0; (node = srcElements[i]) != null; ++i ) { - // Ensure that the destination node is not null; Fixes #9587 - if ( destElements[i] ) { - fixCloneNodeIssues( node, destElements[i] ); - } - } - } - - // Copy the events from the original to the clone - if ( dataAndEvents ) { - if ( deepDataAndEvents ) { - srcElements = srcElements || getAll( elem ); - destElements = destElements || getAll( clone ); - - for ( i = 0; (node = srcElements[i]) != null; i++ ) { - cloneCopyEvent( node, destElements[i] ); - } - } else { - cloneCopyEvent( elem, clone ); - } - } - - // Preserve script evaluation history - destElements = getAll( clone, "script" ); - if ( destElements.length > 0 ) { - setGlobalEval( destElements, !inPage && getAll( elem, "script" ) ); - } - - destElements = srcElements = node = null; - - // Return the cloned set - return clone; - }, - - buildFragment: function( elems, context, scripts, selection ) { - var j, elem, contains, - tmp, tag, tbody, wrap, - l = elems.length, - - // Ensure a safe fragment - safe = createSafeFragment( context ), - - nodes = [], - i = 0; - - for ( ; i < l; i++ ) { - elem = elems[ i ]; - - if ( elem || elem === 0 ) { - - // Add nodes directly - if ( jQuery.type( elem ) === "object" ) { - jQuery.merge( nodes, elem.nodeType ? [ elem ] : elem ); - - // Convert non-html into a text node - } else if ( !rhtml.test( elem ) ) { - nodes.push( context.createTextNode( elem ) ); - - // Convert html into DOM nodes } else { - tmp = tmp || safe.appendChild( context.createElement("div") ); - - // Deserialize a standard representation - tag = ( rtagName.exec( elem ) || ["", ""] )[1].toLowerCase(); - wrap = wrapMap[ tag ] || wrapMap._default; - - tmp.innerHTML = wrap[1] + elem.replace( rxhtmlTag, "<$1>" ) + wrap[2]; - - // Descend through wrappers to the right content - j = wrap[0]; - while ( j-- ) { - tmp = tmp.lastChild; - } - - // Manually add leading whitespace removed by IE - if ( !jQuery.support.leadingWhitespace && rleadingWhitespace.test( elem ) ) { - nodes.push( context.createTextNode( rleadingWhitespace.exec( elem )[0] ) ); - } - - // Remove IE's autoinserted from table fragments - if ( !jQuery.support.tbody ) { - - // String was a , *may* have spurious - elem = tag === "table" && !rtbody.test( elem ) ? - tmp.firstChild : - - // String was a bare or - wrap[1] === "
" && !rtbody.test( elem ) ? - tmp : - 0; - - j = elem && elem.childNodes.length; - while ( j-- ) { - if ( jQuery.nodeName( (tbody = elem.childNodes[j]), "tbody" ) && !tbody.childNodes.length ) { - elem.removeChild( tbody ); - } - } - } - - jQuery.merge( nodes, tmp.childNodes ); - - // Fix #12392 for WebKit and IE > 9 - tmp.textContent = ""; - - // Fix #12392 for oldIE - while ( tmp.firstChild ) { - tmp.removeChild( tmp.firstChild ); - } - - // Remember the top-level container for proper cleanup - tmp = safe.lastChild; - } - } - } - - // Fix #11356: Clear elements from fragment - if ( tmp ) { - safe.removeChild( tmp ); - } - - // Reset defaultChecked for any radios and checkboxes - // about to be appended to the DOM in IE 6/7 (#8060) - if ( !jQuery.support.appendChecked ) { - jQuery.grep( getAll( nodes, "input" ), fixDefaultChecked ); - } - - i = 0; - while ( (elem = nodes[ i++ ]) ) { - - // #4087 - If origin and destination elements are the same, and this is - // that element, do not do anything - if ( selection && jQuery.inArray( elem, selection ) !== -1 ) { - continue; - } - - contains = jQuery.contains( elem.ownerDocument, elem ); - - // Append to fragment - tmp = getAll( safe.appendChild( elem ), "script" ); - - // Preserve script evaluation history - if ( contains ) { - setGlobalEval( tmp ); - } - - // Capture executables - if ( scripts ) { - j = 0; - while ( (elem = tmp[ j++ ]) ) { - if ( rscriptType.test( elem.type || "" ) ) { - scripts.push( elem ); - } - } - } - } - - tmp = null; - - return safe; - }, - - cleanData: function( elems, /* internal */ acceptData ) { - var elem, type, id, data, - i = 0, - internalKey = jQuery.expando, - cache = jQuery.cache, - deleteExpando = jQuery.support.deleteExpando, - special = jQuery.event.special; - - for ( ; (elem = elems[i]) != null; i++ ) { - - if ( acceptData || jQuery.acceptData( elem ) ) { - - id = elem[ internalKey ]; - data = id && cache[ id ]; - - if ( data ) { - if ( data.events ) { - for ( type in data.events ) { - if ( special[ type ] ) { - jQuery.event.remove( elem, type ); - - // This is a shortcut to avoid jQuery.event.remove's overhead - } else { - jQuery.removeEvent( elem, type, data.handle ); - } - } - } - - // Remove cache only if it was not already removed by jQuery.event.remove - if ( cache[ id ] ) { - - delete cache[ id ]; - - // IE does not allow us to delete expando properties from nodes, - // nor does it have a removeAttribute function on Document nodes; - // we must handle all of these cases - if ( deleteExpando ) { - delete elem[ internalKey ]; - - } else if ( typeof elem.removeAttribute !== core_strundefined ) { - elem.removeAttribute( internalKey ); - - } else { - elem[ internalKey ] = null; - } - - core_deletedIds.push( id ); - } - } - } - } - }, - - _evalUrl: function( url ) { - return jQuery.ajax({ - url: url, - type: "GET", - dataType: "script", - async: false, - global: false, - "throws": true - }); - } -}); -jQuery.fn.extend({ - wrapAll: function( html ) { - if ( jQuery.isFunction( html ) ) { - return this.each(function(i) { - jQuery(this).wrapAll( html.call(this, i) ); - }); - } - - if ( this[0] ) { - // The elements to wrap the target around - var wrap = jQuery( html, this[0].ownerDocument ).eq(0).clone(true); - - if ( this[0].parentNode ) { - wrap.insertBefore( this[0] ); - } - - wrap.map(function() { - var elem = this; - - while ( elem.firstChild && elem.firstChild.nodeType === 1 ) { - elem = elem.firstChild; - } - - return elem; - }).append( this ); - } - - return this; - }, - - wrapInner: function( html 
) { - if ( jQuery.isFunction( html ) ) { - return this.each(function(i) { - jQuery(this).wrapInner( html.call(this, i) ); - }); - } - - return this.each(function() { - var self = jQuery( this ), - contents = self.contents(); - - if ( contents.length ) { - contents.wrapAll( html ); - - } else { - self.append( html ); - } - }); - }, - - wrap: function( html ) { - var isFunction = jQuery.isFunction( html ); - - return this.each(function(i) { - jQuery( this ).wrapAll( isFunction ? html.call(this, i) : html ); - }); - }, - - unwrap: function() { - return this.parent().each(function() { - if ( !jQuery.nodeName( this, "body" ) ) { - jQuery( this ).replaceWith( this.childNodes ); - } - }).end(); - } -}); -var iframe, getStyles, curCSS, - ralpha = /alpha\([^)]*\)/i, - ropacity = /opacity\s*=\s*([^)]*)/, - rposition = /^(top|right|bottom|left)$/, - // swappable if display is none or starts with table except "table", "table-cell", or "table-caption" - // see here for display values: https://developer.mozilla.org/en-US/docs/CSS/display - rdisplayswap = /^(none|table(?!-c[ea]).+)/, - rmargin = /^margin/, - rnumsplit = new RegExp( "^(" + core_pnum + ")(.*)$", "i" ), - rnumnonpx = new RegExp( "^(" + core_pnum + ")(?!px)[a-z%]+$", "i" ), - rrelNum = new RegExp( "^([+-])=(" + core_pnum + ")", "i" ), - elemdisplay = { BODY: "block" }, - - cssShow = { position: "absolute", visibility: "hidden", display: "block" }, - cssNormalTransform = { - letterSpacing: 0, - fontWeight: 400 - }, - - cssExpand = [ "Top", "Right", "Bottom", "Left" ], - cssPrefixes = [ "Webkit", "O", "Moz", "ms" ]; - -// return a css property mapped to a potentially vendor prefixed property -function vendorPropName( style, name ) { - - // shortcut for names that are not vendor prefixed - if ( name in style ) { - return name; - } - - // check for vendor prefixed names - var capName = name.charAt(0).toUpperCase() + name.slice(1), - origName = name, - i = cssPrefixes.length; - - while ( i-- ) { - name = cssPrefixes[ i ] + capName; - if ( name in style ) { - return name; - } - } - - return origName; -} - -function isHidden( elem, el ) { - // isHidden might be called from jQuery#filter function; - // in that case, element will be second argument - elem = el || elem; - return jQuery.css( elem, "display" ) === "none" || !jQuery.contains( elem.ownerDocument, elem ); -} - -function showHide( elements, show ) { - var display, elem, hidden, - values = [], - index = 0, - length = elements.length; - - for ( ; index < length; index++ ) { - elem = elements[ index ]; - if ( !elem.style ) { - continue; - } - - values[ index ] = jQuery._data( elem, "olddisplay" ); - display = elem.style.display; - if ( show ) { - // Reset the inline display of this element to learn if it is - // being hidden by cascaded rules or not - if ( !values[ index ] && display === "none" ) { - elem.style.display = ""; - } - - // Set elements which have been overridden with display: none - // in a stylesheet to whatever the default browser style is - // for such an element - if ( elem.style.display === "" && isHidden( elem ) ) { - values[ index ] = jQuery._data( elem, "olddisplay", css_defaultDisplay(elem.nodeName) ); - } - } else { - - if ( !values[ index ] ) { - hidden = isHidden( elem ); - - if ( display && display !== "none" || !hidden ) { - jQuery._data( elem, "olddisplay", hidden ? 
display : jQuery.css( elem, "display" ) ); - } - } - } - } - - // Set the display of most of the elements in a second loop - // to avoid the constant reflow - for ( index = 0; index < length; index++ ) { - elem = elements[ index ]; - if ( !elem.style ) { - continue; - } - if ( !show || elem.style.display === "none" || elem.style.display === "" ) { - elem.style.display = show ? values[ index ] || "" : "none"; - } - } - - return elements; -} - -jQuery.fn.extend({ - css: function( name, value ) { - return jQuery.access( this, function( elem, name, value ) { - var len, styles, - map = {}, - i = 0; - - if ( jQuery.isArray( name ) ) { - styles = getStyles( elem ); - len = name.length; - - for ( ; i < len; i++ ) { - map[ name[ i ] ] = jQuery.css( elem, name[ i ], false, styles ); - } - - return map; - } - - return value !== undefined ? - jQuery.style( elem, name, value ) : - jQuery.css( elem, name ); - }, name, value, arguments.length > 1 ); - }, - show: function() { - return showHide( this, true ); - }, - hide: function() { - return showHide( this ); - }, - toggle: function( state ) { - if ( typeof state === "boolean" ) { - return state ? this.show() : this.hide(); - } - - return this.each(function() { - if ( isHidden( this ) ) { - jQuery( this ).show(); - } else { - jQuery( this ).hide(); - } - }); - } -}); - -jQuery.extend({ - // Add in style property hooks for overriding the default - // behavior of getting and setting a style property - cssHooks: { - opacity: { - get: function( elem, computed ) { - if ( computed ) { - // We should always get a number back from opacity - var ret = curCSS( elem, "opacity" ); - return ret === "" ? "1" : ret; - } - } - } - }, - - // Don't automatically add "px" to these possibly-unitless properties - cssNumber: { - "columnCount": true, - "fillOpacity": true, - "fontWeight": true, - "lineHeight": true, - "opacity": true, - "order": true, - "orphans": true, - "widows": true, - "zIndex": true, - "zoom": true - }, - - // Add in properties whose names you wish to fix before - // setting or getting the value - cssProps: { - // normalize float css property - "float": jQuery.support.cssFloat ? "cssFloat" : "styleFloat" - }, - - // Get and set the style property on a DOM Node - style: function( elem, name, value, extra ) { - // Don't set styles on text and comment nodes - if ( !elem || elem.nodeType === 3 || elem.nodeType === 8 || !elem.style ) { - return; - } - - // Make sure that we're working with the right name - var ret, type, hooks, - origName = jQuery.camelCase( name ), - style = elem.style; - - name = jQuery.cssProps[ origName ] || ( jQuery.cssProps[ origName ] = vendorPropName( style, origName ) ); - - // gets hook for the prefixed version - // followed by the unprefixed version - hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; - - // Check if we're setting a value - if ( value !== undefined ) { - type = typeof value; - - // convert relative number strings (+= or -=) to relative numbers. #7345 - if ( type === "string" && (ret = rrelNum.exec( value )) ) { - value = ( ret[1] + 1 ) * ret[2] + parseFloat( jQuery.css( elem, name ) ); - // Fixes bug #9237 - type = "number"; - } - - // Make sure that NaN and null values aren't set. 
See: #7116 - if ( value == null || type === "number" && isNaN( value ) ) { - return; - } - - // If a number was passed in, add 'px' to the (except for certain CSS properties) - if ( type === "number" && !jQuery.cssNumber[ origName ] ) { - value += "px"; - } - - // Fixes #8908, it can be done more correctly by specifing setters in cssHooks, - // but it would mean to define eight (for every problematic property) identical functions - if ( !jQuery.support.clearCloneStyle && value === "" && name.indexOf("background") === 0 ) { - style[ name ] = "inherit"; - } - - // If a hook was provided, use that value, otherwise just set the specified value - if ( !hooks || !("set" in hooks) || (value = hooks.set( elem, value, extra )) !== undefined ) { - - // Wrapped to prevent IE from throwing errors when 'invalid' values are provided - // Fixes bug #5509 - try { - style[ name ] = value; - } catch(e) {} - } - - } else { - // If a hook was provided get the non-computed value from there - if ( hooks && "get" in hooks && (ret = hooks.get( elem, false, extra )) !== undefined ) { - return ret; - } - - // Otherwise just get the value from the style object - return style[ name ]; - } - }, - - css: function( elem, name, extra, styles ) { - var num, val, hooks, - origName = jQuery.camelCase( name ); - - // Make sure that we're working with the right name - name = jQuery.cssProps[ origName ] || ( jQuery.cssProps[ origName ] = vendorPropName( elem.style, origName ) ); - - // gets hook for the prefixed version - // followed by the unprefixed version - hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; - - // If a hook was provided get the computed value from there - if ( hooks && "get" in hooks ) { - val = hooks.get( elem, true, extra ); - } - - // Otherwise, if a way to get the computed value exists, use that - if ( val === undefined ) { - val = curCSS( elem, name, styles ); - } - - //convert "normal" to computed value - if ( val === "normal" && name in cssNormalTransform ) { - val = cssNormalTransform[ name ]; - } - - // Return, converting to number if forced or a qualifier was provided and val looks numeric - if ( extra === "" || extra ) { - num = parseFloat( val ); - return extra === true || jQuery.isNumeric( num ) ? num || 0 : val; - } - return val; - } -}); - -// NOTE: we've included the "window" in window.getComputedStyle -// because jsdom on node.js will break without it. -if ( window.getComputedStyle ) { - getStyles = function( elem ) { - return window.getComputedStyle( elem, null ); - }; - - curCSS = function( elem, name, _computed ) { - var width, minWidth, maxWidth, - computed = _computed || getStyles( elem ), - - // getPropertyValue is only needed for .css('filter') in IE9, see #12537 - ret = computed ? 
computed.getPropertyValue( name ) || computed[ name ] : undefined, - style = elem.style; - - if ( computed ) { - - if ( ret === "" && !jQuery.contains( elem.ownerDocument, elem ) ) { - ret = jQuery.style( elem, name ); - } - - // A tribute to the "awesome hack by Dean Edwards" - // Chrome < 17 and Safari 5.0 uses "computed value" instead of "used value" for margin-right - // Safari 5.1.7 (at least) returns percentage for a larger set of values, but width seems to be reliably pixels - // this is against the CSSOM draft spec: http://dev.w3.org/csswg/cssom/#resolved-values - if ( rnumnonpx.test( ret ) && rmargin.test( name ) ) { - - // Remember the original values - width = style.width; - minWidth = style.minWidth; - maxWidth = style.maxWidth; - - // Put in the new values to get a computed value out - style.minWidth = style.maxWidth = style.width = ret; - ret = computed.width; - - // Revert the changed values - style.width = width; - style.minWidth = minWidth; - style.maxWidth = maxWidth; - } - } - - return ret; - }; -} else if ( document.documentElement.currentStyle ) { - getStyles = function( elem ) { - return elem.currentStyle; - }; - - curCSS = function( elem, name, _computed ) { - var left, rs, rsLeft, - computed = _computed || getStyles( elem ), - ret = computed ? computed[ name ] : undefined, - style = elem.style; - - // Avoid setting ret to empty string here - // so we don't default to auto - if ( ret == null && style && style[ name ] ) { - ret = style[ name ]; - } - - // From the awesome hack by Dean Edwards - // http://erik.eae.net/archives/2007/07/27/18.54.15/#comment-102291 - - // If we're not dealing with a regular pixel number - // but a number that has a weird ending, we need to convert it to pixels - // but not position css attributes, as those are proportional to the parent element instead - // and we can't measure the parent instead because it might trigger a "stacking dolls" problem - if ( rnumnonpx.test( ret ) && !rposition.test( name ) ) { - - // Remember the original values - left = style.left; - rs = elem.runtimeStyle; - rsLeft = rs && rs.left; - - // Put in the new values to get a computed value out - if ( rsLeft ) { - rs.left = elem.currentStyle.left; - } - style.left = name === "fontSize" ? "1em" : ret; - ret = style.pixelLeft + "px"; - - // Revert the changed values - style.left = left; - if ( rsLeft ) { - rs.left = rsLeft; - } - } - - return ret === "" ? "auto" : ret; - }; -} - -function setPositiveNumber( elem, value, subtract ) { - var matches = rnumsplit.exec( value ); - return matches ? - // Guard against undefined "subtract", e.g., when used as in cssHooks - Math.max( 0, matches[ 1 ] - ( subtract || 0 ) ) + ( matches[ 2 ] || "px" ) : - value; -} - -function augmentWidthOrHeight( elem, name, extra, isBorderBox, styles ) { - var i = extra === ( isBorderBox ? "border" : "content" ) ? - // If we already have the right measurement, avoid augmentation - 4 : - // Otherwise initialize for horizontal or vertical properties - name === "width" ? 
1 : 0, - - val = 0; - - for ( ; i < 4; i += 2 ) { - // both box models exclude margin, so add it if we want it - if ( extra === "margin" ) { - val += jQuery.css( elem, extra + cssExpand[ i ], true, styles ); - } - - if ( isBorderBox ) { - // border-box includes padding, so remove it if we want content - if ( extra === "content" ) { - val -= jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); - } - - // at this point, extra isn't border nor margin, so remove border - if ( extra !== "margin" ) { - val -= jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); - } - } else { - // at this point, extra isn't content, so add padding - val += jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); - - // at this point, extra isn't content nor padding, so add border - if ( extra !== "padding" ) { - val += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); - } - } - } - - return val; -} - -function getWidthOrHeight( elem, name, extra ) { - - // Start with offset property, which is equivalent to the border-box value - var valueIsBorderBox = true, - val = name === "width" ? elem.offsetWidth : elem.offsetHeight, - styles = getStyles( elem ), - isBorderBox = jQuery.support.boxSizing && jQuery.css( elem, "boxSizing", false, styles ) === "border-box"; - - // some non-html elements return undefined for offsetWidth, so check for null/undefined - // svg - https://bugzilla.mozilla.org/show_bug.cgi?id=649285 - // MathML - https://bugzilla.mozilla.org/show_bug.cgi?id=491668 - if ( val <= 0 || val == null ) { - // Fall back to computed then uncomputed css if necessary - val = curCSS( elem, name, styles ); - if ( val < 0 || val == null ) { - val = elem.style[ name ]; - } - - // Computed unit is not pixels. Stop here and return. - if ( rnumnonpx.test(val) ) { - return val; - } - - // we need the check for style in case a browser which returns unreliable values - // for getComputedStyle silently falls back to the reliable elem.style - valueIsBorderBox = isBorderBox && ( jQuery.support.boxSizingReliable || val === elem.style[ name ] ); - - // Normalize "", auto, and prepare for extra - val = parseFloat( val ) || 0; - } - - // use the active box-sizing model to add/subtract irrelevant styles - return ( val + - augmentWidthOrHeight( - elem, - name, - extra || ( isBorderBox ? "border" : "content" ), - valueIsBorderBox, - styles - ) - ) + "px"; -} - -// Try to determine the default display value of an element -function css_defaultDisplay( nodeName ) { - var doc = document, - display = elemdisplay[ nodeName ]; - - if ( !display ) { - display = actualDisplay( nodeName, doc ); - - // If the simple way fails, read from inside an iframe - if ( display === "none" || !display ) { - // Use the already-created iframe if possible - iframe = ( iframe || - jQuery("