From 4c197b5d5669fc3453ffc82b7ea76ab18772ec69 Mon Sep 17 00:00:00 2001 From: Konstantin Boudnik Date: Thu, 23 Jan 2014 22:29:23 +0000 Subject: [PATCH 01/11] HADOOP-10167. Mark hadoop-common source as UTF-8 in Maven pom files / refactoring. Contributed by Mikhail Antonov. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1560831 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++ hadoop-mapreduce-project/pom.xml | 1 - hadoop-project/pom.xml | 4 ---- hadoop-tools/hadoop-distcp/pom.xml | 1 - hadoop-tools/hadoop-openstack/pom.xml | 1 - hadoop-yarn-project/hadoop-yarn/pom.xml | 1 - hadoop-yarn-project/pom.xml | 1 - pom.xml | 4 ++++ 8 files changed, 7 insertions(+), 9 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 72685218ce0..2e5a435b791 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -631,6 +631,9 @@ Release 2.3.0 - UNRELEASED HADOOP-10112. har file listing doesn't work with wild card. (brandonli) + HADOOP-10167. Mark hadoop-common source as UTF-8 in Maven pom files / refactoring + (Mikhail Antonov via cos) + Release 2.2.0 - 2013-10-13 INCOMPATIBLE CHANGES diff --git a/hadoop-mapreduce-project/pom.xml b/hadoop-mapreduce-project/pom.xml index bce1a244882..fdd817427b8 100644 --- a/hadoop-mapreduce-project/pom.xml +++ b/hadoop-mapreduce-project/pom.xml @@ -29,7 +29,6 @@ http://hadoop.apache.org/mapreduce/ - UTF-8 true 600000 once diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index 84858c386db..3a6519c17c2 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -39,10 +39,6 @@ 4 - - UTF-8 - UTF-8 - 1.0.9 diff --git a/hadoop-tools/hadoop-distcp/pom.xml b/hadoop-tools/hadoop-distcp/pom.xml index 9284592ca66..294ca882b3a 100644 --- a/hadoop-tools/hadoop-distcp/pom.xml +++ b/hadoop-tools/hadoop-distcp/pom.xml @@ -33,7 +33,6 @@ UTF-8 true - UTF-8 diff --git a/hadoop-tools/hadoop-openstack/pom.xml b/hadoop-tools/hadoop-openstack/pom.xml index bc6e391b162..a8afc9ed011 100644 --- a/hadoop-tools/hadoop-openstack/pom.xml +++ b/hadoop-tools/hadoop-openstack/pom.xml @@ -35,7 +35,6 @@ UTF-8 true - UTF-8 diff --git a/hadoop-yarn-project/hadoop-yarn/pom.xml b/hadoop-yarn-project/hadoop-yarn/pom.xml index 4e5e64a21d5..f6479bcb076 100644 --- a/hadoop-yarn-project/hadoop-yarn/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/pom.xml @@ -26,7 +26,6 @@ hadoop-yarn - UTF-8 true 600000 ${basedir} diff --git a/hadoop-yarn-project/pom.xml b/hadoop-yarn-project/pom.xml index f8c3dacf9cb..01f7cd06bcb 100644 --- a/hadoop-yarn-project/pom.xml +++ b/hadoop-yarn-project/pom.xml @@ -29,7 +29,6 @@ http://hadoop.apache.org/yarn/ - UTF-8 true 600000 once diff --git a/pom.xml b/pom.xml index 73a5b6bd8e8..27db19ba353 100644 --- a/pom.xml +++ b/pom.xml @@ -85,6 +85,10 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs apache.staging.https Apache Release Distribution Repository https://repository.apache.org/service/local/staging/deploy/maven2 + + + UTF-8 + UTF-8 From 817be26c17cc69443a35bd8e5923d22e3b26a570 Mon Sep 17 00:00:00 2001 From: Uma Maheswara Rao G Date: Fri, 24 Jan 2014 06:27:40 +0000 Subject: [PATCH 02/11] HADOOP-10248. Property name should be included in the exception where property value is null. Contributed by Akira AJISAKA. 
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1560906 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++ .../src/main/java/org/apache/hadoop/conf/Configuration.java | 2 +- .../test/java/org/apache/hadoop/conf/TestConfiguration.java | 3 +++ 3 files changed, 7 insertions(+), 1 deletion(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 2e5a435b791..4d18a94c4a1 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -556,6 +556,9 @@ Release 2.3.0 - UNRELEASED HADOOP-10132. RPC#stopProxy() should log the class of proxy when IllegalArgumentException is encountered (Ted yu via umamahesh) + HADOOP-10248. Property name should be included in the exception where property value + is null (Akira AJISAKA via umamahesh) + OPTIMIZATIONS HADOOP-10142. Avoid groups lookup for unprivileged users such as "dr.who" diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java index 456a8d2cf10..c0d82004c0d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java @@ -963,7 +963,7 @@ public void set(String name, String value, String source) { "Property name must not be null"); Preconditions.checkArgument( value != null, - "Property value must not be null"); + "The value of property " + name + " must not be null"); DeprecationContext deprecations = deprecationContext.get(); if (deprecations.getDeprecatedKeyMap().isEmpty()) { getProps(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java index 87ebb61f49e..1ce0b010851 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java @@ -1183,6 +1183,8 @@ public void testSettingValueNull() throws Exception { fail("Should throw an IllegalArgumentException exception "); } catch (Exception e) { assertTrue(e instanceof IllegalArgumentException); + assertEquals(e.getMessage(), + "The value of property testClassName must not be null"); } } @@ -1193,6 +1195,7 @@ public void testSettingKeyNull() throws Exception { fail("Should throw an IllegalArgumentException exception "); } catch (Exception e) { assertTrue(e instanceof IllegalArgumentException); + assertEquals(e.getMessage(), "Property name must not be null"); } } From 20176840f6287fb426090820d5a3319c7e120bea Mon Sep 17 00:00:00 2001 From: Jason Darrell Lowe Date: Fri, 24 Jan 2014 15:45:28 +0000 Subject: [PATCH 03/11] Addendum patch for HADOOP-9652 to fix performance problems. 
Contributed by Andrew Wang git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1561038 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 6 +++--- .../java/org/apache/hadoop/fs/RawLocalFileSystem.java | 11 ++++++++++- .../java/org/apache/hadoop/fs/TestSymlinkLocalFS.java | 5 +++++ 3 files changed, 18 insertions(+), 4 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 4d18a94c4a1..7595563d4e0 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -424,6 +424,9 @@ Release 2.4.0 - UNRELEASED HADOOP-10143 replace WritableFactories's hashmap with ConcurrentHashMap (Liang Xie via stack) + HADOOP-9652. Allow RawLocalFs#getFileLinkStatus to fill in the link owner + and mode if requested. (Andrew Wang via Colin Patrick McCabe) + OPTIMIZATIONS HADOOP-9748. Reduce blocking on UGI.ensureInitialized (daryn) @@ -450,9 +453,6 @@ Release 2.4.0 - UNRELEASED HADOOP-9817. FileSystem#globStatus and FileContext#globStatus need to work with symlinks. (Colin Patrick McCabe via Andrew Wang) - HADOOP-9652. RawLocalFs#getFileLinkStatus does not fill in the link owner - and mode. (Andrew Wang via Colin Patrick McCabe) - HADOOP-9875. TestDoAsEffectiveUser can fail on JDK 7. (Aaron T. Myers via Colin Patrick McCabe) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java index 7d70ada73b4..bb5d8aada3e 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java @@ -16,8 +16,11 @@ * limitations under the License. */ + package org.apache.hadoop.fs; +import com.google.common.annotations.VisibleForTesting; + import java.io.BufferedOutputStream; import java.io.DataOutput; import java.io.File; @@ -51,7 +54,13 @@ public class RawLocalFileSystem extends FileSystem { static final URI NAME = URI.create("file:///"); private Path workingDir; - private static final boolean useDeprecatedFileStatus = !Stat.isAvailable(); + // Temporary workaround for HADOOP-9652. + private static boolean useDeprecatedFileStatus = true; + + @VisibleForTesting + public static void useStatIfAvailable() { + useDeprecatedFileStatus = !Stat.isAvailable(); + } public RawLocalFileSystem() { workingDir = getInitialWorkingDirectory(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestSymlinkLocalFS.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestSymlinkLocalFS.java index c82dcc8a124..64e34af64bb 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestSymlinkLocalFS.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestSymlinkLocalFS.java @@ -38,6 +38,11 @@ * Test symbolic links using LocalFs. */ abstract public class TestSymlinkLocalFS extends SymlinkBaseTest { + + // Workaround for HADOOP-9652 + static { + RawLocalFileSystem.useStatIfAvailable(); + } @Override protected String getScheme() { From 3497e76e1956fc742e95753f79138e8e096795d5 Mon Sep 17 00:00:00 2001 From: Jason Darrell Lowe Date: Fri, 24 Jan 2014 18:54:48 +0000 Subject: [PATCH 04/11] YARN-1575. Public localizer crashes with "Localized unkown resource". 
Contributed by Jason Lowe

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1561110 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-yarn-project/CHANGES.txt                |  3 +++
 .../localizer/ResourceLocalizationService.java | 12 ++++++++----
 2 files changed, 11 insertions(+), 4 deletions(-)

diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 828f1f8a33b..f2491aec07b 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -366,6 +366,9 @@ Release 2.4.0 - UNRELEASED
     YARN-1607. TestRM relies on the scheduler assigning multiple containers in
     a single node update (Sandy Ryza)
 
+    YARN-1575. Public localizer crashes with "Localized unkown resource"
+    (jlowe)
+
 Release 2.3.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
index 4cbc37d5053..97c68aae9e0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
@@ -638,8 +638,8 @@ class PublicLocalizer extends Thread {
       super("Public Localizer");
       this.lfs = getLocalFileContext(conf);
       this.conf = conf;
-      this.pending =
-          new ConcurrentHashMap<Future<Path>, LocalizerResourceRequestEvent>();
+      this.pending = Collections.synchronizedMap(
+          new HashMap<Future<Path>, LocalizerResourceRequestEvent>());
       this.threadPool = createLocalizerExecutor(conf);
       this.queue = new ExecutorCompletionService<Path>(threadPool);
     }
@@ -675,8 +675,12 @@ public void addResource(LocalizerResourceRequestEvent request) {
           publicDirDestPath = new Path(publicDirDestPath, Long.toString(publicRsrc
               .nextUniqueNumber()));
-          pending.put(queue.submit(new FSDownload(lfs, null, conf,
-              publicDirDestPath, resource)), request);
+          // explicitly synchronize pending here to avoid future task
+          // completing and being dequeued before pending updated
+          synchronized (pending) {
+            pending.put(queue.submit(new FSDownload(lfs, null, conf,
+                publicDirDestPath, resource)), request);
+          }
         } catch (IOException e) {
           rsrc.unlock();
           // TODO Need to Fix IO Exceptions - Notifying resource

From dd1bc7e1c7e0712a690034f044ab4cf5eaf98ca6 Mon Sep 17 00:00:00 2001
From: Kihwal Lee
Date: Fri, 24 Jan 2014 22:56:05 +0000
Subject: [PATCH 05/11] HDFS-5728. Block recovery will fail if the metafile
 does not have crc for all chunks of the block. Contributed by Vinay.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1561223 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + .../fsdataset/impl/BlockPoolSlice.java | 23 ++++++-- .../apache/hadoop/hdfs/TestLeaseRecovery.java | 59 +++++++++++++++++++ 3 files changed, 81 insertions(+), 4 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index b221d9d215f..8862269cb35 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -594,6 +594,9 @@ Release 2.4.0 - UNRELEASED HDFS-5806. balancer should set SoTimeout to avoid indefinite hangs. (Nathan Roberts via Andrew Wang). + HDFS-5728. Block recovery will fail if the metafile does not have crc + for all chunks of the block (Vinay via kihwal) + BREAKDOWN OF HDFS-2832 SUBTASKS AND RELATED JIRAS HDFS-4985. Add storage type to the protocol and expose it in block report diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java index ce3dccde71b..ed9ba589dc6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java @@ -23,6 +23,7 @@ import java.io.FileInputStream; import java.io.IOException; import java.io.InputStream; +import java.io.RandomAccessFile; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.DU; @@ -191,7 +192,7 @@ void addToReplicasMap(ReplicaMap volumeMap, File dir, boolean isFinalized blockFile.length(), genStamp, volume, blockFile.getParentFile()); } else { newReplica = new ReplicaWaitingToBeRecovered(blockId, - validateIntegrity(blockFile, genStamp), + validateIntegrityAndSetLength(blockFile, genStamp), genStamp, volume, blockFile.getParentFile()); } @@ -214,7 +215,7 @@ void addToReplicasMap(ReplicaMap volumeMap, File dir, boolean isFinalized * @param genStamp generation stamp of the block * @return the number of valid bytes */ - private long validateIntegrity(File blockFile, long genStamp) { + private long validateIntegrityAndSetLength(File blockFile, long genStamp) { DataInputStream checksumIn = null; InputStream blockIn = null; try { @@ -257,11 +258,25 @@ private long validateIntegrity(File blockFile, long genStamp) { IOUtils.readFully(blockIn, buf, 0, lastChunkSize); checksum.update(buf, 0, lastChunkSize); + long validFileLength; if (checksum.compare(buf, lastChunkSize)) { // last chunk matches crc - return lastChunkStartPos + lastChunkSize; + validFileLength = lastChunkStartPos + lastChunkSize; } else { // last chunck is corrupt - return lastChunkStartPos; + validFileLength = lastChunkStartPos; } + + // truncate if extra bytes are present without CRC + if (blockFile.length() > validFileLength) { + RandomAccessFile blockRAF = new RandomAccessFile(blockFile, "rw"); + try { + // truncate blockFile + blockRAF.setLength(validFileLength); + } finally { + blockRAF.close(); + } + } + + return validFileLength; } catch (IOException e) { FsDatasetImpl.LOG.warn(e); return 0; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java index 
2dbe6e82a7f..91c9ba80b04 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
@@ -19,20 +19,28 @@
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
+import java.io.File;
 import java.io.IOException;
+import java.io.RandomAccessFile;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.TestInterDatanodeProtocol;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.junit.Test;
 
 public class TestLeaseRecovery {
@@ -148,4 +156,55 @@ public void testBlockSynchronization() throws Exception {
       if (cluster != null) {cluster.shutdown();}
     }
   }
+
+  /**
+   * Block recovery when the meta file does not have CRCs for all chunks
+   * of the block file.
+   */
+  @Test
+  public void testBlockRecoveryWithLessMetafile() throws Exception {
+    Configuration conf = new Configuration();
+    conf.set(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY,
+        UserGroupInformation.getCurrentUser().getShortUserName());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
+        .build();
+    Path file = new Path("/testRecoveryFile");
+    DistributedFileSystem dfs = cluster.getFileSystem();
+    FSDataOutputStream out = dfs.create(file);
+    int count = 0;
+    while (count < 2 * 1024 * 1024) {
+      out.writeBytes("Data");
+      count += 4;
+    }
+    out.hsync();
+    // abort the original stream
+    ((DFSOutputStream) out.getWrappedStream()).abort();
+
+    LocatedBlocks locations = cluster.getNameNodeRpc().getBlockLocations(
+        file.toString(), 0, count);
+    ExtendedBlock block = locations.get(0).getBlock();
+    DataNode dn = cluster.getDataNodes().get(0);
+    BlockLocalPathInfo localPathInfo = dn.getBlockLocalPathInfo(block, null);
+    File metafile = new File(localPathInfo.getMetaPath());
+    assertTrue(metafile.exists());
+
+    // reduce the block meta file size
+    RandomAccessFile raf = new RandomAccessFile(metafile, "rw");
+    raf.setLength(metafile.length() - 20);
+    raf.close();
+
+    // restart DN to make replica to RWR
+    DataNodeProperties dnProp = cluster.stopDataNode(0);
+    cluster.restartDataNode(dnProp, true);
+
+    // try to recover the lease
+    DistributedFileSystem newdfs = (DistributedFileSystem) FileSystem
+        .newInstance(cluster.getConfiguration(0));
+    count = 0;
+    while (++count < 10 && !newdfs.recoverLease(file)) {
+      Thread.sleep(1000);
+    }
+    assertTrue("File should be closed", newdfs.recoverLease(file));
+
+  }
 }

From befb254e61a34352d146be79656d656044432dd1 Mon Sep 17 00:00:00 2001
From: Uma Maheswara Rao G
Date: Sat, 25 Jan 2014 15:50:42 +0000
Subject: [PATCH 06/11] HDFS-5343. When cat command is issued on snapshot
 files, getting unexpected result. Contributed by Sathish.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1561325 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt        |  3 +
 .../apache/hadoop/hdfs/DFSInputStream.java         |  3 +
 .../snapshot/TestSnapshotFileLength.java           | 67 ++++++++++++++++++-
 3 files changed, 70 insertions(+), 3 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 8862269cb35..aea37d4ef55 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1137,6 +1137,9 @@ Release 2.3.0 - UNRELEASED
     HDFS-5789. Some of snapshot APIs missing checkOperation double check in fsn.
     (umamahesh)
 
+    HDFS-5343. When cat command is issued on snapshot files, getting
+    unexpected result. (Sathish via umamahesh)
+
 Release 2.2.0 - 2013-10-13
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index d90317d2bf0..73861bc8ade 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -792,6 +792,9 @@ private int readWithStrategy(ReaderStrategy strategy, int off, int len) throws I
         currentNode = blockSeekTo(pos);
       }
       int realLen = (int) Math.min(len, (blockEnd - pos + 1L));
+      if (locatedBlocks.isLastBlockComplete()) {
+        realLen = (int) Math.min(realLen, locatedBlocks.getFileLength());
+      }
       int result = readBuffer(strategy, off, realLen, corruptedBlockMap);
 
       if (result >= 0) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotFileLength.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotFileLength.java
index cd316ab9440..817a2de882d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotFileLength.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotFileLength.java
@@ -17,22 +17,26 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.snapshot;
 
-import java.util.Random;
+import java.io.ByteArrayOutputStream;
+import java.io.PrintStream;
+
 import org.apache.hadoop.fs.FileStatus;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 
 import static org.hamcrest.CoreMatchers.is;
-import static org.junit.Assert.assertThat;
-import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.*;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.util.ToolRunner;
 
 public class TestSnapshotFileLength {
@@ -112,4 +116,61 @@ public void testSnapshotfileLength() throws Exception {
     assertThat(bytesRead, is(BLOCKSIZE));
     fis.close();
   }
+
+  /**
+   * Added as part of HDFS-5343.
+   * Check that the cat command on a snapshot path cannot read
+   * a file beyond the snapshot file length.
+   * @throws Exception
+   */
+  @Test (timeout = 600000)
+  public void testSnapshotFileLengthWithCatCommand() throws Exception {
+
+    FSDataInputStream fis = null;
+    FileStatus fileStatus = null;
+
+    int bytesRead;
+    byte[] buffer = new byte[BLOCKSIZE * 8];
+
+    hdfs.mkdirs(sub);
+    Path file1 = new Path(sub, file1Name);
+    DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, SEED);
+
+    hdfs.allowSnapshot(sub);
+    hdfs.createSnapshot(sub, snapshot1);
+
+    DFSTestUtil.appendFile(hdfs, file1, BLOCKSIZE);
+
+    // Make sure we can read the entire file via its non-snapshot path.
+    fileStatus = hdfs.getFileStatus(file1);
+    assertEquals(fileStatus.getLen(), BLOCKSIZE * 2);
+    fis = hdfs.open(file1);
+    bytesRead = fis.read(buffer, 0, buffer.length);
+    assertEquals(bytesRead, BLOCKSIZE * 2);
+    fis.close();
+
+    Path file1snap1 =
+        SnapshotTestHelper.getSnapshotPath(sub, snapshot1, file1Name);
+    fis = hdfs.open(file1snap1);
+    fileStatus = hdfs.getFileStatus(file1snap1);
+    assertEquals(fileStatus.getLen(), BLOCKSIZE);
+    // Make sure we can only read up to the snapshot length.
+    bytesRead = fis.read(buffer, 0, buffer.length);
+    assertEquals(bytesRead, BLOCKSIZE);
+    fis.close();
+
+    PrintStream psBackup = System.out;
+    ByteArrayOutputStream bao = new ByteArrayOutputStream();
+    System.setOut(new PrintStream(bao));
+    System.setErr(new PrintStream(bao));
+    // Make sure we can only cat the file up to the snapshot length.
+    FsShell shell = new FsShell();
+    try {
+      ToolRunner.run(conf, shell, new String[] { "-cat",
+          "/TestSnapshotFileLength/sub1/.snapshot/snapshot1/file1" });
+      assertEquals(bao.size(), BLOCKSIZE);
+    } finally {
+      System.setOut(psBackup);
+    }
+  }
 }

From edb6dc5f303093c2604cd07b0c0dacf12dbce5de Mon Sep 17 00:00:00 2001
From: Todd Lipcon
Date: Sat, 25 Jan 2014 20:01:26 +0000
Subject: [PATCH 07/11] HDFS-5138. Support HDFS upgrade in HA. Contributed by
 Aaron T. Myers.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1561381 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 + .../dev-support/findbugsExcludeFile.xml | 5 + .../bkjournal/BookKeeperJournalManager.java | 33 + .../TestBookKeeperAsHASharedDir.java | 2 +- .../java/org/apache/hadoop/hdfs/DFSUtil.java | 45 +- .../java/org/apache/hadoop/hdfs/HAUtil.java | 58 ++ .../hdfs/qjournal/client/AsyncLogger.java | 14 + .../hdfs/qjournal/client/AsyncLoggerSet.java | 68 ++ .../qjournal/client/IPCLoggerChannel.java | 68 ++ .../qjournal/client/QuorumJournalManager.java | 140 +++- .../qjournal/protocol/QJournalProtocol.java | 14 + ...JournalProtocolServerSideTranslatorPB.java | 99 ++- .../QJournalProtocolTranslatorPB.java | 93 ++- .../server/GetJournalEditServlet.java | 29 +- .../hdfs/qjournal/server/JNStorage.java | 11 +- .../hadoop/hdfs/qjournal/server/Journal.java | 70 +- .../hdfs/qjournal/server/JournalNode.java | 28 + .../qjournal/server/JournalNodeRpcServer.java | 32 + .../hadoop/hdfs/server/common/Storage.java | 209 ++--- .../hdfs/server/common/StorageInfo.java | 119 +++ .../datanode/BlockPoolSliceStorage.java | 2 +- .../server/namenode/BackupJournalManager.java | 33 + .../hdfs/server/namenode/BackupNode.java | 6 +- .../hdfs/server/namenode/FSEditLog.java | 60 +- .../hadoop/hdfs/server/namenode/FSImage.java | 209 +++-- .../hdfs/server/namenode/FSNamesystem.java | 23 +- .../server/namenode/FileJournalManager.java | 48 +- .../hdfs/server/namenode/JournalManager.java | 51 ++ .../hdfs/server/namenode/JournalSet.java | 59 +- .../hdfs/server/namenode/NNStorage.java | 6 +- .../hdfs/server/namenode/NNUpgradeUtil.java | 174 +++++ .../hadoop/hdfs/server/namenode/NameNode.java | 51 +- .../server/namenode/ha/BootstrapStandby.java | 3 +- .../apache/hadoop/hdfs/tools/DFSAdmin.java | 21 +- .../src/main/proto/QJournalProtocol.proto | 78 ++ .../apt/HDFSHighAvailabilityWithQJM.apt.vm | 46 ++ .../apache/hadoop/hdfs/MiniDFSCluster.java | 66 +- .../apache/hadoop/hdfs/TestDFSRollback.java | 45 +- .../org/apache/hadoop/hdfs/TestDFSUtil.java | 34 + .../hdfs/qjournal/MiniJournalCluster.java | 10 +- .../hdfs/qjournal/MiniQJMHACluster.java | 9 + .../namenode/TestGenericJournalConf.java | 25 + .../namenode/ha/TestBootstrapStandby.java | 2 +- .../namenode/ha/TestDFSUpgradeWithHA.java | 725 ++++++++++++++++-- .../ha/TestInitializeSharedEdits.java | 4 +- 45 files changed, 2507 insertions(+), 422 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNUpgradeUtil.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index aea37d4ef55..09eaf6bd6e3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -148,6 +148,8 @@ Trunk (Unreleased) HDFS-5721. sharedEditsImage in Namenode#initializeSharedEdits() should be closed before method returns. (Ted Yu via junping_du) + HDFS-5138. Support HDFS upgrade in HA. 
(atm via todd) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml index f97110705ac..028e64cad94 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml @@ -361,5 +361,10 @@ + + + + + diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java index 5dc12811fe7..91ccc54832b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java @@ -18,6 +18,8 @@ package org.apache.hadoop.contrib.bkjournal; import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.Storage; +import org.apache.hadoop.hdfs.server.common.StorageInfo; import org.apache.hadoop.hdfs.server.namenode.JournalManager; import org.apache.hadoop.hdfs.server.namenode.EditLogOutputStream; import org.apache.hadoop.hdfs.server.namenode.EditLogInputStream; @@ -659,6 +661,37 @@ public void purgeLogsOlderThan(long minTxIdToKeep) } } + @Override + public void doPreUpgrade() throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public void doUpgrade(Storage storage) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public long getJournalCTime() throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public void doFinalize() throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public boolean canRollBack(StorageInfo storage, StorageInfo prevStorage, + int targetLayoutVersion) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public void doRollback() throws IOException { + throw new UnsupportedOperationException(); + } + @Override public void close() throws IOException { try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperAsHASharedDir.java b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperAsHASharedDir.java index 0a14e785758..5611bb88a26 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperAsHASharedDir.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperAsHASharedDir.java @@ -316,7 +316,7 @@ private void assertCanNotStartNamenode(MiniDFSCluster cluster, int nnIndex) { } catch (IOException ioe) { LOG.info("Got expected exception", ioe); GenericTestUtils.assertExceptionContains( - "Cannot start an HA namenode with name dirs that need recovery", ioe); + "storage directory does not exist or is not accessible", ioe); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java index 1d0421e397b..abddf1fe9c1 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java @@ -43,6 +43,7 @@ import java.security.SecureRandom; import java.text.SimpleDateFormat; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.Comparator; @@ -574,10 +575,24 @@ public static String addKeySuffixes(String key, String... suffixes) { } return ret; } + + /** + * Get all of the RPC addresses of the individual NNs in a given nameservice. + * + * @param conf Configuration + * @param nsId the nameservice whose NNs addresses we want. + * @param defaultValue default address to return in case key is not found. + * @return A map from nnId -> RPC address of each NN in the nameservice. + */ + public static Map getRpcAddressesForNameserviceId( + Configuration conf, String nsId, String defaultValue) { + return getAddressesForNameserviceId(conf, nsId, defaultValue, + DFS_NAMENODE_RPC_ADDRESS_KEY); + } private static Map getAddressesForNameserviceId( Configuration conf, String nsId, String defaultValue, - String[] keys) { + String... keys) { Collection nnIds = getNameNodeIds(conf, nsId); Map ret = Maps.newHashMap(); for (String nnId : emptyAsSingletonNull(nnIds)) { @@ -1670,4 +1685,32 @@ public static HttpServer.Builder httpServerTemplateForNNAndJN( } return builder; } + + /** + * Assert that all objects in the collection are equal. Returns silently if + * so, throws an AssertionError if any object is not equal. All null values + * are considered equal. + * + * @param objects the collection of objects to check for equality. + */ + public static void assertAllResultsEqual(Collection objects) { + Object[] resultsArray = objects.toArray(); + + if (resultsArray.length == 0) + return; + + for (int i = 0; i < resultsArray.length; i++) { + if (i == 0) + continue; + else { + Object currElement = resultsArray[i]; + Object lastElement = resultsArray[i - 1]; + if ((currElement == null && currElement != lastElement) || + (currElement != null && !currElement.equals(lastElement))) { + throw new AssertionError("Not all elements match in results: " + + Arrays.toString(resultsArray)); + } + } + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java index 7d53fb991d8..47ea821eeb7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java @@ -26,22 +26,29 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.UnresolvedLinkException; +import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.io.Text; import org.apache.hadoop.ipc.RPC; +import org.apache.hadoop.ipc.RemoteException; +import org.apache.hadoop.ipc.StandbyException; +import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; 
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.List;
 import java.util.Map;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
@@ -305,4 +312,55 @@ public static InetSocketAddress getAddressOfActive(FileSystem fs)
     DFSClient dfsClient = dfs.getClient();
     return RPC.getServerAddress(dfsClient.getNamenode());
   }
+
+  /**
+   * Get an RPC proxy for each NN in an HA nameservice. Used when a given RPC
+   * call should be made on every NN in an HA nameservice, not just the active.
+   *
+   * @param conf configuration
+   * @param nsId the nameservice to get all of the proxies for.
+   * @return a list of RPC proxies for each NN in the nameservice.
+   * @throws IOException in the event of error.
+   */
+  public static List<ClientProtocol> getProxiesForAllNameNodesInNameservice(
+      Configuration conf, String nsId) throws IOException {
+    Map<String, InetSocketAddress> nnAddresses =
+        DFSUtil.getRpcAddressesForNameserviceId(conf, nsId, null);
+
+    List<ClientProtocol> namenodes = new ArrayList<ClientProtocol>();
+    for (InetSocketAddress nnAddress : nnAddresses.values()) {
+      NameNodeProxies.ProxyAndInfo<ClientProtocol> proxyInfo = null;
+      proxyInfo = NameNodeProxies.createNonHAProxy(conf,
+          nnAddress, ClientProtocol.class,
+          UserGroupInformation.getCurrentUser(), false);
+      namenodes.add(proxyInfo.getProxy());
+    }
+    return namenodes;
+  }
+
+  /**
+   * Used to ensure that at least one of the given HA NNs is currently in the
+   * active state.
+   *
+   * @param namenodes list of RPC proxies for each NN to check.
+   * @return true if at least one NN is active, false if all are in the standby state.
+   * @throws IOException in the event of error.
+   */
+  public static boolean isAtLeastOneActive(List<ClientProtocol> namenodes)
+      throws IOException {
+    for (ClientProtocol namenode : namenodes) {
+      try {
+        namenode.getFileInfo("/");
+        return true;
+      } catch (RemoteException re) {
+        IOException cause = re.unwrapRemoteException();
+        if (cause instanceof StandbyException) {
+          // This is expected to happen for a standby NN.
+        } else {
+          throw re;
+        }
+      }
+    }
+    return false;
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLogger.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLogger.java
index 2f6baa3e4c8..a3a63871b0d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLogger.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLogger.java
@@ -27,6 +27,7 @@
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto;
 import org.apache.hadoop.hdfs.qjournal.protocol.RequestInfo;
+import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
 
@@ -151,4 +152,17 @@ public ListenableFuture<Void> acceptRecovery(SegmentStateProto log,
    * StringBuilder. This is displayed on the NN web UI.
*/ public void appendReport(StringBuilder sb); + + public ListenableFuture doPreUpgrade(); + + public ListenableFuture doUpgrade(StorageInfo sInfo); + + public ListenableFuture doFinalize(); + + public ListenableFuture canRollBack(StorageInfo storage, + StorageInfo prevStorage, int targetLayoutVersion); + + public ListenableFuture doRollback(); + + public ListenableFuture getJournalCTime(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java index d8918584d68..66a03c988ac 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto; +import org.apache.hadoop.hdfs.server.common.StorageInfo; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest; @@ -308,4 +309,71 @@ QuorumCall format(NamespaceInfo nsInfo) { } return QuorumCall.create(calls); } + + QuorumCall doPreUpgrade() { + Map> calls = + Maps.newHashMap(); + for (AsyncLogger logger : loggers) { + ListenableFuture future = + logger.doPreUpgrade(); + calls.put(logger, future); + } + return QuorumCall.create(calls); + } + + public QuorumCall doUpgrade(StorageInfo sInfo) { + Map> calls = + Maps.newHashMap(); + for (AsyncLogger logger : loggers) { + ListenableFuture future = + logger.doUpgrade(sInfo); + calls.put(logger, future); + } + return QuorumCall.create(calls); + } + + public QuorumCall doFinalize() { + Map> calls = + Maps.newHashMap(); + for (AsyncLogger logger : loggers) { + ListenableFuture future = + logger.doFinalize(); + calls.put(logger, future); + } + return QuorumCall.create(calls); + } + + public QuorumCall canRollBack(StorageInfo storage, + StorageInfo prevStorage, int targetLayoutVersion) { + Map> calls = + Maps.newHashMap(); + for (AsyncLogger logger : loggers) { + ListenableFuture future = + logger.canRollBack(storage, prevStorage, targetLayoutVersion); + calls.put(logger, future); + } + return QuorumCall.create(calls); + } + + public QuorumCall doRollback() { + Map> calls = + Maps.newHashMap(); + for (AsyncLogger logger : loggers) { + ListenableFuture future = + logger.doRollback(); + calls.put(logger, future); + } + return QuorumCall.create(calls); + } + + public QuorumCall getJournalCTime() { + Map> calls = + Maps.newHashMap(); + for (AsyncLogger logger : loggers) { + ListenableFuture future = logger.getJournalCTime(); + calls.put(logger, future); + } + return QuorumCall.create(calls); + } + } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java index 3731f5a7af7..2f1bff12750 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java @@ -46,6 +46,7 @@ import 
org.apache.hadoop.hdfs.qjournal.protocolPB.QJournalProtocolPB; import org.apache.hadoop.hdfs.qjournal.protocolPB.QJournalProtocolTranslatorPB; import org.apache.hadoop.hdfs.qjournal.server.GetJournalEditServlet; +import org.apache.hadoop.hdfs.server.common.StorageInfo; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest; import org.apache.hadoop.ipc.ProtobufRpcEngine; @@ -564,6 +565,72 @@ public Void call() throws IOException { } }); } + + @Override + public ListenableFuture doPreUpgrade() { + return executor.submit(new Callable() { + @Override + public Void call() throws IOException { + getProxy().doPreUpgrade(journalId); + return null; + } + }); + } + + @Override + public ListenableFuture doUpgrade(final StorageInfo sInfo) { + return executor.submit(new Callable() { + @Override + public Void call() throws IOException { + getProxy().doUpgrade(journalId, sInfo); + return null; + } + }); + } + + @Override + public ListenableFuture doFinalize() { + return executor.submit(new Callable() { + @Override + public Void call() throws IOException { + getProxy().doFinalize(journalId); + return null; + } + }); + } + + @Override + public ListenableFuture canRollBack(final StorageInfo storage, + final StorageInfo prevStorage, final int targetLayoutVersion) { + return executor.submit(new Callable() { + @Override + public Boolean call() throws IOException { + return getProxy().canRollBack(journalId, storage, prevStorage, + targetLayoutVersion); + } + }); + } + + @Override + public ListenableFuture doRollback() { + return executor.submit(new Callable() { + @Override + public Void call() throws IOException { + getProxy().doRollback(journalId); + return null; + } + }); + } + + @Override + public ListenableFuture getJournalCTime() { + return executor.submit(new Callable() { + @Override + public Long call() throws IOException { + return getProxy().getJournalCTime(journalId); + } + }); + } @Override public String toString() { @@ -636,4 +703,5 @@ private URL getHttpServerURI(String scheme, int port) { private boolean hasHttpServerEndPoint() { return httpServerURL != null; } + } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java index 9f2cd56b867..befb876f95e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java @@ -34,10 +34,13 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto; +import org.apache.hadoop.hdfs.server.common.Storage; +import org.apache.hadoop.hdfs.server.common.StorageInfo; import org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream; import org.apache.hadoop.hdfs.server.namenode.EditLogInputStream; import 
org.apache.hadoop.hdfs.server.namenode.EditLogOutputStream; @@ -77,8 +80,14 @@ public class QuorumJournalManager implements JournalManager { // Since these don't occur during normal operation, we can // use rather lengthy timeouts, and don't need to make them // configurable. - private static final int FORMAT_TIMEOUT_MS = 60000; - private static final int HASDATA_TIMEOUT_MS = 60000; + private static final int FORMAT_TIMEOUT_MS = 60000; + private static final int HASDATA_TIMEOUT_MS = 60000; + private static final int CAN_ROLL_BACK_TIMEOUT_MS = 60000; + private static final int FINALIZE_TIMEOUT_MS = 60000; + private static final int PRE_UPGRADE_TIMEOUT_MS = 60000; + private static final int ROLL_BACK_TIMEOUT_MS = 60000; + private static final int UPGRADE_TIMEOUT_MS = 60000; + private static final int GET_JOURNAL_CTIME_TIMEOUT_MS = 60000; private final Configuration conf; private final URI uri; @@ -492,4 +501,131 @@ AsyncLoggerSet getLoggerSetForTests() { return loggers; } + @Override + public void doPreUpgrade() throws IOException { + QuorumCall call = loggers.doPreUpgrade(); + try { + call.waitFor(loggers.size(), loggers.size(), 0, PRE_UPGRADE_TIMEOUT_MS, + "doPreUpgrade"); + + if (call.countExceptions() > 0) { + call.rethrowException("Could not do pre-upgrade of one or more JournalNodes"); + } + } catch (InterruptedException e) { + throw new IOException("Interrupted waiting for doPreUpgrade() response"); + } catch (TimeoutException e) { + throw new IOException("Timed out waiting for doPreUpgrade() response"); + } + } + + @Override + public void doUpgrade(Storage storage) throws IOException { + QuorumCall call = loggers.doUpgrade(storage); + try { + call.waitFor(loggers.size(), loggers.size(), 0, UPGRADE_TIMEOUT_MS, + "doUpgrade"); + + if (call.countExceptions() > 0) { + call.rethrowException("Could not perform upgrade of one or more JournalNodes"); + } + } catch (InterruptedException e) { + throw new IOException("Interrupted waiting for doUpgrade() response"); + } catch (TimeoutException e) { + throw new IOException("Timed out waiting for doUpgrade() response"); + } + } + + @Override + public void doFinalize() throws IOException { + QuorumCall call = loggers.doFinalize(); + try { + call.waitFor(loggers.size(), loggers.size(), 0, FINALIZE_TIMEOUT_MS, + "doFinalize"); + + if (call.countExceptions() > 0) { + call.rethrowException("Could not finalize one or more JournalNodes"); + } + } catch (InterruptedException e) { + throw new IOException("Interrupted waiting for doFinalize() response"); + } catch (TimeoutException e) { + throw new IOException("Timed out waiting for doFinalize() response"); + } + } + + @Override + public boolean canRollBack(StorageInfo storage, StorageInfo prevStorage, + int targetLayoutVersion) throws IOException { + QuorumCall call = loggers.canRollBack(storage, + prevStorage, targetLayoutVersion); + try { + call.waitFor(loggers.size(), loggers.size(), 0, CAN_ROLL_BACK_TIMEOUT_MS, + "lockSharedStorage"); + + if (call.countExceptions() > 0) { + call.rethrowException("Could not check if roll back possible for" + + " one or more JournalNodes"); + } + + // Either they all return the same thing or this call fails, so we can + // just return the first result. 
+      DFSUtil.assertAllResultsEqual(call.getResults().values());
+      for (Boolean result : call.getResults().values()) {
+        return result;
+      }
+    } catch (InterruptedException e) {
+      throw new IOException("Interrupted waiting for canRollBack() " +
+          "response");
+    } catch (TimeoutException e) {
+      throw new IOException("Timed out waiting for canRollBack() " +
+          "response");
+    }
+
+    throw new AssertionError("Unreachable code.");
+  }
+
+  @Override
+  public void doRollback() throws IOException {
+    QuorumCall<AsyncLogger, Void> call = loggers.doRollback();
+    try {
+      call.waitFor(loggers.size(), loggers.size(), 0, ROLL_BACK_TIMEOUT_MS,
+          "doRollback");
+
+      if (call.countExceptions() > 0) {
+        call.rethrowException("Could not perform rollback of one or more JournalNodes");
+      }
+    } catch (InterruptedException e) {
+      throw new IOException("Interrupted waiting for doRollback() response");
+    } catch (TimeoutException e) {
+      throw new IOException("Timed out waiting for doRollback() response");
+    }
+  }
+
+  @Override
+  public long getJournalCTime() throws IOException {
+    QuorumCall<AsyncLogger, Long> call = loggers.getJournalCTime();
+    try {
+      call.waitFor(loggers.size(), loggers.size(), 0,
+          GET_JOURNAL_CTIME_TIMEOUT_MS, "getJournalCTime");
+
+      if (call.countExceptions() > 0) {
+        call.rethrowException("Could not get the journal CTime for one " +
+            "or more JournalNodes");
+      }
+
+      // Either they all return the same thing or this call fails, so we can
+      // just return the first result.
+      DFSUtil.assertAllResultsEqual(call.getResults().values());
+      for (Long result : call.getResults().values()) {
+        return result;
+      }
+    } catch (InterruptedException e) {
+      throw new IOException("Interrupted waiting for getJournalCTime() " +
+          "response");
+    } catch (TimeoutException e) {
+      throw new IOException("Timed out waiting for getJournalCTime() " +
+          "response");
+    }
+
+    throw new AssertionError("Unreachable code.");
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocol/QJournalProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocol/QJournalProtocol.java
index 41600acdc92..c7ab691b44c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocol/QJournalProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocol/QJournalProtocol.java
@@ -29,6 +29,7 @@
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto;
 import org.apache.hadoop.hdfs.qjournal.server.JournalNode;
+import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.namenode.JournalManager;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.security.KerberosInfo;
@@ -143,4 +144,17 @@
    */
   public PrepareRecoveryResponseProto prepareRecovery(RequestInfo reqInfo,
       SegmentStateProto stateToAccept, URL fromUrl) throws IOException;
+
+  public void doPreUpgrade(String journalId) throws IOException;
+
+  public void doUpgrade(String journalId, StorageInfo sInfo) throws IOException;
+
+  public void doFinalize(String journalId) throws IOException;
+
+  public Boolean canRollBack(String journalId, StorageInfo storage,
+      StorageInfo prevStorage, int targetLayoutVersion) throws IOException;
+
+  public void doRollback(String journalId) throws IOException;
+
+  public Long getJournalCTime(String 
journalId) throws IOException; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolServerSideTranslatorPB.java index 47d81005952..b118aba3e5c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolServerSideTranslatorPB.java @@ -17,17 +17,35 @@ */ package org.apache.hadoop.hdfs.qjournal.protocolPB; +import java.io.IOException; +import java.net.URL; + import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.protocolPB.JournalProtocolPB; import org.apache.hadoop.hdfs.protocolPB.PBHelper; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocol; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos; +import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto; +import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto; +import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto; +import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto; +import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto; +import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto; +import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto; +import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto; +import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto; +import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto; +import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto; +import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto; +import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto; +import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto; @@ -39,8 +57,6 @@ import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto; import 
org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto; -import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto; -import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto; @@ -48,13 +64,11 @@ import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.RequestInfo; +import org.apache.hadoop.hdfs.server.protocol.JournalProtocol; import com.google.protobuf.RpcController; import com.google.protobuf.ServiceException; -import java.io.IOException; -import java.net.URL; - /** * Implementation for protobuf service that forwards requests * received on {@link JournalProtocolPB} to the @@ -244,4 +258,79 @@ private RequestInfo convert( reqInfo.hasCommittedTxId() ? reqInfo.getCommittedTxId() : HdfsConstants.INVALID_TXID); } + + + @Override + public DoPreUpgradeResponseProto doPreUpgrade(RpcController controller, + DoPreUpgradeRequestProto request) throws ServiceException { + try { + impl.doPreUpgrade(convert(request.getJid())); + return DoPreUpgradeResponseProto.getDefaultInstance(); + } catch (IOException e) { + throw new ServiceException(e); + } + } + + @Override + public DoUpgradeResponseProto doUpgrade(RpcController controller, + DoUpgradeRequestProto request) throws ServiceException { + try { + impl.doUpgrade(convert(request.getJid()), + PBHelper.convert(request.getSInfo())); + return DoUpgradeResponseProto.getDefaultInstance(); + } catch (IOException e) { + throw new ServiceException(e); + } + } + + @Override + public DoFinalizeResponseProto doFinalize(RpcController controller, + DoFinalizeRequestProto request) throws ServiceException { + try { + impl.doFinalize(convert(request.getJid())); + return DoFinalizeResponseProto.getDefaultInstance(); + } catch (IOException e) { + throw new ServiceException(e); + } + } + + @Override + public CanRollBackResponseProto canRollBack(RpcController controller, + CanRollBackRequestProto request) throws ServiceException { + try { + Boolean result = impl.canRollBack(convert(request.getJid()), + PBHelper.convert(request.getStorage()), + PBHelper.convert(request.getPrevStorage()), + request.getTargetLayoutVersion()); + return CanRollBackResponseProto.newBuilder() + .setCanRollBack(result) + .build(); + } catch (IOException e) { + throw new ServiceException(e); + } + } + + @Override + public DoRollbackResponseProto doRollback(RpcController controller, DoRollbackRequestProto request) + throws ServiceException { + try { + impl.doRollback(convert(request.getJid())); + return DoRollbackResponseProto.getDefaultInstance(); + } catch (IOException e) { + throw new ServiceException(e); + } + } + + @Override + public GetJournalCTimeResponseProto getJournalCTime(RpcController controller, + GetJournalCTimeRequestProto request) throws ServiceException { + try { + Long resultCTime = impl.getJournalCTime(convert(request.getJid())); + return GetJournalCTimeResponseProto.newBuilder() + 
.setResultCTime(resultCTime) + .build(); + } catch (IOException e) { + throw new ServiceException(e); + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolTranslatorPB.java index f111933842e..61274785ad9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolTranslatorPB.java @@ -23,13 +23,23 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hdfs.protocolPB.JournalProtocolPB; import org.apache.hadoop.hdfs.protocolPB.PBHelper; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocol; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos; +import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto; +import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto; +import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto; +import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto; +import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto; +import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto; +import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto; +import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto; +import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto; @@ -39,7 +49,6 @@ import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto; -import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto; @@ -47,6 +56,7 @@ import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto; import 
org.apache.hadoop.hdfs.qjournal.protocol.RequestInfo; +import org.apache.hadoop.hdfs.server.common.StorageInfo; import org.apache.hadoop.hdfs.server.protocol.JournalProtocol; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.ipc.ProtobufHelper; @@ -277,4 +287,85 @@ public boolean isMethodSupported(String methodName) throws IOException { RPC.getProtocolVersion(QJournalProtocolPB.class), methodName); } + @Override + public void doPreUpgrade(String jid) throws IOException { + try { + rpcProxy.doPreUpgrade(NULL_CONTROLLER, + DoPreUpgradeRequestProto.newBuilder() + .setJid(convertJournalId(jid)) + .build()); + } catch (ServiceException e) { + throw ProtobufHelper.getRemoteException(e); + } + } + + @Override + public void doUpgrade(String journalId, StorageInfo sInfo) throws IOException { + try { + rpcProxy.doUpgrade(NULL_CONTROLLER, + DoUpgradeRequestProto.newBuilder() + .setJid(convertJournalId(journalId)) + .setSInfo(PBHelper.convert(sInfo)) + .build()); + } catch (ServiceException e) { + throw ProtobufHelper.getRemoteException(e); + } + } + + @Override + public void doFinalize(String jid) throws IOException { + try { + rpcProxy.doFinalize(NULL_CONTROLLER, + DoFinalizeRequestProto.newBuilder() + .setJid(convertJournalId(jid)) + .build()); + } catch (ServiceException e) { + throw ProtobufHelper.getRemoteException(e); + } + } + + @Override + public Boolean canRollBack(String journalId, StorageInfo storage, + StorageInfo prevStorage, int targetLayoutVersion) throws IOException { + try { + CanRollBackResponseProto response = rpcProxy.canRollBack( + NULL_CONTROLLER, + CanRollBackRequestProto.newBuilder() + .setJid(convertJournalId(journalId)) + .setStorage(PBHelper.convert(storage)) + .setPrevStorage(PBHelper.convert(prevStorage)) + .setTargetLayoutVersion(targetLayoutVersion) + .build()); + return response.getCanRollBack(); + } catch (ServiceException e) { + throw ProtobufHelper.getRemoteException(e); + } + } + + @Override + public void doRollback(String journalId) throws IOException { + try { + rpcProxy.doRollback(NULL_CONTROLLER, + DoRollbackRequestProto.newBuilder() + .setJid(convertJournalId(journalId)) + .build()); + } catch (ServiceException e) { + throw ProtobufHelper.getRemoteException(e); + } + } + + @Override + public Long getJournalCTime(String journalId) throws IOException { + try { + GetJournalCTimeResponseProto response = rpcProxy.getJournalCTime( + NULL_CONTROLLER, + GetJournalCTimeRequestProto.newBuilder() + .setJid(convertJournalId(journalId)) + .build()); + return response.getResultCTime(); + } catch (ServiceException e) { + throw ProtobufHelper.getRemoteException(e); + } + } + } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/GetJournalEditServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/GetJournalEditServlet.java index 36135cba648..25716706932 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/GetJournalEditServlet.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/GetJournalEditServlet.java @@ -40,6 +40,7 @@ import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.qjournal.client.QuorumJournalManager; import org.apache.hadoop.hdfs.server.common.JspHelper; +import org.apache.hadoop.hdfs.server.common.StorageInfo; import org.apache.hadoop.hdfs.server.namenode.FileJournalManager; import 
org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile; import org.apache.hadoop.hdfs.server.namenode.GetImageServlet; @@ -139,20 +140,26 @@ private boolean checkRequestorOrSendError(Configuration conf, private boolean checkStorageInfoOrSendError(JNStorage storage, HttpServletRequest request, HttpServletResponse response) throws IOException { - String myStorageInfoString = storage.toColonSeparatedString(); + int myNsId = storage.getNamespaceID(); + String myClusterId = storage.getClusterID(); + String theirStorageInfoString = StringEscapeUtils.escapeHtml( request.getParameter(STORAGEINFO_PARAM)); - if (theirStorageInfoString != null - && !myStorageInfoString.equals(theirStorageInfoString)) { - String msg = "This node has storage info '" + myStorageInfoString - + "' but the requesting node expected '" - + theirStorageInfoString + "'"; - - response.sendError(HttpServletResponse.SC_FORBIDDEN, msg); - LOG.warn("Received an invalid request file transfer request from " + - request.getRemoteAddr() + ": " + msg); - return false; + if (theirStorageInfoString != null) { + int theirNsId = StorageInfo.getNsIdFromColonSeparatedString( + theirStorageInfoString); + String theirClusterId = StorageInfo.getClusterIdFromColonSeparatedString( + theirStorageInfoString); + if (myNsId != theirNsId || !myClusterId.equals(theirClusterId)) { + String msg = "This node has namespaceId '" + myNsId + "' and clusterId '" + myClusterId + "' but the requesting node expected '" + theirNsId + "' and '" + theirClusterId + "'"; + response.sendError(HttpServletResponse.SC_FORBIDDEN, msg); + LOG.warn("Received an invalid file transfer request from " + + request.getRemoteAddr() + ": " + msg); + return false; + } } return true; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java index 347ac53a1d8..e972fe03af1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java @@ -130,6 +130,10 @@ File getPaxosDir() { return new File(sd.getCurrentDir(), "paxos"); } + File getRoot() { + return sd.getRoot(); + } + /** * Remove any log files and associated paxos files which are older than * the given txid. 
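The relaxed check in GetJournalEditServlet above deliberately compares only the namespaceID and clusterID fields of the colon-separated storage-info string, so two nodes whose layoutVersion or cTime temporarily disagree (as they legitimately do mid-upgrade) can still exchange edit logs. A minimal, self-contained sketch of that comparison, using made-up IDs and the field order produced by StorageInfo#toColonSeparatedString (layoutVersion:namespaceID:cTime:clusterID):

public class StorageInfoCheckSketch {
  public static void main(String[] args) {
    // Hypothetical local values; the real ones come from JNStorage.
    int myNsId = 12345;
    String myClusterId = "CID-example";
    // Hypothetical request parameter; note the layout version (-47) differs.
    String theirStorageInfoString = "-47:12345:0:CID-example";

    String[] parts = theirStorageInfoString.split(":");
    int theirNsId = Integer.parseInt(parts[1]);   // as in getNsIdFromColonSeparatedString
    String theirClusterId = parts[3];             // as in getClusterIdFromColonSeparatedString

    if (myNsId != theirNsId || !myClusterId.equals(theirClusterId)) {
      System.out.println("403 Forbidden: namespace/cluster mismatch");
    } else {
      System.out.println("Request accepted despite differing layout versions");
    }
  }
}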
@@ -182,12 +186,15 @@ void format(NamespaceInfo nsInfo) throws IOException { unlockAll(); sd.clearDirectory(); writeProperties(sd); + createPaxosDir(); + analyzeStorage(); + } + + void createPaxosDir() throws IOException { if (!getPaxosDir().mkdirs()) { throw new IOException("Could not create paxos dir: " + getPaxosDir()); } - analyzeStorage(); } - void analyzeStorage() throws IOException { this.state = sd.analyzeStorage(StartupOption.REGULAR, this); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java index cf83cef92c4..e1bb69d72cc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java @@ -37,12 +37,14 @@ import org.apache.hadoop.hdfs.qjournal.protocol.JournalNotFormattedException; import org.apache.hadoop.hdfs.qjournal.protocol.JournalOutOfSyncException; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocol; +import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto; import org.apache.hadoop.hdfs.qjournal.protocol.RequestInfo; import org.apache.hadoop.hdfs.server.common.StorageErrorReporter; +import org.apache.hadoop.hdfs.server.common.StorageInfo; import org.apache.hadoop.hdfs.server.namenode.EditLogOutputStream; import org.apache.hadoop.hdfs.server.namenode.FileJournalManager; import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile; @@ -73,7 +75,7 @@ * Each such journal is entirely independent despite being hosted by * the same JVM. 
*/ -class Journal implements Closeable { +public class Journal implements Closeable { static final Log LOG = LogFactory.getLog(Journal.class); @@ -122,8 +124,8 @@ class Journal implements Closeable { */ private BestEffortLongFile committedTxnId; - private static final String LAST_PROMISED_FILENAME = "last-promised-epoch"; - private static final String LAST_WRITER_EPOCH = "last-writer-epoch"; + public static final String LAST_PROMISED_FILENAME = "last-promised-epoch"; + public static final String LAST_WRITER_EPOCH = "last-writer-epoch"; private static final String COMMITTED_TXID_FILENAME = "committed-txid"; private final FileJournalManager fjm; @@ -627,7 +629,7 @@ private void purgePaxosDecision(long segmentTxId) throws IOException { } /** - * @see QJournalProtocol#getEditLogManifest(String, long) + * @see QJournalProtocol#getEditLogManifest(String, long, boolean) */ public RemoteEditLogManifest getEditLogManifest(long sinceTxId, boolean inProgressOk) throws IOException { @@ -728,7 +730,7 @@ public synchronized PrepareRecoveryResponseProto prepareRecovery( } /** - * @see QJournalProtocol#acceptRecovery(RequestInfo, SegmentStateProto, URL) + * @see QJournalProtocol#acceptRecovery(RequestInfo, QJournalProtocolProtos.SegmentStateProto, URL) */ public synchronized void acceptRecovery(RequestInfo reqInfo, SegmentStateProto segment, URL fromUrl) @@ -980,4 +982,62 @@ private void persistPaxosData(long segmentTxId, } } } + + public synchronized void doPreUpgrade() throws IOException { + storage.getJournalManager().doPreUpgrade(); + } + + public synchronized void doUpgrade(StorageInfo sInfo) throws IOException { + long oldCTime = storage.getCTime(); + storage.cTime = sInfo.cTime; + int oldLV = storage.getLayoutVersion(); + storage.layoutVersion = sInfo.layoutVersion; + LOG.info("Starting upgrade of edits directory " + storage.getRoot() + ".\n old LV = " + oldLV + "; old CTime = " + oldCTime + ".\n new LV = " + storage.getLayoutVersion() + "; new CTime = " + storage.getCTime()); + storage.getJournalManager().doUpgrade(storage); + storage.createPaxosDir(); + + // Copy over the contents of the epoch data files to the new dir. + File currentDir = storage.getSingularStorageDir().getCurrentDir(); + File previousDir = storage.getSingularStorageDir().getPreviousDir(); + + PersistentLongFile prevLastPromisedEpoch = new PersistentLongFile( + new File(previousDir, LAST_PROMISED_FILENAME), 0); + PersistentLongFile prevLastWriterEpoch = new PersistentLongFile( + new File(previousDir, LAST_WRITER_EPOCH), 0); + + lastPromisedEpoch = new PersistentLongFile( + new File(currentDir, LAST_PROMISED_FILENAME), 0); + lastWriterEpoch = new PersistentLongFile( + new File(currentDir, LAST_WRITER_EPOCH), 0); + + lastPromisedEpoch.set(prevLastPromisedEpoch.get()); + lastWriterEpoch.set(prevLastWriterEpoch.get()); + } + + public synchronized void doFinalize() throws IOException { + LOG.info("Finalizing upgrade for journal " + storage.getRoot() + "." + (storage.getLayoutVersion()==0 ? 
"" : + "\n cur LV = " + storage.getLayoutVersion() + + "; cur CTime = " + storage.getCTime())); + storage.getJournalManager().doFinalize(); + } + + public Boolean canRollBack(StorageInfo storage, StorageInfo prevStorage, + int targetLayoutVersion) throws IOException { + return this.storage.getJournalManager().canRollBack(storage, prevStorage, + targetLayoutVersion); + } + + public void doRollback() throws IOException { + storage.getJournalManager().doRollback(); + } + + public Long getJournalCTime() throws IOException { + return storage.getJournalManager().getJournalCTime(); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java index c43edb9d84c..9f1665b4d16 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java @@ -35,6 +35,7 @@ import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.qjournal.client.QuorumJournalManager; import org.apache.hadoop.hdfs.server.common.StorageErrorReporter; +import org.apache.hadoop.hdfs.server.common.StorageInfo; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.metrics2.source.JvmMetrics; @@ -285,4 +286,31 @@ public static void main(String[] args) throws Exception { StringUtils.startupShutdownMessage(JournalNode.class, args, LOG); System.exit(ToolRunner.run(new JournalNode(), args)); } + + public void doPreUpgrade(String journalId) throws IOException { + getOrCreateJournal(journalId).doPreUpgrade(); + } + + public void doUpgrade(String journalId, StorageInfo sInfo) throws IOException { + getOrCreateJournal(journalId).doUpgrade(sInfo); + } + + public void doFinalize(String journalId) throws IOException { + getOrCreateJournal(journalId).doFinalize(); + } + + public Boolean canRollBack(String journalId, StorageInfo storage, + StorageInfo prevStorage, int targetLayoutVersion) throws IOException { + return getOrCreateJournal(journalId).canRollBack(storage, prevStorage, + targetLayoutVersion); + } + + public void doRollback(String journalId) throws IOException { + getOrCreateJournal(journalId).doRollback(); + } + + public Long getJournalCTime(String journalId) throws IOException { + return getOrCreateJournal(journalId).getJournalCTime(); + } + } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java index 5749cc17e47..2dbda7e223d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java @@ -37,6 +37,7 @@ import org.apache.hadoop.hdfs.qjournal.protocol.RequestInfo; import org.apache.hadoop.hdfs.qjournal.protocolPB.QJournalProtocolPB; import org.apache.hadoop.hdfs.qjournal.protocolPB.QJournalProtocolServerSideTranslatorPB; +import org.apache.hadoop.hdfs.server.common.StorageInfo; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest; import org.apache.hadoop.ipc.ProtobufRpcEngine; @@ -205,4 +206,35 @@ public 
void acceptRecovery(RequestInfo reqInfo, SegmentStateProto log, .acceptRecovery(reqInfo, log, fromUrl); } + @Override + public void doPreUpgrade(String journalId) throws IOException { + jn.doPreUpgrade(journalId); + } + + @Override + public void doUpgrade(String journalId, StorageInfo sInfo) throws IOException { + jn.doUpgrade(journalId, sInfo); + } + + @Override + public void doFinalize(String journalId) throws IOException { + jn.doFinalize(journalId); + } + + @Override + public Boolean canRollBack(String journalId, StorageInfo storage, + StorageInfo prevStorage, int targetLayoutVersion) + throws IOException { + return jn.canRollBack(journalId, storage, prevStorage, targetLayoutVersion); + } + + @Override + public void doRollback(String journalId) throws IOException { + jn.doRollback(journalId); + } + + @Override + public Long getJournalCTime(String journalId) throws IOException { + return jn.getJournalCTime(journalId); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java index 3d661f1b3a6..f6189b8fe9e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hdfs.server.common; import java.io.File; -import java.io.FileInputStream; import java.io.FileOutputStream; import java.io.IOException; import java.io.RandomAccessFile; @@ -26,26 +25,23 @@ import java.nio.channels.FileLock; import java.nio.channels.OverlappingFileLockException; import java.util.ArrayList; -import java.util.List; import java.util.Iterator; +import java.util.List; import java.util.Properties; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.protocol.HdfsConstants; -import org.apache.hadoop.hdfs.protocol.LayoutVersion; -import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; -import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.util.ToolRunner; import org.apache.hadoop.util.VersionInfo; -import com.google.common.base.Preconditions; - import com.google.common.base.Charsets; +import com.google.common.base.Preconditions; @@ -82,7 +78,6 @@ public abstract class Storage extends StorageInfo { public static final int[] LAYOUT_VERSIONS_203 = {-19, -31}; public static final String STORAGE_FILE_LOCK = "in_use.lock"; - protected static final String STORAGE_FILE_VERSION = "VERSION"; public static final String STORAGE_DIR_CURRENT = "current"; public static final String STORAGE_DIR_PREVIOUS = "previous"; public static final String STORAGE_TMP_REMOVED = "removed.tmp"; @@ -126,22 +121,24 @@ public interface StorageDirType { private class DirIterator implements Iterator { StorageDirType dirType; + boolean includeShared; int prevIndex; // for remove() int nextIndex; // for next() - DirIterator(StorageDirType dirType) { + DirIterator(StorageDirType dirType, boolean includeShared) { this.dirType = dirType; this.nextIndex = 0; this.prevIndex = 0; + this.includeShared = includeShared; } @Override public boolean hasNext() { if 
(storageDirs.isEmpty() || nextIndex >= storageDirs.size()) return false; - if (dirType != null) { + if (dirType != null || !includeShared) { while (nextIndex < storageDirs.size()) { - if (getStorageDir(nextIndex).getStorageDirType().isOfType(dirType)) + if (shouldReturnNextDir()) break; nextIndex++; } @@ -156,9 +153,9 @@ public StorageDirectory next() { StorageDirectory sd = getStorageDir(nextIndex); prevIndex = nextIndex; nextIndex++; - if (dirType != null) { + if (dirType != null || !includeShared) { while (nextIndex < storageDirs.size()) { - if (getStorageDir(nextIndex).getStorageDirType().isOfType(dirType)) + if (shouldReturnNextDir()) break; nextIndex++; } @@ -172,6 +169,12 @@ public void remove() { storageDirs.remove(prevIndex); // remove last returned element hasNext(); // reset nextIndex to correct place } + + private boolean shouldReturnNextDir() { + StorageDirectory sd = getStorageDir(nextIndex); + return (dirType == null || sd.getStorageDirType().isOfType(dirType)) && + (includeShared || !sd.isShared()); + } } /** @@ -203,7 +206,27 @@ public Iterator dirIterator() { * them via the Iterator */ public Iterator dirIterator(StorageDirType dirType) { - return new DirIterator(dirType); + return dirIterator(dirType, true); + } + + /** + * Return all entries in storageDirs, potentially excluding shared dirs. + * @param includeShared whether or not to include shared dirs. + * @return an iterator over the configured storage dirs. + */ + public Iterator dirIterator(boolean includeShared) { + return dirIterator(null, includeShared); + } + + /** + * @param dirType all entries will be of this type of dir + * @param includeShared true to include any shared directories, + * false otherwise + * @return an iterator over the configured storage dirs. + */ + public Iterator dirIterator(StorageDirType dirType, + boolean includeShared) { + return new DirIterator(dirType, includeShared); } public Iterable dirIterable(final StorageDirType dirType) { @@ -233,7 +256,9 @@ public String listStorageDirectories() { @InterfaceAudience.Private public static class StorageDirectory implements FormatConfirmable { final File root; // root directory - final boolean useLock; // flag to enable storage lock + // whether or not this dir is shared between two separate NNs for HA, or + // between multiple block pools in the case of federation. + final boolean isShared; final StorageDirType dirType; // storage dir type FileLock lock; // storage lock @@ -241,11 +266,11 @@ public static class StorageDirectory implements FormatConfirmable { public StorageDirectory(File dir) { // default dirType is null - this(dir, null, true); + this(dir, null, false); } public StorageDirectory(File dir, StorageDirType dirType) { - this(dir, dirType, true); + this(dir, dirType, false); } public void setStorageUuid(String storageUuid) { @@ -260,14 +285,14 @@ public String getStorageUuid() { * Constructor * @param dir directory corresponding to the storage * @param dirType storage directory type - * @param useLock true - enables locking on the storage directory and false - * disables locking + * @param isShared whether or not this dir is shared between two NNs. 
true + * disables locking on the storage directory, false enables locking */ - public StorageDirectory(File dir, StorageDirType dirType, boolean useLock) { + public StorageDirectory(File dir, StorageDirType dirType, boolean isShared) { this.root = dir; this.lock = null; this.dirType = dirType; - this.useLock = useLock; + this.isShared = isShared; } /** @@ -621,6 +646,10 @@ public boolean hasSomeData() throws IOException { return true; } + + public boolean isShared() { + return isShared; + } /** @@ -635,7 +664,7 @@ public boolean hasSomeData() throws IOException { * @throws IOException if locking fails */ public void lock() throws IOException { - if (!useLock) { + if (isShared()) { LOG.info("Locking is disabled"); return; } @@ -889,22 +918,6 @@ public interface FormatConfirmable { public String toString(); } - /** - * Get common storage fields. - * Should be overloaded if additional fields need to be get. - * - * @param props - * @throws IOException - */ - protected void setFieldsFromProperties( - Properties props, StorageDirectory sd) throws IOException { - setLayoutVersion(props, sd); - setNamespaceID(props, sd); - setStorageType(props, sd); - setcTime(props, sd); - setClusterId(props, layoutVersion, sd); - } - /** * Set common storage fields into the given properties object. * Should be overloaded if additional fields need to be set. @@ -923,22 +936,29 @@ protected void setPropertiesFromFields(Properties props, } props.setProperty("cTime", String.valueOf(cTime)); } - + /** - * Read properties from the VERSION file in the given storage directory. + * Get common storage fields. + * Should be overloaded if additional fields need to be get. + * + * @param props + * @throws IOException */ - public void readProperties(StorageDirectory sd) throws IOException { - Properties props = readPropertiesFile(sd.getVersionFile()); - setFieldsFromProperties(props, sd); + protected void setFieldsFromProperties( + Properties props, StorageDirectory sd) throws IOException { + super.setFieldsFromProperties(props, sd); + setStorageType(props, sd); } - - /** - * Read properties from the the previous/VERSION file in the given storage directory. 
- */ - public void readPreviousVersionProperties(StorageDirectory sd) - throws IOException { - Properties props = readPropertiesFile(sd.getPreviousVersionFile()); - setFieldsFromProperties(props, sd); + + /** Validate and set storage type from {@link Properties}*/ + protected void setStorageType(Properties props, StorageDirectory sd) + throws InconsistentFSStateException { + NodeType type = NodeType.valueOf(getProperty(props, sd, "storageType")); + if (!storageType.equals(type)) { + throw new InconsistentFSStateException(sd.root, + "node type is incompatible with others."); + } + storageType = type; } /** @@ -947,10 +967,15 @@ public void readPreviousVersionProperties(StorageDirectory sd) public void writeProperties(StorageDirectory sd) throws IOException { writeProperties(sd.getVersionFile(), sd); } - + public void writeProperties(File to, StorageDirectory sd) throws IOException { Properties props = new Properties(); setPropertiesFromFields(props, sd); + writeProperties(to, sd, props); + } + + public static void writeProperties(File to, StorageDirectory sd, + Properties props) throws IOException { RandomAccessFile file = new RandomAccessFile(to, "rws"); FileOutputStream out = null; try { @@ -977,23 +1002,6 @@ public void writeProperties(File to, StorageDirectory sd) throws IOException { file.close(); } } - - public static Properties readPropertiesFile(File from) throws IOException { - RandomAccessFile file = new RandomAccessFile(from, "rws"); - FileInputStream in = null; - Properties props = new Properties(); - try { - in = new FileInputStream(file.getFD()); - file.seek(0); - props.load(in); - } finally { - if (in != null) { - in.close(); - } - file.close(); - } - return props; - } public static void rename(File from, File to) throws IOException { if (!from.renameTo(to)) @@ -1044,69 +1052,6 @@ public static String getRegistrationID(StorageInfo storage) { + "-" + Long.toString(storage.getCTime()); } - String getProperty(Properties props, StorageDirectory sd, - String name) throws InconsistentFSStateException { - String property = props.getProperty(name); - if (property == null) { - throw new InconsistentFSStateException(sd.root, "file " - + STORAGE_FILE_VERSION + " has " + name + " missing."); - } - return property; - } - - /** Validate and set storage type from {@link Properties}*/ - protected void setStorageType(Properties props, StorageDirectory sd) - throws InconsistentFSStateException { - NodeType type = NodeType.valueOf(getProperty(props, sd, "storageType")); - if (!storageType.equals(type)) { - throw new InconsistentFSStateException(sd.root, - "node type is incompatible with others."); - } - storageType = type; - } - - /** Validate and set ctime from {@link Properties}*/ - protected void setcTime(Properties props, StorageDirectory sd) - throws InconsistentFSStateException { - cTime = Long.parseLong(getProperty(props, sd, "cTime")); - } - - /** Validate and set clusterId from {@link Properties}*/ - protected void setClusterId(Properties props, int layoutVersion, - StorageDirectory sd) throws InconsistentFSStateException { - // Set cluster ID in version that supports federation - if (LayoutVersion.supports(Feature.FEDERATION, layoutVersion)) { - String cid = getProperty(props, sd, "clusterID"); - if (!(clusterID.equals("") || cid.equals("") || clusterID.equals(cid))) { - throw new InconsistentFSStateException(sd.getRoot(), - "cluster Id is incompatible with others."); - } - clusterID = cid; - } - } - - /** Validate and set layout version from {@link Properties}*/ - protected void 
setLayoutVersion(Properties props, StorageDirectory sd) - throws IncorrectVersionException, InconsistentFSStateException { - int lv = Integer.parseInt(getProperty(props, sd, "layoutVersion")); - if (lv < HdfsConstants.LAYOUT_VERSION) { // future version - throw new IncorrectVersionException(lv, "storage directory " - + sd.root.getAbsolutePath()); - } - layoutVersion = lv; - } - - /** Validate and set namespaceID version from {@link Properties}*/ - protected void setNamespaceID(Properties props, StorageDirectory sd) - throws InconsistentFSStateException { - int nsId = Integer.parseInt(getProperty(props, sd, "namespaceID")); - if (namespaceID != 0 && nsId != 0 && namespaceID != nsId) { - throw new InconsistentFSStateException(sd.root, - "namespaceID is incompatible with others."); - } - namespaceID = nsId; - } - public static boolean is203LayoutVersion(int layoutVersion) { for (int lv203 : LAYOUT_VERSIONS_203) { if (lv203 == layoutVersion) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java index 1dc83405303..59f3d994e6c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java @@ -17,9 +17,17 @@ */ package org.apache.hadoop.hdfs.server.common; +import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; +import java.io.RandomAccessFile; +import java.util.Properties; + import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LayoutVersion; import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature; +import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import com.google.common.base.Joiner; @@ -34,6 +42,8 @@ public class StorageInfo { public int namespaceID; // id of the file system public String clusterID; // id of the cluster public long cTime; // creation time of the file system state + + protected static final String STORAGE_FILE_VERSION = "VERSION"; public StorageInfo () { this(0, 0, "", 0L); @@ -96,4 +106,113 @@ public String toColonSeparatedString() { return Joiner.on(":").join( layoutVersion, namespaceID, cTime, clusterID); } + + public static int getNsIdFromColonSeparatedString(String in) { + return Integer.parseInt(in.split(":")[1]); + } + + public static String getClusterIdFromColonSeparatedString(String in) { + return in.split(":")[3]; + } + + /** + * Read properties from the VERSION file in the given storage directory. + */ + public void readProperties(StorageDirectory sd) throws IOException { + Properties props = readPropertiesFile(sd.getVersionFile()); + setFieldsFromProperties(props, sd); + } + + /** + * Read properties from the previous/VERSION file in the given storage directory. + */ + public void readPreviousVersionProperties(StorageDirectory sd) + throws IOException { + Properties props = readPropertiesFile(sd.getPreviousVersionFile()); + setFieldsFromProperties(props, sd); + } + + /** + * Get common storage fields. + * Should be overloaded if additional fields need to be read. 
+ * + * @param props + * @throws IOException + */ + protected void setFieldsFromProperties( + Properties props, StorageDirectory sd) throws IOException { + setLayoutVersion(props, sd); + setNamespaceID(props, sd); + setcTime(props, sd); + setClusterId(props, layoutVersion, sd); + } + + /** Validate and set ctime from {@link Properties}*/ + protected void setcTime(Properties props, StorageDirectory sd) + throws InconsistentFSStateException { + cTime = Long.parseLong(getProperty(props, sd, "cTime")); + } + + /** Validate and set clusterId from {@link Properties}*/ + protected void setClusterId(Properties props, int layoutVersion, + StorageDirectory sd) throws InconsistentFSStateException { + // Set cluster ID in version that supports federation + if (LayoutVersion.supports(Feature.FEDERATION, layoutVersion)) { + String cid = getProperty(props, sd, "clusterID"); + if (!(clusterID.equals("") || cid.equals("") || clusterID.equals(cid))) { + throw new InconsistentFSStateException(sd.getRoot(), + "cluster Id is incompatible with others."); + } + clusterID = cid; + } + } + + /** Validate and set layout version from {@link Properties}*/ + protected void setLayoutVersion(Properties props, StorageDirectory sd) + throws IncorrectVersionException, InconsistentFSStateException { + int lv = Integer.parseInt(getProperty(props, sd, "layoutVersion")); + if (lv < HdfsConstants.LAYOUT_VERSION) { // future version + throw new IncorrectVersionException(lv, "storage directory " + + sd.root.getAbsolutePath()); + } + layoutVersion = lv; + } + + /** Validate and set namespaceID version from {@link Properties}*/ + protected void setNamespaceID(Properties props, StorageDirectory sd) + throws InconsistentFSStateException { + int nsId = Integer.parseInt(getProperty(props, sd, "namespaceID")); + if (namespaceID != 0 && nsId != 0 && namespaceID != nsId) { + throw new InconsistentFSStateException(sd.root, + "namespaceID is incompatible with others."); + } + namespaceID = nsId; + } + + static String getProperty(Properties props, StorageDirectory sd, + String name) throws InconsistentFSStateException { + String property = props.getProperty(name); + if (property == null) { + throw new InconsistentFSStateException(sd.root, "file " + + STORAGE_FILE_VERSION + " has " + name + " missing."); + } + return property; + } + + public static Properties readPropertiesFile(File from) throws IOException { + RandomAccessFile file = new RandomAccessFile(from, "rws"); + FileInputStream in = null; + Properties props = new Properties(); + try { + in = new FileInputStream(file.getFD()); + file.seek(0); + props.load(in); + } finally { + if (in != null) { + in.close(); + } + file.close(); + } + return props; + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java index 8558e95d3f6..2497621327f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java @@ -103,7 +103,7 @@ void recoverTransitionRead(DataNode datanode, NamespaceInfo nsInfo, dataDirs.size()); for (Iterator it = dataDirs.iterator(); it.hasNext();) { File dataDir = it.next(); - StorageDirectory sd = new StorageDirectory(dataDir, null, false); + StorageDirectory sd = new StorageDirectory(dataDir, null, true); 
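For reference, the property readers consolidated into StorageInfo above all operate on an ordinary java.util.Properties view of a VERSION file. A small sketch of the round trip, with an illustrative file body (real files are written by Storage#writeProperties; the storageType value shown, JOURNAL_NODE, is an assumption for the example):

import java.io.StringReader;
import java.util.Properties;

public class VersionFileSketch {
  public static void main(String[] args) throws Exception {
    // Illustrative VERSION contents; key names match the getProperty() calls above.
    String version =
        "layoutVersion=-47\n" +
        "namespaceID=12345\n" +
        "cTime=0\n" +
        "clusterID=CID-example\n" +
        "storageType=JOURNAL_NODE\n";

    Properties props = new Properties();
    props.load(new StringReader(version));

    int layoutVersion = Integer.parseInt(props.getProperty("layoutVersion"));
    int namespaceID = Integer.parseInt(props.getProperty("namespaceID"));
    long cTime = Long.parseLong(props.getProperty("cTime"));
    String clusterID = props.getProperty("clusterID");

    // Same field order as StorageInfo#toColonSeparatedString.
    System.out.println(layoutVersion + ":" + namespaceID + ":" + cTime + ":" + clusterID);
  }
}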
StorageState curState; try { curState = sd.analyzeStorage(startOpt, this); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupJournalManager.java index 5420b129cab..2547d880770 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupJournalManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupJournalManager.java @@ -20,6 +20,8 @@ import java.io.IOException; import java.util.Collection; +import org.apache.hadoop.hdfs.server.common.Storage; +import org.apache.hadoop.hdfs.server.common.StorageInfo; import org.apache.hadoop.hdfs.server.protocol.JournalInfo; import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; @@ -97,4 +99,35 @@ public boolean matchesRegistration(NamenodeRegistration bnReg) { public String toString() { return "BackupJournalManager"; } + + @Override + public void doPreUpgrade() throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public void doUpgrade(Storage storage) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public void doFinalize() throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public boolean canRollBack(StorageInfo storage, StorageInfo prevStorage, + int targetLayoutVersion) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public void doRollback() throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public long getJournalCTime() throws IOException { + throw new UnsupportedOperationException(); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java index 5e7740c1791..6a1afb7929d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java @@ -36,6 +36,7 @@ import org.apache.hadoop.hdfs.protocolPB.JournalProtocolPB; import org.apache.hadoop.hdfs.protocolPB.JournalProtocolServerSideTranslatorPB; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.namenode.ha.HAState; import org.apache.hadoop.hdfs.server.protocol.FenceResponse; @@ -367,7 +368,7 @@ private void registerWith(NamespaceInfo nsInfo) throws IOException { } else { nsInfo.validateStorage(storage); } - bnImage.initEditLog(); + bnImage.initEditLog(StartupOption.REGULAR); setRegistration(); NamenodeRegistration nnReg = null; while(!isStopRequested()) { @@ -423,7 +424,8 @@ protected String getNameServiceId(Configuration conf) { return DFSUtil.getBackupNameServiceId(conf); } - protected HAState createHAState() { + @Override + protected HAState createHAState(StartupOption startOpt) { return new BackupState(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java index d98dad196a8..d43a55bfb00 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java @@ -42,6 +42,7 @@ import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole; +import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.common.Storage.FormatConfirmable; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddBlockOp; @@ -252,10 +253,12 @@ private synchronized void initJournals(List dirs) { if (u.getScheme().equals(NNStorage.LOCAL_URI_SCHEME)) { StorageDirectory sd = storage.getStorageDirectory(u); if (sd != null) { - journalSet.add(new FileJournalManager(conf, sd, storage), required); + journalSet.add(new FileJournalManager(conf, sd, storage), + required, sharedEditsDirs.contains(u)); } } else { - journalSet.add(createJournal(u), required); + journalSet.add(createJournal(u), required, + sharedEditsDirs.contains(u)); } } @@ -1339,6 +1342,58 @@ synchronized void recoverUnclosedStreams() { } } + public long getSharedLogCTime() throws IOException { + for (JournalAndStream jas : journalSet.getAllJournalStreams()) { + if (jas.isShared()) { + return jas.getManager().getJournalCTime(); + } + } + throw new IOException("No shared log found."); + } + + public synchronized void doPreUpgradeOfSharedLog() throws IOException { + for (JournalAndStream jas : journalSet.getAllJournalStreams()) { + if (jas.isShared()) { + jas.getManager().doPreUpgrade(); + } + } + } + + public synchronized void doUpgradeOfSharedLog() throws IOException { + for (JournalAndStream jas : journalSet.getAllJournalStreams()) { + if (jas.isShared()) { + jas.getManager().doUpgrade(storage); + } + } + } + + public synchronized void doFinalizeOfSharedLog() throws IOException { + for (JournalAndStream jas : journalSet.getAllJournalStreams()) { + if (jas.isShared()) { + jas.getManager().doFinalize(); + } + } + } + + public synchronized boolean canRollBackSharedLog(Storage prevStorage, + int targetLayoutVersion) throws IOException { + for (JournalAndStream jas : journalSet.getAllJournalStreams()) { + if (jas.isShared()) { + return jas.getManager().canRollBack(storage, prevStorage, + targetLayoutVersion); + } + } + throw new IOException("No shared log found."); + } + + public synchronized void doRollback() throws IOException { + for (JournalAndStream jas : journalSet.getAllJournalStreams()) { + if (jas.isShared()) { + jas.getManager().doRollback(); + } + } + } + @Override public void selectInputStreams(Collection streams, long fromTxId, boolean inProgressOk) throws IOException { @@ -1470,4 +1525,5 @@ private JournalManager createJournal(URI uri) { + uri, e); } } + } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java index cc4ca0c7772..166ffb2fd9b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java @@ -178,7 +178,8 @@ boolean confirmFormat(boolean force, boolean interactive) throws IOException { * @return true if the image needs to be saved or false otherwise */ boolean recoverTransitionRead(StartupOption startOpt, FSNamesystem target, - MetaRecoveryContext recovery) throws IOException { + MetaRecoveryContext recovery) + throws IOException { assert startOpt != StartupOption.FORMAT : "NameNode formatting should be performed before reading the image"; @@ -252,14 +253,14 @@ boolean recoverTransitionRead(StartupOption startOpt, FSNamesystem target, doImportCheckpoint(target); return false; // import checkpoint saved image already case ROLLBACK: - doRollback(); - break; + throw new AssertionError("Rollback is now a standalone command, " + + "NameNode should not be starting with this option."); case REGULAR: default: // just load the image } - return loadFSImage(target, recovery); + return loadFSImage(target, recovery, startOpt); } /** @@ -272,17 +273,15 @@ boolean recoverTransitionRead(StartupOption startOpt, FSNamesystem target, private boolean recoverStorageDirs(StartupOption startOpt, Map dataDirStates) throws IOException { boolean isFormatted = false; + // This loop needs to be over all storage dirs, even shared dirs, to make + // sure that we properly examine their state, but we make sure we don't + // mutate the shared dir below in the actual loop. for (Iterator it = storage.dirIterator(); it.hasNext();) { StorageDirectory sd = it.next(); StorageState curState; try { curState = sd.analyzeStorage(startOpt, storage); - String nameserviceId = DFSUtil.getNamenodeNameServiceId(conf); - if (curState != StorageState.NORMAL && HAUtil.isHAEnabled(conf, nameserviceId)) { - throw new IOException("Cannot start an HA namenode with name dirs " + - "that need recovery. 
Dir: " + sd + " state: " + curState); - } // sd is locked but not opened switch(curState) { case NON_EXISTENT: @@ -294,7 +293,7 @@ private boolean recoverStorageDirs(StartupOption startOpt, case NORMAL: break; default: // recovery is possible - sd.doRecover(curState); + sd.doRecover(curState); } if (curState != StorageState.NOT_FORMATTED && startOpt != StartupOption.ROLLBACK) { @@ -315,10 +314,10 @@ private boolean recoverStorageDirs(StartupOption startOpt, return isFormatted; } - private void doUpgrade(FSNamesystem target) throws IOException { + void doUpgrade(FSNamesystem target) throws IOException { // Upgrade is allowed only if there are - // no previous fs states in any of the directories - for (Iterator it = storage.dirIterator(); it.hasNext();) { + // no previous fs states in any of the local directories + for (Iterator it = storage.dirIterator(false); it.hasNext();) { StorageDirectory sd = it.next(); if (sd.getPreviousDir().exists()) throw new InconsistentFSStateException(sd.getRoot(), @@ -327,9 +326,9 @@ private void doUpgrade(FSNamesystem target) throws IOException { } // load the latest image - this.loadFSImage(target, null); - // Do upgrade for each directory + this.loadFSImage(target, null, StartupOption.UPGRADE); + long oldCTime = storage.getCTime(); storage.cTime = now(); // generate new cTime for the state int oldLV = storage.getLayoutVersion(); @@ -337,28 +336,17 @@ private void doUpgrade(FSNamesystem target) throws IOException { List errorSDs = Collections.synchronizedList(new ArrayList()); - for (Iterator it = storage.dirIterator(); it.hasNext();) { + assert !editLog.isSegmentOpen() : "Edits log must not be open."; + LOG.info("Starting upgrade of local storage directories." + + "\n old LV = " + oldLV + + "; old CTime = " + oldCTime + + ".\n new LV = " + storage.getLayoutVersion() + + "; new CTime = " + storage.getCTime()); + // Do upgrade for each directory + for (Iterator it = storage.dirIterator(false); it.hasNext();) { StorageDirectory sd = it.next(); - LOG.info("Starting upgrade of image directory " + sd.getRoot() - + ".\n old LV = " + oldLV - + "; old CTime = " + oldCTime - + ".\n new LV = " + storage.getLayoutVersion() - + "; new CTime = " + storage.getCTime()); try { - File curDir = sd.getCurrentDir(); - File prevDir = sd.getPreviousDir(); - File tmpDir = sd.getPreviousTmp(); - assert curDir.exists() : "Current directory must exist."; - assert !prevDir.exists() : "previous directory must not exist."; - assert !tmpDir.exists() : "previous.tmp directory must not exist."; - assert !editLog.isSegmentOpen() : "Edits log must not be open."; - - // rename current to tmp - NNStorage.rename(curDir, tmpDir); - - if (!curDir.mkdir()) { - throw new IOException("Cannot create directory " + curDir); - } + NNUpgradeUtil.doPreUpgrade(sd); } catch (Exception e) { LOG.error("Failed to move aside pre-upgrade storage " + "in image directory " + sd.getRoot(), e); @@ -366,41 +354,38 @@ private void doUpgrade(FSNamesystem target) throws IOException { continue; } } + if (target.isHaEnabled()) { + editLog.doPreUpgradeOfSharedLog(); + } storage.reportErrorsOnDirectories(errorSDs); errorSDs.clear(); saveFSImageInAllDirs(target, editLog.getLastWrittenTxId()); - for (Iterator it = storage.dirIterator(); it.hasNext();) { + for (Iterator it = storage.dirIterator(false); it.hasNext();) { StorageDirectory sd = it.next(); try { - // Write the version file, since saveFsImage above only makes the - // fsimage_, and the directory is otherwise empty. 
- storage.writeProperties(sd); - - File prevDir = sd.getPreviousDir(); - File tmpDir = sd.getPreviousTmp(); - // rename tmp to previous - NNStorage.rename(tmpDir, prevDir); + NNUpgradeUtil.doUpgrade(sd, storage); } catch (IOException ioe) { - LOG.error("Unable to rename temp to previous for " + sd.getRoot(), ioe); errorSDs.add(sd); continue; } - LOG.info("Upgrade of " + sd.getRoot() + " is complete."); + } + if (target.isHaEnabled()) { + editLog.doUpgradeOfSharedLog(); } storage.reportErrorsOnDirectories(errorSDs); - + isUpgradeFinalized = false; if (!storage.getRemovedStorageDirs().isEmpty()) { - //during upgrade, it's a fatal error to fail any storage directory + // during upgrade, it's a fatal error to fail any storage directory throw new IOException("Upgrade failed in " + storage.getRemovedStorageDirs().size() + " storage directory(ies), previously logged."); } } - private void doRollback() throws IOException { + void doRollback(FSNamesystem fsns) throws IOException { // Rollback is allowed only if there is // a previous fs states in at least one of the storage directories. // Directories that don't have previous state do not rollback @@ -408,85 +393,46 @@ private void doRollback() throws IOException { FSImage prevState = new FSImage(conf); try { prevState.getStorage().layoutVersion = HdfsConstants.LAYOUT_VERSION; - for (Iterator it = storage.dirIterator(); it.hasNext();) { + for (Iterator it = storage.dirIterator(false); it.hasNext();) { StorageDirectory sd = it.next(); - File prevDir = sd.getPreviousDir(); - if (!prevDir.exists()) { // use current directory then - LOG.info("Storage directory " + sd.getRoot() - + " does not contain previous fs state."); - // read and verify consistency with other directories - storage.readProperties(sd); + if (!NNUpgradeUtil.canRollBack(sd, storage, prevState.getStorage(), + HdfsConstants.LAYOUT_VERSION)) { continue; } - - // read and verify consistency of the prev dir - prevState.getStorage().readPreviousVersionProperties(sd); - - if (prevState.getLayoutVersion() != HdfsConstants.LAYOUT_VERSION) { - throw new IOException( - "Cannot rollback to storage version " + - prevState.getLayoutVersion() + - " using this version of the NameNode, which uses storage version " + - HdfsConstants.LAYOUT_VERSION + ". " + - "Please use the previous version of HDFS to perform the rollback."); - } canRollback = true; } + + if (fsns.isHaEnabled()) { + // If HA is enabled, check if the shared log can be rolled back as well. + editLog.initJournalsForWrite(); + canRollback |= editLog.canRollBackSharedLog(prevState.getStorage(), + HdfsConstants.LAYOUT_VERSION); + } + if (!canRollback) throw new IOException("Cannot rollback. 
None of the storage " + "directories contain previous fs state."); - + // Now that we know all directories are going to be consistent // Do rollback for each directory containing previous state - for (Iterator it = storage.dirIterator(); it.hasNext();) { + for (Iterator it = storage.dirIterator(false); it.hasNext();) { StorageDirectory sd = it.next(); - File prevDir = sd.getPreviousDir(); - if (!prevDir.exists()) - continue; - LOG.info("Rolling back storage directory " + sd.getRoot() - + ".\n new LV = " + prevState.getStorage().getLayoutVersion() - + "; new CTime = " + prevState.getStorage().getCTime()); - File tmpDir = sd.getRemovedTmp(); - assert !tmpDir.exists() : "removed.tmp directory must not exist."; - // rename current to tmp - File curDir = sd.getCurrentDir(); - assert curDir.exists() : "Current directory must exist."; - NNStorage.rename(curDir, tmpDir); - // rename previous to current - NNStorage.rename(prevDir, curDir); - - // delete tmp dir - NNStorage.deleteDir(tmpDir); - LOG.info("Rollback of " + sd.getRoot()+ " is complete."); + + ".\n new LV = " + prevState.getStorage().getLayoutVersion() + + "; new CTime = " + prevState.getStorage().getCTime()); + NNUpgradeUtil.doRollBack(sd); } + if (fsns.isHaEnabled()) { + // If HA is enabled, try to roll back the shared log as well. + editLog.doRollback(); + } + isUpgradeFinalized = true; } finally { prevState.close(); } } - private void doFinalize(StorageDirectory sd) throws IOException { - File prevDir = sd.getPreviousDir(); - if (!prevDir.exists()) { // already discarded - LOG.info("Directory " + prevDir + " does not exist."); - LOG.info("Finalize upgrade for " + sd.getRoot()+ " is not required."); - return; - } - LOG.info("Finalizing upgrade for storage directory " - + sd.getRoot() + "." - + (storage.getLayoutVersion()==0 ? "" : - "\n cur LV = " + storage.getLayoutVersion() - + "; cur CTime = " + storage.getCTime())); - assert sd.getCurrentDir().exists() : "Current directory must exist."; - final File tmpDir = sd.getFinalizedTmp(); - // rename previous to tmp and remove - NNStorage.rename(prevDir, tmpDir); - NNStorage.deleteDir(tmpDir); - isUpgradeFinalized = true; - LOG.info("Finalize upgrade for " + sd.getRoot()+ " is complete."); - } - /** * Load image from a checkpoint directory and save it into the current one. * @param target the NameSystem to import into @@ -521,7 +467,7 @@ void doImportCheckpoint(FSNamesystem target) throws IOException { // return back the real image realImage.getStorage().setStorageInfo(ckptImage.getStorage()); realImage.getEditLog().setNextTxId(ckptImage.getEditLog().getLastWrittenTxId()+1); - realImage.initEditLog(); + realImage.initEditLog(StartupOption.IMPORT); target.dir.fsImage = realImage; realImage.getStorage().setBlockPoolID(ckptImage.getBlockPoolID()); @@ -530,12 +476,23 @@ void doImportCheckpoint(FSNamesystem target) throws IOException { saveNamespace(target); getStorage().writeAll(); } - - void finalizeUpgrade() throws IOException { - for (Iterator it = storage.dirIterator(); it.hasNext();) { + + void finalizeUpgrade(boolean finalizeEditLog) throws IOException { + LOG.info("Finalizing upgrade for local dirs. " + + (storage.getLayoutVersion() == 0 ? "" : + "\n cur LV = " + storage.getLayoutVersion() + + "; cur CTime = " + storage.getCTime())); + for (Iterator it = storage.dirIterator(false); it.hasNext();) { StorageDirectory sd = it.next(); - doFinalize(sd); + NNUpgradeUtil.doFinalize(sd); } + if (finalizeEditLog) { + // We only do this in the case that HA is enabled and we're active. 
In any + // other case the NN will have done the upgrade of the edits directories + // already by virtue of the fact that they're local. + editLog.doFinalizeOfSharedLog(); + } + isUpgradeFinalized = true; } boolean isUpgradeFinalized() { @@ -582,8 +539,8 @@ void reloadFromImageFile(File file, FSNamesystem target) throws IOException { * @return whether the image should be saved * @throws IOException */ - boolean loadFSImage(FSNamesystem target, MetaRecoveryContext recovery) - throws IOException { + boolean loadFSImage(FSNamesystem target, MetaRecoveryContext recovery, + StartupOption startOpt) throws IOException { FSImageStorageInspector inspector = storage.readAndInspectDirs(); FSImageFile imageFile = null; @@ -600,7 +557,7 @@ boolean loadFSImage(FSNamesystem target, MetaRecoveryContext recovery) Iterable editStreams = null; - initEditLog(); + initEditLog(startOpt); if (LayoutVersion.supports(Feature.TXID_BASED_LAYOUT, getLayoutVersion())) { @@ -682,14 +639,30 @@ void loadFSImageFile(FSNamesystem target, MetaRecoveryContext recovery, } } - public void initEditLog() { + public void initEditLog(StartupOption startOpt) throws IOException { Preconditions.checkState(getNamespaceID() != 0, "Must know namespace ID before initting edit log"); String nameserviceId = DFSUtil.getNamenodeNameServiceId(conf); if (!HAUtil.isHAEnabled(conf, nameserviceId)) { + // If this NN is not HA editLog.initJournalsForWrite(); editLog.recoverUnclosedStreams(); + } else if (HAUtil.isHAEnabled(conf, nameserviceId) && + startOpt == StartupOption.UPGRADE) { + // This NN is HA, but we're doing an upgrade so init the edit log for + // write. + editLog.initJournalsForWrite(); + long sharedLogCTime = editLog.getSharedLogCTime(); + if (this.storage.getCTime() < sharedLogCTime) { + throw new IOException("It looks like the shared log is already " + + "being upgraded but this NN has not been upgraded yet. You " + + "should restart this NameNode with the '" + + StartupOption.BOOTSTRAPSTANDBY.getName() + "' option to bring " + + "this NN in sync with the other."); + } + editLog.recoverUnclosedStreams(); } else { + // This NN is HA and we're not doing an upgrade. editLog.initSharedJournalsForRead(); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 537aa72b279..f1bf5d5d8c7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -549,6 +549,10 @@ LeaseManager getLeaseManager() { return leaseManager; } + boolean isHaEnabled() { + return haEnabled; + } + /** * Check the supplied configuration for correctness. * @param conf Supplies the configuration to validate. @@ -878,7 +882,7 @@ void loadFSImage(StartupOption startOpt, FSImage fsImage, boolean haEnabled) } // This will start a new log segment and write to the seen_txid file, so // we shouldn't do it when coming up in standby state - if (!haEnabled) { + if (!haEnabled || (haEnabled && startOpt == StartupOption.UPGRADE)) { fsImage.openEditLogForWrite(); } success = true; @@ -1005,6 +1009,7 @@ void startActiveServices() throws IOException { dir.fsImage.editLog.openForWrite(); } + if (haEnabled) { // Renew all of the leases before becoming active. 
// This is because, while we were in standby mode, @@ -1031,14 +1036,17 @@ void startActiveServices() throws IOException { } } + private boolean inActiveState() { + return haContext != null && + haContext.getState().getServiceState() == HAServiceState.ACTIVE; + } + /** * @return Whether the namenode is transitioning to active state and is in the * middle of the {@link #startActiveServices()} */ public boolean inTransitionToActive() { - return haEnabled && haContext != null - && haContext.getState().getServiceState() == HAServiceState.ACTIVE - && startingActiveService; + return haEnabled && inActiveState() && startingActiveService; } private boolean shouldUseDelegationTokens() { @@ -4512,11 +4520,11 @@ Date getStartTime() { void finalizeUpgrade() throws IOException { checkSuperuserPrivilege(); - checkOperation(OperationCategory.WRITE); + checkOperation(OperationCategory.UNCHECKED); writeLock(); try { - checkOperation(OperationCategory.WRITE); - getFSImage().finalizeUpgrade(); + checkOperation(OperationCategory.UNCHECKED); + getFSImage().finalizeUpgrade(this.isHaEnabled() && inActiveState()); } finally { writeUnlock(); } @@ -7421,5 +7429,6 @@ private static void enableAsyncAuditLog() { logger.addAppender(asyncAppender); } } + } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java index 09bddefc441..4c78add075e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java @@ -33,14 +33,15 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import org.apache.hadoop.hdfs.server.common.StorageErrorReporter; +import org.apache.hadoop.hdfs.server.common.StorageInfo; import org.apache.hadoop.hdfs.server.namenode.NNStorageRetentionManager.StoragePurger; import org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.EditLogValidation; import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog; - import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Joiner; import com.google.common.base.Preconditions; @@ -489,4 +490,49 @@ public String toString() { isInProgress(), hasCorruptHeader); } } + + @Override + public void doPreUpgrade() throws IOException { + LOG.info("Starting upgrade of edits directory " + sd.getRoot()); + try { + NNUpgradeUtil.doPreUpgrade(sd); + } catch (IOException ioe) { + LOG.error("Failed to move aside pre-upgrade storage " + + "in image directory " + sd.getRoot(), ioe); + throw ioe; + } + } + + /** + * This method assumes that the fields of the {@link Storage} object have + * already been updated to the appropriate new values for the upgrade. 
+ */ + @Override + public void doUpgrade(Storage storage) throws IOException { + NNUpgradeUtil.doUpgrade(sd, storage); + } + + @Override + public void doFinalize() throws IOException { + NNUpgradeUtil.doFinalize(sd); + } + + @Override + public boolean canRollBack(StorageInfo storage, StorageInfo prevStorage, + int targetLayoutVersion) throws IOException { + return NNUpgradeUtil.canRollBack(sd, storage, + prevStorage, targetLayoutVersion); + } + + @Override + public void doRollback() throws IOException { + NNUpgradeUtil.doRollBack(sd); + } + + @Override + public long getJournalCTime() throws IOException { + StorageInfo sInfo = new StorageInfo(); + sInfo.readProperties(sd); + return sInfo.getCTime(); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java index 785c1fecac0..a50b3aa27ce 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java @@ -22,7 +22,9 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.common.Storage.FormatConfirmable; +import org.apache.hadoop.hdfs.server.common.StorageInfo; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; /** @@ -64,6 +66,54 @@ public interface JournalManager extends Closeable, FormatConfirmable, * Recover segments which have not been finalized. */ void recoverUnfinalizedSegments() throws IOException; + + /** + * Perform any steps that must succeed across all JournalManagers involved in + * an upgrade before proceeding onto the actual upgrade stage. If a call to + * any JM's doPreUpgrade method fails, then doUpgrade will not be called for + * any JM. + */ + void doPreUpgrade() throws IOException; + + /** + * Perform the actual upgrade of the JM. After this is completed, the NN can + * begin to use the new upgraded metadata. This metadata may later be either + * finalized or rolled back to the previous state. + * + * @param storage info about the new upgraded versions. + */ + void doUpgrade(Storage storage) throws IOException; + + /** + * Finalize the upgrade. JMs should purge any state that they had been keeping + * around during the upgrade process. After this is completed, rollback is no + * longer allowed. + */ + void doFinalize() throws IOException; + + /** + * Return true if this JM can roll back to the previous storage state, false + * otherwise. The NN will refuse to run the rollback operation unless at least + * one JM or fsimage storage directory can roll back. + * + * @param storage the storage info for the current state + * @param prevStorage the storage info for the previous (unupgraded) state + * @param targetLayoutVersion the layout version we intend to roll back to + * @return true if this JM can roll back, false otherwise. + */ + boolean canRollBack(StorageInfo storage, StorageInfo prevStorage, + int targetLayoutVersion) throws IOException; + + /** + * Perform the rollback to the previous FS state. JMs which do not need to + * roll back their state should just return without error. + */ + void doRollback() throws IOException; + + /** + * @return the CTime of the journal manager. 
+   */
+  long getJournalCTime() throws IOException;

   /**
    * Close the journal manager, freeing any resources it may hold.
@@ -84,4 +134,5 @@ public CorruptionException(String reason) {
       super(reason);
     }
   }
+
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java
index b117606f8cb..7bf5015ca17 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java
@@ -33,6 +33,8 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdfs.server.common.Storage;
+import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
@@ -77,11 +79,14 @@ static class JournalAndStream implements CheckableNameNodeResource {
     private final JournalManager journal;
     private boolean disabled = false;
     private EditLogOutputStream stream;
-    private boolean required = false;
+    private final boolean required;
+    private final boolean shared;

-    public JournalAndStream(JournalManager manager, boolean required) {
+    public JournalAndStream(JournalManager manager, boolean required,
+        boolean shared) {
       this.journal = manager;
       this.required = required;
+      this.shared = shared;
     }

     public void startLogSegment(long txId) throws IOException {
@@ -163,6 +168,10 @@ public boolean isResourceAvailable() {
     public boolean isRequired() {
       return required;
     }
+
+    public boolean isShared() {
+      return shared;
+    }
   }

   // COW implementation is necessary since some users (eg the web ui) call
@@ -178,7 +187,7 @@ public boolean isRequired() {
   @Override
   public void format(NamespaceInfo nsInfo) throws IOException {
-    // The iteration is done by FSEditLog itself
+    // The operation is done by FSEditLog itself
     throw new UnsupportedOperationException();
   }

@@ -537,9 +546,13 @@ List<JournalManager> getJournalManagers() {
     }
     return jList;
   }
-
+
   void add(JournalManager j, boolean required) {
-    JournalAndStream jas = new JournalAndStream(j, required);
+    add(j, required, false);
+  }
+
+  void add(JournalManager j, boolean required, boolean shared) {
+    JournalAndStream jas = new JournalAndStream(j, required, shared);
     journals.add(jas);
   }

@@ -655,4 +668,40 @@ String getSyncTimes() {
     }
     return buf.toString();
   }
+
+  @Override
+  public void doPreUpgrade() throws IOException {
+    // This operation is handled by FSEditLog directly.
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void doUpgrade(Storage storage) throws IOException {
+    // This operation is handled by FSEditLog directly.
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void doFinalize() throws IOException {
+    // This operation is handled by FSEditLog directly.
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public boolean canRollBack(StorageInfo storage, StorageInfo prevStorage, int targetLayoutVersion) throws IOException {
+    // This operation is handled by FSEditLog directly.
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void doRollback() throws IOException {
+    // This operation is handled by FSEditLog directly.
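+    // (Per the JournalManager contract documented above, FSEditLog iterates
+    // the journals itself so that a doPreUpgrade failure on any one journal
+    // stops the upgrade before doUpgrade runs on any of them; the fan-out
+    // therefore cannot live in this aggregate class.)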
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public long getJournalCTime() throws IOException {
+    // This operation is handled by FSEditLog directly.
+    throw new UnsupportedOperationException();
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
index 21c56c2f669..ce23c576ca3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
@@ -299,7 +299,7 @@ synchronized void setStorageDirectories(Collection<URI> fsNameDirs,
       if(dirName.getScheme().compareTo("file") == 0) {
         this.addStorageDir(new StorageDirectory(new File(dirName.getPath()),
             dirType,
-            !sharedEditsDirs.contains(dirName))); // Don't lock the dir if it's shared.
+            sharedEditsDirs.contains(dirName))); // Don't lock the dir if it's shared.
       }
     }

@@ -310,7 +310,7 @@ synchronized void setStorageDirectories(Collection<URI> fsNameDirs,
       // URI is of type file://
       if(dirName.getScheme().compareTo("file") == 0)
         this.addStorageDir(new StorageDirectory(new File(dirName.getPath()),
-            NameNodeDirType.EDITS, !sharedEditsDirs.contains(dirName)));
+            NameNodeDirType.EDITS, sharedEditsDirs.contains(dirName)));
     }
   }

@@ -976,7 +976,7 @@ FSImageStorageInspector readAndInspectDirs()
     StringBuilder layoutVersions = new StringBuilder();

     // First determine what range of layout versions we're going to inspect
-    for (Iterator<StorageDirectory> it = dirIterator();
+    for (Iterator<StorageDirectory> it = dirIterator(false);
          it.hasNext();) {
       StorageDirectory sd = it.next();
       if (!sd.getVersionFile().exists()) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNUpgradeUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNUpgradeUtil.java
new file mode 100644
index 00000000000..1c491e5dcea
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNUpgradeUtil.java
@@ -0,0 +1,174 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.io.File;
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hdfs.server.common.Storage;
+import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
+import org.apache.hadoop.hdfs.server.common.StorageInfo;
+
+abstract class NNUpgradeUtil {
+
+  private static final Log LOG = LogFactory.getLog(NNUpgradeUtil.class);
+
+  /**
+   * Return true if this storage dir can roll back to the previous storage
+   * state, false otherwise. The NN will refuse to run the rollback operation
+   * unless at least one JM or fsimage storage directory can roll back.
+   *
+   * @param sd the storage directory to check
+   * @param storage the storage info for the current state
+   * @param prevStorage the storage info for the previous (unupgraded) state
+   * @param targetLayoutVersion the layout version we intend to roll back to
+   * @return true if this storage directory can roll back, false otherwise.
+   * @throws IOException in the event of error
+   */
+  static boolean canRollBack(StorageDirectory sd, StorageInfo storage,
+      StorageInfo prevStorage, int targetLayoutVersion) throws IOException {
+    File prevDir = sd.getPreviousDir();
+    if (!prevDir.exists()) {  // use current directory then
+      LOG.info("Storage directory " + sd.getRoot()
+               + " does not contain previous fs state.");
+      // read and verify consistency with other directories
+      storage.readProperties(sd);
+      return false;
+    }
+
+    // read and verify consistency of the prev dir
+    prevStorage.readPreviousVersionProperties(sd);
+
+    if (prevStorage.getLayoutVersion() != targetLayoutVersion) {
+      throw new IOException(
+        "Cannot rollback to storage version " +
+        prevStorage.getLayoutVersion() +
+        " using this version of the NameNode, which uses storage version " +
+        targetLayoutVersion + ". " +
+        "Please use the previous version of HDFS to perform the rollback.");
+    }
+
+    return true;
+  }
+
+  /**
+   * Finalize the upgrade. The previous dir, if any, will be renamed and
+   * removed. After this is completed, rollback is no longer allowed.
+   *
+   * @param sd the storage directory to finalize
+   * @throws IOException in the event of error
+   */
+  static void doFinalize(StorageDirectory sd) throws IOException {
+    File prevDir = sd.getPreviousDir();
+    if (!prevDir.exists()) { // already discarded
+      LOG.info("Directory " + prevDir + " does not exist.");
+      LOG.info("Finalize upgrade for " + sd.getRoot()+ " is not required.");
+      return;
+    }
+    LOG.info("Finalizing upgrade of storage directory " + sd.getRoot());
+    assert sd.getCurrentDir().exists() : "Current directory must exist.";
+    final File tmpDir = sd.getFinalizedTmp();
+    // rename previous to tmp and remove
+    NNStorage.rename(prevDir, tmpDir);
+    NNStorage.deleteDir(tmpDir);
+    LOG.info("Finalize upgrade for " + sd.getRoot()+ " is complete.");
+  }
+
+  /**
+   * Perform any steps that must succeed across all storage dirs/JournalManagers
+   * involved in an upgrade before proceeding onto the actual upgrade stage. If
+   * a call to any JM's or local storage dir's doPreUpgrade method fails, then
+   * doUpgrade will not be called for any JM. The existing current dir is
+   * renamed to previous.tmp, and then a new, empty current dir is created.
+   *
+   * @param sd the storage directory on which to perform the pre-upgrade procedure.
+ * @throws IOException in the event of error + */ + static void doPreUpgrade(StorageDirectory sd) throws IOException { + LOG.info("Starting upgrade of storage directory " + sd.getRoot()); + File curDir = sd.getCurrentDir(); + File prevDir = sd.getPreviousDir(); + File tmpDir = sd.getPreviousTmp(); + assert curDir.exists() : "Current directory must exist."; + assert !prevDir.exists() : "previous directory must not exist."; + assert !tmpDir.exists() : "previous.tmp directory must not exist."; + + // rename current to tmp + NNStorage.rename(curDir, tmpDir); + + if (!curDir.mkdir()) { + throw new IOException("Cannot create directory " + curDir); + } + } + + /** + * Perform the upgrade of the storage dir to the given storage info. The new + * storage info is written into the current directory, and the previous.tmp + * directory is renamed to previous. + * + * @param sd the storage directory to upgrade + * @param storage info about the new upgraded versions. + * @throws IOException in the event of error + */ + static void doUpgrade(StorageDirectory sd, Storage storage) throws + IOException { + LOG.info("Performing upgrade of storage directory " + sd.getRoot()); + try { + // Write the version file, since saveFsImage only makes the + // fsimage_, and the directory is otherwise empty. + storage.writeProperties(sd); + + File prevDir = sd.getPreviousDir(); + File tmpDir = sd.getPreviousTmp(); + // rename tmp to previous + NNStorage.rename(tmpDir, prevDir); + } catch (IOException ioe) { + LOG.error("Unable to rename temp to previous for " + sd.getRoot(), ioe); + throw ioe; + } + } + + /** + * Perform rollback of the storage dir to the previous state. The existing + * current dir is removed, and the previous dir is renamed to current. + * + * @param sd the storage directory to roll back. + * @throws IOException in the event of error + */ + static void doRollBack(StorageDirectory sd) + throws IOException { + File prevDir = sd.getPreviousDir(); + if (!prevDir.exists()) + return; + + File tmpDir = sd.getRemovedTmp(); + assert !tmpDir.exists() : "removed.tmp directory must not exist."; + // rename current to tmp + File curDir = sd.getCurrentDir(); + assert curDir.exists() : "Current directory must exist."; + NNStorage.rename(curDir, tmpDir); + // rename previous to current + NNStorage.rename(prevDir, curDir); + + // delete tmp dir + NNStorage.deleteDir(tmpDir); + LOG.info("Rollback of " + sd.getRoot() + " is complete."); + } + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java index eb3755bdc4f..1d02e0149c4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java @@ -648,7 +648,7 @@ protected NameNode(Configuration conf, NamenodeRole role) String nsId = getNameServiceId(conf); String namenodeId = HAUtil.getNameNodeId(conf, nsId); this.haEnabled = HAUtil.isHAEnabled(conf, nsId); - state = createHAState(); + state = createHAState(getStartupOption(conf)); this.allowStaleStandbyReads = HAUtil.shouldAllowStandbyReads(conf); this.haContext = createHAContext(); try { @@ -670,8 +670,12 @@ protected NameNode(Configuration conf, NamenodeRole role) } } - protected HAState createHAState() { - return !haEnabled ? 
ACTIVE_STATE : STANDBY_STATE;
+  protected HAState createHAState(StartupOption startOpt) {
+    if (!haEnabled || startOpt == StartupOption.UPGRADE) {
+      return ACTIVE_STATE;
+    } else {
+      return STANDBY_STATE;
+    }
   }

   protected HAContext createHAContext() {
@@ -1023,26 +1027,28 @@ private static void copyEditLogSegmentsToSharedDir(FSNamesystem fsns,
       }
     }
   }
-
-  private static boolean finalize(Configuration conf,
-                               boolean isConfirmationNeeded
-                               ) throws IOException {
+
+  @VisibleForTesting
+  public static boolean doRollback(Configuration conf,
+      boolean isConfirmationNeeded) throws IOException {
     String nsId = DFSUtil.getNamenodeNameServiceId(conf);
     String namenodeId = HAUtil.getNameNodeId(conf, nsId);
     initializeGenericKeys(conf, nsId, namenodeId);

     FSNamesystem nsys = new FSNamesystem(conf, new FSImage(conf));
     System.err.print(
-        "\"finalize\" will remove the previous state of the files system.\n"
-        + "Recent upgrade will become permanent.\n"
-        + "Rollback option will not be available anymore.\n");
+        "\"rollBack\" will remove the current state of the file system,\n"
+        + "returning you to the state prior to initiating your recent\n"
+        + "upgrade. This action is permanent and cannot be undone. If you\n"
+        + "are performing a rollback in an HA environment, you should be\n"
+        + "certain that no NameNode process is running on any host.");
     if (isConfirmationNeeded) {
-      if (!confirmPrompt("Finalize filesystem state?")) {
-        System.err.println("Finalize aborted.");
+      if (!confirmPrompt("Roll back file system state?")) {
+        System.err.println("Rollback aborted.");
         return true;
       }
     }
-    nsys.dir.fsImage.finalizeUpgrade();
+    nsys.dir.fsImage.doRollback(nsys);
     return false;
   }

@@ -1206,14 +1212,6 @@ public static NameNode createNameNode(String argv[], Configuration conf)
       return null;
     }
     setStartupOption(conf, startOpt);
-
-    if (HAUtil.isHAEnabled(conf, DFSUtil.getNamenodeNameServiceId(conf)) &&
-        (startOpt == StartupOption.UPGRADE ||
-         startOpt == StartupOption.ROLLBACK ||
-         startOpt == StartupOption.FINALIZE)) {
-      throw new HadoopIllegalArgumentException("Invalid startup option. " +
-          "Cannot perform DFS upgrade with HA enabled.");
-    }

     switch (startOpt) {
       case FORMAT: {
@@ -1229,10 +1227,17 @@ public static NameNode createNameNode(String argv[], Configuration conf)
         return null;
       }
       case FINALIZE: {
-        boolean aborted = finalize(conf, true);
-        terminate(aborted ? 1 : 0);
+        System.err.println("Use of the argument '" + StartupOption.FINALIZE +
+            "' is no longer supported. To finalize an upgrade, start the NN " +
+            "and then run `hdfs dfsadmin -finalizeUpgrade'");
+        terminate(1);
         return null; // avoid javac warning
       }
+      case ROLLBACK: {
+        boolean aborted = doRollback(conf, true);
+        terminate(aborted ?
1 : 0);
+        return null; // avoid warning
+      }
       case BOOTSTRAPSTANDBY: {
         String toolArgs[] = Arrays.copyOfRange(argv, 1, argv.length);
         int rc = BootstrapStandby.run(toolArgs, conf);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
index ac0761d41f8..29b43cfa661 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
@@ -39,6 +39,7 @@
 import org.apache.hadoop.hdfs.NameNodeProxies;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.Storage;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.namenode.EditLogInputStream;
 import org.apache.hadoop.hdfs.server.namenode.FSImage;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
@@ -192,7 +193,7 @@ private int doRun() throws IOException {
     FSImage image = new FSImage(conf);
     try {
       image.getStorage().setStorageInfo(storage);
-      image.initEditLog();
+      image.initEditLog(StartupOption.REGULAR);
       assert image.getEditLog().isOpenForRead() :
         "Expected edit log to be open for read";
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index 09a4d70d6c4..9b5b2abe90d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -20,6 +20,7 @@
 import java.io.File;
 import java.io.IOException;
 import java.net.InetSocketAddress;
+import java.net.URI;
 import java.net.URL;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
@@ -46,6 +47,7 @@
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.NameNodeProxies;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
@@ -769,7 +771,24 @@ private void printHelp(String cmd) {
    */
   public int finalizeUpgrade() throws IOException {
     DistributedFileSystem dfs = getDFS();
-    dfs.finalizeUpgrade();
+
+    Configuration dfsConf = dfs.getConf();
+    URI dfsUri = dfs.getUri();
+    boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri);
+    if (isHaEnabled) {
+      // In the case of HA, run finalizeUpgrade for all NNs in this nameservice
+      String nsId = dfsUri.getHost();
+      List<ClientProtocol> namenodes =
+          HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf, nsId);
+      if (!HAUtil.isAtLeastOneActive(namenodes)) {
+        throw new IOException("Cannot finalize with no NameNode active");
+      }
+      for (ClientProtocol haNn : namenodes) {
+        haNn.finalizeUpgrade();
+      }
+    } else {
+      dfs.finalizeUpgrade();
+    }
     return 0;
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/QJournalProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/QJournalProtocol.proto
index ae963751221..cff6439c79a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/QJournalProtocol.proto
+++
b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/QJournalProtocol.proto
@@ -133,6 +133,72 @@ message IsFormattedResponseProto {
   required bool isFormatted = 1;
 }

+/**
+ * getJournalCTime()
+ */
+message GetJournalCTimeRequestProto {
+  required JournalIdProto jid = 1;
+}
+
+message GetJournalCTimeResponseProto {
+  required int64 resultCTime = 1;
+}
+
+/**
+ * doPreUpgrade()
+ */
+message DoPreUpgradeRequestProto {
+  required JournalIdProto jid = 1;
+}
+
+message DoPreUpgradeResponseProto {
+}
+
+/**
+ * doUpgrade()
+ */
+message DoUpgradeRequestProto {
+  required JournalIdProto jid = 1;
+  required StorageInfoProto sInfo = 2;
+}
+
+message DoUpgradeResponseProto {
+}
+
+/**
+ * doFinalize()
+ */
+message DoFinalizeRequestProto {
+  required JournalIdProto jid = 1;
+}
+
+message DoFinalizeResponseProto {
+}
+
+/**
+ * canRollBack()
+ */
+message CanRollBackRequestProto {
+  required JournalIdProto jid = 1;
+  required StorageInfoProto storage = 2;
+  required StorageInfoProto prevStorage = 3;
+  required int32 targetLayoutVersion = 4;
+}
+
+message CanRollBackResponseProto {
+  required bool canRollBack = 1;
+}
+
+/**
+ * doRollback()
+ */
+message DoRollbackRequestProto {
+  required JournalIdProto jid = 1;
+}
+
+message DoRollbackResponseProto {
+}
+
 /**
  * getJournalState()
  */
@@ -236,6 +302,18 @@ message AcceptRecoveryResponseProto {
 service QJournalProtocolService {
   rpc isFormatted(IsFormattedRequestProto) returns (IsFormattedResponseProto);

+  rpc getJournalCTime(GetJournalCTimeRequestProto) returns (GetJournalCTimeResponseProto);
+
+  rpc doPreUpgrade(DoPreUpgradeRequestProto) returns (DoPreUpgradeResponseProto);
+
+  rpc doUpgrade(DoUpgradeRequestProto) returns (DoUpgradeResponseProto);
+
+  rpc doFinalize(DoFinalizeRequestProto) returns (DoFinalizeResponseProto);
+
+  rpc canRollBack(CanRollBackRequestProto) returns (CanRollBackResponseProto);
+
+  rpc doRollback(DoRollbackRequestProto) returns (DoRollbackResponseProto);
+
   rpc getJournalState(GetJournalStateRequestProto) returns (GetJournalStateResponseProto);

   rpc newEpoch(NewEpochRequestProto) returns (NewEpochResponseProto);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSHighAvailabilityWithQJM.apt.vm b/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSHighAvailabilityWithQJM.apt.vm
index 2aefc3584c0..eccd705cd54 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSHighAvailabilityWithQJM.apt.vm
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSHighAvailabilityWithQJM.apt.vm
@@ -765,3 +765,49 @@ digest:hdfs-zkfcs:vlUvLnd8MlacsE80rDuu6ONESbM=:rwcda
   Even if automatic failover is configured, you may initiate a manual failover
   using the same <<<hdfs haadmin>>> command. It will perform a coordinated
   failover.
+
+* HDFS Upgrade/Finalization/Rollback with HA Enabled
+
+  When moving between versions of HDFS, sometimes the newer software can simply
+  be installed and the cluster restarted. Sometimes, however, upgrading the
+  version of HDFS you're running may require changing on-disk data. In this case,
+  one must use the HDFS Upgrade/Finalize/Rollback facility after installing the
+  new software. This process is made more complex in an HA environment, since the
+  on-disk metadata that the NN relies upon is by definition distributed, both on
+  the two HA NNs in the pair, and on the JournalNodes in the case that QJM is
+  being used for the shared edits storage. This documentation section describes
+  the procedure to use the HDFS Upgrade/Finalize/Rollback facility in an HA setup.
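+
+  As a quick orientation before the detailed steps below, the end-to-end cycle
+  looks roughly like the following sketch (a non-authoritative summary; the
+  exact preconditions for each command are described in the sections that
+  follow, and the order of operations matters):
+
+----
+# Upgrade: shut down both NNs, install the new software, then on one NN:
+hdfs namenode -upgrade
+
+# Re-sync the other NN with the upgraded metadata:
+hdfs namenode -bootstrapStandby
+
+# Finalize: with the NNs running and one of them active:
+hdfs dfsadmin -finalizeUpgrade
+
+# Or, to abandon the upgrade instead: shut both NNs down, roll back on the
+# NN that performed the upgrade, then re-run -bootstrapStandby on the other:
+hdfs namenode -rollback
+----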
+
+  <<To perform an HA upgrade>>, the operator must do the following:
+
+  [[1]] Shut down all of the NNs as normal, and install the newer software.
+
+  [[2]] Start one of the NNs with the <<<'-upgrade'>>> flag.
+
+  [[3]] On start, this NN will not enter the standby state as usual in an HA
+  setup. Rather, this NN will immediately enter the active state, perform an
+  upgrade of its local storage dirs, and also perform an upgrade of the shared
+  edit log.
+
+  [[4]] At this point the other NN in the HA pair will be out of sync with
+  the upgraded NN. In order to bring it back in sync and once again have a highly
+  available setup, you should re-bootstrap this NameNode by running the NN with
+  the <<<'-bootstrapStandby'>>> flag. It is an error to start this second NN with
+  the <<<'-upgrade'>>> flag.
+
+  Note that if at any time you want to restart the NameNodes before finalizing
+  or rolling back the upgrade, you should start the NNs as normal, i.e. without
+  any special startup flag.
+
+  <<To finalize an HA upgrade>>, the operator will use the <<<`hdfs
+  dfsadmin -finalizeUpgrade'>>> command while the NNs are running and one of them
+  is active. The active NN at the time this happens will perform the finalization
+  of the shared log, and the NN whose local storage directories contain the
+  previous FS state will delete its local state.
+
+  <<To perform a rollback>> of an upgrade, both NNs should first be shut down.
+  The operator should run the rollback command on the NN where they initiated
+  the upgrade procedure, which will perform the rollback on the local dirs there,
+  as well as on the shared log, whether on NFS or on the JNs. Afterward, this NN
+  should be started and the operator should run <<<`-bootstrapStandby'>>> on the
+  other NN to bring the two NNs in sync with this rolled-back file system state.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index ea78be1d150..3e9b614c0b9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -100,7 +100,6 @@
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.ProxyUsers;
-import org.apache.hadoop.security.ssl.SSLFactory;
 import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.ToolRunner;
@@ -147,6 +146,7 @@ public static class Builder {
     private boolean enableManagedDfsDirsRedundancy = true;
     private boolean manageDataDfsDirs = true;
     private StartupOption option = null;
+    private StartupOption dnOption = null;
     private String[] racks = null;
     private String [] hosts = null;
     private long [] simulatedCapacities = null;
@@ -241,6 +241,14 @@ public Builder startupOption(StartupOption val) {
       this.option = val;
       return this;
     }
+
+    /**
+     * Default: null
+     */
+    public Builder dnStartupOption(StartupOption val) {
+      this.dnOption = val;
+      return this;
+    }

     /**
      * Default: null
@@ -357,6 +365,7 @@ protected MiniDFSCluster(Builder builder) throws IOException {
                        builder.enableManagedDfsDirsRedundancy,
                        builder.manageDataDfsDirs,
                        builder.option,
+                       builder.dnOption,
                        builder.racks,
                        builder.hosts,
                        builder.simulatedCapacities,
@@ -406,18 +415,24 @@ public class DataNodeProperties {
   /**
    * Stores the information related to a namenode in the cluster
    */
-  static class NameNodeInfo {
+  public static class
NameNodeInfo { final NameNode nameNode; final Configuration conf; final String nameserviceId; final String nnId; + StartupOption startOpt; NameNodeInfo(NameNode nn, String nameserviceId, String nnId, - Configuration conf) { + StartupOption startOpt, Configuration conf) { this.nameNode = nn; this.nameserviceId = nameserviceId; this.nnId = nnId; + this.startOpt = startOpt; this.conf = conf; } + + public void setStartOpt(StartupOption startOpt) { + this.startOpt = startOpt; + } } /** @@ -603,8 +618,8 @@ public MiniDFSCluster(int nameNodePort, long[] simulatedCapacities) throws IOException { this.nameNodes = new NameNodeInfo[1]; // Single namenode in the cluster initMiniDFSCluster(conf, numDataNodes, StorageType.DEFAULT, format, - manageNameDfsDirs, true, manageDataDfsDirs, manageDataDfsDirs, - operation, racks, hosts, + manageNameDfsDirs, true, manageDataDfsDirs, manageDataDfsDirs, + operation, null, racks, hosts, simulatedCapacities, null, true, false, MiniDFSNNTopology.simpleSingleNN(nameNodePort, 0), true, false, false); } @@ -613,7 +628,8 @@ private void initMiniDFSCluster( Configuration conf, int numDataNodes, StorageType storageType, boolean format, boolean manageNameDfsDirs, boolean manageNameDfsSharedDirs, boolean enableManagedDfsDirsRedundancy, - boolean manageDataDfsDirs, StartupOption operation, String[] racks, + boolean manageDataDfsDirs, StartupOption startOpt, + StartupOption dnStartOpt, String[] racks, String[] hosts, long[] simulatedCapacities, String clusterId, boolean waitSafeMode, boolean setupHostsFile, MiniDFSNNTopology nnTopology, boolean checkExitOnShutdown, @@ -662,7 +678,7 @@ private void initMiniDFSCluster( createNameNodesAndSetConf( nnTopology, manageNameDfsDirs, manageNameDfsSharedDirs, enableManagedDfsDirsRedundancy, - format, operation, clusterId, conf); + format, startOpt, clusterId, conf); } catch (IOException ioe) { LOG.error("IOE creating namenodes. Permissions dump:\n" + createPermissionsDiagnosisString(data_dir)); @@ -675,13 +691,15 @@ private void initMiniDFSCluster( } } - if (operation == StartupOption.RECOVER) { + if (startOpt == StartupOption.RECOVER) { return; } // Start the DataNodes - startDataNodes(conf, numDataNodes, storageType, manageDataDfsDirs, operation, racks, - hosts, simulatedCapacities, setupHostsFile, checkDataNodeAddrConfig, checkDataNodeHostConfig); + startDataNodes(conf, numDataNodes, storageType, manageDataDfsDirs, + dnStartOpt != null ? dnStartOpt : startOpt, + racks, hosts, simulatedCapacities, setupHostsFile, + checkDataNodeAddrConfig, checkDataNodeHostConfig); waitClusterUp(); //make sure ProxyUsers uses the latest conf ProxyUsers.refreshSuperUserGroupsConfiguration(conf); @@ -759,6 +777,8 @@ private void createNameNodesAndSetConf(MiniDFSNNTopology nnTopology, if (manageNameDfsSharedDirs) { URI sharedEditsUri = getSharedEditsDir(nnCounter, nnCounter+nnIds.size()-1); conf.set(DFS_NAMENODE_SHARED_EDITS_DIR_KEY, sharedEditsUri.toString()); + // Clean out the shared edits dir completely, including all subdirectories. 
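+      // (Assumption behind this cleanup: stale shared-edits state left over
+      // from a previous test run could otherwise leak into the new cluster.)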
+        FileUtil.fullyDelete(new File(sharedEditsUri));
       }
     }
@@ -858,7 +878,8 @@ private void copyNameDirs(Collection<URI> srcDirs, Collection<URI> dstDirs,
     URI srcDir = Lists.newArrayList(srcDirs).get(0);
     FileSystem dstFS = FileSystem.getLocal(dstConf).getRaw();
     for (URI dstDir : dstDirs) {
-      Preconditions.checkArgument(!dstDir.equals(srcDir));
+      Preconditions.checkArgument(!dstDir.equals(srcDir),
+          "src and dst are the same: " + dstDir);
       File dstDirF = new File(dstDir);
       if (dstDirF.exists()) {
         if (!FileUtil.fullyDelete(dstDirF)) {
@@ -892,6 +913,14 @@ private static void initNameNodeAddress(Configuration conf,
     conf.set(key, "127.0.0.1:" + nnConf.getIpcPort());
   }

+  private static String[] createArgs(StartupOption operation) {
+    String[] args = (operation == null ||
+        operation == StartupOption.FORMAT ||
+        operation == StartupOption.REGULAR) ?
+            new String[] {} : new String[] {operation.getName()};
+    return args;
+  }
+
   private void createNameNode(int nnIndex, Configuration conf,
       int numDataNodes, boolean format, StartupOption operation,
       String clusterId, String nameserviceId,
@@ -906,10 +935,7 @@ private void createNameNode(int nnIndex, Configuration conf,
     }

     // Start the NameNode
-    String[] args = (operation == null ||
-        operation == StartupOption.FORMAT ||
-        operation == StartupOption.REGULAR) ?
-            new String[] {} : new String[] {operation.getName()};
+    String[] args = createArgs(operation);
     NameNode nn =  NameNode.createNameNode(args, conf);
     if (operation == StartupOption.RECOVER) {
       return;
@@ -931,7 +957,7 @@ private void createNameNode(int nnIndex, Configuration conf,
     DFSUtil.setGenericConf(conf, nameserviceId, nnId,
         DFS_NAMENODE_HTTP_ADDRESS_KEY);
     nameNodes[nnIndex] = new NameNodeInfo(nn, nameserviceId, nnId,
-        new Configuration(conf));
+        operation, new Configuration(conf));
   }

   /**
@@ -1499,7 +1525,7 @@ public synchronized void shutdownNameNode(int nnIndex) {
       nn.stop();
       nn.join();
       Configuration conf = nameNodes[nnIndex].conf;
-      nameNodes[nnIndex] = new NameNodeInfo(null, null, null, conf);
+      nameNodes[nnIndex] = new NameNodeInfo(null, null, null, null, conf);
     }
   }

@@ -1545,10 +1571,12 @@ public synchronized void restartNameNode(int nnIndex, boolean waitActive)
       throws IOException {
     String nameserviceId = nameNodes[nnIndex].nameserviceId;
     String nnId = nameNodes[nnIndex].nnId;
+    StartupOption startOpt = nameNodes[nnIndex].startOpt;
     Configuration conf = nameNodes[nnIndex].conf;
     shutdownNameNode(nnIndex);
-    NameNode nn = NameNode.createNameNode(new String[] {}, conf);
-    nameNodes[nnIndex] = new NameNodeInfo(nn, nameserviceId, nnId, conf);
+    NameNode nn = NameNode.createNameNode(createArgs(startOpt), conf);
+    nameNodes[nnIndex] = new NameNodeInfo(nn, nameserviceId, nnId, startOpt,
+        conf);
     if (waitActive) {
       waitClusterUp();
       LOG.info("Restarted the namenode");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java
index 771504186c6..f88edcd9106 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java
@@ -34,6 +34,7 @@
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.util.StringUtils;
 import org.junit.After;
import org.junit.Test; @@ -97,10 +98,10 @@ void checkResult(NodeType nodeType, String[] baseDirs) throws Exception { * Attempts to start a NameNode with the given operation. Starting * the NameNode should throw an exception. */ - void startNameNodeShouldFail(StartupOption operation, String searchString) { + void startNameNodeShouldFail(String searchString) { try { + NameNode.doRollback(conf, false); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0) - .startupOption(operation) .format(false) .manageDataDfsDirs(false) .manageNameDfsDirs(false) @@ -149,24 +150,19 @@ public void testRollback() throws Exception { log("Normal NameNode rollback", numDirs); UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current"); UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous"); - cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0) - .format(false) - .manageDataDfsDirs(false) - .manageNameDfsDirs(false) - .startupOption(StartupOption.ROLLBACK) - .build(); + NameNode.doRollback(conf, false); checkResult(NAME_NODE, nameNodeDirs); - cluster.shutdown(); UpgradeUtilities.createEmptyDirs(nameNodeDirs); log("Normal DataNode rollback", numDirs); UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current"); UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous"); + NameNode.doRollback(conf, false); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0) .format(false) .manageDataDfsDirs(false) .manageNameDfsDirs(false) - .startupOption(StartupOption.ROLLBACK) + .dnStartupOption(StartupOption.ROLLBACK) .build(); UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current"); UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "previous"); @@ -179,11 +175,12 @@ public void testRollback() throws Exception { log("Normal BlockPool rollback", numDirs); UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current"); UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous"); + NameNode.doRollback(conf, false); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0) .format(false) .manageDataDfsDirs(false) .manageNameDfsDirs(false) - .startupOption(StartupOption.ROLLBACK) + .dnStartupOption(StartupOption.ROLLBACK) .build(); UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current"); UpgradeUtilities.createBlockPoolStorageDirs(dataNodeDirs, "current", @@ -217,10 +214,10 @@ public void testRollback() throws Exception { cluster.shutdown(); UpgradeUtilities.createEmptyDirs(nameNodeDirs); UpgradeUtilities.createEmptyDirs(dataNodeDirs); - + log("NameNode rollback without existing previous dir", numDirs); UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current"); - startNameNodeShouldFail(StartupOption.ROLLBACK, + startNameNodeShouldFail( "None of the storage directories contain previous fs state"); UpgradeUtilities.createEmptyDirs(nameNodeDirs); @@ -237,15 +234,16 @@ public void testRollback() throws Exception { cluster.shutdown(); UpgradeUtilities.createEmptyDirs(nameNodeDirs); UpgradeUtilities.createEmptyDirs(dataNodeDirs); - + log("DataNode rollback with future stored layout version in previous", numDirs); UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current"); UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous"); + NameNode.doRollback(conf, false); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0) .format(false) .manageDataDfsDirs(false) .manageNameDfsDirs(false) - .startupOption(StartupOption.ROLLBACK) + .dnStartupOption(StartupOption.ROLLBACK) .build(); 
UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current"); baseDirs = UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "previous"); @@ -266,11 +264,12 @@ public void testRollback() throws Exception { log("DataNode rollback with newer fsscTime in previous", numDirs); UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current"); UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous"); + NameNode.doRollback(conf, false); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0) .format(false) .manageDataDfsDirs(false) .manageNameDfsDirs(false) - .startupOption(StartupOption.ROLLBACK) + .dnStartupOption(StartupOption.ROLLBACK) .build(); UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current"); @@ -287,21 +286,19 @@ public void testRollback() throws Exception { cluster.shutdown(); UpgradeUtilities.createEmptyDirs(nameNodeDirs); UpgradeUtilities.createEmptyDirs(dataNodeDirs); - + log("NameNode rollback with no edits file", numDirs); UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current"); baseDirs = UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous"); deleteMatchingFiles(baseDirs, "edits.*"); - startNameNodeShouldFail(StartupOption.ROLLBACK, - "Gap in transactions"); + startNameNodeShouldFail("Gap in transactions"); UpgradeUtilities.createEmptyDirs(nameNodeDirs); log("NameNode rollback with no image file", numDirs); UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current"); baseDirs = UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous"); deleteMatchingFiles(baseDirs, "fsimage_.*"); - startNameNodeShouldFail(StartupOption.ROLLBACK, - "No valid image files found"); + startNameNodeShouldFail("No valid image files found"); UpgradeUtilities.createEmptyDirs(nameNodeDirs); log("NameNode rollback with corrupt version file", numDirs); @@ -313,8 +310,7 @@ public void testRollback() throws Exception { "layoutVersion".getBytes(Charsets.UTF_8), "xxxxxxxxxxxxx".getBytes(Charsets.UTF_8)); } - startNameNodeShouldFail(StartupOption.ROLLBACK, - "file VERSION has layoutVersion missing"); + startNameNodeShouldFail("file VERSION has layoutVersion missing"); UpgradeUtilities.createEmptyDirs(nameNodeDirs); @@ -328,8 +324,7 @@ public void testRollback() throws Exception { UpgradeUtilities.createNameNodeVersionFile(conf, baseDirs, storageInfo, UpgradeUtilities.getCurrentBlockPoolID(cluster)); - startNameNodeShouldFail(StartupOption.ROLLBACK, - "Cannot rollback to storage version 1 using this version"); + startNameNodeShouldFail("Cannot rollback to storage version 1 using this version"); UpgradeUtilities.createEmptyDirs(nameNodeDirs); } // end numDir loop } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java index 3a0134ed393..cf72e7920e0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java @@ -62,6 +62,7 @@ import org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Shell; import org.junit.Assume; import org.junit.Before; @@ -764,4 +765,37 @@ public void testRelativeTimeConversion() throws Exception { assertEquals(4*24*60*60*1000l, 
DFSUtil.parseRelativeTime("4d")); assertEquals(999*24*60*60*1000l, DFSUtil.parseRelativeTime("999d")); } + + @Test + public void testAssertAllResultsEqual() { + checkAllResults(new Long[]{}, true); + checkAllResults(new Long[]{1l}, true); + checkAllResults(new Long[]{1l, 1l}, true); + checkAllResults(new Long[]{1l, 1l, 1l}, true); + checkAllResults(new Long[]{new Long(1), new Long(1)}, true); + checkAllResults(new Long[]{null, null, null}, true); + + checkAllResults(new Long[]{1l, 2l}, false); + checkAllResults(new Long[]{2l, 1l}, false); + checkAllResults(new Long[]{1l, 2l, 1l}, false); + checkAllResults(new Long[]{2l, 1l, 1l}, false); + checkAllResults(new Long[]{1l, 1l, 2l}, false); + checkAllResults(new Long[]{1l, null}, false); + checkAllResults(new Long[]{null, 1l}, false); + checkAllResults(new Long[]{1l, null, 1l}, false); + } + + private static void checkAllResults(Long[] toCheck, boolean shouldSucceed) { + if (shouldSucceed) { + DFSUtil.assertAllResultsEqual(Arrays.asList(toCheck)); + } else { + try { + DFSUtil.assertAllResultsEqual(Arrays.asList(toCheck)); + fail("Should not have succeeded with input: " + + Arrays.toString(toCheck)); + } catch (AssertionError ae) { + GenericTestUtils.assertExceptionContains("Not all elements match", ae); + } + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java index fab83b46357..a4f67e5f71c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java @@ -167,8 +167,16 @@ public File getStorageDir(int idx) { return new File(baseDir, "journalnode-" + idx).getAbsoluteFile(); } + public File getJournalDir(int idx, String jid) { + return new File(getStorageDir(idx), jid); + } + public File getCurrentDir(int idx, String jid) { - return new File(new File(getStorageDir(idx), jid), "current"); + return new File(getJournalDir(idx, jid), "current"); + } + + public File getPreviousDir(int idx, String jid) { + return new File(getJournalDir(idx, jid), "previous"); } public JournalNode getJournalNode(int i) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java index e5b636c562f..e782da2ff19 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.MiniDFSNNTopology; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider; @@ -47,6 +48,7 @@ public class MiniQJMHACluster { public static class Builder { private final Configuration conf; + private StartupOption startOpt = null; private final MiniDFSCluster.Builder dfsBuilder; public Builder(Configuration conf) { @@ -61,6 +63,10 @@ public MiniDFSCluster.Builder getDfsBuilder() { public MiniQJMHACluster build() throws IOException { return new MiniQJMHACluster(this); } + + public 
void startupOption(StartupOption startOpt) { + this.startOpt = startOpt; + } } public static MiniDFSNNTopology createDefaultTopology() { @@ -95,6 +101,9 @@ private MiniQJMHACluster(Builder builder) throws IOException { Configuration confNN0 = cluster.getConfiguration(0); NameNode.initializeSharedEdits(confNN0, true); + cluster.getNameNodeInfos()[0].setStartOpt(builder.startOpt); + cluster.getNameNodeInfos()[1].setStartOpt(builder.startOpt); + // restart the cluster cluster.restartNameNodes(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGenericJournalConf.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGenericJournalConf.java index 43bd7a4153a..28d1dac8209 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGenericJournalConf.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGenericJournalConf.java @@ -27,6 +27,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.server.common.Storage; +import org.apache.hadoop.hdfs.server.common.StorageInfo; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.junit.Test; @@ -191,6 +193,29 @@ public boolean hasSomeData() throws IOException { shouldPromptCalled = true; return false; } + + @Override + public void doPreUpgrade() throws IOException {} + + @Override + public void doUpgrade(Storage storage) throws IOException {} + + @Override + public void doFinalize() throws IOException {} + + @Override + public boolean canRollBack(StorageInfo storage, StorageInfo prevStorage, int targetLayoutVersion) + throws IOException { + return false; + } + + @Override + public void doRollback() throws IOException {} + + @Override + public long getJournalCTime() throws IOException { + return -1; + } } public static class BadConstructorJournalManager extends DummyJournalManager { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java index 502c9de4096..7abc5024a9b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java @@ -91,7 +91,7 @@ public void testSuccessfulBaseCase() throws Exception { fail("Did not throw"); } catch (IOException ioe) { GenericTestUtils.assertExceptionContains( - "Cannot start an HA namenode with name dirs that need recovery", + "storage directory does not exist or is not accessible", ioe); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSUpgradeWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSUpgradeWithHA.java index 4f213b24055..c3a86741caa 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSUpgradeWithHA.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSUpgradeWithHA.java @@ -1,89 +1,506 @@ /** -* Licensed to the Apache Software Foundation (ASF) under one -* or more contributor license agreements. 
See the NOTICE file -* distributed with this work for additional information -* regarding copyright ownership. The ASF licenses this file -* to you under the Apache License, Version 2.0 (the -* "License"); you may not use this file except in compliance -* with the License. You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ package org.apache.hadoop.hdfs.server.namenode.ha; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import java.io.File; import java.io.IOException; import java.net.URI; +import java.net.URISyntaxException; +import java.util.Collection; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.MiniDFSNNTopology; +import org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster; +import org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster.Builder; +import org.apache.hadoop.hdfs.qjournal.server.Journal; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.common.Storage; +import org.apache.hadoop.hdfs.server.namenode.NameNode; +import org.apache.hadoop.hdfs.tools.DFSAdmin; +import org.apache.hadoop.hdfs.util.PersistentLongFile; import org.apache.hadoop.test.GenericTestUtils; +import org.junit.Before; import org.junit.Test; -import com.google.common.collect.Lists; +import com.google.common.base.Joiner; /** * Tests for upgrading with HA enabled. 
*/ public class TestDFSUpgradeWithHA { - - private static final Log LOG = LogFactory.getLog(TestDFSUpgradeWithHA.class); + private static final Log LOG = LogFactory.getLog(TestDFSUpgradeWithHA.class); + + private Configuration conf; + + @Before + public void createConfiguration() { + conf = new HdfsConfiguration(); + // Turn off persistent IPC, so that the DFSClient can survive NN restart + conf.setInt( + CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, + 0); + } + + private static void assertCTimesEqual(MiniDFSCluster cluster) { + long nn1CTime = cluster.getNamesystem(0).getFSImage().getStorage().getCTime(); + long nn2CTime = cluster.getNamesystem(1).getFSImage().getStorage().getCTime(); + assertEquals(nn1CTime, nn2CTime); + } + + private static void checkClusterPreviousDirExistence(MiniDFSCluster cluster, + boolean shouldExist) { + for (int i = 0; i < 2; i++) { + checkNnPreviousDirExistence(cluster, i, shouldExist); + } + } + + private static void checkNnPreviousDirExistence(MiniDFSCluster cluster, + int index, boolean shouldExist) { + Collection nameDirs = cluster.getNameDirs(index); + for (URI nnDir : nameDirs) { + checkPreviousDirExistence(new File(nnDir), shouldExist); + } + } + + private static void checkJnPreviousDirExistence(MiniQJMHACluster jnCluster, + boolean shouldExist) throws IOException { + for (int i = 0; i < 3; i++) { + checkPreviousDirExistence( + jnCluster.getJournalCluster().getJournalDir(i, "ns1"), shouldExist); + } + if (shouldExist) { + assertEpochFilesCopied(jnCluster); + } + } + + private static void assertEpochFilesCopied(MiniQJMHACluster jnCluster) + throws IOException { + for (int i = 0; i < 3; i++) { + File journalDir = jnCluster.getJournalCluster().getJournalDir(i, "ns1"); + File currDir = new File(journalDir, "current"); + File prevDir = new File(journalDir, "previous"); + for (String fileName : new String[]{ Journal.LAST_PROMISED_FILENAME, + Journal.LAST_WRITER_EPOCH }) { + File prevFile = new File(prevDir, fileName); + // Possible the prev file doesn't exist, e.g. if there has never been a + // writer before the upgrade. + if (prevFile.exists()) { + PersistentLongFile prevLongFile = new PersistentLongFile(prevFile, -10); + PersistentLongFile currLongFile = new PersistentLongFile(new File(currDir, + fileName), -11); + assertTrue("Value in " + fileName + " has decreased on upgrade in " + + journalDir, prevLongFile.get() <= currLongFile.get()); + } + } + } + } + + private static void checkPreviousDirExistence(File rootDir, + boolean shouldExist) { + File previousDir = new File(rootDir, "previous"); + if (shouldExist) { + assertTrue(previousDir + " does not exist", previousDir.exists()); + } else { + assertFalse(previousDir + " does exist", previousDir.exists()); + } + } + + private void runFinalizeCommand(MiniDFSCluster cluster) + throws IOException { + HATestUtil.setFailoverConfigurations(cluster, conf); + new DFSAdmin(conf).finalizeUpgrade(); + } + /** - * Make sure that an HA NN refuses to start if given an upgrade-related - * startup option. + * Ensure that an admin cannot finalize an HA upgrade without at least one NN + * being active. 
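 *
 * (Editor's aside) runFinalizeCommand() above finalizes through the stock
 * admin tool after wiring the client-side failover configuration:
 *
 * <pre>{@code
 * HATestUtil.setFailoverConfigurations(cluster, conf);
 * new DFSAdmin(conf).finalizeUpgrade();
 * }</pre>
 *
 * As this test asserts, that call fails with "Cannot finalize with no
 * NameNode active" while both NameNodes are in the standby state.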
*/ @Test - public void testStartingWithUpgradeOptionsFails() throws IOException { - for (StartupOption startOpt : Lists.newArrayList(new StartupOption[] { - StartupOption.UPGRADE, StartupOption.FINALIZE, - StartupOption.ROLLBACK })) { - MiniDFSCluster cluster = null; + public void testCannotFinalizeIfNoActive() throws IOException, + URISyntaxException { + MiniDFSCluster cluster = null; + FileSystem fs = null; + try { + cluster = new MiniDFSCluster.Builder(conf) + .nnTopology(MiniDFSNNTopology.simpleHATopology()) + .numDataNodes(0) + .build(); + + File sharedDir = new File(cluster.getSharedEditsDir(0, 1)); + + // No upgrade is in progress at the moment. + checkClusterPreviousDirExistence(cluster, false); + assertCTimesEqual(cluster); + checkPreviousDirExistence(sharedDir, false); + + // Transition NN0 to active and do some FS ops. + cluster.transitionToActive(0); + fs = HATestUtil.configureFailoverFs(cluster, conf); + assertTrue(fs.mkdirs(new Path("/foo1"))); + + // Do the upgrade. Shut down NN1 and then restart NN0 with the upgrade + // flag. + cluster.shutdownNameNode(1); + cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE); + cluster.restartNameNode(0, false); + + checkNnPreviousDirExistence(cluster, 0, true); + checkNnPreviousDirExistence(cluster, 1, false); + checkPreviousDirExistence(sharedDir, true); + + // NN0 should come up in the active state when given the -upgrade option, + // so no need to transition it to active. + assertTrue(fs.mkdirs(new Path("/foo2"))); + + // Restart NN0 without the -upgrade flag, to make sure that works. + cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.REGULAR); + cluster.restartNameNode(0, false); + + // Make sure we can still do FS ops after upgrading. + cluster.transitionToActive(0); + assertTrue(fs.mkdirs(new Path("/foo3"))); + + // Now bootstrap the standby with the upgraded info. + int rc = BootstrapStandby.run( + new String[]{"-force"}, + cluster.getConfiguration(1)); + assertEquals(0, rc); + + // Now restart NN1 and make sure that we can do ops against that as well. + cluster.restartNameNode(1); + cluster.transitionToStandby(0); + cluster.transitionToActive(1); + assertTrue(fs.mkdirs(new Path("/foo4"))); + + assertCTimesEqual(cluster); + + // Now there's no active NN. + cluster.transitionToStandby(1); + try { - cluster = new MiniDFSCluster.Builder(new Configuration()) - .nnTopology(MiniDFSNNTopology.simpleHATopology()) - .startupOption(startOpt) - .numDataNodes(0) - .build(); - fail("Should not have been able to start an HA NN in upgrade mode"); - } catch (IllegalArgumentException iae) { + runFinalizeCommand(cluster); + fail("Should not have been able to finalize upgrade with no NN active"); + } catch (IOException ioe) { GenericTestUtils.assertExceptionContains( - "Cannot perform DFS upgrade with HA enabled.", iae); - LOG.info("Got expected exception", iae); - } finally { - if (cluster != null) { - cluster.shutdown(); - } + "Cannot finalize with no NameNode active", ioe); + } + } finally { + if (fs != null) { + fs.close(); + } + if (cluster != null) { + cluster.shutdown(); + } + } + } + + /** + * Make sure that an HA NN with NFS-based HA can successfully start and + * upgrade. 
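 *
 * (Editor's aside) The upgrade choreography this and the following tests
 * repeat, condensed into one sketch (assumes a running MiniDFSCluster with
 * an HA topology):
 *
 * <pre>{@code
 * cluster.shutdownNameNode(1);                  // stop the standby
 * cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
 * cluster.restartNameNode(0, false);            // NN0 upgrades, comes up active
 * cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.REGULAR);
 * cluster.restartNameNode(0, false);            // normal restart on upgraded state
 * BootstrapStandby.run(new String[] {"-force"}, cluster.getConfiguration(1));
 * cluster.restartNameNode(1);                   // standby rejoins, upgraded
 * }</pre>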
+ */ + @Test + public void testNfsUpgrade() throws IOException, URISyntaxException { + MiniDFSCluster cluster = null; + FileSystem fs = null; + try { + cluster = new MiniDFSCluster.Builder(conf) + .nnTopology(MiniDFSNNTopology.simpleHATopology()) + .numDataNodes(0) + .build(); + + File sharedDir = new File(cluster.getSharedEditsDir(0, 1)); + + // No upgrade is in progress at the moment. + checkClusterPreviousDirExistence(cluster, false); + assertCTimesEqual(cluster); + checkPreviousDirExistence(sharedDir, false); + + // Transition NN0 to active and do some FS ops. + cluster.transitionToActive(0); + fs = HATestUtil.configureFailoverFs(cluster, conf); + assertTrue(fs.mkdirs(new Path("/foo1"))); + + // Do the upgrade. Shut down NN1 and then restart NN0 with the upgrade + // flag. + cluster.shutdownNameNode(1); + cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE); + cluster.restartNameNode(0, false); + + checkNnPreviousDirExistence(cluster, 0, true); + checkNnPreviousDirExistence(cluster, 1, false); + checkPreviousDirExistence(sharedDir, true); + + // NN0 should come up in the active state when given the -upgrade option, + // so no need to transition it to active. + assertTrue(fs.mkdirs(new Path("/foo2"))); + + // Restart NN0 without the -upgrade flag, to make sure that works. + cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.REGULAR); + cluster.restartNameNode(0, false); + + // Make sure we can still do FS ops after upgrading. + cluster.transitionToActive(0); + assertTrue(fs.mkdirs(new Path("/foo3"))); + + // Now bootstrap the standby with the upgraded info. + int rc = BootstrapStandby.run( + new String[]{"-force"}, + cluster.getConfiguration(1)); + assertEquals(0, rc); + + // Now restart NN1 and make sure that we can do ops against that as well. + cluster.restartNameNode(1); + cluster.transitionToStandby(0); + cluster.transitionToActive(1); + assertTrue(fs.mkdirs(new Path("/foo4"))); + + assertCTimesEqual(cluster); + } finally { + if (fs != null) { + fs.close(); + } + if (cluster != null) { + cluster.shutdown(); + } + } + } + + /** + * Make sure that an HA NN can successfully upgrade when configured using + * JournalNodes. + */ + @Test + public void testUpgradeWithJournalNodes() throws IOException, + URISyntaxException { + MiniQJMHACluster qjCluster = null; + FileSystem fs = null; + try { + Builder builder = new MiniQJMHACluster.Builder(conf); + builder.getDfsBuilder() + .numDataNodes(0); + qjCluster = builder.build(); + + MiniDFSCluster cluster = qjCluster.getDfsCluster(); + + // No upgrade is in progress at the moment. + checkJnPreviousDirExistence(qjCluster, false); + checkClusterPreviousDirExistence(cluster, false); + assertCTimesEqual(cluster); + + // Transition NN0 to active and do some FS ops. + cluster.transitionToActive(0); + fs = HATestUtil.configureFailoverFs(cluster, conf); + assertTrue(fs.mkdirs(new Path("/foo1"))); + + // Do the upgrade. Shut down NN1 and then restart NN0 with the upgrade + // flag. + cluster.shutdownNameNode(1); + cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE); + cluster.restartNameNode(0, false); + + checkNnPreviousDirExistence(cluster, 0, true); + checkNnPreviousDirExistence(cluster, 1, false); + checkJnPreviousDirExistence(qjCluster, true); + + // NN0 should come up in the active state when given the -upgrade option, + // so no need to transition it to active. + assertTrue(fs.mkdirs(new Path("/foo2"))); + + // Restart NN0 without the -upgrade flag, to make sure that works. 
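      // (Editor's note) setStartOpt() pins the option used by subsequent
      // restarts, so the tests reset it to REGULAR here; left as UPGRADE,
      // this restart would attempt another upgrade. A hypothetical helper
      // capturing the pattern repeated throughout this class:
      //
      //   private static void restartWith(MiniDFSCluster cluster, int nn,
      //       StartupOption opt) throws IOException {
      //     cluster.getNameNodeInfos()[nn].setStartOpt(opt);
      //     cluster.restartNameNode(nn, false);
      //   }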
+ cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.REGULAR); + cluster.restartNameNode(0, false); + + // Make sure we can still do FS ops after upgrading. + cluster.transitionToActive(0); + assertTrue(fs.mkdirs(new Path("/foo3"))); + + // Now bootstrap the standby with the upgraded info. + int rc = BootstrapStandby.run( + new String[]{"-force"}, + cluster.getConfiguration(1)); + assertEquals(0, rc); + + // Now restart NN1 and make sure that we can do ops against that as well. + cluster.restartNameNode(1); + cluster.transitionToStandby(0); + cluster.transitionToActive(1); + assertTrue(fs.mkdirs(new Path("/foo4"))); + + assertCTimesEqual(cluster); + } finally { + if (fs != null) { + fs.close(); + } + if (qjCluster != null) { + qjCluster.shutdown(); + } + } + } + + @Test + public void testFinalizeWithJournalNodes() throws IOException, + URISyntaxException { + MiniQJMHACluster qjCluster = null; + FileSystem fs = null; + try { + Builder builder = new MiniQJMHACluster.Builder(conf); + builder.getDfsBuilder() + .numDataNodes(0); + qjCluster = builder.build(); + + MiniDFSCluster cluster = qjCluster.getDfsCluster(); + + // No upgrade is in progress at the moment. + checkJnPreviousDirExistence(qjCluster, false); + checkClusterPreviousDirExistence(cluster, false); + assertCTimesEqual(cluster); + + // Transition NN0 to active and do some FS ops. + cluster.transitionToActive(0); + fs = HATestUtil.configureFailoverFs(cluster, conf); + assertTrue(fs.mkdirs(new Path("/foo1"))); + + // Do the upgrade. Shut down NN1 and then restart NN0 with the upgrade + // flag. + cluster.shutdownNameNode(1); + cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE); + cluster.restartNameNode(0, false); + + assertTrue(fs.mkdirs(new Path("/foo2"))); + + checkNnPreviousDirExistence(cluster, 0, true); + checkNnPreviousDirExistence(cluster, 1, false); + checkJnPreviousDirExistence(qjCluster, true); + + // Now bootstrap the standby with the upgraded info. + int rc = BootstrapStandby.run( + new String[]{"-force"}, + cluster.getConfiguration(1)); + assertEquals(0, rc); + + cluster.restartNameNode(1); + + runFinalizeCommand(cluster); + + checkClusterPreviousDirExistence(cluster, false); + checkJnPreviousDirExistence(qjCluster, false); + assertCTimesEqual(cluster); + } finally { + if (fs != null) { + fs.close(); + } + if (qjCluster != null) { + qjCluster.shutdown(); } } } /** - * Make sure that an HA NN won't start if a previous upgrade was in progress. + * Make sure that even if the NN which initiated the upgrade is in the standby + * state that we're allowed to finalize. */ @Test - public void testStartingWithUpgradeInProgressFails() throws Exception { + public void testFinalizeFromSecondNameNodeWithJournalNodes() + throws IOException, URISyntaxException { + MiniQJMHACluster qjCluster = null; + FileSystem fs = null; + try { + Builder builder = new MiniQJMHACluster.Builder(conf); + builder.getDfsBuilder() + .numDataNodes(0); + qjCluster = builder.build(); + + MiniDFSCluster cluster = qjCluster.getDfsCluster(); + + // No upgrade is in progress at the moment. + checkJnPreviousDirExistence(qjCluster, false); + checkClusterPreviousDirExistence(cluster, false); + assertCTimesEqual(cluster); + + // Transition NN0 to active and do some FS ops. + cluster.transitionToActive(0); + fs = HATestUtil.configureFailoverFs(cluster, conf); + assertTrue(fs.mkdirs(new Path("/foo1"))); + + // Do the upgrade. Shut down NN1 and then restart NN0 with the upgrade + // flag. 
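      // (Editor's note) The standby is shut down first because only one
      // NameNode may upgrade the shared edits log at a time; starting the
      // second NN with -upgrade afterwards fails, as exercised by
      // testCannotUpgradeSecondNameNode below:
      //
      //   cluster.getNameNodeInfos()[1].setStartOpt(StartupOption.UPGRADE);
      //   cluster.restartNameNode(1, false);
      //   // expected IOException: "It looks like the shared log is already
      //   // being upgraded"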
+ cluster.shutdownNameNode(1); + cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE); + cluster.restartNameNode(0, false); + + checkNnPreviousDirExistence(cluster, 0, true); + checkNnPreviousDirExistence(cluster, 1, false); + checkJnPreviousDirExistence(qjCluster, true); + + // Now bootstrap the standby with the upgraded info. + int rc = BootstrapStandby.run( + new String[]{"-force"}, + cluster.getConfiguration(1)); + assertEquals(0, rc); + + cluster.restartNameNode(1); + + // Make the second NN (not the one that initiated the upgrade) active when + // the finalize command is run. + cluster.transitionToStandby(0); + cluster.transitionToActive(1); + + runFinalizeCommand(cluster); + + checkClusterPreviousDirExistence(cluster, false); + checkJnPreviousDirExistence(qjCluster, false); + assertCTimesEqual(cluster); + } finally { + if (fs != null) { + fs.close(); + } + if (qjCluster != null) { + qjCluster.shutdown(); + } + } + } + + /** + * Make sure that an HA NN will start if a previous upgrade was in progress. + */ + @Test + public void testStartingWithUpgradeInProgressSucceeds() throws Exception { MiniDFSCluster cluster = null; try { - cluster = new MiniDFSCluster.Builder(new Configuration()) + cluster = new MiniDFSCluster.Builder(conf) .nnTopology(MiniDFSNNTopology.simpleHATopology()) .numDataNodes(0) .build(); - + // Simulate an upgrade having started. for (int i = 0; i < 2; i++) { for (URI uri : cluster.getNameDirs(i)) { @@ -92,18 +509,226 @@ public void testStartingWithUpgradeInProgressFails() throws Exception { assertTrue(prevTmp.mkdirs()); } } - + cluster.restartNameNodes(); - fail("Should not have been able to start an HA NN with an in-progress upgrade"); - } catch (IOException ioe) { - GenericTestUtils.assertExceptionContains( - "Cannot start an HA namenode with name dirs that need recovery.", - ioe); - LOG.info("Got expected exception", ioe); } finally { if (cluster != null) { cluster.shutdown(); } } } + + /** + * Test rollback with NFS shared dir. + */ + @Test + public void testRollbackWithNfs() throws Exception { + MiniDFSCluster cluster = null; + FileSystem fs = null; + try { + cluster = new MiniDFSCluster.Builder(conf) + .nnTopology(MiniDFSNNTopology.simpleHATopology()) + .numDataNodes(0) + .build(); + + File sharedDir = new File(cluster.getSharedEditsDir(0, 1)); + + // No upgrade is in progress at the moment. + checkClusterPreviousDirExistence(cluster, false); + assertCTimesEqual(cluster); + checkPreviousDirExistence(sharedDir, false); + + // Transition NN0 to active and do some FS ops. + cluster.transitionToActive(0); + fs = HATestUtil.configureFailoverFs(cluster, conf); + assertTrue(fs.mkdirs(new Path("/foo1"))); + + // Do the upgrade. Shut down NN1 and then restart NN0 with the upgrade + // flag. + cluster.shutdownNameNode(1); + cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE); + cluster.restartNameNode(0, false); + + checkNnPreviousDirExistence(cluster, 0, true); + checkNnPreviousDirExistence(cluster, 1, false); + checkPreviousDirExistence(sharedDir, true); + + // NN0 should come up in the active state when given the -upgrade option, + // so no need to transition it to active. + assertTrue(fs.mkdirs(new Path("/foo2"))); + + // Now bootstrap the standby with the upgraded info. 
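      // (Editor's note) Bootstrapping copies the upgraded namespace into
      // NN1's name dirs; "-force" is passed, presumably to overwrite the
      // stale pre-upgrade state rather than prompt. The run() call below
      // returns 0 on success, which the assertEquals that follows checks:
      //
      //   int rc = BootstrapStandby.run(new String[] { "-force" },
      //       cluster.getConfiguration(1));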
+ int rc = BootstrapStandby.run( + new String[]{"-force"}, + cluster.getConfiguration(1)); + assertEquals(0, rc); + + cluster.restartNameNode(1); + + checkNnPreviousDirExistence(cluster, 0, true); + checkNnPreviousDirExistence(cluster, 1, false); + checkPreviousDirExistence(sharedDir, true); + assertCTimesEqual(cluster); + + // Now shut down the cluster and do the rollback. + Collection nn1NameDirs = cluster.getNameDirs(0); + cluster.shutdown(); + + conf.setStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, Joiner.on(",").join(nn1NameDirs)); + NameNode.doRollback(conf, false); + + // The rollback operation should have rolled back the first NN's local + // dirs, and the shared dir, but not the other NN's dirs. Those have to be + // done by bootstrapping the standby. + checkNnPreviousDirExistence(cluster, 0, false); + checkPreviousDirExistence(sharedDir, false); + } finally { + if (fs != null) { + fs.close(); + } + if (cluster != null) { + cluster.shutdown(); + } + } + } + + @Test + public void testRollbackWithJournalNodes() throws IOException, + URISyntaxException { + MiniQJMHACluster qjCluster = null; + FileSystem fs = null; + try { + Builder builder = new MiniQJMHACluster.Builder(conf); + builder.getDfsBuilder() + .numDataNodes(0); + qjCluster = builder.build(); + + MiniDFSCluster cluster = qjCluster.getDfsCluster(); + + // No upgrade is in progress at the moment. + checkClusterPreviousDirExistence(cluster, false); + assertCTimesEqual(cluster); + checkJnPreviousDirExistence(qjCluster, false); + + // Transition NN0 to active and do some FS ops. + cluster.transitionToActive(0); + fs = HATestUtil.configureFailoverFs(cluster, conf); + assertTrue(fs.mkdirs(new Path("/foo1"))); + + // Do the upgrade. Shut down NN1 and then restart NN0 with the upgrade + // flag. + cluster.shutdownNameNode(1); + cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE); + cluster.restartNameNode(0, false); + + checkNnPreviousDirExistence(cluster, 0, true); + checkNnPreviousDirExistence(cluster, 1, false); + checkJnPreviousDirExistence(qjCluster, true); + + // NN0 should come up in the active state when given the -upgrade option, + // so no need to transition it to active. + assertTrue(fs.mkdirs(new Path("/foo2"))); + + // Now bootstrap the standby with the upgraded info. + int rc = BootstrapStandby.run( + new String[]{"-force"}, + cluster.getConfiguration(1)); + assertEquals(0, rc); + + cluster.restartNameNode(1); + + checkNnPreviousDirExistence(cluster, 0, true); + checkNnPreviousDirExistence(cluster, 1, false); + checkJnPreviousDirExistence(qjCluster, true); + assertCTimesEqual(cluster); + + // Shut down the NNs, but deliberately leave the JNs up and running. + Collection nn1NameDirs = cluster.getNameDirs(0); + cluster.shutdown(); + + conf.setStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, Joiner.on(",").join(nn1NameDirs)); + NameNode.doRollback(conf, false); + + // The rollback operation should have rolled back the first NN's local + // dirs, and the shared dir, but not the other NN's dirs. Those have to be + // done by bootstrapping the standby. + checkNnPreviousDirExistence(cluster, 0, false); + checkJnPreviousDirExistence(qjCluster, false); + } finally { + if (fs != null) { + fs.close(); + } + if (qjCluster != null) { + qjCluster.shutdown(); + } + } + } + + /** + * Make sure that starting a second NN with the -upgrade flag fails if the + * other NN has already done that. 
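 *
 * (Editor's aside) The two rollback tests above drive rollback offline: the
 * cluster is shut down, the configuration is pointed at the first NN's name
 * dirs, and the NameNode rollback entry point is invoked directly:
 *
 * <pre>{@code
 * conf.setStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
 *     Joiner.on(",").join(nn1NameDirs));
 * NameNode.doRollback(conf, false);
 * }</pre>
 *
 * This rolls back that NN's local dirs and the shared storage, but not the
 * other NN's dirs, which must be re-synced with bootstrapStandby instead.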
+ */ + @Test + public void testCannotUpgradeSecondNameNode() throws IOException, + URISyntaxException { + MiniDFSCluster cluster = null; + FileSystem fs = null; + try { + cluster = new MiniDFSCluster.Builder(conf) + .nnTopology(MiniDFSNNTopology.simpleHATopology()) + .numDataNodes(0) + .build(); + + File sharedDir = new File(cluster.getSharedEditsDir(0, 1)); + + // No upgrade is in progress at the moment. + checkClusterPreviousDirExistence(cluster, false); + assertCTimesEqual(cluster); + checkPreviousDirExistence(sharedDir, false); + + // Transition NN0 to active and do some FS ops. + cluster.transitionToActive(0); + fs = HATestUtil.configureFailoverFs(cluster, conf); + assertTrue(fs.mkdirs(new Path("/foo1"))); + + // Do the upgrade. Shut down NN1 and then restart NN0 with the upgrade + // flag. + cluster.shutdownNameNode(1); + cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE); + cluster.restartNameNode(0, false); + + checkNnPreviousDirExistence(cluster, 0, true); + checkNnPreviousDirExistence(cluster, 1, false); + checkPreviousDirExistence(sharedDir, true); + + // NN0 should come up in the active state when given the -upgrade option, + // so no need to transition it to active. + assertTrue(fs.mkdirs(new Path("/foo2"))); + + // Restart NN0 without the -upgrade flag, to make sure that works. + cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.REGULAR); + cluster.restartNameNode(0, false); + + // Make sure we can still do FS ops after upgrading. + cluster.transitionToActive(0); + assertTrue(fs.mkdirs(new Path("/foo3"))); + + // Make sure that starting the second NN with the -upgrade flag fails. + cluster.getNameNodeInfos()[1].setStartOpt(StartupOption.UPGRADE); + try { + cluster.restartNameNode(1, false); + fail("Should not have been able to start second NN with -upgrade"); + } catch (IOException ioe) { + GenericTestUtils.assertExceptionContains( + "It looks like the shared log is already being upgraded", ioe); + } + } finally { + if (fs != null) { + fs.close(); + } + if (cluster != null) { + cluster.shutdown(); + } + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestInitializeSharedEdits.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestInitializeSharedEdits.java index b534c03aa09..272e5436434 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestInitializeSharedEdits.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestInitializeSharedEdits.java @@ -96,7 +96,7 @@ private void assertCannotStartNameNodes() { } catch (IOException ioe) { LOG.info("Got expected exception", ioe); GenericTestUtils.assertExceptionContains( - "Cannot start an HA namenode with name dirs that need recovery", ioe); + "storage directory does not exist or is not accessible", ioe); } try { cluster.restartNameNode(1, false); @@ -104,7 +104,7 @@ private void assertCannotStartNameNodes() { } catch (IOException ioe) { LOG.info("Got expected exception", ioe); GenericTestUtils.assertExceptionContains( - "Cannot start an HA namenode with name dirs that need recovery", ioe); + "storage directory does not exist or is not accessible", ioe); } } From cbee889711eddc5c67a61df4a6531b4ab3cd205a Mon Sep 17 00:00:00 2001 From: Vinod Kumar Vavilapalli Date: Sun, 26 Jan 2014 04:51:10 +0000 Subject: [PATCH 08/11] YARN-321. Merging YARN-321 branch to trunk. 
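(Editor's aside) The YARN-321 merge below introduces the ApplicationHistoryServer and widens the client API with application-attempt and container reports (getApplicationAttemptReport, getApplicationAttempts, getContainerReport, getContainers). A hypothetical usage sketch of those additions; the cluster wiring and the application id are assumed, not taken from this patch:

```java
import java.util.List;

import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerReport;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class HistoryReportSketch {
  public static void main(String[] args) throws Exception {
    YarnClient client = YarnClient.createYarnClient();
    client.init(new YarnConfiguration());
    client.start();
    try {
      // Hypothetical id; a real caller would take it from a submitted
      // application or from client.getApplications().
      ApplicationId appId = ApplicationId.newInstance(1390000000000L, 1);
      for (ApplicationAttemptReport attempt
          : client.getApplicationAttempts(appId)) {
        List<ContainerReport> containers =
            client.getContainers(attempt.getApplicationAttemptId());
        System.out.println(attempt.getApplicationAttemptId() + " ran "
            + containers.size() + " container(s)");
      }
    } finally {
      client.stop();
    }
  }
}
```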
svn merge ../branches/YARN-321 git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1561452 13f79535-47bb-0310-9956-ffa450edef68 --- .../resources/assemblies/hadoop-yarn-dist.xml | 7 + .../hadoop/mapred/ResourceMgrDelegate.java | 29 + hadoop-yarn-project/CHANGES.txt | 88 ++ hadoop-yarn-project/hadoop-yarn/bin/yarn | 19 +- hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd | 33 +- .../hadoop-yarn/conf/yarn-env.sh | 9 + .../hadoop-yarn/hadoop-yarn-api/pom.xml | 2 + .../yarn/api/ApplicationHistoryProtocol.java | 334 +++++++ .../GetApplicationAttemptReportRequest.java | 75 ++ .../GetApplicationAttemptReportResponse.java | 74 ++ .../GetApplicationAttemptsRequest.java | 67 ++ .../GetApplicationAttemptsResponse.java | 76 ++ .../GetContainerReportRequest.java | 64 ++ .../GetContainerReportResponse.java | 63 ++ .../protocolrecords/GetContainersRequest.java | 67 ++ .../GetContainersResponse.java | 81 ++ .../api/records/ApplicationAttemptReport.java | 165 ++++ .../yarn/api/records/ContainerReport.java | 202 +++++ .../records/YarnApplicationAttemptState.java | 66 ++ .../hadoop/yarn/conf/YarnConfiguration.java | 68 ++ .../ApplicationAttemptNotFoundException.java | 48 + .../ApplicationNotFoundException.java | 9 +- .../ContainerNotFoundException.java | 48 + .../proto/application_history_client.proto | 39 + .../server/application_history_server.proto | 113 +++ .../src/main/proto/yarn_protos.proto | 38 + .../src/main/proto/yarn_service_protos.proto | 36 + .../hadoop/yarn/client/api/AHSClient.java | 180 ++++ .../hadoop/yarn/client/api/YarnClient.java | 76 ++ .../yarn/client/api/impl/AHSClientImpl.java | 155 ++++ .../yarn/client/api/impl/YarnClientImpl.java | 87 +- .../yarn/client/cli/ApplicationCLI.java | 313 +++++-- .../yarn/client/api/impl/TestAHSClient.java | 415 +++++++++ .../yarn/client/api/impl/TestYarnClient.java | 2 - .../hadoop/yarn/client/cli/TestYarnCLI.java | 176 +++- .../api/ApplicationHistoryProtocolPB.java | 33 + ...pplicationHistoryProtocolPBClientImpl.java | 230 +++++ ...plicationHistoryProtocolPBServiceImpl.java | 230 +++++ ...ApplicationAttemptReportRequestPBImpl.java | 140 +++ ...pplicationAttemptReportResponsePBImpl.java | 140 +++ .../GetApplicationAttemptsRequestPBImpl.java | 134 +++ .../GetApplicationAttemptsResponsePBImpl.java | 186 ++++ .../pb/GetContainerReportRequestPBImpl.java | 129 +++ .../pb/GetContainerReportResponsePBImpl.java | 127 +++ .../impl/pb/GetContainersRequestPBImpl.java | 131 +++ .../impl/pb/GetContainersResponsePBImpl.java | 180 ++++ .../pb/ApplicationAttemptReportPBImpl.java | 270 ++++++ .../impl/pb/ContainerReportPBImpl.java | 346 +++++++ .../yarn/api/records/impl/pb/ProtoUtils.java | 17 + .../apache/hadoop/yarn/client/AHSProxy.java | 57 ++ .../apache/hadoop/yarn/util/StringHelper.java | 4 + .../hadoop/yarn/webapp/YarnWebParams.java | 1 + .../hadoop/yarn/webapp/util/WebAppUtils.java | 21 + .../webapps/applicationhistory/.keep | 0 .../src/main/resources/yarn-default.xml | 79 ++ .../pom.xml | 172 ++++ .../ApplicationHistoryClientService.java | 211 +++++ .../ApplicationHistoryManager.java | 28 + .../ApplicationHistoryManagerImpl.java | 222 +++++ .../ApplicationHistoryReader.java | 117 +++ .../ApplicationHistoryServer.java | 159 ++++ .../ApplicationHistoryStore.java | 37 + .../ApplicationHistoryWriter.java | 112 +++ .../FileSystemApplicationHistoryStore.java | 841 ++++++++++++++++++ .../MemoryApplicationHistoryStore.java | 275 ++++++ .../NullApplicationHistoryStore.java | 127 +++ .../records/ApplicationAttemptFinishData.java | 95 ++ 
.../ApplicationAttemptHistoryData.java | 171 ++++ .../records/ApplicationAttemptStartData.java | 82 ++ .../records/ApplicationFinishData.java | 94 ++ .../records/ApplicationHistoryData.java | 213 +++++ .../records/ApplicationStartData.java | 106 +++ .../records/ContainerFinishData.java | 99 +++ .../records/ContainerHistoryData.java | 197 ++++ .../records/ContainerStartData.java | 92 ++ .../ApplicationAttemptFinishDataPBImpl.java | 239 +++++ .../pb/ApplicationAttemptStartDataPBImpl.java | 208 +++++ .../impl/pb/ApplicationFinishDataPBImpl.java | 226 +++++ .../impl/pb/ApplicationStartDataPBImpl.java | 229 +++++ .../impl/pb/ContainerFinishDataPBImpl.java | 223 +++++ .../impl/pb/ContainerStartDataPBImpl.java | 258 ++++++ .../webapp/AHSController.java | 55 ++ .../webapp/AHSLogsPage.java | 55 ++ .../webapp/AHSView.java | 90 ++ .../webapp/AHSWebApp.java | 52 ++ .../webapp/AHSWebServices.java | 162 ++++ .../webapp/AppAttemptPage.java | 69 ++ .../webapp/AppPage.java | 71 ++ .../webapp/ContainerPage.java | 41 + .../webapp/JAXBContextResolver.java | 64 ++ .../webapp/NavBlock.java | 51 ++ .../ApplicationHistoryStoreTestUtils.java | 85 ++ .../TestApplicationHistoryClientService.java | 195 ++++ .../TestApplicationHistoryManagerImpl.java | 74 ++ .../TestApplicationHistoryServer.java | 77 ++ ...TestFileSystemApplicationHistoryStore.java | 196 ++++ .../TestMemoryApplicationHistoryStore.java | 204 +++++ .../webapp/TestAHSWebApp.java | 182 ++++ .../webapp/TestAHSWebServices.java | 295 ++++++ .../yarn/server/api/ApplicationContext.java | 129 +++ .../yarn/server/webapp/AppAttemptBlock.java | 160 ++++ .../hadoop/yarn/server/webapp/AppBlock.java | 185 ++++ .../hadoop/yarn/server/webapp/AppsBlock.java | 142 +++ .../yarn/server/webapp/ContainerBlock.java | 105 +++ .../yarn/server/webapp/WebServices.java | 367 ++++++++ .../server/webapp/dao/AppAttemptInfo.java | 84 ++ .../server/webapp/dao/AppAttemptsInfo.java | 46 + .../yarn/server/webapp/dao/AppInfo.java | 169 ++++ .../yarn/server/webapp/dao/AppsInfo.java | 44 + .../yarn/server/webapp/dao/ContainerInfo.java | 117 +++ .../server/webapp/dao/ContainersInfo.java | 44 + .../pom.xml | 5 + .../server/resourcemanager/RMContext.java | 9 +- .../server/resourcemanager/RMContextImpl.java | 18 +- .../server/resourcemanager/RMServerUtils.java | 92 +- .../resourcemanager/ResourceManager.java | 10 + .../ahs/RMApplicationHistoryWriter.java | 345 +++++++ .../WritingApplicationAttemptFinishEvent.java | 51 ++ .../WritingApplicationAttemptStartEvent.java | 50 ++ .../ahs/WritingApplicationFinishEvent.java | 50 ++ .../ahs/WritingApplicationHistoryEvent.java | 30 + .../ahs/WritingApplicationStartEvent.java | 50 ++ .../ahs/WritingContainerFinishEvent.java | 49 + .../ahs/WritingContainerStartEvent.java | 49 + .../ahs/WritingHistoryEventType.java | 24 + .../resourcemanager/rmapp/RMAppImpl.java | 30 +- .../rmapp/attempt/RMAppAttempt.java | 18 + .../rmapp/attempt/RMAppAttemptImpl.java | 25 + .../rmcontainer/RMContainer.java | 19 + .../rmcontainer/RMContainerImpl.java | 108 ++- .../SchedulerApplicationAttempt.java | 3 +- .../common/fica/FiCaSchedulerApp.java | 5 +- .../scheduler/fair/FSSchedulerApp.java | 5 +- .../resourcemanager/TestAppManager.java | 6 +- .../resourcemanager/TestClientRMService.java | 11 +- .../TestRMNodeTransitions.java | 2 +- .../ahs/TestRMApplicationHistoryWriter.java | 517 +++++++++++ .../resourcetracker/TestNMExpiry.java | 2 +- .../resourcetracker/TestNMReconnect.java | 2 +- .../TestRMNMRPCResponseId.java | 2 +- .../rmapp/TestRMAppTransitions.java | 24 +- 
.../attempt/TestRMAppAttemptTransitions.java | 11 +- .../rmcontainer/TestRMContainerImpl.java | 45 +- .../capacity/TestCapacityScheduler.java | 10 +- .../capacity/TestChildQueueOrder.java | 10 +- .../scheduler/capacity/TestQueueParsing.java | 2 +- .../scheduler/capacity/TestUtils.java | 4 +- .../scheduler/fifo/TestFifoScheduler.java | 12 +- .../resourcemanager/webapp/TestRMWebApp.java | 4 +- .../hadoop-yarn/hadoop-yarn-server/pom.xml | 1 + 150 files changed, 15767 insertions(+), 165 deletions(-) create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationHistoryProtocol.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetApplicationAttemptReportRequest.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetApplicationAttemptReportResponse.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetApplicationAttemptsRequest.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetApplicationAttemptsResponse.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetContainerReportRequest.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetContainerReportResponse.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetContainersRequest.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetContainersResponse.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationAttemptReport.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerReport.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/YarnApplicationAttemptState.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/ApplicationAttemptNotFoundException.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/ContainerNotFoundException.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/application_history_client.proto create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/application_history_server.proto create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AHSClient.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AHSClientImpl.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAHSClient.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/ApplicationHistoryProtocolPB.java create mode 100644 
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ApplicationHistoryProtocolPBClientImpl.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/service/ApplicationHistoryProtocolPBServiceImpl.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationAttemptReportRequestPBImpl.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationAttemptReportResponsePBImpl.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationAttemptsRequestPBImpl.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationAttemptsResponsePBImpl.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetContainerReportRequestPBImpl.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetContainerReportResponsePBImpl.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetContainersRequestPBImpl.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetContainersResponsePBImpl.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationAttemptReportPBImpl.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerReportPBImpl.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/AHSProxy.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/applicationhistory/.keep create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryClientService.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManager.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerImpl.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryReader.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java create mode 100644 
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryStore.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryWriter.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/FileSystemApplicationHistoryStore.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/MemoryApplicationHistoryStore.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/NullApplicationHistoryStore.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationAttemptFinishData.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationAttemptHistoryData.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationAttemptStartData.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationFinishData.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationHistoryData.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationStartData.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ContainerFinishData.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ContainerHistoryData.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ContainerStartData.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/impl/pb/ApplicationAttemptFinishDataPBImpl.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/impl/pb/ApplicationAttemptStartDataPBImpl.java create 
mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/impl/pb/ApplicationFinishDataPBImpl.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/impl/pb/ApplicationStartDataPBImpl.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/impl/pb/ContainerFinishDataPBImpl.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/impl/pb/ContainerStartDataPBImpl.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSController.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSLogsPage.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSView.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebApp.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppAttemptPage.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppPage.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/ContainerPage.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/JAXBContextResolver.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/NavBlock.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryStoreTestUtils.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryClientService.java create mode 100644 
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryManagerImpl.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryServer.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestMemoryApplicationHistoryStore.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebApp.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebServices.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/ApplicationContext.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppsBlock.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/ContainerBlock.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebServices.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppAttemptInfo.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppAttemptsInfo.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppInfo.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppsInfo.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/ContainerInfo.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/ContainersInfo.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ahs/RMApplicationHistoryWriter.java create mode 100644 
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ahs/WritingApplicationAttemptFinishEvent.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ahs/WritingApplicationAttemptStartEvent.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ahs/WritingApplicationFinishEvent.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ahs/WritingApplicationHistoryEvent.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ahs/WritingApplicationStartEvent.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ahs/WritingContainerFinishEvent.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ahs/WritingContainerStartEvent.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ahs/WritingHistoryEventType.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ahs/TestRMApplicationHistoryWriter.java diff --git a/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml b/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml index 7f0906b52cd..a15e1243b24 100644 --- a/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml +++ b/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml @@ -122,6 +122,13 @@ *-sources.jar + + hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/target + /share/hadoop/${hadoop.component}/sources + + *-sources.jar + + hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/target /share/hadoop/${hadoop.component}/sources diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java index 74b07c2f3db..fb3e47de353 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java @@ -44,9 +44,13 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.yarn.api.ApplicationClientProtocol; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationReport; import 
org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.ContainerReport; import org.apache.hadoop.yarn.api.records.NodeReport; import org.apache.hadoop.yarn.api.records.QueueUserACLInfo; import org.apache.hadoop.yarn.api.records.NodeState; @@ -371,4 +375,29 @@ public List getQueueAclsInfo() throws YarnException, IOException { return client.getQueueAclsInfo(); } + + @Override + public ApplicationAttemptReport getApplicationAttemptReport( + ApplicationAttemptId appAttemptId) throws YarnException, IOException { + return client.getApplicationAttemptReport(appAttemptId); + } + + @Override + public List getApplicationAttempts( + ApplicationId appId) throws YarnException, IOException { + return client.getApplicationAttempts(appId); + } + + @Override + public ContainerReport getContainerReport(ContainerId containerId) + throws YarnException, IOException { + return client.getContainerReport(containerId); + } + + @Override + public List getContainers( + ApplicationAttemptId applicationAttemptId) throws YarnException, + IOException { + return client.getContainers(applicationAttemptId); + } } diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index f2491aec07b..a2dfdbb69cc 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -9,8 +9,67 @@ Trunk - Unreleased YARN-1496. Protocol additions to allow moving apps between queues (Sandy Ryza) + YARN-930. Bootstrapping ApplicationHistoryService module. (vinodkv) + + YARN-947. Implementing the data objects to be used by the History reader + and writer interfaces. (Zhijie Shen via vinodkv) + + YARN-934. Defined a Writer Interface for HistoryStorage. (Zhijie Shen via + vinodkv) + + YARN-925. Defined a Reader Interface for HistoryStorage. (Mayank Bansal via + vinodkv) + + YARN-978. Created ApplicationAttemptReport. (Mayank Bansal via vinodkv) + + YARN-956. Added a testable in-memory HistoryStorage. (Mayank Bansal via + vinodkv) + + YARN-975. Added a file-system implementation for HistoryStorage. (Zhijie Shen + via vinodkv) + + YARN-1123. Added a new ContainerReport and its Protobuf implementation. (Mayank + Bansal via vinodkv) + + YARN-979. Added more APIs for getting information about ApplicationAttempts + and Containers from ApplicationHistoryProtocol. (Mayank Bansal and Zhijie Shen + via vinodkv) + + YARN-953. Changed ResourceManager to start writing history data. (Zhijie Shen + via vinodkv) + + YARN-1266. Implemented PB service and client wrappers for + ApplicationHistoryProtocol. (Mayank Bansal via vinodkv) + + YARN-955. Implemented ApplicationHistoryProtocol handler. (Mayank Bansal via + vinodkv) + + YARN-1242. Changed yarn scripts to be able to start ApplicationHistoryServer + as an individual process. (Mayank Bansal via vinodkv) + + YARN-954. Implemented web UI for the ApplicationHistoryServer and wired it into + the HistoryStorage. (Zhijie Shen via vinodkv) + + YARN-967. Added the client and CLI interfaces for obtaining ApplicationHistory + data. (Mayank Bansal via vinodkv) + + YARN-1023. Added Webservices REST APIs support for Application History. (Zhijie + Shen via vinodkv) + + YARN-1413. Implemented serving of aggregated-logs in the ApplicationHistory + server. (Mayank Bansal via vinodkv) + IMPROVEMENTS + YARN-1007. Enhance History Reader interface for Containers. (Mayank Bansal via + devaraj) + + YARN-974. 
Added more information to RMContainer to be collected and recorded in + Application-History. (Zhijie Shen via vinodkv) + + YARN-987. Added ApplicationHistoryManager responsible for exposing reports to + all clients. (Mayank Bansal via vinodkv) + OPTIMIZATIONS BUG FIXES @@ -18,6 +77,35 @@ Trunk - Unreleased YARN-524 TestYarnVersionInfo failing if generated properties doesn't include an SVN URL. (stevel) + YARN-935. Correcting pom.xml to build applicationhistoryserver module + successfully. (Zhijie Shen via vinodkv) + + YARN-962. Fixed bug in application-history proto file and renamed it be just + a client proto file. (Zhijie Shen via vinodkv) + + YARN-984. Renamed the incorrectly named applicationhistoryservice.records.pb.impl + package to be applicationhistoryservice.records.impl.pb. (Devaraj K via vinodkv) + + YARN-1534. Fixed failure of test TestAHSWebApp. (Shinichi Yamashita via vinodkv) + + YARN-1555. Fixed test failures in applicationhistoryservice.* (Vinod Kumar + Vavilapalli via mayank) + + YARN-1594. Updated pom.xml of applicationhistoryservice sub-project according to + YARN-888. (Vinod Kumar Vavilapalli via zjshen) + + YARN-1596. Fixed Javadoc warnings on branch YARN-321. (Vinod Kumar Vavilapalli + via zjshen) + + YARN-1597. Fixed Findbugs warnings on branch YARN-321. (Vinod Kumar Vavilapalli + via zjshen) + + YARN-1595. Made enabling history service configurable and fixed test failures on + branch YARN-321. (Vinod Kumar Vavilapalli via zjshen) + + YARN-1605. Fixed formatting issues in the new module on branch YARN-321. (Vinod + Kumar Vavilapalli via zjshen) + Release 2.4.0 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn b/hadoop-yarn-project/hadoop-yarn/bin/yarn index 97c2afe992b..ac42a9a442a 100644 --- a/hadoop-yarn-project/hadoop-yarn/bin/yarn +++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn @@ -58,10 +58,13 @@ function print_usage(){ echo "where COMMAND is one of:" echo " resourcemanager run the ResourceManager" echo " nodemanager run a nodemanager on each slave" + echo " historyserver run the application history server" echo " rmadmin admin tools" echo " version print the version" echo " jar run a jar file" echo " application prints application(s) report/kill application" + echo " applicationattempt prints applicationattempt(s) report" + echo " container prints container(s) report" echo " node prints node report(s)" echo " logs dump container logs" echo " classpath prints the class path needed to get the" @@ -145,6 +148,10 @@ if [ -d "$HADOOP_YARN_HOME/yarn-server/yarn-server-common/target/classes" ]; the fi if [ -d "$HADOOP_YARN_HOME/yarn-server/yarn-server-resourcemanager/target/classes" ]; then CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/yarn-server/yarn-server-resourcemanager/target/classes + CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/yarn-server/yarn-server-applicationhistoryservice/target/classes +fi +if [ -d "$HADOOP_YARN_HOME/yarn-server/yarn-server-applicationhistoryservice/target/classes" ]; then + CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/yarn-server/yarn-server-applicationhistoryservice/target/classes fi if [ -d "$HADOOP_YARN_HOME/build/test/classes" ]; then CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/target/test/classes @@ -177,9 +184,12 @@ if [ "$COMMAND" = "classpath" ] ; then elif [ "$COMMAND" = "rmadmin" ] ; then CLASS='org.apache.hadoop.yarn.client.cli.RMAdminCLI' YARN_OPTS="$YARN_OPTS $YARN_CLIENT_OPTS" -elif [ "$COMMAND" = "application" ] ; then +elif [ "$COMMAND" = "application" ] || + [ "$COMMAND" = "applicationattempt" ] || 
+ [ "$COMMAND" = "container" ]; then CLASS=org.apache.hadoop.yarn.client.cli.ApplicationCLI YARN_OPTS="$YARN_OPTS $YARN_CLIENT_OPTS" + set -- $COMMAND $@ elif [ "$COMMAND" = "node" ] ; then CLASS=org.apache.hadoop.yarn.client.cli.NodeCLI YARN_OPTS="$YARN_OPTS $YARN_CLIENT_OPTS" @@ -190,6 +200,13 @@ elif [ "$COMMAND" = "resourcemanager" ] ; then if [ "$YARN_RESOURCEMANAGER_HEAPSIZE" != "" ]; then JAVA_HEAP_MAX="-Xmx""$YARN_RESOURCEMANAGER_HEAPSIZE""m" fi +elif [ "$COMMAND" = "historyserver" ] ; then + CLASSPATH=${CLASSPATH}:$YARN_CONF_DIR/ahs-config/log4j.properties + CLASS='org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryServer' + YARN_OPTS="$YARN_OPTS $YARN_HISTORYSERVER_OPTS" + if [ "$YARN_RESOURCEMANAGER_HEAPSIZE" != "" ]; then + JAVA_HEAP_MAX="-Xmx""$YARN_HISTORYSERVER_HEAPSIZE""m" + fi elif [ "$COMMAND" = "nodemanager" ] ; then CLASSPATH=${CLASSPATH}:$YARN_CONF_DIR/nm-config/log4j.properties CLASS='org.apache.hadoop.yarn.server.nodemanager.NodeManager' diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd b/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd index 8afc6ea0564..121f864f838 100644 --- a/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd +++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd @@ -120,8 +120,11 @@ if "%1" == "--config" ( if exist %HADOOP_YARN_HOME%\yarn-server\yarn-server-resourcemanager\target\classes ( set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\yarn-server\yarn-server-resourcemanager\target\classes + set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\yarn-server\yarn-server-applicationhistoryservice\target\classes + ) + if exist %HADOOP_YARN_HOME%\yarn-server\yarn-server-applicationhistoryservice\target\classes ( + set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\yarn-server\yarn-server-applicationhistoryservice\target\classes ) - if exist %HADOOP_YARN_HOME%\build\test\classes ( set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\build\test\classes ) @@ -138,7 +141,8 @@ if "%1" == "--config" ( goto :eof ) - set yarncommands=resourcemanager nodemanager proxyserver rmadmin version jar application node logs daemonlog + set yarncommands=resourcemanager nodemanager proxyserver rmadmin version jar ^ + application applicationattempt container node logs daemonlog historyserver for %%i in ( %yarncommands% ) do ( if %yarn-command% == %%i set yarncommand=true ) @@ -170,8 +174,21 @@ goto :eof :application set CLASS=org.apache.hadoop.yarn.client.cli.ApplicationCLI set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS% + set yarn-command-arguments=%yarn-command% %yarn-command-arguments% goto :eof +:applicationattempt + set CLASS=org.apache.hadoop.yarn.client.cli.ApplicationCLI + set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS% + set yarn-command-arguments=%yarn-command% %yarn-command-arguments% + goto :eof + +:container + set CLASS=org.apache.hadoop.yarn.client.cli.ApplicationCLI + set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS% + set yarn-command-arguments=%yarn-command% %yarn-command-arguments% + goto :eof + :node set CLASS=org.apache.hadoop.yarn.client.cli.NodeCLI set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS% @@ -186,6 +203,15 @@ goto :eof ) goto :eof +:historyserver + set CLASSPATH=%CLASSPATH%;%YARN_CONF_DIR%\ahs-config\log4j.properties + set CLASS=org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryServer + set YARN_OPTS=%YARN_OPTS% %HADOOP_HISTORYSERVER_OPTS% + if defined YARN_RESOURCEMANAGER_HEAPSIZE ( + set JAVA_HEAP_MAX=-Xmx%YARN_HISTORYSERVER_HEAPSIZE%m + ) + goto :eof + :nodemanager set CLASSPATH=%CLASSPATH%;%YARN_CONF_DIR%\nm-config\log4j.properties set 
CLASS=org.apache.hadoop.yarn.server.nodemanager.NodeManager @@ -251,10 +277,13 @@ goto :eof @echo where COMMAND is one of: @echo resourcemanager run the ResourceManager @echo nodemanager run a nodemanager on each slave + @echo historyserver run the application history server @echo rmadmin admin tools @echo version print the version @echo jar ^ run a jar file @echo application prints application(s) report/kill application + @echo applicationattempt prints applicationattempt(s) report + @echo container prints container(s) report @echo node prints node report(s) @echo logs dump container logs @echo classpath prints the class path needed to get the diff --git a/hadoop-yarn-project/hadoop-yarn/conf/yarn-env.sh b/hadoop-yarn-project/hadoop-yarn/conf/yarn-env.sh index f450740d53a..c2b01becc6d 100644 --- a/hadoop-yarn-project/hadoop-yarn/conf/yarn-env.sh +++ b/hadoop-yarn-project/hadoop-yarn/conf/yarn-env.sh @@ -54,6 +54,15 @@ fi # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two. #export YARN_RESOURCEMANAGER_HEAPSIZE=1000 +# Specify the max Heapsize for the HistoryManager using a numerical value +# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set +# the value to 1000. +# This value will be overridden by an Xmx setting specified in either YARN_OPTS +# and/or YARN_HISTORYSERVER_OPTS. +# If not specified, the default value will be picked from either YARN_HEAPMAX +# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two. +#export YARN_HISTORYSERVER_HEAPSIZE=1000 + # Specify the JVM options to be used when starting the ResourceManager. # These options will be appended to the options specified as YARN_OPTS # and therefore may override any similar flags set in YARN_OPTS diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml index 66a647527f4..09990f4bb67 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml @@ -112,6 +112,8 @@ containermanagement_protocol.proto server/yarn_server_resourcemanager_service_protos.proto server/resourcemanager_administration_protocol.proto + application_history_client.proto + server/application_history_server.proto ${project.build.directory}/generated-sources/java diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationHistoryProtocol.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationHistoryProtocol.java new file mode 100644 index 00000000000..0bfd2eda47e --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationHistoryProtocol.java @@ -0,0 +1,334 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.api; + +import java.io.IOException; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenRequest; +import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetContainersRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetContainersResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenResponse; +import org.apache.hadoop.yarn.api.protocolrecords.RenewDelegationTokenRequest; +import org.apache.hadoop.yarn.api.protocolrecords.RenewDelegationTokenResponse; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ApplicationReport; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.ContainerReport; +import org.apache.hadoop.yarn.api.records.Token; +import org.apache.hadoop.yarn.exceptions.YarnException; + +/** + *

+ * The protocol between clients and the ApplicationHistoryServer to + * get the information of completed applications etc. + *

+ */ +@Public +@Unstable +public interface ApplicationHistoryProtocol { + + /** + *

+ * The interface used by clients to get a report of an Application from the + * ResourceManager. + *

+ * + *

+ * The client, via {@link GetApplicationReportRequest}, provides the
+ * {@link ApplicationId} of the application.
+ *

+ * + *

+ * In secure mode, the ApplicationHistoryServer verifies access to
+ * the application, queue etc. before accepting the request.
+ *

+ * + *

+ * The ApplicationHistoryServer responds with a + * {@link GetApplicationReportResponse} which includes the + * {@link ApplicationReport} for the application. + *

+ * + *

+ * If the user does not have VIEW_APP access then the following
+ * fields in the report will be set to stubbed values:
+ * <ul>
+ *   <li>host - set to "N/A"</li>
+ *   <li>RPC port - set to -1</li>
+ *   <li>client token - set to "N/A"</li>
+ *   <li>diagnostics - set to "N/A"</li>
+ *   <li>tracking URL - set to "N/A"</li>
+ *   <li>original tracking URL - set to "N/A"</li>
+ *   <li>resource usage report - all values are -1</li>
+ * </ul>
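+ * <p>
+ * For illustration only, a minimal sketch of a call; historyProtocol is
+ * an assumed ApplicationHistoryProtocol proxy obtained elsewhere (for
+ * example via YarnRPC), and the identifiers are placeholders:
+ * <pre>{@code
+ * // appId is illustrative; clusterTimestamp identifies the RM instance
+ * ApplicationId appId = ApplicationId.newInstance(clusterTimestamp, 1);
+ * GetApplicationReportRequest request =
+ *     GetApplicationReportRequest.newInstance(appId);
+ * ApplicationReport report =
+ *     historyProtocol.getApplicationReport(request).getApplicationReport();
+ * }</pre>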

+ * + * @param request + * request for an application report + * @return application report + * @throws YarnException + * @throws IOException + */ + @Public + @Unstable + public GetApplicationReportResponse getApplicationReport( + GetApplicationReportRequest request) throws YarnException, IOException; + + /** + *

+ * The interface used by clients to get a report of all Applications in the + * cluster from the ApplicationHistoryServer. + *

+ * + *

+ * The ApplicationHistoryServer responds with a + * {@link GetApplicationsResponse} which includes a list of + * {@link ApplicationReport} for all the applications. + *

+ * + *

+ * If the user does not have VIEW_APP access for an application + * then the corresponding report will be filtered as described in + * {@link #getApplicationReport(GetApplicationReportRequest)}. + *
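+ * <p>
+ * A minimal sketch, assuming the same illustrative historyProtocol proxy
+ * as in the previous example:
+ * <pre>{@code
+ * GetApplicationsRequest request = GetApplicationsRequest.newInstance();
+ * List<ApplicationReport> reports =
+ *     historyProtocol.getApplications(request).getApplicationList();
+ * }</pre>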

+ * + * @param request + * request for reports on all the applications + * @return report on applications matching the given application types defined + * in the request + * @throws YarnException + * @throws IOException + */ + @Public + @Unstable + public GetApplicationsResponse + getApplications(GetApplicationsRequest request) throws YarnException, + IOException; + + /** + *

+ * The interface used by clients to get a report of an Application Attempt + * from the ApplicationHistoryServer. + *

+ * + *

+ * The client, via {@link GetApplicationAttemptReportRequest} provides the + * {@link ApplicationAttemptId} of the application attempt. + *

+ * + *

+ * In secure mode, the ApplicationHistoryServer verifies access to
+ * the method before accepting the request.
+ *

+ * + *

+ * The ApplicationHistoryServer responds with a + * {@link GetApplicationAttemptReportResponse} which includes the + * {@link ApplicationAttemptReport} for the application attempt. + *

+ * + *

+ * If the user does not have VIEW_APP access then the following
+ * fields in the report will be set to stubbed values:
+ * <ul>
+ *   <li>host</li>
+ *   <li>RPC port</li>
+ *   <li>client token</li>
+ *   <li>diagnostics - set to "N/A"</li>
+ *   <li>tracking URL</li>
+ * </ul>
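+ * <p>
+ * A minimal sketch, assuming the illustrative historyProtocol proxy and
+ * an existing appId:
+ * <pre>{@code
+ * ApplicationAttemptId attemptId =
+ *     ApplicationAttemptId.newInstance(appId, 1);
+ * GetApplicationAttemptReportRequest request =
+ *     GetApplicationAttemptReportRequest.newInstance(attemptId);
+ * ApplicationAttemptReport attemptReport =
+ *     historyProtocol.getApplicationAttemptReport(request)
+ *       .getApplicationAttemptReport();
+ * }</pre>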

+ * + * @param request + * request for an application attempt report + * @return application attempt report + * @throws YarnException + * @throws IOException + */ + @Public + @Unstable + public GetApplicationAttemptReportResponse getApplicationAttemptReport( + GetApplicationAttemptReportRequest request) throws YarnException, + IOException; + + /** + *

+ * The interface used by clients to get a report of all Application attempts + * in the cluster from the ApplicationHistoryServer. + *

+ * + *

+ * The ApplicationHistoryServer responds with a
+ * {@link GetApplicationAttemptsResponse} which includes a list of
+ * {@link ApplicationAttemptReport} for all the application attempts of a
+ * specified application.
+ *

+ * + *

+ * If the user does not have VIEW_APP access for an application + * then the corresponding report will be filtered as described in + * {@link #getApplicationAttemptReport(GetApplicationAttemptReportRequest)}. + *
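+ * <p>
+ * A minimal sketch, assuming the same illustrative proxy and appId:
+ * <pre>{@code
+ * GetApplicationAttemptsRequest request =
+ *     GetApplicationAttemptsRequest.newInstance(appId);
+ * List<ApplicationAttemptReport> attempts =
+ *     historyProtocol.getApplicationAttempts(request)
+ *       .getApplicationAttemptList();
+ * }</pre>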

+ * + * @param request + * request for reports on all application attempts of an application + * @return reports on all application attempts of an application + * @throws YarnException + * @throws IOException + */ + @Public + @Unstable + public GetApplicationAttemptsResponse getApplicationAttempts( + GetApplicationAttemptsRequest request) throws YarnException, IOException; + + /** + *

+ * The interface used by clients to get a report of a Container from the
+ * ApplicationHistoryServer.
+ *

+ * + *

+ * The client, via {@link GetContainerReportRequest}, provides the
+ * {@link ContainerId} of the container.
+ *

+ * + *

+ * In secure mode, the ApplicationHistoryServer verifies access to
+ * the method before accepting the request.
+ *

+ * + *

+ * The ApplicationHistoryServer responds with a + * {@link GetContainerReportResponse} which includes the + * {@link ContainerReport} for the container. + *
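+ * <p>
+ * A minimal sketch, assuming the same illustrative proxy and an existing
+ * attemptId:
+ * <pre>{@code
+ * ContainerId containerId = ContainerId.newInstance(attemptId, 1);
+ * ContainerReport containerReport =
+ *     historyProtocol.getContainerReport(
+ *         GetContainerReportRequest.newInstance(containerId))
+ *       .getContainerReport();
+ * }</pre>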

+ * + * @param request + * request for a container report + * @return container report + * @throws YarnException + * @throws IOException + */ + @Public + @Unstable + public GetContainerReportResponse getContainerReport( + GetContainerReportRequest request) throws YarnException, IOException; + + /** + *

+ * The interface used by clients to get a report of Containers for an
+ * application attempt from the ApplicationHistoryServer.
+ *

+ * + *

+ * The client, via {@link GetContainersRequest}, provides the
+ * {@link ApplicationAttemptId} of the application attempt.
+ *

+ * + *

+ * In secure mode, the ApplicationHistoryServer verifies access to
+ * the method before accepting the request.
+ *

+ * + *

+ * The ApplicationHistoryServer responds with a + * {@link GetContainersResponse} which includes a list of + * {@link ContainerReport} for all the containers of a specific application + * attempt. + *
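+ * <p>
+ * A minimal sketch, assuming the same illustrative proxy and attemptId:
+ * <pre>{@code
+ * GetContainersRequest request =
+ *     GetContainersRequest.newInstance(attemptId);
+ * List<ContainerReport> containers =
+ *     historyProtocol.getContainers(request).getContainerList();
+ * }</pre>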

+ * + * @param request + * request for a list of container reports of an application attempt. + * @return reports on all containers of an application attempt + * @throws YarnException + * @throws IOException + */ + @Public + @Unstable + public GetContainersResponse getContainers(GetContainersRequest request) + throws YarnException, IOException; + + /** + *

+ * The interface used by clients to get a delegation token, enabling the
+ * containers to talk to the service using those tokens.
+ *

+ * + *

+ * The ApplicationHistoryServer responds with the delegation + * token {@link Token} that can be used by the client to speak to this + * service. + *
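+ * <p>
+ * A minimal sketch, assuming the same illustrative proxy; the renewer
+ * name is a placeholder:
+ * <pre>{@code
+ * GetDelegationTokenRequest request =
+ *     GetDelegationTokenRequest.newInstance("renewer");
+ * Token token =
+ *     historyProtocol.getDelegationToken(request).getRMDelegationToken();
+ * }</pre>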

+ * + * @param request + * request to get a delegation token for the client. + * @return delegation token that can be used to talk to this service + * @throws YarnException + * @throws IOException + */ + @Public + @Unstable + public GetDelegationTokenResponse getDelegationToken( + GetDelegationTokenRequest request) throws YarnException, IOException; + + /** + * Renew an existing delegation token. + * + * @param request + * the delegation token to be renewed. + * @return the new expiry time for the delegation token. + * @throws YarnException + * @throws IOException + */ + @Private + @Unstable + public RenewDelegationTokenResponse renewDelegationToken( + RenewDelegationTokenRequest request) throws YarnException, IOException; + + /** + * Cancel an existing delegation token. + * + * @param request + * the delegation token to be cancelled. + * @return an empty response. + * @throws YarnException + * @throws IOException + */ + @Private + @Unstable + public CancelDelegationTokenResponse cancelDelegationToken( + CancelDelegationTokenRequest request) throws YarnException, IOException; +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetApplicationAttemptReportRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetApplicationAttemptReportRequest.java new file mode 100644 index 00000000000..fb50952263e --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetApplicationAttemptReportRequest.java @@ -0,0 +1,75 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.api.protocolrecords; + +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.ApplicationHistoryProtocol; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport; +import org.apache.hadoop.yarn.util.Records; + +/** + *

+ * The request sent by a client to the ResourceManager to get an + * {@link ApplicationAttemptReport} for an application attempt. + *

+ * + *

+ * The request should include the {@link ApplicationAttemptId} of the + * application attempt. + *

+ * + * @see ApplicationAttemptReport + * @see ApplicationHistoryProtocol#getApplicationAttemptReport(GetApplicationAttemptReportRequest) + */ +@Public +@Unstable +public abstract class GetApplicationAttemptReportRequest { + + @Public + @Unstable + public static GetApplicationAttemptReportRequest newInstance( + ApplicationAttemptId applicationAttemptId) { + GetApplicationAttemptReportRequest request = + Records.newRecord(GetApplicationAttemptReportRequest.class); + request.setApplicationAttemptId(applicationAttemptId); + return request; + } + + /** + * Get the ApplicationAttemptId of an application attempt. + * + * @return ApplicationAttemptId of an application attempt + */ + @Public + @Unstable + public abstract ApplicationAttemptId getApplicationAttemptId(); + + /** + * Set the ApplicationAttemptId of an application attempt + * + * @param applicationAttemptId + * ApplicationAttemptId of an application attempt + */ + @Public + @Unstable + public abstract void setApplicationAttemptId( + ApplicationAttemptId applicationAttemptId); +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetApplicationAttemptReportResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetApplicationAttemptReportResponse.java new file mode 100644 index 00000000000..eb58f96cdd6 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetApplicationAttemptReportResponse.java @@ -0,0 +1,74 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.api.protocolrecords; + +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.ApplicationHistoryProtocol; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport; +import org.apache.hadoop.yarn.util.Records; + +/** + *

+ * The response sent by the ResourceManager to a client requesting + * an application attempt report. + *

+ * + *

+ * The response includes an {@link ApplicationAttemptReport} which has the
+ * details about the particular application attempt.
+ *

+ * + * @see ApplicationAttemptReport + * @see ApplicationHistoryProtocol#getApplicationAttemptReport(GetApplicationAttemptReportRequest) + */ +@Public +@Unstable +public abstract class GetApplicationAttemptReportResponse { + + @Public + @Unstable + public static GetApplicationAttemptReportResponse newInstance( + ApplicationAttemptReport ApplicationAttemptReport) { + GetApplicationAttemptReportResponse response = + Records.newRecord(GetApplicationAttemptReportResponse.class); + response.setApplicationAttemptReport(ApplicationAttemptReport); + return response; + } + + /** + * Get the ApplicationAttemptReport for the application attempt. + * + * @return ApplicationAttemptReport for the application attempt + */ + @Public + @Unstable + public abstract ApplicationAttemptReport getApplicationAttemptReport(); + + /** + * Get the ApplicationAttemptReport for the application attempt. + * + * @param applicationAttemptReport + * ApplicationAttemptReport for the application attempt + */ + @Public + @Unstable + public abstract void setApplicationAttemptReport( + ApplicationAttemptReport applicationAttemptReport); +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetApplicationAttemptsRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetApplicationAttemptsRequest.java new file mode 100644 index 00000000000..4c721a78951 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetApplicationAttemptsRequest.java @@ -0,0 +1,67 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.api.protocolrecords; + +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.ApplicationHistoryProtocol; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.util.Records; + +/** + *

+ * The request from clients to get a list of application attempt reports of an + * application from the ResourceManager. + *

+ * + * @see ApplicationHistoryProtocol#getApplicationAttempts(GetApplicationAttemptsRequest) + */ +@Public +@Unstable +public abstract class GetApplicationAttemptsRequest { + + @Public + @Unstable + public static GetApplicationAttemptsRequest newInstance( + ApplicationId applicationId) { + GetApplicationAttemptsRequest request = + Records.newRecord(GetApplicationAttemptsRequest.class); + request.setApplicationId(applicationId); + return request; + } + + /** + * Get the ApplicationId of an application + * + * @return ApplicationId of an application + */ + @Public + @Unstable + public abstract ApplicationId getApplicationId(); + + /** + * Set the ApplicationId of an application + * + * @param applicationId + * ApplicationId of an application + */ + @Public + @Unstable + public abstract void setApplicationId(ApplicationId applicationId); +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetApplicationAttemptsResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetApplicationAttemptsResponse.java new file mode 100644 index 00000000000..94ca74a714e --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetApplicationAttemptsResponse.java @@ -0,0 +1,76 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.api.protocolrecords; + +import java.util.List; + +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.ApplicationHistoryProtocol; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport; +import org.apache.hadoop.yarn.util.Records; + +/** + *

+ * The response sent by the ResourceManager to a client requesting + * a list of {@link ApplicationAttemptReport} for application attempts. + *

+ * + *

+ * The ApplicationAttemptReport for each application attempt includes
+ * the details of that application attempt.
+ *

+ * + * @see ApplicationAttemptReport + * @see ApplicationHistoryProtocol#getApplicationAttempts(GetApplicationAttemptsRequest) + */ +@Public +@Unstable +public abstract class GetApplicationAttemptsResponse { + + @Public + @Unstable + public static GetApplicationAttemptsResponse newInstance( + List applicationAttempts) { + GetApplicationAttemptsResponse response = + Records.newRecord(GetApplicationAttemptsResponse.class); + response.setApplicationAttemptList(applicationAttempts); + return response; + } + + /** + * Get a list of ApplicationReport of an application. + * + * @return a list of ApplicationReport of an application + */ + @Public + @Unstable + public abstract List getApplicationAttemptList(); + + /** + * Get a list of ApplicationReport of an application. + * + * @param applicationAttempts + * a list of ApplicationReport of an application + */ + @Public + @Unstable + public abstract void setApplicationAttemptList( + List applicationAttempts); +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetContainerReportRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetContainerReportRequest.java new file mode 100644 index 00000000000..1b53964dfca --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetContainerReportRequest.java @@ -0,0 +1,64 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.api.protocolrecords; + +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.ContainerReport; +import org.apache.hadoop.yarn.util.Records; + +/** + *

+ * The request sent by a client to the ResourceManager to get a
+ * {@link ContainerReport} for a container.
+ *

+ */ +@Public +@Unstable +public abstract class GetContainerReportRequest { + + @Public + @Unstable + public static GetContainerReportRequest newInstance(ContainerId containerId) { + GetContainerReportRequest request = + Records.newRecord(GetContainerReportRequest.class); + request.setContainerId(containerId); + return request; + } + + /** + * Get the ContainerId of the Container. + * + * @return ContainerId of the Container + */ + @Public + @Unstable + public abstract ContainerId getContainerId(); + + /** + * Set the ContainerId of the container + * + * @param containerId + * ContainerId of the container + */ + @Public + @Unstable + public abstract void setContainerId(ContainerId containerId); +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetContainerReportResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetContainerReportResponse.java new file mode 100644 index 00000000000..7b0c2ff7c1d --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetContainerReportResponse.java @@ -0,0 +1,63 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.api.protocolrecords; + +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.records.ContainerReport; +import org.apache.hadoop.yarn.util.Records; + +/** + *

+ * The response sent by the ResourceManager to a client requesting + * a container report. + *

+ * + *

+ * The response includes a {@link ContainerReport} which has details of a + * container. + *

+ * + */ +@Public +@Unstable +public abstract class GetContainerReportResponse { + @Public + @Unstable + public static GetContainerReportResponse newInstance( + ContainerReport containerReport) { + GetContainerReportResponse response = + Records.newRecord(GetContainerReportResponse.class); + response.setContainerReport(containerReport); + return response; + } + + /** + * Get the ContainerReport for the container. + * + * @return ContainerReport for the container + */ + @Public + @Unstable + public abstract ContainerReport getContainerReport(); + + @Public + @Unstable + public abstract void setContainerReport(ContainerReport containerReport); +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetContainersRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetContainersRequest.java new file mode 100644 index 00000000000..54133b97a24 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetContainersRequest.java @@ -0,0 +1,67 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.api.protocolrecords; + +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.ApplicationHistoryProtocol; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.util.Records; + +/** + *

+ * The request from clients to get a list of container reports belonging to
+ * an application attempt, from the ResourceManager.
+ *

+ * + * @see ApplicationHistoryProtocol#getContainers(GetContainersRequest) + */ +@Public +@Unstable +public abstract class GetContainersRequest { + @Public + @Unstable + public static GetContainersRequest newInstance( + ApplicationAttemptId applicationAttemptId) { + GetContainersRequest request = + Records.newRecord(GetContainersRequest.class); + request.setApplicationAttemptId(applicationAttemptId); + return request; + } + + /** + * Get the ApplicationAttemptId of an application attempt. + * + * @return ApplicationAttemptId of an application attempt + */ + @Public + @Unstable + public abstract ApplicationAttemptId getApplicationAttemptId(); + + /** + * Set the ApplicationAttemptId of an application attempt + * + * @param applicationAttemptId + * ApplicationAttemptId of an application attempt + */ + @Public + @Unstable + public abstract void setApplicationAttemptId( + ApplicationAttemptId applicationAttemptId); +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetContainersResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetContainersResponse.java new file mode 100644 index 00000000000..af36ee49b30 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetContainersResponse.java @@ -0,0 +1,81 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.api.protocolrecords; + +import java.util.List; + +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.ApplicationHistoryProtocol; +import org.apache.hadoop.yarn.api.records.ContainerReport; +import org.apache.hadoop.yarn.util.Records; + +/** + *

+ * The response sent by the ResourceManager to a client requesting + * a list of {@link ContainerReport} for containers. + *

+ * + *

+ * The ContainerReport for each container includes the container + * details. + *

+ * + * @see ContainerReport + * @see ApplicationHistoryProtocol#getContainers(GetContainersRequest) + */ +@Public +@Unstable +public abstract class GetContainersResponse { + + @Public + @Unstable + public static GetContainersResponse newInstance( + List containers) { + GetContainersResponse response = + Records.newRecord(GetContainersResponse.class); + response.setContainerList(containers); + return response; + } + + /** + * Get a list of ContainerReport for all the containers of an + * application attempt. + * + * @return a list of ContainerReport for all the containers of an + * application attempt + * + */ + @Public + @Unstable + public abstract List getContainerList(); + + /** + * Set a list of ContainerReport for all the containers of an + * application attempt. + * + * @param containers + * a list of ContainerReport for all the containers of + * an application attempt + * + */ + @Public + @Unstable + public abstract void setContainerList(List containers); +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationAttemptReport.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationAttemptReport.java new file mode 100644 index 00000000000..031573f3c36 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationAttemptReport.java @@ -0,0 +1,165 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.api.records; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.util.Records; + +/** + *

+ * ApplicationAttemptReport is a report of an application attempt.
+ * <p>
+ * It includes details such as:
+ * <ul>
+ *   <li>{@link ApplicationAttemptId} of the application.</li>
+ *   <li>Host on which the ApplicationMaster of this attempt is
+ *   running.</li>
+ *   <li>RPC port of the ApplicationMaster of this attempt.</li>
+ *   <li>Tracking URL.</li>
+ *   <li>Diagnostic information in case of errors.</li>
+ *   <li>{@link YarnApplicationAttemptState} of the application attempt.</li>
+ *   <li>{@link ContainerId} of the master Container.</li>
+ * </ul>
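+ * <p>
+ * For illustration, a sketch of reading a few fields, where attemptReport
+ * stands for a report obtained through ApplicationHistoryProtocol:
+ * <pre>{@code
+ * String amHost = attemptReport.getHost();
+ * YarnApplicationAttemptState state =
+ *     attemptReport.getYarnApplicationAttemptState();
+ * ContainerId amContainer = attemptReport.getAMContainerId();
+ * }</pre>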

+ * + */ +@Public +@Unstable +public abstract class ApplicationAttemptReport { + + @Private + @Unstable + public static ApplicationAttemptReport newInstance( + ApplicationAttemptId applicationAttemptId, String host, int rpcPort, + String url, String diagnostics, YarnApplicationAttemptState state, + ContainerId amContainerId) { + ApplicationAttemptReport report = + Records.newRecord(ApplicationAttemptReport.class); + report.setApplicationAttemptId(applicationAttemptId); + report.setHost(host); + report.setRpcPort(rpcPort); + report.setTrackingUrl(url); + report.setDiagnostics(diagnostics); + report.setYarnApplicationAttemptState(state); + report.setAMContainerId(amContainerId); + return report; + } + + /** + * Get the YarnApplicationAttemptState of the application attempt. + * + * @return YarnApplicationAttemptState of the application attempt + */ + @Public + @Unstable + public abstract YarnApplicationAttemptState getYarnApplicationAttemptState(); + + @Private + @Unstable + public abstract void setYarnApplicationAttemptState( + YarnApplicationAttemptState yarnApplicationAttemptState); + + /** + * Get the RPC port of this attempt ApplicationMaster. + * + * @return RPC port of this attempt ApplicationMaster + */ + @Public + @Unstable + public abstract int getRpcPort(); + + @Private + @Unstable + public abstract void setRpcPort(int rpcPort); + + /** + * Get the host on which this attempt of + * ApplicationMaster is running. + * + * @return host on which this attempt of + * ApplicationMaster is running + */ + @Public + @Unstable + public abstract String getHost(); + + @Private + @Unstable + public abstract void setHost(String host); + + /** + * Get the diagnositic information of the application attempt in case + * of errors. + * + * @return diagnositic information of the application attempt in case + * of errors + */ + @Public + @Unstable + public abstract String getDiagnostics(); + + @Private + @Unstable + public abstract void setDiagnostics(String diagnostics); + + /** + * Get the tracking url for the application attempt. + * + * @return tracking url for the application attempt + */ + @Public + @Unstable + public abstract String getTrackingUrl(); + + @Private + @Unstable + public abstract void setTrackingUrl(String url); + + /** + * Get the ApplicationAttemptId of this attempt of the + * application + * + * @return ApplicationAttemptId of the attempt + */ + @Public + @Unstable + public abstract ApplicationAttemptId getApplicationAttemptId(); + + @Private + @Unstable + public abstract void setApplicationAttemptId( + ApplicationAttemptId applicationAttemptId); + + /** + * Get the ContainerId of AMContainer for this attempt + * + * @return ContainerId of the attempt + */ + @Public + @Unstable + public abstract ContainerId getAMContainerId(); + + @Private + @Unstable + public abstract void setAMContainerId(ContainerId amContainerId); +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerReport.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerReport.java new file mode 100644 index 00000000000..7513f1650dc --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerReport.java @@ -0,0 +1,202 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.api.records; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.util.Records; + +/** + *

+ * ContainerReport is a report of a container.
+ * <p>
+ * It includes details such as:
+ * <ul>
+ *   <li>{@link ContainerId} of the container.</li>
+ *   <li>Allocated Resources to the container.</li>
+ *   <li>Assigned Node id.</li>
+ *   <li>Assigned Priority.</li>
+ *   <li>Start Time.</li>
+ *   <li>Finish Time.</li>
+ *   <li>Container Exit Status.</li>
+ *   <li>{@link ContainerState} of the container.</li>
+ *   <li>Diagnostic information in case of errors.</li>
+ *   <li>Log URL.</li>
+ * </ul>
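+ * <p>
+ * For illustration, a sketch of reading a few fields, where containerReport
+ * stands for a report obtained through ApplicationHistoryProtocol:
+ * <pre>{@code
+ * long runningMillis =
+ *     containerReport.getFinishTime() - containerReport.getStartTime();
+ * int exitStatus = containerReport.getContainerExitStatus();
+ * }</pre>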

+ * + */ + +@Public +@Unstable +public abstract class ContainerReport { + @Private + @Unstable + public static ContainerReport newInstance(ContainerId containerId, + Resource allocatedResource, NodeId assignedNode, Priority priority, + long startTime, long finishTime, String diagnosticInfo, String logUrl, + int containerExitStatus, ContainerState containerState) { + ContainerReport report = Records.newRecord(ContainerReport.class); + report.setContainerId(containerId); + report.setAllocatedResource(allocatedResource); + report.setAssignedNode(assignedNode); + report.setPriority(priority); + report.setStartTime(startTime); + report.setFinishTime(finishTime); + report.setDiagnosticsInfo(diagnosticInfo); + report.setLogUrl(logUrl); + report.setContainerExitStatus(containerExitStatus); + report.setContainerState(containerState); + return report; + } + + /** + * Get the ContainerId of the container. + * + * @return ContainerId of the container. + */ + @Public + @Unstable + public abstract ContainerId getContainerId(); + + @Public + @Unstable + public abstract void setContainerId(ContainerId containerId); + + /** + * Get the allocated Resource of the container. + * + * @return allocated Resource of the container. + */ + @Public + @Unstable + public abstract Resource getAllocatedResource(); + + @Public + @Unstable + public abstract void setAllocatedResource(Resource resource); + + /** + * Get the allocated NodeId where container is running. + * + * @return allocated NodeId where container is running. + */ + @Public + @Unstable + public abstract NodeId getAssignedNode(); + + @Public + @Unstable + public abstract void setAssignedNode(NodeId nodeId); + + /** + * Get the allocated Priority of the container. + * + * @return allocated Priority of the container. + */ + @Public + @Unstable + public abstract Priority getPriority(); + + @Public + @Unstable + public abstract void setPriority(Priority priority); + + /** + * Get the Start time of the container. + * + * @return Start time of the container + */ + @Public + @Unstable + public abstract long getStartTime(); + + @Public + @Unstable + public abstract void setStartTime(long startTime); + + /** + * Get the Finish time of the container. + * + * @return Finish time of the container + */ + @Public + @Unstable + public abstract long getFinishTime(); + + @Public + @Unstable + public abstract void setFinishTime(long finishTime); + + /** + * Get the DiagnosticsInfo of the container. + * + * @return DiagnosticsInfo of the container + */ + @Public + @Unstable + public abstract String getDiagnosticsInfo(); + + @Public + @Unstable + public abstract void setDiagnosticsInfo(String diagnosticsInfo); + + /** + * Get the LogURL of the container. + * + * @return LogURL of the container + */ + @Public + @Unstable + public abstract String getLogUrl(); + + @Public + @Unstable + public abstract void setLogUrl(String logUrl); + + /** + * Get the final ContainerState of the container. + * + * @return final ContainerState of the container. + */ + @Public + @Unstable + public abstract ContainerState getContainerState(); + + @Public + @Unstable + public abstract void setContainerState(ContainerState containerState); + + /** + * Get the final exit status of the container. + * + * @return final exit status of the container. 
+ */ + @Public + @Unstable + public abstract int getContainerExitStatus(); + + @Public + @Unstable + public abstract void setContainerExitStatus(int containerExitStatus); + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/YarnApplicationAttemptState.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/YarnApplicationAttemptState.java new file mode 100644 index 00000000000..8b180a1f8ea --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/YarnApplicationAttemptState.java @@ -0,0 +1,66 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.api.records; + +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; + +/** + * Enumeration of various states of a RMAppAttempt. + */ +@Public +@Stable +public enum YarnApplicationAttemptState { + /** AppAttempt was just created. */ + NEW, + + /** AppAttempt has been submitted. */ + SUBMITTED, + + /** AppAttempt was scheduled */ + SCHEDULED, + + /** Acquired AM Container from Scheduler and Saving AppAttempt Data */ + ALLOCATED_SAVING, + + /** AppAttempt Data was saved */ + ALLOCATED, + + /** AppAttempt was launched */ + LAUNCHED, + + /** AppAttempt failed. */ + FAILED, + + /** AppAttempt is currently running. */ + RUNNING, + + /** AppAttempt is waiting for state bing saved */ + FINAL_SAVING, + + /** AppAttempt is finishing. */ + FINISHING, + + /** AppAttempt finished successfully. */ + FINISHED, + + /** AppAttempt was terminated by a user or admin. */ + KILLED + +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java index dc195858cb8..7fc8165813f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -263,6 +263,17 @@ public class YarnConfiguration extends Configuration { RM_PREFIX + "nodemanagers.heartbeat-interval-ms"; public static final long DEFAULT_RM_NM_HEARTBEAT_INTERVAL_MS = 1000; + /** Number of worker threads that write the history data. 
*/ + public static final String RM_HISTORY_WRITER_MULTI_THREADED_DISPATCHER_POOL_SIZE = + RM_PREFIX + "history-writer.multi-threaded-dispatcher.pool-size"; + public static final int DEFAULT_RM_HISTORY_WRITER_MULTI_THREADED_DISPATCHER_POOL_SIZE = + 10; + + /** The implementation class of ApplicationHistoryStore, which is to be used + * by RMApplicationHistoryWriter. */ + public static final String RM_HISTORY_WRITER_CLASS = RM_PREFIX + + "history-writer.class"; + //Delegation token related keys public static final String DELEGATION_KEY_UPDATE_INTERVAL_KEY = RM_PREFIX + "delegation.key.update-interval"; @@ -931,6 +942,63 @@ public class YarnConfiguration extends Configuration { public static final String YARN_APP_CONTAINER_LOG_BACKUPS = YARN_PREFIX + "app.container.log.backups"; + //////////////////////////////// + // AHS Configs + //////////////////////////////// + + public static final String AHS_PREFIX = YARN_PREFIX + "ahs."; + + /** The setting that controls whether history-service is enabled or not.. */ + public static final String YARN_HISTORY_SERVICE_ENABLED = AHS_PREFIX + + ".enabled"; + public static final boolean DEFAULT_YARN_HISTORY_SERVICE_ENABLED = false; + + /** URI for FileSystemApplicationHistoryStore */ + public static final String FS_HISTORY_STORE_URI = AHS_PREFIX + "fs-history-store.uri"; + + /** T-file compression types used to compress history data.*/ + public static final String FS_HISTORY_STORE_COMPRESSION_TYPE = AHS_PREFIX + "fs-history-store.compression-type"; + public static final String DEFAULT_FS_HISTORY_STORE_COMPRESSION_TYPE = "none"; + + /** AHS store class */ + public static final String AHS_STORE = AHS_PREFIX + "store.class"; + + /** host:port address for Application History Server API. */ + public static final String AHS_ADDRESS = AHS_PREFIX + "address"; + public static final int DEFAULT_AHS_PORT = 10200; + public static final String DEFAULT_AHS_ADDRESS = "0.0.0.0:" + + DEFAULT_AHS_PORT; + + /** The number of threads to handle client API requests. 
*/ + public static final String AHS_CLIENT_THREAD_COUNT = AHS_PREFIX + + "client.thread-count"; + public static final int DEFAULT_AHS_CLIENT_THREAD_COUNT = 10; + + + /** The address of the AHS web application.*/ + public static final String AHS_WEBAPP_ADDRESS = AHS_PREFIX + + "webapp.address"; + + public static final int DEFAULT_AHS_WEBAPP_PORT = 8188; + public static final String DEFAULT_AHS_WEBAPP_ADDRESS = "0.0.0.0:" + + DEFAULT_AHS_WEBAPP_PORT; + + /** The https address of the AHS web application.*/ + public static final String AHS_WEBAPP_HTTPS_ADDRESS = AHS_PREFIX + + "webapp.https.address"; + + public static final int DEFAULT_AHS_WEBAPP_HTTPS_PORT = 8190; + public static final String DEFAULT_AHS_WEBAPP_HTTPS_ADDRESS = "0.0.0.0:" + + DEFAULT_AHS_WEBAPP_HTTPS_PORT; + + /**The kerberos principal to be used for spnego filter for AHS.*/ + public static final String AHS_WEBAPP_SPNEGO_USER_NAME_KEY = + AHS_PREFIX + "webapp.spnego-principal"; + + /**The kerberos keytab to be used for spnego filter for AHS.*/ + public static final String AHS_WEBAPP_SPNEGO_KEYTAB_FILE_KEY = + AHS_PREFIX + "webapp.spnego-keytab-file"; + //////////////////////////////// // Other Configs //////////////////////////////// diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/ApplicationAttemptNotFoundException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/ApplicationAttemptNotFoundException.java new file mode 100644 index 00000000000..59ebf5bc407 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/ApplicationAttemptNotFoundException.java @@ -0,0 +1,48 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package org.apache.hadoop.yarn.exceptions;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.ApplicationHistoryProtocol;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest;
+
+/**
+ * This exception is thrown on
+ * {@link ApplicationHistoryProtocol#getApplicationAttemptReport(GetApplicationAttemptReportRequest)}
+ * API when the Application Attempt doesn't exist in the Application History
+ * Server.
+ */
+@Public
+@Unstable
+public class ApplicationAttemptNotFoundException extends YarnException {
+
+  private static final long serialVersionUID = 8694508L;
+
+  public ApplicationAttemptNotFoundException(Throwable cause) {
+    super(cause);
+  }
+
+  public ApplicationAttemptNotFoundException(String message) {
+    super(message);
+  }
+
+  public ApplicationAttemptNotFoundException(String message, Throwable cause) {
+    super(message, cause);
+  }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/ApplicationNotFoundException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/ApplicationNotFoundException.java
index 8f9a9e25715..da83c397e8f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/ApplicationNotFoundException.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/ApplicationNotFoundException.java
@@ -18,14 +18,19 @@
 package org.apache.hadoop.yarn.exceptions;
 
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
 import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
 
 /**
  * This exception is thrown on
- * {@link ApplicationClientProtocol#getApplicationReport(GetApplicationReportRequest)} API
- * when the Application doesn't exist in RM
+ * {@link ApplicationClientProtocol#getApplicationReport(GetApplicationReportRequest)}
+ * API when the Application doesn't exist in either
+ * the RM or the AHS.
  */
+@Public
+@Unstable
 public class ApplicationNotFoundException extends YarnException{
 
   private static final long serialVersionUID = 8694408L;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/ContainerNotFoundException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/ContainerNotFoundException.java
new file mode 100644
index 00000000000..d5ef27b4e9b
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/ContainerNotFoundException.java
@@ -0,0 +1,48 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.exceptions;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.ApplicationHistoryProtocol;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportRequest;
+
+/**
+ * This exception is thrown on
+ * {@link ApplicationHistoryProtocol#getContainerReport(GetContainerReportRequest)}
+ * API when the container doesn't exist in the AHS.
+ */
+@Public
+@Unstable
+public class ContainerNotFoundException extends YarnException {
+
+  private static final long serialVersionUID = 8694608L;
+
+  public ContainerNotFoundException(Throwable cause) {
+    super(cause);
+  }
+
+  public ContainerNotFoundException(String message) {
+    super(message);
+  }
+
+  public ContainerNotFoundException(String message, Throwable cause) {
+    super(message, cause);
+  }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/application_history_client.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/application_history_client.proto
new file mode 100644
index 00000000000..7ad06c9cab1
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/application_history_client.proto
@@ -0,0 +1,39 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +option java_package = "org.apache.hadoop.yarn.proto"; +option java_outer_classname = "ApplicationHistoryProtocol"; +option java_generic_services = true; +option java_generate_equals_and_hash = true; +package hadoop.yarn; + +import "Security.proto"; +import "yarn_service_protos.proto"; + +service ApplicationHistoryProtocolService { + rpc getApplicationReport (GetApplicationReportRequestProto) returns (GetApplicationReportResponseProto); + rpc getApplications (GetApplicationsRequestProto) returns (GetApplicationsResponseProto); + rpc getApplicationAttemptReport (GetApplicationAttemptReportRequestProto) returns (GetApplicationAttemptReportResponseProto); + rpc getApplicationAttempts (GetApplicationAttemptsRequestProto) returns (GetApplicationAttemptsResponseProto); + rpc getContainerReport (GetContainerReportRequestProto) returns (GetContainerReportResponseProto); + rpc getContainers (GetContainersRequestProto) returns (GetContainersResponseProto); + rpc getDelegationToken(hadoop.common.GetDelegationTokenRequestProto) returns (hadoop.common.GetDelegationTokenResponseProto); + rpc renewDelegationToken(hadoop.common.RenewDelegationTokenRequestProto) returns (hadoop.common.RenewDelegationTokenResponseProto); + rpc cancelDelegationToken(hadoop.common.CancelDelegationTokenRequestProto) returns (hadoop.common.CancelDelegationTokenResponseProto); +} + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/application_history_server.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/application_history_server.proto new file mode 100644 index 00000000000..64e7af87f93 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/application_history_server.proto @@ -0,0 +1,113 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +option java_package = "org.apache.hadoop.yarn.proto"; +option java_outer_classname = "ApplicationHistoryServerProtos"; +option java_generic_services = true; +option java_generate_equals_and_hash = true; +package hadoop.yarn; + +import "yarn_protos.proto"; + +message ApplicationHistoryDataProto { + optional ApplicationIdProto application_id = 1; + optional string application_name = 2; + optional string application_type = 3; + optional string user = 4; + optional string queue = 5; + optional int64 submit_time = 6; + optional int64 start_time = 7; + optional int64 finish_time = 8; + optional string diagnostics_info = 9; + optional FinalApplicationStatusProto final_application_status = 10; + optional YarnApplicationStateProto yarn_application_state = 11; +} + +message ApplicationStartDataProto { + optional ApplicationIdProto application_id = 1; + optional string application_name = 2; + optional string application_type = 3; + optional string user = 4; + optional string queue = 5; + optional int64 submit_time = 6; + optional int64 start_time = 7; +} + +message ApplicationFinishDataProto { + optional ApplicationIdProto application_id = 1; + optional int64 finish_time = 2; + optional string diagnostics_info = 3; + optional FinalApplicationStatusProto final_application_status = 4; + optional YarnApplicationStateProto yarn_application_state = 5; +} + +message ApplicationAttemptHistoryDataProto { + optional ApplicationAttemptIdProto application_attempt_id = 1; + optional string host = 2; + optional int32 rpc_port = 3; + optional string tracking_url = 4; + optional string diagnostics_info = 5; + optional FinalApplicationStatusProto final_application_status = 6; + optional ContainerIdProto master_container_id = 7; + optional YarnApplicationAttemptStateProto yarn_application_attempt_state = 8; +} + +message ApplicationAttemptStartDataProto { + optional ApplicationAttemptIdProto application_attempt_id = 1; + optional string host = 2; + optional int32 rpc_port = 3; + optional ContainerIdProto master_container_id = 4; +} + +message ApplicationAttemptFinishDataProto { + optional ApplicationAttemptIdProto application_attempt_id = 1; + optional string tracking_url = 2; + optional string diagnostics_info = 3; + optional FinalApplicationStatusProto final_application_status = 4; + optional YarnApplicationAttemptStateProto yarn_application_attempt_state = 5; +} + +message ContainerHistoryDataProto { + optional ContainerIdProto container_id = 1; + optional ResourceProto allocated_resource = 2; + optional NodeIdProto assigned_node_id = 3; + optional PriorityProto priority = 4; + optional int64 start_time = 5; + optional int64 finish_time = 6; + optional string diagnostics_info = 7; + optional string log_url = 8; + optional int32 container_exit_status = 9; + optional ContainerStateProto container_state = 10; +} + +message ContainerStartDataProto { + optional ContainerIdProto container_id = 1; + optional ResourceProto allocated_resource = 2; + optional NodeIdProto assigned_node_id = 3; + optional PriorityProto priority = 4; + optional int64 start_time = 5; +} + +message ContainerFinishDataProto { + optional ContainerIdProto container_id = 1; + optional int64 finish_time = 2; + optional string diagnostics_info = 3; + optional string log_url = 4; + optional int32 container_exit_status = 5; + optional ContainerStateProto container_state = 6; +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto index c90c263d5ee..c86d97149c7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto @@ -87,6 +87,19 @@ message ContainerProto { optional hadoop.common.TokenProto container_token = 6; } +message ContainerReportProto { + optional ContainerIdProto container_id = 1; + optional ResourceProto resource = 2; + optional NodeIdProto node_id = 3; + optional PriorityProto priority = 4; + optional int64 start_time = 5; + optional int64 finish_time = 6; + optional string diagnostics_info = 7 [default = "N/A"]; + optional string log_url = 8; + optional int32 container_exit_status = 9; + optional ContainerStateProto container_state = 10; +} + enum YarnApplicationStateProto { NEW = 1; NEW_SAVING = 2; @@ -98,6 +111,21 @@ enum YarnApplicationStateProto { KILLED = 8; } +enum YarnApplicationAttemptStateProto { + APP_ATTEMPT_NEW = 1; + APP_ATTEMPT_SUBMITTED = 2; + APP_ATTEMPT_SCHEDULED = 3; + APP_ATTEMPT_ALLOCATED_SAVING = 4; + APP_ATTEMPT_ALLOCATED = 5; + APP_ATTEMPT_LAUNCHED = 6; + APP_ATTEMPT_FAILED = 7; + APP_ATTEMPT_RUNNING = 8; + APP_ATTEMPT_FINAL_SAVING = 9; + APP_ATTEMPT_FINISHING = 10; + APP_ATTEMPT_FINISHED = 11; + APP_ATTEMPT_KILLED = 12; +} + enum FinalApplicationStatusProto { APP_UNDEFINED = 0; APP_SUCCEEDED = 1; @@ -164,6 +192,16 @@ message ApplicationReportProto { optional hadoop.common.TokenProto am_rm_token = 19; } +message ApplicationAttemptReportProto { + optional ApplicationAttemptIdProto application_attempt_id = 1; + optional string host = 2; + optional int32 rpc_port = 3; + optional string tracking_url = 4; + optional string diagnostics = 5 [default = "N/A"]; + optional YarnApplicationAttemptStateProto yarn_application_attempt_state = 6; + optional ContainerIdProto am_container_id = 7; +} + enum NodeStateProto { NS_NEW = 1; NS_RUNNING = 2; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto index d0e1b267e8a..139b3bb1394 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto @@ -240,3 +240,39 @@ message GetContainerStatusesResponseProto { repeated ContainerStatusProto status = 1; repeated ContainerExceptionMapProto failed_requests = 2; } + +////////////////////////////////////////////////////// +/////// Application_History_Protocol ///////////////// +////////////////////////////////////////////////////// + +message GetApplicationAttemptReportRequestProto { + optional ApplicationAttemptIdProto application_attempt_id = 1; +} + +message GetApplicationAttemptReportResponseProto { + optional ApplicationAttemptReportProto application_attempt_report = 1; +} + +message GetApplicationAttemptsRequestProto { + optional ApplicationIdProto application_id = 1; +} + +message GetApplicationAttemptsResponseProto { + repeated ApplicationAttemptReportProto application_attempts = 1; +} + +message GetContainerReportRequestProto { + optional ContainerIdProto container_id = 1; +} + +message GetContainerReportResponseProto { + optional ContainerReportProto container_report = 1; +} + +message GetContainersRequestProto { + optional ApplicationAttemptIdProto application_attempt_id = 1; +} + +message GetContainersResponseProto { + 
repeated ContainerReportProto containers = 1; +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AHSClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AHSClient.java new file mode 100644 index 00000000000..b3c5308240a --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AHSClient.java @@ -0,0 +1,180 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.client.api; + +import java.io.IOException; +import java.util.List; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.service.AbstractService; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ApplicationReport; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.ContainerReport; +import org.apache.hadoop.yarn.client.api.impl.AHSClientImpl; +import org.apache.hadoop.yarn.exceptions.ApplicationAttemptNotFoundException; +import org.apache.hadoop.yarn.exceptions.ContainerNotFoundException; +import org.apache.hadoop.yarn.exceptions.YarnException; + +@InterfaceAudience.Public +@InterfaceStability.Stable +public abstract class AHSClient extends AbstractService { + + /** + * Create a new instance of AHSClient. + */ + @Public + public static AHSClient createAHSClient() { + AHSClient client = new AHSClientImpl(); + return client; + } + + @Private + public AHSClient(String name) { + super(name); + } + + /** + *

+   * <p>
+   * Get a report of the given Application.
+   * </p>
+   *
+   * <p>
+   * In secure mode, YARN verifies access to the application, queue
+   * etc. before accepting the request.
+   * </p>
+   *
+   * <p>
+   * If the user does not have <code>VIEW_APP</code> access then the following
+   * fields in the report will be set to stubbed values:
+   * <ul>
+   * <li>host - set to "N/A"</li>
+   * <li>RPC port - set to -1</li>
+   * <li>client token - set to "N/A"</li>
+   * <li>diagnostics - set to "N/A"</li>
+   * <li>tracking URL - set to "N/A"</li>
+   * <li>original tracking URL - set to "N/A"</li>
+   * <li>resource usage report - all values are -1</li>
+   * </ul>
+   * </p>
+   *
+   * @param appId
+   *          {@link ApplicationId} of the application that needs a report
+   * @return application report
+   * @throws YarnException
+   * @throws IOException
+   */
+  public abstract ApplicationReport getApplicationReport(ApplicationId appId)
+      throws YarnException, IOException;
+
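A minimal usage sketch of this API (illustrative only, not part of the patch): it assumes an Application History Server is reachable at the configured yarn.ahs.address, and the application id below is made up.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.api.records.ApplicationId;
    import org.apache.hadoop.yarn.api.records.ApplicationReport;
    import org.apache.hadoop.yarn.client.api.AHSClient;

    public class AHSReportSketch {
      public static void main(String[] args) throws Exception {
        // AHSClient is an AbstractService: init, start, use, stop.
        AHSClient ahs = AHSClient.createAHSClient();
        ahs.init(new Configuration());
        ahs.start();
        try {
          // Hypothetical id: cluster timestamp 1234, application number 5.
          ApplicationId appId = ApplicationId.newInstance(1234, 5);
          ApplicationReport report = ahs.getApplicationReport(appId);
          System.out.println(appId + " is " + report.getYarnApplicationState());
        } finally {
          ahs.stop();
        }
      }
    }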

+  /**
+   * <p>
+   * Get a report (ApplicationReport) of all Applications in the cluster.
+   * </p>
+   *
+   * <p>
+   * If the user does not have <code>VIEW_APP</code> access for an application
+   * then the corresponding report will be filtered as described in
+   * {@link #getApplicationReport(ApplicationId)}.
+   * </p>
+   *
+   * @return a list of reports for all applications
+   * @throws YarnException
+   * @throws IOException
+   */
+  public abstract List<ApplicationReport> getApplications()
+      throws YarnException, IOException;
+
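Continuing the sketch above, enumerating everything the history store knows about is a single loop:

    // Assumes the started AHSClient 'ahs' from the previous sketch.
    for (ApplicationReport app : ahs.getApplications()) {
      System.out.println(app.getApplicationId() + "\t" + app.getName()
          + "\t" + app.getFinalApplicationStatus());
    }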

+  /**
+   * <p>
+   * Get a report of the given ApplicationAttempt.
+   * </p>
+   *
+   * <p>
+   * In secure mode, YARN verifies access to the application, queue
+   * etc. before accepting the request.
+   * </p>
+   *
+   * @param applicationAttemptId
+   *          {@link ApplicationAttemptId} of the application attempt that
+   *          needs a report
+   * @return application attempt report
+   * @throws YarnException
+   * @throws ApplicationAttemptNotFoundException
+   *           if the application attempt is not found
+   * @throws IOException
+   */
+  public abstract ApplicationAttemptReport getApplicationAttemptReport(
+      ApplicationAttemptId applicationAttemptId) throws YarnException,
+      IOException;
+
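The per-attempt lookup pairs with the ApplicationAttemptNotFoundException introduced earlier in this patch; a sketch, reusing appId and ahs from the previous examples:

    import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
    import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport;
    import org.apache.hadoop.yarn.exceptions.ApplicationAttemptNotFoundException;

    ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
    try {
      ApplicationAttemptReport attempt = ahs.getApplicationAttemptReport(attemptId);
      System.out.println("AM container: " + attempt.getAMContainerId());
    } catch (ApplicationAttemptNotFoundException e) {
      // Raised when the attempt was never written to the history store.
      System.err.println("Unknown attempt: " + attemptId);
    }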

+  /**
+   * <p>
+   * Get a report of all (ApplicationAttempts) of the given Application in the
+   * cluster.
+   * </p>
+   *
+   * @param applicationId
+   * @return a list of reports for all application attempts of the specified
+   *         application
+   * @throws YarnException
+   * @throws IOException
+   */
+  public abstract List<ApplicationAttemptReport> getApplicationAttempts(
+      ApplicationId applicationId) throws YarnException, IOException;
+

+  /**
+   * <p>
+   * Get a report of the given Container.
+   * </p>
+   *
+   * <p>
+   * In secure mode, YARN verifies access to the application, queue
+   * etc. before accepting the request.
+   * </p>
+   *
+   * @param containerId
+   *          {@link ContainerId} of the container that needs a report
+   * @return container report
+   * @throws YarnException
+   * @throws ContainerNotFoundException
+   *           if the container is not found
+   * @throws IOException
+   */
+  public abstract ContainerReport getContainerReport(ContainerId containerId)
+      throws YarnException, IOException;
+
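Container lookups follow the same pattern, with ContainerNotFoundException signalling a miss; again a sketch, reusing attemptId and ahs from above:

    import org.apache.hadoop.yarn.api.records.ContainerId;
    import org.apache.hadoop.yarn.api.records.ContainerReport;
    import org.apache.hadoop.yarn.exceptions.ContainerNotFoundException;

    ContainerId containerId = ContainerId.newInstance(attemptId, 1);
    try {
      ContainerReport container = ahs.getContainerReport(containerId);
      System.out.println(container.getContainerState() + ", logs: "
          + container.getLogUrl());
    } catch (ContainerNotFoundException e) {
      System.err.println("Unknown container: " + containerId);
    }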

+  /**
+   * <p>
+   * Get a report of all (Containers) of the given ApplicationAttempt in the
+   * cluster.
+   * </p>
+   *
+   * @param applicationAttemptId
+   * @return a list of reports of all containers for the specified application
+   *         attempt
+   * @throws YarnException
+   * @throws IOException
+   */
+  public abstract List<ContainerReport> getContainers(
+      ApplicationAttemptId applicationAttemptId) throws YarnException,
+      IOException;
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/YarnClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/YarnClient.java
index 155ba5d51a5..dd27a0209da 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/YarnClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/YarnClient.java
@@ -29,9 +29,13 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationReport;
 import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerReport;
 import org.apache.hadoop.yarn.api.records.NodeReport;
 import org.apache.hadoop.yarn.api.records.NodeState;
 import org.apache.hadoop.yarn.api.records.QueueInfo;
@@ -40,6 +44,7 @@ import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.api.records.YarnClusterMetrics;
 import org.apache.hadoop.yarn.client.api.impl.YarnClientImpl;
+import org.apache.hadoop.yarn.exceptions.ContainerNotFoundException;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
@@ -360,4 +365,75 @@ public abstract List getChildQueueInfos(String parent) throws YarnExc
    */
  public abstract List<QueueUserACLInfo> getQueueAclsInfo() throws YarnException,
      IOException;
+

+  /**
+   * <p>
+   * Get a report of the given ApplicationAttempt.
+   * </p>
+   *
+   * <p>
+   * In secure mode, YARN verifies access to the application, queue
+   * etc. before accepting the request.
+   * </p>
+   *
+   * @param applicationAttemptId
+   *          {@link ApplicationAttemptId} of the application attempt that
+   *          needs a report
+   * @return application attempt report
+   * @throws YarnException
+   * @throws ApplicationAttemptNotFoundException
+   *           if the application attempt is not found
+   * @throws IOException
+   */
+  public abstract ApplicationAttemptReport getApplicationAttemptReport(
+      ApplicationAttemptId applicationAttemptId) throws YarnException, IOException;
+
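Unlike AHSClient, YarnClientImpl (further below) only forwards these calls to the history server when the history service is switched on; otherwise they fail with YarnException("History service is not enabled."). A configuration sketch using the YarnConfiguration constants added by this patch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.client.api.YarnClient;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    Configuration conf = new Configuration();
    // Equivalent to setting yarn.ahs.enabled to true in yarn-site.xml.
    conf.setBoolean(YarnConfiguration.YARN_HISTORY_SERVICE_ENABLED, true);
    YarnClient yarn = YarnClient.createYarnClient();
    yarn.init(conf);
    yarn.start();
    // Attempt/container lookups are now served by the AHS.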

+  /**
+   * <p>
+   * Get a report of all (ApplicationAttempts) of the given Application in the
+   * cluster.
+   * </p>
+   *
+   * @param applicationId
+   * @return a list of reports for all application attempts of the specified
+   *         application.
+   * @throws YarnException
+   * @throws IOException
+   */
+  public abstract List<ApplicationAttemptReport> getApplicationAttempts(
+      ApplicationId applicationId) throws YarnException, IOException;
+
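Enumerating attempts through YarnClient then mirrors the AHSClient loop shown earlier (sketch; appId and yarn as in the previous examples):

    for (ApplicationAttemptReport r : yarn.getApplicationAttempts(appId)) {
      System.out.println(r.getApplicationAttemptId() + "\t"
          + r.getYarnApplicationAttemptState());
    }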

+  /**
+   * <p>
+   * Get a report of the given Container.
+   * </p>
+   *
+   * <p>
+   * In secure mode, YARN verifies access to the application, queue
+   * etc. before accepting the request.
+   * </p>
+   *
+   * @param containerId
+   *          {@link ContainerId} of the container that needs a report
+   * @return container report
+   * @throws YarnException
+   * @throws ContainerNotFoundException
+   *           if the container is not found.
+   * @throws IOException
+   */
+  public abstract ContainerReport getContainerReport(ContainerId containerId)
+      throws YarnException, IOException;
+

+  /**
+   * <p>
+   * Get a report of all (Containers) of the given ApplicationAttempt in the
+   * cluster.
+   * </p>
+   *

+ * + * @param applicationAttemptId + * @return a list of reports of all containers for specified application + * attempts + * @throws YarnException + * @throws IOException + */ + public abstract List getContainers( + ApplicationAttemptId applicationAttemptId) throws YarnException, + IOException; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AHSClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AHSClientImpl.java new file mode 100644 index 00000000000..eea2e18559e --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AHSClientImpl.java @@ -0,0 +1,155 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.client.api.impl; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.List; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.ipc.RPC; +import org.apache.hadoop.yarn.api.ApplicationHistoryProtocol; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetContainersRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetContainersResponse; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ApplicationReport; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.ContainerReport; +import org.apache.hadoop.yarn.client.AHSProxy; +import org.apache.hadoop.yarn.client.api.AHSClient; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import 
org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; + +@Private +@Unstable +public class AHSClientImpl extends AHSClient { + + protected ApplicationHistoryProtocol ahsClient; + protected InetSocketAddress ahsAddress; + + public AHSClientImpl() { + super(AHSClientImpl.class.getName()); + } + + private static InetSocketAddress getAHSAddress(Configuration conf) { + return conf.getSocketAddr(YarnConfiguration.AHS_ADDRESS, + YarnConfiguration.DEFAULT_AHS_ADDRESS, + YarnConfiguration.DEFAULT_AHS_PORT); + } + + @Override + protected void serviceInit(Configuration conf) throws Exception { + this.ahsAddress = getAHSAddress(conf); + super.serviceInit(conf); + } + + @Override + protected void serviceStart() throws Exception { + try { + ahsClient = AHSProxy.createAHSProxy(getConfig(), + ApplicationHistoryProtocol.class, this.ahsAddress); + } catch (IOException e) { + throw new YarnRuntimeException(e); + } + super.serviceStart(); + } + + @Override + protected void serviceStop() throws Exception { + if (this.ahsClient != null) { + RPC.stopProxy(this.ahsClient); + } + super.serviceStop(); + } + + @Override + public ApplicationReport getApplicationReport(ApplicationId appId) + throws YarnException, IOException { + GetApplicationReportRequest request = GetApplicationReportRequest + .newInstance(appId); + GetApplicationReportResponse response = ahsClient + .getApplicationReport(request); + return response.getApplicationReport(); + } + + @Override + public List getApplications() throws YarnException, + IOException { + GetApplicationsRequest request = GetApplicationsRequest.newInstance(null, + null); + GetApplicationsResponse response = ahsClient.getApplications(request); + return response.getApplicationList(); + } + + @Override + public ApplicationAttemptReport getApplicationAttemptReport( + ApplicationAttemptId applicationAttemptId) throws YarnException, + IOException { + GetApplicationAttemptReportRequest request = GetApplicationAttemptReportRequest + .newInstance(applicationAttemptId); + GetApplicationAttemptReportResponse response = ahsClient + .getApplicationAttemptReport(request); + return response.getApplicationAttemptReport(); + } + + @Override + public List getApplicationAttempts( + ApplicationId appId) throws YarnException, IOException { + GetApplicationAttemptsRequest request = GetApplicationAttemptsRequest + .newInstance(appId); + GetApplicationAttemptsResponse response = ahsClient + .getApplicationAttempts(request); + return response.getApplicationAttemptList(); + } + + @Override + public ContainerReport getContainerReport(ContainerId containerId) + throws YarnException, IOException { + GetContainerReportRequest request = GetContainerReportRequest + .newInstance(containerId); + GetContainerReportResponse response = ahsClient.getContainerReport(request); + return response.getContainerReport(); + } + + @Override + public List getContainers( + ApplicationAttemptId applicationAttemptId) throws YarnException, + IOException { + GetContainersRequest request = GetContainersRequest + .newInstance(applicationAttemptId); + GetContainersResponse response = ahsClient.getContainers(request); + return response.getContainerList(); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java index a5ff9f67dc9..2e754335783 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java @@ -49,9 +49,13 @@ import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest; import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationResponse; import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationReport; import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.ContainerReport; import org.apache.hadoop.yarn.api.records.NodeReport; import org.apache.hadoop.yarn.api.records.NodeState; import org.apache.hadoop.yarn.api.records.QueueInfo; @@ -60,9 +64,11 @@ import org.apache.hadoop.yarn.api.records.YarnApplicationState; import org.apache.hadoop.yarn.api.records.YarnClusterMetrics; import org.apache.hadoop.yarn.client.ClientRMProxy; +import org.apache.hadoop.yarn.client.api.AHSClient; import org.apache.hadoop.yarn.client.api.YarnClient; import org.apache.hadoop.yarn.client.api.YarnClientApplication; import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.security.AMRMTokenIdentifier; @@ -80,6 +86,8 @@ public class YarnClientImpl extends YarnClient { protected ApplicationClientProtocol rmClient; protected long submitPollIntervalMillis; private long asyncApiPollIntervalMillis; + protected AHSClient historyClient; + private boolean historyServiceEnabled; private static final String ROOT = "root"; @@ -100,6 +108,14 @@ protected void serviceInit(Configuration conf) throws Exception { YarnConfiguration.YARN_CLIENT_APP_SUBMISSION_POLL_INTERVAL_MS, YarnConfiguration.DEFAULT_YARN_CLIENT_APPLICATION_CLIENT_PROTOCOL_POLL_INTERVAL_MS); } + + if (conf.getBoolean(YarnConfiguration.YARN_HISTORY_SERVICE_ENABLED, + YarnConfiguration.DEFAULT_YARN_HISTORY_SERVICE_ENABLED)) { + historyServiceEnabled = true; + historyClient = AHSClientImpl.createAHSClient(); + historyClient.init(getConfig()); + } + super.serviceInit(conf); } @@ -107,7 +123,10 @@ protected void serviceInit(Configuration conf) throws Exception { protected void serviceStart() throws Exception { try { rmClient = ClientRMProxy.createRMProxy(getConfig(), - ApplicationClientProtocol.class); + ApplicationClientProtocol.class); + if (historyServiceEnabled) { + historyClient.start(); + } } catch (IOException e) { throw new YarnRuntimeException(e); } @@ -119,6 +138,9 @@ protected void serviceStop() throws Exception { if (this.rmClient != null) { RPC.stopProxy(this.rmClient); } + if (historyServiceEnabled) { + historyClient.stop(); + } super.serviceStop(); } @@ -207,11 +229,27 @@ public void killApplication(ApplicationId applicationId) @Override public ApplicationReport getApplicationReport(ApplicationId appId) throws YarnException, IOException { - GetApplicationReportRequest request = - Records.newRecord(GetApplicationReportRequest.class); - request.setApplicationId(appId); - 
GetApplicationReportResponse response = - rmClient.getApplicationReport(request); + GetApplicationReportResponse response = null; + try { + GetApplicationReportRequest request = Records + .newRecord(GetApplicationReportRequest.class); + request.setApplicationId(appId); + response = rmClient.getApplicationReport(request); + } catch (YarnException e) { + + if (!historyServiceEnabled) { + // Just throw it as usual if historyService is not enabled. + throw e; + } + + // Even if history-service is enabled, treat all exceptions still the same + // except the following + if (!(e.getClass() == ApplicationNotFoundException.class)) { + throw e; + } + + return historyClient.getApplicationReport(appId); + } return response.getApplicationReport(); } @@ -373,4 +411,41 @@ private void getChildQueues(QueueInfo parent, List queues, public void setRMClient(ApplicationClientProtocol rmClient) { this.rmClient = rmClient; } + + @Override + public ApplicationAttemptReport getApplicationAttemptReport( + ApplicationAttemptId appAttemptId) throws YarnException, IOException { + if (historyServiceEnabled) { + return historyClient.getApplicationAttemptReport(appAttemptId); + } + throw new YarnException("History service is not enabled."); + } + + @Override + public List getApplicationAttempts( + ApplicationId appId) throws YarnException, IOException { + if (historyServiceEnabled) { + return historyClient.getApplicationAttempts(appId); + } + throw new YarnException("History service is not enabled."); + } + + @Override + public ContainerReport getContainerReport(ContainerId containerId) + throws YarnException, IOException { + if (historyServiceEnabled) { + return historyClient.getContainerReport(containerId); + } + throw new YarnException("History service is not enabled."); + } + + @Override + public List getContainers( + ApplicationAttemptId applicationAttemptId) throws YarnException, + IOException { + if (historyServiceEnabled) { + return historyClient.getContainers(applicationAttemptId); + } + throw new YarnException("History service is not enabled."); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java index a7b7d654643..9b465b78b28 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java @@ -35,8 +35,10 @@ import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.util.ToolRunner; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationReport; +import org.apache.hadoop.yarn.api.records.ContainerReport; import org.apache.hadoop.yarn.api.records.YarnApplicationState; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.util.ConverterUtils; @@ -46,13 +48,22 @@ @Private @Unstable public class ApplicationCLI extends YarnCLI { - private static final String APPLICATIONS_PATTERN = - "%30s\t%20s\t%20s\t%10s\t%10s\t%18s\t%18s\t%15s\t%35s" + - System.getProperty("line.separator"); + private static final String APPLICATIONS_PATTERN = + 
"%30s\t%20s\t%20s\t%10s\t%10s\t%18s\t%18s\t%15s\t%35s" + + System.getProperty("line.separator"); + private static final String APPLICATION_ATTEMPTS_PATTERN = + "%30s\t%20s\t%35s\t%35s" + + System.getProperty("line.separator"); + private static final String CONTAINER_PATTERN = + "%30s\t%20s\t%20s\t%20s\t%20s\t%35s" + + System.getProperty("line.separator"); private static final String APP_TYPE_CMD = "appTypes"; - private static final String APP_STATE_CMD ="appStates"; + private static final String APP_STATE_CMD = "appStates"; private static final String ALLSTATES_OPTION = "ALL"; + public static final String APPLICATION = "application"; + public static final String APPLICATION_ATTEMPT = "applicationattempt"; + public static final String CONTAINER = "container"; private boolean allAppStates; @@ -69,23 +80,33 @@ public static void main(String[] args) throws Exception { public int run(String[] args) throws Exception { Options opts = new Options(); - opts.addOption(STATUS_CMD, true, "Prints the status of the application."); - opts.addOption(LIST_CMD, false, "List applications from the RM. " + - "Supports optional use of -appTypes to filter applications " + - "based on application type, " + - "and -appStates to filter applications based on application state"); + opts.addOption(STATUS_CMD, true, + "Prints the status of the application."); + if (args.length > 0 + && args[0].compareToIgnoreCase(APPLICATION_ATTEMPT) == 0) { + opts.addOption(LIST_CMD, true, + "List application attempts for aplication from AHS. "); + } else if (args.length > 0 && args[0].compareToIgnoreCase("container") == 0) { + opts.addOption(LIST_CMD, true, + "List containers for application attempts from AHS. "); + } else { + opts.addOption(LIST_CMD, false, "List applications from the RM. " + + "Supports optional use of -appTypes to filter applications " + + "based on application type, " + + "and -appStates to filter applications based on application state"); + } opts.addOption(KILL_CMD, true, "Kills the application."); opts.addOption(HELP_CMD, false, "Displays help for all commands."); - Option appTypeOpt = new Option(APP_TYPE_CMD, true, "Works with -list to " + - "filter applications based on " + - "input comma-separated list of application types."); + Option appTypeOpt = new Option(APP_TYPE_CMD, true, "Works with -list to " + + "filter applications based on " + + "input comma-separated list of application types."); appTypeOpt.setValueSeparator(','); appTypeOpt.setArgs(Option.UNLIMITED_VALUES); appTypeOpt.setArgName("Types"); opts.addOption(appTypeOpt); - Option appStateOpt = new Option(APP_STATE_CMD, true, "Works with -list " + - "to filter applications based on input comma-separated list of " + - "application states. " + getAllValidApplicationStates()); + Option appStateOpt = new Option(APP_STATE_CMD, true, "Works with -list " + + "to filter applications based on input comma-separated list of " + + "application states. 
" + getAllValidApplicationStates()); appStateOpt.setValueSeparator(','); appStateOpt.setArgs(Option.UNLIMITED_VALUES); appStateOpt.setArgName("States"); @@ -104,50 +125,77 @@ public int run(String[] args) throws Exception { } if (cliParser.hasOption(STATUS_CMD)) { - if (args.length != 2) { + if ((args[0].compareToIgnoreCase(APPLICATION) == 0) + || (args[0].compareToIgnoreCase(APPLICATION_ATTEMPT) == 0) + || (args[0].compareToIgnoreCase(CONTAINER) == 0)) { + if (args.length != 3) { + printUsage(opts); + return exitCode; + } + } else if (args.length != 2) { printUsage(opts); return exitCode; } - printApplicationReport(cliParser.getOptionValue(STATUS_CMD)); + if (args[0].compareToIgnoreCase(APPLICATION_ATTEMPT) == 0) { + printApplicationAttemptReport(cliParser.getOptionValue(STATUS_CMD)); + } else if (args[0].compareToIgnoreCase(CONTAINER) == 0) { + printContainerReport(cliParser.getOptionValue(STATUS_CMD)); + } else { + printApplicationReport(cliParser.getOptionValue(STATUS_CMD)); + } } else if (cliParser.hasOption(LIST_CMD)) { - allAppStates = false; - Set appTypes = new HashSet(); - if(cliParser.hasOption(APP_TYPE_CMD)) { - String[] types = cliParser.getOptionValues(APP_TYPE_CMD); - if (types != null) { - for (String type : types) { - if (!type.trim().isEmpty()) { - appTypes.add(type.toUpperCase().trim()); + if (args[0].compareToIgnoreCase(APPLICATION_ATTEMPT) == 0) { + if (args.length != 3) { + printUsage(opts); + return exitCode; + } + listApplicationAttempts(cliParser.getOptionValue(LIST_CMD)); + } else if (args[0].compareToIgnoreCase(CONTAINER) == 0) { + if (args.length != 3) { + printUsage(opts); + return exitCode; + } + listContainers(cliParser.getOptionValue(LIST_CMD)); + } else { + allAppStates = false; + Set appTypes = new HashSet(); + if (cliParser.hasOption(APP_TYPE_CMD)) { + String[] types = cliParser.getOptionValues(APP_TYPE_CMD); + if (types != null) { + for (String type : types) { + if (!type.trim().isEmpty()) { + appTypes.add(type.toUpperCase().trim()); + } } } } - } - EnumSet appStates = - EnumSet.noneOf(YarnApplicationState.class); - if (cliParser.hasOption(APP_STATE_CMD)) { - String[] states = cliParser.getOptionValues(APP_STATE_CMD); - if (states != null) { - for (String state : states) { - if (!state.trim().isEmpty()) { - if (state.trim().equalsIgnoreCase(ALLSTATES_OPTION)) { - allAppStates = true; - break; - } - try { - appStates.add(YarnApplicationState.valueOf(state.toUpperCase() - .trim())); - } catch (IllegalArgumentException ex) { - sysout.println("The application state " + state - + " is invalid."); - sysout.println(getAllValidApplicationStates()); - return exitCode; + EnumSet appStates = EnumSet + .noneOf(YarnApplicationState.class); + if (cliParser.hasOption(APP_STATE_CMD)) { + String[] states = cliParser.getOptionValues(APP_STATE_CMD); + if (states != null) { + for (String state : states) { + if (!state.trim().isEmpty()) { + if (state.trim().equalsIgnoreCase(ALLSTATES_OPTION)) { + allAppStates = true; + break; + } + try { + appStates.add(YarnApplicationState.valueOf(state + .toUpperCase().trim())); + } catch (IllegalArgumentException ex) { + sysout.println("The application state " + state + + " is invalid."); + sysout.println(getAllValidApplicationStates()); + return exitCode; + } } } } } + listApplications(appTypes, appStates); } - listApplications(appTypes, appStates); } else if (cliParser.hasOption(KILL_CMD)) { if (args.length != 2) { printUsage(opts); @@ -175,8 +223,85 @@ void printUsage(Options opts) { } /** - * Lists the applications matching the given 
application Types
-   * And application States present in the Resource Manager
+   * Prints the application attempt report for an application attempt id.
+   *
+   * @param applicationAttemptId
+   * @throws YarnException
+   */
+  private void printApplicationAttemptReport(String applicationAttemptId)
+      throws YarnException, IOException {
+    ApplicationAttemptReport appAttemptReport = client
+        .getApplicationAttemptReport(ConverterUtils
+            .toApplicationAttemptId(applicationAttemptId));
+    // Use PrintWriter.println, which uses correct platform line ending.
+    ByteArrayOutputStream baos = new ByteArrayOutputStream();
+    PrintWriter appAttemptReportStr = new PrintWriter(baos);
+    if (appAttemptReport != null) {
+      appAttemptReportStr.println("Application Attempt Report : ");
+      appAttemptReportStr.print("\tApplicationAttempt-Id : ");
+      appAttemptReportStr.println(appAttemptReport.getApplicationAttemptId());
+      appAttemptReportStr.print("\tState : ");
+      appAttemptReportStr.println(appAttemptReport
+          .getYarnApplicationAttemptState());
+      appAttemptReportStr.print("\tAMContainer : ");
+      appAttemptReportStr.println(appAttemptReport.getAMContainerId()
+          .toString());
+      appAttemptReportStr.print("\tTracking-URL : ");
+      appAttemptReportStr.println(appAttemptReport.getTrackingUrl());
+      appAttemptReportStr.print("\tRPC Port : ");
+      appAttemptReportStr.println(appAttemptReport.getRpcPort());
+      appAttemptReportStr.print("\tAM Host : ");
+      appAttemptReportStr.println(appAttemptReport.getHost());
+      appAttemptReportStr.print("\tDiagnostics : ");
+      appAttemptReportStr.print(appAttemptReport.getDiagnostics());
+    } else {
+      appAttemptReportStr.print("Application Attempt with id '"
+          + applicationAttemptId + "' doesn't exist in History Server.");
+    }
+    appAttemptReportStr.close();
+    sysout.println(baos.toString("UTF-8"));
+  }
+
+  /**
+   * Prints the container report for a container id.
+   *
+   * @param containerId
+   * @throws YarnException
+   */
+  private void printContainerReport(String containerId) throws YarnException,
+      IOException {
+    ContainerReport containerReport = client.getContainerReport((ConverterUtils
+        .toContainerId(containerId)));
+    // Use PrintWriter.println, which uses correct platform line ending.
+    ByteArrayOutputStream baos = new ByteArrayOutputStream();
+    PrintWriter containerReportStr = new PrintWriter(baos);
+    if (containerReport != null) {
+      containerReportStr.println("Container Report : ");
+      containerReportStr.print("\tContainer-Id : ");
+      containerReportStr.println(containerReport.getContainerId());
+      containerReportStr.print("\tStart-Time : ");
+      containerReportStr.println(containerReport.getStartTime());
+      containerReportStr.print("\tFinish-Time : ");
+      containerReportStr.println(containerReport.getFinishTime());
+      containerReportStr.print("\tState : ");
+      containerReportStr.println(containerReport.getContainerState());
+      containerReportStr.print("\tLOG-URL : ");
+      containerReportStr.println(containerReport.getLogUrl());
+      containerReportStr.print("\tHost : ");
+      containerReportStr.println(containerReport.getAssignedNode());
+      containerReportStr.print("\tDiagnostics : ");
+      containerReportStr.print(containerReport.getDiagnosticsInfo());
+    } else {
+      containerReportStr.print("Container with id '" + containerId
+          + "' doesn't exist in History Server.");
+    }
+    containerReportStr.close();
+    sysout.println(baos.toString("UTF-8"));
+  }
+
+  /**
+   * Lists the applications matching the given application types and
+   * application states present in the Resource Manager.
    *
    * @param appTypes
    * @param appStates
@@ -188,7 +313,7 @@ private void listApplications(Set<String> appTypes,
       IOException {
     PrintWriter writer = new PrintWriter(sysout);
     if (allAppStates) {
-      for(YarnApplicationState appState : YarnApplicationState.values()) {
+      for (YarnApplicationState appState : YarnApplicationState.values()) {
         appStates.add(appState);
       }
     } else {
@@ -199,23 +324,24 @@ private void listApplications(Set<String> appTypes,
       }
     }
 
-    List<ApplicationReport> appsReport =
-        client.getApplications(appTypes, appStates);
+    List<ApplicationReport> appsReport = client.getApplications(appTypes,
+        appStates);
 
-    writer
-      .println("Total number of applications (application-types: " + appTypes
-          + " and states: " + appStates + ")" + ":" + appsReport.size());
-    writer.printf(APPLICATIONS_PATTERN, "Application-Id",
-        "Application-Name","Application-Type", "User", "Queue",
-        "State", "Final-State","Progress", "Tracking-URL");
+    writer.println("Total number of applications (application-types: "
+        + appTypes + " and states: " + appStates + ")" + ":"
+        + appsReport.size());
+    writer.printf(APPLICATIONS_PATTERN, "Application-Id", "Application-Name",
+        "Application-Type", "User", "Queue", "State", "Final-State",
+        "Progress", "Tracking-URL");
     for (ApplicationReport appReport : appsReport) {
       DecimalFormat formatter = new DecimalFormat("###.##%");
       String progress = formatter.format(appReport.getProgress());
       writer.printf(APPLICATIONS_PATTERN, appReport.getApplicationId(),
-          appReport.getName(),appReport.getApplicationType(), appReport.getUser(),
-          appReport.getQueue(),appReport.getYarnApplicationState(),
-          appReport.getFinalApplicationStatus(),progress,
-          appReport.getOriginalTrackingUrl());
+          appReport.getName(), appReport.getApplicationType(), appReport
+              .getUser(), appReport.getQueue(), appReport
+              .getYarnApplicationState(),
+          appReport.getFinalApplicationStatus(), progress, appReport
+              .getOriginalTrackingUrl());
     }
     writer.flush();
   }
@@ -227,8 +353,8 @@ private void listApplications(Set<String> appTypes,
    * @throws YarnException
    * @throws IOException
    */
-  private void killApplication(String applicationId)
-      throws YarnException, IOException {
+  private void killApplication(String applicationId) throws YarnException,
+      IOException {
     ApplicationId appId =
ConverterUtils.toApplicationId(applicationId); ApplicationReport appReport = client.getApplicationReport(appId); if (appReport.getYarnApplicationState() == YarnApplicationState.FINISHED @@ -296,14 +422,63 @@ private void printApplicationReport(String applicationId) private String getAllValidApplicationStates() { StringBuilder sb = new StringBuilder(); - sb.append("The valid application state can be" - + " one of the following: "); + sb.append("The valid application state can be" + " one of the following: "); sb.append(ALLSTATES_OPTION + ","); - for (YarnApplicationState appState : YarnApplicationState - .values()) { - sb.append(appState+","); + for (YarnApplicationState appState : YarnApplicationState.values()) { + sb.append(appState + ","); } String output = sb.toString(); - return output.substring(0, output.length()-1); + return output.substring(0, output.length() - 1); + } + + /** + * Lists the application attempts matching the given applicationid + * + * @param applicationId + * @throws YarnException + * @throws IOException + */ + private void listApplicationAttempts(String appId) throws YarnException, + IOException { + PrintWriter writer = new PrintWriter(sysout); + + List appAttemptsReport = client + .getApplicationAttempts(ConverterUtils.toApplicationId(appId)); + writer.println("Total number of application attempts " + ":" + + appAttemptsReport.size()); + writer.printf(APPLICATION_ATTEMPTS_PATTERN, "ApplicationAttempt-Id", + "State", "AM-Container-Id", "Tracking-URL"); + for (ApplicationAttemptReport appAttemptReport : appAttemptsReport) { + writer.printf(APPLICATION_ATTEMPTS_PATTERN, appAttemptReport + .getApplicationAttemptId(), appAttemptReport + .getYarnApplicationAttemptState(), appAttemptReport + .getAMContainerId().toString(), appAttemptReport.getTrackingUrl()); + } + writer.flush(); + } + + /** + * Lists the containers matching the given application attempts + * + * @param appAttemptId + * @throws YarnException + * @throws IOException + */ + private void listContainers(String appAttemptId) throws YarnException, + IOException { + PrintWriter writer = new PrintWriter(sysout); + + List appsReport = client + .getContainers(ConverterUtils.toApplicationAttemptId(appAttemptId)); + writer.println("Total number of containers " + ":" + appsReport.size()); + writer.printf(CONTAINER_PATTERN, "Container-Id", "Start Time", + "Finish Time", "State", "Host", "LOG-URL"); + for (ContainerReport containerReport : appsReport) { + writer.printf(CONTAINER_PATTERN, containerReport.getContainerId(), + containerReport.getStartTime(), containerReport.getFinishTime(), + containerReport.getContainerState(), containerReport + .getAssignedNode(), containerReport.getLogUrl()); + } + writer.flush(); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAHSClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAHSClient.java new file mode 100644 index 00000000000..37c7e68a415 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAHSClient.java @@ -0,0 +1,415 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.client.api.impl; + +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; + +import junit.framework.Assert; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.api.ApplicationHistoryProtocol; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetContainersRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetContainersResponse; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ApplicationReport; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.ContainerReport; +import org.apache.hadoop.yarn.api.records.ContainerState; +import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; +import org.apache.hadoop.yarn.api.records.NodeId; +import org.apache.hadoop.yarn.api.records.Priority; +import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState; +import org.apache.hadoop.yarn.api.records.YarnApplicationState; +import org.apache.hadoop.yarn.client.api.AHSClient; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.junit.Test; + +public class TestAHSClient { + + @Test + public void testClientStop() { + Configuration conf = new Configuration(); + AHSClient client = AHSClient.createAHSClient(); + client.init(conf); + client.start(); + client.stop(); + } + + @Test(timeout = 10000) + public void testGetApplications() throws YarnException, IOException { + Configuration conf = new Configuration(); + final AHSClient client = new MockAHSClient(); + client.init(conf); + client.start(); + + List expectedReports = + ((MockAHSClient) client).getReports(); + + List reports = client.getApplications(); + Assert.assertEquals(reports, expectedReports); + + reports = client.getApplications(); + Assert.assertEquals(reports.size(), 4); 
+    client.stop();
+  }
+
+  @Test(timeout = 10000)
+  public void testGetApplicationReport() throws YarnException, IOException {
+    Configuration conf = new Configuration();
+    final AHSClient client = new MockAHSClient();
+    client.init(conf);
+    client.start();
+
+    List<ApplicationReport> expectedReports =
+        ((MockAHSClient) client).getReports();
+    ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
+    ApplicationReport report = client.getApplicationReport(applicationId);
+    Assert.assertEquals(report, expectedReports.get(0));
+    Assert.assertEquals(report.getApplicationId().toString(), expectedReports
+        .get(0).getApplicationId().toString());
+    client.stop();
+  }
+
+  @Test(timeout = 10000)
+  public void testGetApplicationAttempts() throws YarnException, IOException {
+    Configuration conf = new Configuration();
+    final AHSClient client = new MockAHSClient();
+    client.init(conf);
+    client.start();
+
+    ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
+    List<ApplicationAttemptReport> reports =
+        client.getApplicationAttempts(applicationId);
+    Assert.assertNotNull(reports);
+    Assert.assertEquals(reports.get(0).getApplicationAttemptId(),
+        ApplicationAttemptId.newInstance(applicationId, 1));
+    Assert.assertEquals(reports.get(1).getApplicationAttemptId(),
+        ApplicationAttemptId.newInstance(applicationId, 2));
+    client.stop();
+  }
+
+  @Test(timeout = 10000)
+  public void testGetApplicationAttempt() throws YarnException, IOException {
+    Configuration conf = new Configuration();
+    final AHSClient client = new MockAHSClient();
+    client.init(conf);
+    client.start();
+
+    List<ApplicationReport> expectedReports =
+        ((MockAHSClient) client).getReports();
+
+    ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
+    ApplicationAttemptId appAttemptId =
+        ApplicationAttemptId.newInstance(applicationId, 1);
+    ApplicationAttemptReport report =
+        client.getApplicationAttemptReport(appAttemptId);
+    Assert.assertNotNull(report);
+    Assert.assertEquals(report.getApplicationAttemptId().toString(),
+        expectedReports.get(0).getCurrentApplicationAttemptId().toString());
+    client.stop();
+  }
+
+  @Test(timeout = 10000)
+  public void testGetContainers() throws YarnException, IOException {
+    Configuration conf = new Configuration();
+    final AHSClient client = new MockAHSClient();
+    client.init(conf);
+    client.start();
+
+    ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
+    ApplicationAttemptId appAttemptId =
+        ApplicationAttemptId.newInstance(applicationId, 1);
+    List<ContainerReport> reports = client.getContainers(appAttemptId);
+    Assert.assertNotNull(reports);
+    Assert.assertEquals(reports.get(0).getContainerId(),
+        (ContainerId.newInstance(appAttemptId, 1)));
+    Assert.assertEquals(reports.get(1).getContainerId(),
+        (ContainerId.newInstance(appAttemptId, 2)));
+    client.stop();
+  }
+
+  @Test(timeout = 10000)
+  public void testGetContainerReport() throws YarnException, IOException {
+    Configuration conf = new Configuration();
+    final AHSClient client = new MockAHSClient();
+    client.init(conf);
+    client.start();
+
+    List<ApplicationReport> expectedReports =
+        ((MockAHSClient) client).getReports();
+
+    ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
+    ApplicationAttemptId appAttemptId =
+        ApplicationAttemptId.newInstance(applicationId, 1);
+    ContainerId containerId = ContainerId.newInstance(appAttemptId, 1);
+    ContainerReport report = client.getContainerReport(containerId);
+    Assert.assertNotNull(report);
+    Assert.assertEquals(report.getContainerId().toString(), (ContainerId
+        .newInstance(expectedReports.get(0).getCurrentApplicationAttemptId(), 1))
+        .toString());
+    client.stop();
+  }
+
+  private static class MockAHSClient extends AHSClientImpl {
+    // private ApplicationReport mockReport;
+    private List<ApplicationReport> reports =
+        new ArrayList<ApplicationReport>();
+    private HashMap<ApplicationId, List<ApplicationAttemptReport>> attempts =
+        new HashMap<ApplicationId, List<ApplicationAttemptReport>>();
+    private HashMap<ApplicationAttemptId, List<ContainerReport>> containers =
+        new HashMap<ApplicationAttemptId, List<ContainerReport>>();
+    GetApplicationsResponse mockAppResponse =
+        mock(GetApplicationsResponse.class);
+    GetApplicationReportResponse mockResponse =
+        mock(GetApplicationReportResponse.class);
+    GetApplicationAttemptsResponse mockAppAttemptsResponse =
+        mock(GetApplicationAttemptsResponse.class);
+    GetApplicationAttemptReportResponse mockAttemptResponse =
+        mock(GetApplicationAttemptReportResponse.class);
+    GetContainersResponse mockContainersResponse =
+        mock(GetContainersResponse.class);
+    GetContainerReportResponse mockContainerResponse =
+        mock(GetContainerReportResponse.class);
+
+    public MockAHSClient() {
+      super();
+      createAppReports();
+    }
+
+    @Override
+    public void start() {
+      ahsClient = mock(ApplicationHistoryProtocol.class);
+
+      try {
+        when(
+          ahsClient
+            .getApplicationReport(any(GetApplicationReportRequest.class)))
+          .thenReturn(mockResponse);
+        when(ahsClient.getApplications(any(GetApplicationsRequest.class)))
+          .thenReturn(mockAppResponse);
+        when(
+          ahsClient
+            .getApplicationAttemptReport(any(GetApplicationAttemptReportRequest.class)))
+          .thenReturn(mockAttemptResponse);
+        when(
+          ahsClient
+            .getApplicationAttempts(any(GetApplicationAttemptsRequest.class)))
+          .thenReturn(mockAppAttemptsResponse);
+        when(ahsClient.getContainers(any(GetContainersRequest.class)))
+          .thenReturn(mockContainersResponse);
+
+        when(ahsClient.getContainerReport(any(GetContainerReportRequest.class)))
+          .thenReturn(mockContainerResponse);
+
+      } catch (YarnException e) {
+        Assert.fail("Exception is not expected.");
+      } catch (IOException e) {
+        Assert.fail("Exception is not expected.");
+      }
+    }
+
+    @Override
+    public List<ApplicationReport> getApplications() throws YarnException,
+        IOException {
+      when(mockAppResponse.getApplicationList()).thenReturn(reports);
+      return super.getApplications();
+    }
+
+    @Override
+    public ApplicationReport getApplicationReport(ApplicationId appId)
+        throws YarnException, IOException {
+      when(mockResponse.getApplicationReport()).thenReturn(getReport(appId));
+      return super.getApplicationReport(appId);
+    }
+
+    @Override
+    public List<ApplicationAttemptReport> getApplicationAttempts(
+        ApplicationId appId) throws YarnException, IOException {
+      when(mockAppAttemptsResponse.getApplicationAttemptList()).thenReturn(
+        getAttempts(appId));
+      return super.getApplicationAttempts(appId);
+    }
+
+    @Override
+    public ApplicationAttemptReport getApplicationAttemptReport(
+        ApplicationAttemptId appAttemptId) throws YarnException, IOException {
+      when(mockAttemptResponse.getApplicationAttemptReport()).thenReturn(
+        getAttempt(appAttemptId));
+      return super.getApplicationAttemptReport(appAttemptId);
+    }
+
+    @Override
+    public List<ContainerReport>
+        getContainers(ApplicationAttemptId appAttemptId) throws YarnException,
+          IOException {
+      when(mockContainersResponse.getContainerList()).thenReturn(
+        getContainersReport(appAttemptId));
+      return super.getContainers(appAttemptId);
+    }
+
+    @Override
+    public ContainerReport getContainerReport(ContainerId containerId)
+        throws YarnException, IOException {
+      when(mockContainerResponse.getContainerReport()).thenReturn(
+        getContainer(containerId));
+      return super.getContainerReport(containerId);
+    }
+
+    @Override
+    public void stop() {
+    }
+
+    public ApplicationReport getReport(ApplicationId appId) {
+      for (int i = 0; i < reports.size(); ++i) {
+        if (appId.toString().equalsIgnoreCase(
+          reports.get(i).getApplicationId().toString())) {
+          return reports.get(i);
+        }
+      }
+      return null;
+    }
+
+    public List<ApplicationAttemptReport> getAttempts(ApplicationId appId) {
+      return attempts.get(appId);
+    }
+
+    public ApplicationAttemptReport
+        getAttempt(ApplicationAttemptId appAttemptId) {
+      return attempts.get(appAttemptId.getApplicationId()).get(0);
+    }
+
+    public List<ContainerReport> getContainersReport(
+        ApplicationAttemptId appAttemptId) {
+      return containers.get(appAttemptId);
+    }
+
+    public ContainerReport getContainer(ContainerId containerId) {
+      return containers.get(containerId.getApplicationAttemptId()).get(0);
+    }
+
+    public List<ApplicationReport> getReports() {
+      return this.reports;
+    }
+
+    private void createAppReports() {
+      ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
+      ApplicationReport newApplicationReport =
+          ApplicationReport.newInstance(applicationId,
+            ApplicationAttemptId.newInstance(applicationId, 1), "user",
+            "queue", "appname", "host", 124, null,
+            YarnApplicationState.RUNNING, "diagnostics", "url", 0, 0,
+            FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53789f, "YARN",
+            null);
+      List<ApplicationReport> applicationReports =
+          new ArrayList<ApplicationReport>();
+      applicationReports.add(newApplicationReport);
+      List<ApplicationAttemptReport> appAttempts =
+          new ArrayList<ApplicationAttemptReport>();
+      ApplicationAttemptReport attempt =
+          ApplicationAttemptReport.newInstance(
+            ApplicationAttemptId.newInstance(applicationId, 1),
+            "host",
+            124,
+            "url",
+            "diagnostics",
+            YarnApplicationAttemptState.FINISHED,
+            ContainerId.newInstance(
+              newApplicationReport.getCurrentApplicationAttemptId(), 1));
+      appAttempts.add(attempt);
+      ApplicationAttemptReport attempt1 =
+          ApplicationAttemptReport.newInstance(
+            ApplicationAttemptId.newInstance(applicationId, 2),
+            "host",
+            124,
+            "url",
+            "diagnostics",
+            YarnApplicationAttemptState.FINISHED,
+            ContainerId.newInstance(
+              newApplicationReport.getCurrentApplicationAttemptId(), 2));
+      appAttempts.add(attempt1);
+      attempts.put(applicationId, appAttempts);
+
+      List<ContainerReport> containerReports = new ArrayList<ContainerReport>();
+      ContainerReport container =
+          ContainerReport.newInstance(
+            ContainerId.newInstance(attempt.getApplicationAttemptId(), 1),
+            null, NodeId.newInstance("host", 1234), Priority.UNDEFINED, 1234,
+            5678, "diagnosticInfo", "logURL", 0, ContainerState.COMPLETE);
+      containerReports.add(container);
+
+      ContainerReport container1 =
+          ContainerReport.newInstance(
+            ContainerId.newInstance(attempt.getApplicationAttemptId(), 2),
+            null, NodeId.newInstance("host", 1234), Priority.UNDEFINED, 1234,
+            5678, "diagnosticInfo", "logURL", 0, ContainerState.COMPLETE);
+      containerReports.add(container1);
+      containers.put(attempt.getApplicationAttemptId(), containerReports);
+
+      ApplicationId applicationId2 = ApplicationId.newInstance(1234, 6);
+      ApplicationReport newApplicationReport2 =
+          ApplicationReport.newInstance(applicationId2,
+            ApplicationAttemptId.newInstance(applicationId2, 2), "user2",
+            "queue2", "appname2", "host2", 125, null,
+            YarnApplicationState.FINISHED, "diagnostics2", "url2", 2, 2,
+            FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.63789f,
+            "NON-YARN", null);
+      applicationReports.add(newApplicationReport2);
+
+      ApplicationId applicationId3 = ApplicationId.newInstance(1234, 7);
+      ApplicationReport newApplicationReport3 =
+          ApplicationReport.newInstance(applicationId3,
+            ApplicationAttemptId.newInstance(applicationId3, 3), "user3",
+            "queue3", "appname3", "host3", 126, null,
+            YarnApplicationState.RUNNING, "diagnostics3", "url3", 3, 3,
+            FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.73789f,
"MAPREDUCE", null); + applicationReports.add(newApplicationReport3); + + ApplicationId applicationId4 = ApplicationId.newInstance(1234, 8); + ApplicationReport newApplicationReport4 = + ApplicationReport.newInstance(applicationId4, + ApplicationAttemptId.newInstance(applicationId4, 4), "user4", + "queue4", "appname4", "host4", 127, null, + YarnApplicationState.FAILED, "diagnostics4", "url4", 4, 4, + FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.83789f, + "NON-MAPREDUCE", null); + applicationReports.add(newApplicationReport4); + reports = applicationReports; + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java index 00ab7895d67..dc6d98e6920 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java @@ -58,12 +58,10 @@ import org.apache.hadoop.yarn.client.api.YarnClientApplication; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnException; -import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.server.MiniYARNCluster; import org.apache.hadoop.yarn.server.resourcemanager.MockRM; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; -import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState; import org.apache.hadoop.yarn.util.Records; import org.apache.log4j.Level; import org.apache.log4j.LogManager; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java index 1d08f245d06..dd6be0d8a57 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java @@ -43,19 +43,26 @@ import org.apache.commons.lang.time.DateFormatUtils; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationReport; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.ContainerReport; +import org.apache.hadoop.yarn.api.records.ContainerState; import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.NodeReport; import org.apache.hadoop.yarn.api.records.NodeState; +import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState; import org.apache.hadoop.yarn.api.records.YarnApplicationState; import org.apache.hadoop.yarn.client.api.YarnClient; import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException; import org.apache.hadoop.yarn.util.Records; import org.junit.Before; import org.junit.Test; +import 
 import org.apache.commons.cli.Options;
@@ -113,20 +120,181 @@ public void testGetApplicationReport() throws Exception {
     verify(sysOut, times(1)).println(isA(String.class));
   }
 
+  @Test
+  public void testGetApplicationAttemptReport() throws Exception {
+    ApplicationCLI cli = createAndGetAppCLI();
+    ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
+    ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(
+        applicationId, 1);
+    ApplicationAttemptReport attemptReport = ApplicationAttemptReport
+        .newInstance(attemptId, "host", 124, "url", "diagnostics",
+          YarnApplicationAttemptState.FINISHED, ContainerId.newInstance(
+            attemptId, 1));
+    when(
+        client
+          .getApplicationAttemptReport(any(ApplicationAttemptId.class)))
+      .thenReturn(attemptReport);
+    int result = cli.run(new String[] { "applicationattempt", "-status",
+        attemptId.toString() });
+    assertEquals(0, result);
+    verify(client).getApplicationAttemptReport(attemptId);
+    ByteArrayOutputStream baos = new ByteArrayOutputStream();
+    PrintWriter pw = new PrintWriter(baos);
+    pw.println("Application Attempt Report : ");
+    pw.println("\tApplicationAttempt-Id : appattempt_1234_0005_000001");
+    pw.println("\tState : FINISHED");
+    pw.println("\tAMContainer : container_1234_0005_01_000001");
+    pw.println("\tTracking-URL : url");
+    pw.println("\tRPC Port : 124");
+    pw.println("\tAM Host : host");
+    pw.println("\tDiagnostics : diagnostics");
+    pw.close();
+    String appReportStr = baos.toString("UTF-8");
+    Assert.assertEquals(appReportStr, sysOutStream.toString());
+    verify(sysOut, times(1)).println(isA(String.class));
+  }
+
+  @Test
+  public void testGetApplicationAttempts() throws Exception {
+    ApplicationCLI cli = createAndGetAppCLI();
+    ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
+    ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(
+        applicationId, 1);
+    ApplicationAttemptId attemptId1 = ApplicationAttemptId.newInstance(
+        applicationId, 2);
+    ApplicationAttemptReport attemptReport = ApplicationAttemptReport
+        .newInstance(attemptId, "host", 124, "url", "diagnostics",
+          YarnApplicationAttemptState.FINISHED, ContainerId.newInstance(
+            attemptId, 1));
+    ApplicationAttemptReport attemptReport1 = ApplicationAttemptReport
+        .newInstance(attemptId1, "host", 124, "url", "diagnostics",
+          YarnApplicationAttemptState.FINISHED, ContainerId.newInstance(
+            attemptId1, 1));
+    List<ApplicationAttemptReport> reports = new ArrayList<ApplicationAttemptReport>();
+    reports.add(attemptReport);
+    reports.add(attemptReport1);
+    when(client.getApplicationAttempts(any(ApplicationId.class)))
+      .thenReturn(reports);
+    int result = cli.run(new String[] { "applicationattempt", "-list",
+        applicationId.toString() });
+    assertEquals(0, result);
+    verify(client).getApplicationAttempts(applicationId);
+    ByteArrayOutputStream baos = new ByteArrayOutputStream();
+    PrintWriter pw = new PrintWriter(baos);
+    pw.println("Total number of application attempts :2");
+    pw.print(" ApplicationAttempt-Id");
+    pw.print("\t State");
+    pw.print("\t AM-Container-Id");
+    pw.println("\t Tracking-URL");
+    pw.print(" appattempt_1234_0005_000001");
+    pw.print("\t FINISHED");
+    pw.print("\t container_1234_0005_01_000001");
+    pw.println("\t url");
+    pw.print(" appattempt_1234_0005_000002");
+    pw.print("\t FINISHED");
+    pw.print("\t container_1234_0005_02_000001");
+    pw.println("\t url");
+    pw.close();
+    String appReportStr = baos.toString("UTF-8");
+    Assert.assertEquals(appReportStr, sysOutStream.toString());
+  }
+
+  @Test
+  public void testGetContainerReport() throws Exception {
+    ApplicationCLI cli = createAndGetAppCLI();
+    ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
+    ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(
+        applicationId, 1);
+    ContainerId containerId = ContainerId.newInstance(attemptId, 1);
+    ContainerReport container = ContainerReport.newInstance(containerId, null,
+        NodeId.newInstance("host", 1234), Priority.UNDEFINED, 1234, 5678,
+        "diagnosticInfo", "logURL", 0, ContainerState.COMPLETE);
+    when(client.getContainerReport(any(ContainerId.class))).thenReturn(
+        container);
+    int result = cli.run(new String[] { "container", "-status",
+        containerId.toString() });
+    assertEquals(0, result);
+    verify(client).getContainerReport(containerId);
+    ByteArrayOutputStream baos = new ByteArrayOutputStream();
+    PrintWriter pw = new PrintWriter(baos);
+    pw.println("Container Report : ");
+    pw.println("\tContainer-Id : container_1234_0005_01_000001");
+    pw.println("\tStart-Time : 1234");
+    pw.println("\tFinish-Time : 5678");
+    pw.println("\tState : COMPLETE");
+    pw.println("\tLOG-URL : logURL");
+    pw.println("\tHost : host:1234");
+    pw.println("\tDiagnostics : diagnosticInfo");
+    pw.close();
+    String appReportStr = baos.toString("UTF-8");
+    Assert.assertEquals(appReportStr, sysOutStream.toString());
+    verify(sysOut, times(1)).println(isA(String.class));
+  }
+
+  @Test
+  public void testGetContainers() throws Exception {
+    ApplicationCLI cli = createAndGetAppCLI();
+    ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
+    ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(
+        applicationId, 1);
+    ContainerId containerId = ContainerId.newInstance(attemptId, 1);
+    ContainerId containerId1 = ContainerId.newInstance(attemptId, 2);
+    ContainerReport container = ContainerReport.newInstance(containerId, null,
+        NodeId.newInstance("host", 1234), Priority.UNDEFINED, 1234, 5678,
+        "diagnosticInfo", "logURL", 0, ContainerState.COMPLETE);
+    ContainerReport container1 = ContainerReport.newInstance(containerId1, null,
+        NodeId.newInstance("host", 1234), Priority.UNDEFINED, 1234, 5678,
+        "diagnosticInfo", "logURL", 0, ContainerState.COMPLETE);
+    List<ContainerReport> reports = new ArrayList<ContainerReport>();
+    reports.add(container);
+    reports.add(container1);
+    when(client.getContainers(any(ApplicationAttemptId.class))).thenReturn(
+        reports);
+    int result = cli.run(new String[] { "container", "-list",
+        attemptId.toString() });
+    assertEquals(0, result);
+    verify(client).getContainers(attemptId);
+    Log.info(sysOutStream.toString());
+    ByteArrayOutputStream baos = new ByteArrayOutputStream();
+    PrintWriter pw = new PrintWriter(baos);
+    pw.println("Total number of containers :2");
+    pw.print(" Container-Id");
+    pw.print("\t Start Time");
+    pw.print("\t Finish Time");
+    pw.print("\t State");
+    pw.print("\t Host");
+    pw.println("\t LOG-URL");
+    pw.print(" container_1234_0005_01_000001");
+    pw.print("\t 1234");
+    pw.print("\t 5678");
+    pw.print("\t COMPLETE");
+    pw.print("\t host:1234");
+    pw.println("\t logURL");
+    pw.print(" container_1234_0005_01_000002");
+    pw.print("\t 1234");
+    pw.print("\t 5678");
+    pw.print("\t COMPLETE");
+    pw.print("\t host:1234");
+    pw.println("\t logURL");
+    pw.close();
+    String appReportStr = baos.toString("UTF-8");
+    Assert.assertEquals(appReportStr, sysOutStream.toString());
+  }
+
   @Test
   public void testGetApplicationReportException() throws Exception {
     ApplicationCLI cli = createAndGetAppCLI();
     ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
when(client.getApplicationReport(any(ApplicationId.class))).thenThrow( - new ApplicationNotFoundException("Application with id '" - + applicationId + "' doesn't exist in RM.")); + new ApplicationNotFoundException("History file for application" + + applicationId + " is not found")); try { cli.run(new String[] { "-status", applicationId.toString() }); Assert.fail(); } catch (Exception ex) { Assert.assertTrue(ex instanceof ApplicationNotFoundException); - Assert.assertEquals("Application with id '" + applicationId - + "' doesn't exist in RM.", ex.getMessage()); + Assert.assertEquals("History file for application" + + applicationId + " is not found", ex.getMessage()); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/ApplicationHistoryProtocolPB.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/ApplicationHistoryProtocolPB.java new file mode 100644 index 00000000000..00ae51291b0 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/ApplicationHistoryProtocolPB.java @@ -0,0 +1,33 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.api; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.ipc.ProtocolInfo; +import org.apache.hadoop.yarn.proto.ApplicationHistoryProtocol.ApplicationHistoryProtocolService; + +@Private +@Unstable +@ProtocolInfo( + protocolName = "org.apache.hadoop.yarn.api.ApplicationHistoryProtocolPB", + protocolVersion = 1) +public interface ApplicationHistoryProtocolPB extends + ApplicationHistoryProtocolService.BlockingInterface { +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ApplicationHistoryProtocolPBClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ApplicationHistoryProtocolPBClientImpl.java new file mode 100644 index 00000000000..f686652a72f --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ApplicationHistoryProtocolPBClientImpl.java @@ -0,0 +1,230 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.api.impl.pb.client; + +import java.io.Closeable; +import java.io.IOException; +import java.net.InetSocketAddress; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.RPC; +import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto; +import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto; +import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto; +import org.apache.hadoop.yarn.api.ApplicationClientProtocolPB; +import org.apache.hadoop.yarn.api.ApplicationHistoryProtocol; +import org.apache.hadoop.yarn.api.ApplicationHistoryProtocolPB; +import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenRequest; +import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetContainersRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetContainersResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenResponse; +import org.apache.hadoop.yarn.api.protocolrecords.RenewDelegationTokenRequest; +import org.apache.hadoop.yarn.api.protocolrecords.RenewDelegationTokenResponse; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.CancelDelegationTokenRequestPBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.CancelDelegationTokenResponsePBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationAttemptReportRequestPBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationAttemptReportResponsePBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationAttemptsRequestPBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationAttemptsResponsePBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationReportRequestPBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationReportResponsePBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationsRequestPBImpl; +import 
org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationsResponsePBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetContainerReportRequestPBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetContainerReportResponsePBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetContainersRequestPBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetContainersResponsePBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetDelegationTokenRequestPBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetDelegationTokenResponsePBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RenewDelegationTokenRequestPBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RenewDelegationTokenResponsePBImpl; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.ipc.RPCUtil; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationsRequestProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationReportRequestProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationAttemptsRequestProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationAttemptReportRequestProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetContainerReportRequestProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetContainersRequestProto; + +import com.google.protobuf.ServiceException; + +public class ApplicationHistoryProtocolPBClientImpl implements + ApplicationHistoryProtocol, Closeable { + + private ApplicationHistoryProtocolPB proxy; + + public ApplicationHistoryProtocolPBClientImpl(long clientVersion, + InetSocketAddress addr, Configuration conf) throws IOException { + RPC.setProtocolEngine(conf, ApplicationHistoryProtocolPB.class, + ProtobufRpcEngine.class); + proxy = + RPC.getProxy(ApplicationHistoryProtocolPB.class, clientVersion, addr, + conf); + } + + @Override + public void close() throws IOException { + if (this.proxy != null) { + RPC.stopProxy(this.proxy); + } + } + + @Override + public GetApplicationReportResponse getApplicationReport( + GetApplicationReportRequest request) throws YarnException, IOException { + GetApplicationReportRequestProto requestProto = + ((GetApplicationReportRequestPBImpl) request).getProto(); + try { + return new GetApplicationReportResponsePBImpl(proxy.getApplicationReport( + null, requestProto)); + } catch (ServiceException e) { + RPCUtil.unwrapAndThrowException(e); + return null; + } + } + + @Override + public GetApplicationsResponse + getApplications(GetApplicationsRequest request) throws YarnException, + IOException { + GetApplicationsRequestProto requestProto = + ((GetApplicationsRequestPBImpl) request).getProto(); + try { + return new GetApplicationsResponsePBImpl(proxy.getApplications(null, + requestProto)); + } catch (ServiceException e) { + RPCUtil.unwrapAndThrowException(e); + return null; + } + } + + @Override + public GetApplicationAttemptReportResponse getApplicationAttemptReport( + GetApplicationAttemptReportRequest request) throws YarnException, + IOException { + GetApplicationAttemptReportRequestProto requestProto = + ((GetApplicationAttemptReportRequestPBImpl) request).getProto(); + try { + return new GetApplicationAttemptReportResponsePBImpl( + proxy.getApplicationAttemptReport(null, requestProto)); + } catch (ServiceException e) { + RPCUtil.unwrapAndThrowException(e); + return null; + } + } + + @Override + public GetApplicationAttemptsResponse 
getApplicationAttempts( + GetApplicationAttemptsRequest request) throws YarnException, IOException { + GetApplicationAttemptsRequestProto requestProto = + ((GetApplicationAttemptsRequestPBImpl) request).getProto(); + try { + return new GetApplicationAttemptsResponsePBImpl( + proxy.getApplicationAttempts(null, requestProto)); + } catch (ServiceException e) { + RPCUtil.unwrapAndThrowException(e); + return null; + } + } + + @Override + public GetContainerReportResponse getContainerReport( + GetContainerReportRequest request) throws YarnException, IOException { + GetContainerReportRequestProto requestProto = + ((GetContainerReportRequestPBImpl) request).getProto(); + try { + return new GetContainerReportResponsePBImpl(proxy.getContainerReport( + null, requestProto)); + } catch (ServiceException e) { + RPCUtil.unwrapAndThrowException(e); + return null; + } + } + + @Override + public GetContainersResponse getContainers(GetContainersRequest request) + throws YarnException, IOException { + GetContainersRequestProto requestProto = + ((GetContainersRequestPBImpl) request).getProto(); + try { + return new GetContainersResponsePBImpl(proxy.getContainers(null, + requestProto)); + } catch (ServiceException e) { + RPCUtil.unwrapAndThrowException(e); + return null; + } + } + + @Override + public GetDelegationTokenResponse getDelegationToken( + GetDelegationTokenRequest request) throws YarnException, IOException { + GetDelegationTokenRequestProto requestProto = + ((GetDelegationTokenRequestPBImpl) request).getProto(); + try { + return new GetDelegationTokenResponsePBImpl(proxy.getDelegationToken( + null, requestProto)); + } catch (ServiceException e) { + RPCUtil.unwrapAndThrowException(e); + return null; + } + } + + @Override + public RenewDelegationTokenResponse renewDelegationToken( + RenewDelegationTokenRequest request) throws YarnException, IOException { + RenewDelegationTokenRequestProto requestProto = + ((RenewDelegationTokenRequestPBImpl) request).getProto(); + try { + return new RenewDelegationTokenResponsePBImpl(proxy.renewDelegationToken( + null, requestProto)); + } catch (ServiceException e) { + RPCUtil.unwrapAndThrowException(e); + return null; + } + } + + @Override + public CancelDelegationTokenResponse cancelDelegationToken( + CancelDelegationTokenRequest request) throws YarnException, IOException { + CancelDelegationTokenRequestProto requestProto = + ((CancelDelegationTokenRequestPBImpl) request).getProto(); + try { + return new CancelDelegationTokenResponsePBImpl( + proxy.cancelDelegationToken(null, requestProto)); + + } catch (ServiceException e) { + RPCUtil.unwrapAndThrowException(e); + return null; + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/service/ApplicationHistoryProtocolPBServiceImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/service/ApplicationHistoryProtocolPBServiceImpl.java new file mode 100644 index 00000000000..4511cc45b19 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/service/ApplicationHistoryProtocolPBServiceImpl.java @@ -0,0 +1,230 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.api.impl.pb.service; + +import java.io.IOException; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto; +import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenResponseProto; +import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto; +import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenResponseProto; +import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto; +import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenResponseProto; +import org.apache.hadoop.yarn.api.ApplicationHistoryProtocol; +import org.apache.hadoop.yarn.api.ApplicationHistoryProtocolPB; +import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetContainersResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenResponse; +import org.apache.hadoop.yarn.api.protocolrecords.RenewDelegationTokenResponse; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.CancelDelegationTokenRequestPBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.CancelDelegationTokenResponsePBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationAttemptReportRequestPBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationAttemptReportResponsePBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationAttemptsRequestPBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationAttemptsResponsePBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationReportRequestPBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationReportResponsePBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationsRequestPBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationsResponsePBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetContainerReportRequestPBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetContainerReportResponsePBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetContainersRequestPBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetContainersResponsePBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetDelegationTokenRequestPBImpl; +import 
org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetDelegationTokenResponsePBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RenewDelegationTokenRequestPBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RenewDelegationTokenResponsePBImpl; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationAttemptReportRequestProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationAttemptReportResponseProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationAttemptsRequestProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationAttemptsResponseProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationReportRequestProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationReportResponseProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationsRequestProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationsResponseProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetContainerReportRequestProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetContainerReportResponseProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetContainersRequestProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetContainersResponseProto; + +import com.google.protobuf.RpcController; +import com.google.protobuf.ServiceException; + +@Private +public class ApplicationHistoryProtocolPBServiceImpl implements + ApplicationHistoryProtocolPB { + private ApplicationHistoryProtocol real; + + public ApplicationHistoryProtocolPBServiceImpl(ApplicationHistoryProtocol impl) { + this.real = impl; + } + + @Override + public GetApplicationReportResponseProto getApplicationReport( + RpcController arg0, GetApplicationReportRequestProto proto) + throws ServiceException { + GetApplicationReportRequestPBImpl request = + new GetApplicationReportRequestPBImpl(proto); + try { + GetApplicationReportResponse response = + real.getApplicationReport(request); + return ((GetApplicationReportResponsePBImpl) response).getProto(); + } catch (YarnException e) { + throw new ServiceException(e); + } catch (IOException e) { + throw new ServiceException(e); + } + } + + @Override + public GetApplicationsResponseProto getApplications(RpcController controller, + GetApplicationsRequestProto proto) throws ServiceException { + GetApplicationsRequestPBImpl request = + new GetApplicationsRequestPBImpl(proto); + try { + GetApplicationsResponse response = real.getApplications(request); + return ((GetApplicationsResponsePBImpl) response).getProto(); + } catch (YarnException e) { + throw new ServiceException(e); + } catch (IOException e) { + throw new ServiceException(e); + } + } + + @Override + public GetApplicationAttemptReportResponseProto getApplicationAttemptReport( + RpcController controller, GetApplicationAttemptReportRequestProto proto) + throws ServiceException { + GetApplicationAttemptReportRequestPBImpl request = + new GetApplicationAttemptReportRequestPBImpl(proto); + try { + GetApplicationAttemptReportResponse response = + real.getApplicationAttemptReport(request); + return ((GetApplicationAttemptReportResponsePBImpl) response).getProto(); + } catch (YarnException e) { + throw new ServiceException(e); + } catch (IOException e) { + throw new ServiceException(e); + } + } + + @Override + public GetApplicationAttemptsResponseProto getApplicationAttempts( + RpcController controller, 
GetApplicationAttemptsRequestProto proto) + throws ServiceException { + GetApplicationAttemptsRequestPBImpl request = + new GetApplicationAttemptsRequestPBImpl(proto); + try { + GetApplicationAttemptsResponse response = + real.getApplicationAttempts(request); + return ((GetApplicationAttemptsResponsePBImpl) response).getProto(); + } catch (YarnException e) { + throw new ServiceException(e); + } catch (IOException e) { + throw new ServiceException(e); + } + } + + @Override + public GetContainerReportResponseProto getContainerReport( + RpcController controller, GetContainerReportRequestProto proto) + throws ServiceException { + GetContainerReportRequestPBImpl request = + new GetContainerReportRequestPBImpl(proto); + try { + GetContainerReportResponse response = real.getContainerReport(request); + return ((GetContainerReportResponsePBImpl) response).getProto(); + } catch (YarnException e) { + throw new ServiceException(e); + } catch (IOException e) { + throw new ServiceException(e); + } + } + + @Override + public GetContainersResponseProto getContainers(RpcController controller, + GetContainersRequestProto proto) throws ServiceException { + GetContainersRequestPBImpl request = new GetContainersRequestPBImpl(proto); + try { + GetContainersResponse response = real.getContainers(request); + return ((GetContainersResponsePBImpl) response).getProto(); + } catch (YarnException e) { + throw new ServiceException(e); + } catch (IOException e) { + throw new ServiceException(e); + } + } + + @Override + public GetDelegationTokenResponseProto getDelegationToken( + RpcController controller, GetDelegationTokenRequestProto proto) + throws ServiceException { + GetDelegationTokenRequestPBImpl request = + new GetDelegationTokenRequestPBImpl(proto); + try { + GetDelegationTokenResponse response = real.getDelegationToken(request); + return ((GetDelegationTokenResponsePBImpl) response).getProto(); + } catch (YarnException e) { + throw new ServiceException(e); + } catch (IOException e) { + throw new ServiceException(e); + } + } + + @Override + public RenewDelegationTokenResponseProto renewDelegationToken( + RpcController controller, RenewDelegationTokenRequestProto proto) + throws ServiceException { + RenewDelegationTokenRequestPBImpl request = + new RenewDelegationTokenRequestPBImpl(proto); + try { + RenewDelegationTokenResponse response = + real.renewDelegationToken(request); + return ((RenewDelegationTokenResponsePBImpl) response).getProto(); + } catch (YarnException e) { + throw new ServiceException(e); + } catch (IOException e) { + throw new ServiceException(e); + } + } + + @Override + public CancelDelegationTokenResponseProto cancelDelegationToken( + RpcController controller, CancelDelegationTokenRequestProto proto) + throws ServiceException { + CancelDelegationTokenRequestPBImpl request = + new CancelDelegationTokenRequestPBImpl(proto); + try { + CancelDelegationTokenResponse response = + real.cancelDelegationToken(request); + return ((CancelDelegationTokenResponsePBImpl) response).getProto(); + } catch (YarnException e) { + throw new ServiceException(e); + } catch (IOException e) { + throw new ServiceException(e); + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationAttemptReportRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationAttemptReportRequestPBImpl.java new file mode 100644 index 
00000000000..d44ad5fa2cd --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationAttemptReportRequestPBImpl.java @@ -0,0 +1,140 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.api.protocolrecords.impl.pb; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationAttemptIdPBImpl; +import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationAttemptIdProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationAttemptReportRequestProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationAttemptReportRequestProtoOrBuilder; + +import com.google.protobuf.TextFormat; + +@Private +@Unstable +public class GetApplicationAttemptReportRequestPBImpl extends + GetApplicationAttemptReportRequest { + + GetApplicationAttemptReportRequestProto proto = + GetApplicationAttemptReportRequestProto.getDefaultInstance(); + GetApplicationAttemptReportRequestProto.Builder builder = null; + boolean viaProto = false; + + private ApplicationAttemptId applicationAttemptId = null; + + public GetApplicationAttemptReportRequestPBImpl() { + builder = GetApplicationAttemptReportRequestProto.newBuilder(); + } + + public GetApplicationAttemptReportRequestPBImpl( + GetApplicationAttemptReportRequestProto proto) { + this.proto = proto; + viaProto = true; + } + + public GetApplicationAttemptReportRequestProto getProto() { + mergeLocalToProto(); + proto = viaProto ? 
proto : builder.build(); + viaProto = true; + return proto; + } + + @Override + public int hashCode() { + return getProto().hashCode(); + } + + @Override + public boolean equals(Object other) { + if (other == null) { + return false; + } + if (other.getClass().isAssignableFrom(this.getClass())) { + return this.getProto().equals(this.getClass().cast(other).getProto()); + } + return false; + } + + @Override + public String toString() { + return TextFormat.shortDebugString(getProto()); + } + + private void mergeLocalToBuilder() { + if (applicationAttemptId != null) { + builder + .setApplicationAttemptId(convertToProtoFormat(this.applicationAttemptId)); + } + } + + private void mergeLocalToProto() { + if (viaProto) { + maybeInitBuilder(); + } + mergeLocalToBuilder(); + proto = builder.build(); + viaProto = true; + } + + private void maybeInitBuilder() { + if (viaProto || builder == null) { + builder = GetApplicationAttemptReportRequestProto.newBuilder(proto); + } + viaProto = false; + } + + @Override + public ApplicationAttemptId getApplicationAttemptId() { + if (this.applicationAttemptId != null) { + return this.applicationAttemptId; + } + GetApplicationAttemptReportRequestProtoOrBuilder p = + viaProto ? proto : builder; + if (!p.hasApplicationAttemptId()) { + return null; + } + this.applicationAttemptId = + convertFromProtoFormat(p.getApplicationAttemptId()); + return this.applicationAttemptId; + } + + @Override + public void + setApplicationAttemptId(ApplicationAttemptId applicationAttemptId) { + maybeInitBuilder(); + if (applicationAttemptId == null) { + builder.clearApplicationAttemptId(); + } + this.applicationAttemptId = applicationAttemptId; + } + + private ApplicationAttemptIdPBImpl convertFromProtoFormat( + ApplicationAttemptIdProto p) { + return new ApplicationAttemptIdPBImpl(p); + } + + private ApplicationAttemptIdProto + convertToProtoFormat(ApplicationAttemptId t) { + return ((ApplicationAttemptIdPBImpl) t).getProto(); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationAttemptReportResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationAttemptReportResponsePBImpl.java new file mode 100644 index 00000000000..6e2c2d7c0d3 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationAttemptReportResponsePBImpl.java @@ -0,0 +1,140 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.api.protocolrecords.impl.pb; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport; +import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationAttemptReportPBImpl; +import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationAttemptReportProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationAttemptReportResponseProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationAttemptReportResponseProtoOrBuilder; + +import com.google.protobuf.TextFormat; + +@Private +@Unstable +public class GetApplicationAttemptReportResponsePBImpl extends + GetApplicationAttemptReportResponse { + + GetApplicationAttemptReportResponseProto proto = + GetApplicationAttemptReportResponseProto.getDefaultInstance(); + GetApplicationAttemptReportResponseProto.Builder builder = null; + boolean viaProto = false; + + private ApplicationAttemptReport applicationAttemptReport = null; + + public GetApplicationAttemptReportResponsePBImpl() { + builder = GetApplicationAttemptReportResponseProto.newBuilder(); + } + + public GetApplicationAttemptReportResponsePBImpl( + GetApplicationAttemptReportResponseProto proto) { + this.proto = proto; + viaProto = true; + } + + public GetApplicationAttemptReportResponseProto getProto() { + mergeLocalToProto(); + proto = viaProto ? proto : builder.build(); + viaProto = true; + return proto; + } + + @Override + public int hashCode() { + return getProto().hashCode(); + } + + @Override + public boolean equals(Object other) { + if (other == null) { + return false; + } + if (other.getClass().isAssignableFrom(this.getClass())) { + return this.getProto().equals(this.getClass().cast(other).getProto()); + } + return false; + } + + @Override + public String toString() { + return TextFormat.shortDebugString(getProto()); + } + + private void mergeLocalToBuilder() { + if (this.applicationAttemptReport != null) { + builder + .setApplicationAttemptReport(convertToProtoFormat(this.applicationAttemptReport)); + } + } + + private void mergeLocalToProto() { + if (viaProto) { + maybeInitBuilder(); + } + mergeLocalToBuilder(); + proto = builder.build(); + viaProto = true; + } + + private void maybeInitBuilder() { + if (viaProto || builder == null) { + builder = GetApplicationAttemptReportResponseProto.newBuilder(proto); + } + viaProto = false; + } + + @Override + public ApplicationAttemptReport getApplicationAttemptReport() { + if (this.applicationAttemptReport != null) { + return this.applicationAttemptReport; + } + GetApplicationAttemptReportResponseProtoOrBuilder p = + viaProto ? 
proto : builder; + if (!p.hasApplicationAttemptReport()) { + return null; + } + this.applicationAttemptReport = + convertFromProtoFormat(p.getApplicationAttemptReport()); + return this.applicationAttemptReport; + } + + @Override + public void setApplicationAttemptReport( + ApplicationAttemptReport ApplicationAttemptReport) { + maybeInitBuilder(); + if (ApplicationAttemptReport == null) { + builder.clearApplicationAttemptReport(); + } + this.applicationAttemptReport = ApplicationAttemptReport; + } + + private ApplicationAttemptReportPBImpl convertFromProtoFormat( + ApplicationAttemptReportProto p) { + return new ApplicationAttemptReportPBImpl(p); + } + + private ApplicationAttemptReportProto convertToProtoFormat( + ApplicationAttemptReport t) { + return ((ApplicationAttemptReportPBImpl) t).getProto(); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationAttemptsRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationAttemptsRequestPBImpl.java new file mode 100644 index 00000000000..84700539d27 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationAttemptsRequestPBImpl.java @@ -0,0 +1,134 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.api.protocolrecords.impl.pb; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsRequest; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl; +import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationAttemptsRequestProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationAttemptsRequestProtoOrBuilder; + +import com.google.protobuf.TextFormat; + +@Private +@Unstable +public class GetApplicationAttemptsRequestPBImpl extends + GetApplicationAttemptsRequest { + + GetApplicationAttemptsRequestProto proto = GetApplicationAttemptsRequestProto + .getDefaultInstance(); + GetApplicationAttemptsRequestProto.Builder builder = null; + boolean viaProto = false; + + ApplicationId applicationId = null; + + public GetApplicationAttemptsRequestPBImpl() { + builder = GetApplicationAttemptsRequestProto.newBuilder(); + } + + public GetApplicationAttemptsRequestPBImpl( + GetApplicationAttemptsRequestProto proto) { + this.proto = proto; + viaProto = true; + } + + public GetApplicationAttemptsRequestProto getProto() { + mergeLocalToProto(); + proto = viaProto ? proto : builder.build(); + viaProto = true; + return proto; + } + + @Override + public int hashCode() { + return getProto().hashCode(); + } + + @Override + public boolean equals(Object other) { + if (other == null) { + return false; + } + if (other.getClass().isAssignableFrom(this.getClass())) { + return this.getProto().equals(this.getClass().cast(other).getProto()); + } + return false; + } + + @Override + public String toString() { + return TextFormat.shortDebugString(getProto()); + } + + private void mergeLocalToBuilder() { + if (applicationId != null) { + builder.setApplicationId(convertToProtoFormat(this.applicationId)); + } + } + + private void mergeLocalToProto() { + if (viaProto) { + maybeInitBuilder(); + } + mergeLocalToBuilder(); + proto = builder.build(); + viaProto = true; + } + + private void maybeInitBuilder() { + if (viaProto || builder == null) { + builder = GetApplicationAttemptsRequestProto.newBuilder(proto); + } + viaProto = false; + } + + @Override + public ApplicationId getApplicationId() { + if (this.applicationId != null) { + return this.applicationId; + } + GetApplicationAttemptsRequestProtoOrBuilder p = viaProto ? 
proto : builder; + if (!p.hasApplicationId()) { + return null; + } + this.applicationId = convertFromProtoFormat(p.getApplicationId()); + return this.applicationId; + } + + @Override + public void setApplicationId(ApplicationId applicationId) { + maybeInitBuilder(); + if (applicationId == null) { + builder.clearApplicationId(); + } + this.applicationId = applicationId; + } + + private ApplicationIdPBImpl convertFromProtoFormat(ApplicationIdProto p) { + return new ApplicationIdPBImpl(p); + } + + private ApplicationIdProto convertToProtoFormat(ApplicationId t) { + return ((ApplicationIdPBImpl) t).getProto(); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationAttemptsResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationAttemptsResponsePBImpl.java new file mode 100644 index 00000000000..b79f4252ebd --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationAttemptsResponsePBImpl.java @@ -0,0 +1,186 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
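
For illustration, a minimal caller-side sketch of this request record (illustrative only, not part of the patch; it assumes GetApplicationAttemptsRequest exposes the newInstance factory that the other YARN protocol records conventionally provide):

    // Build a request for all attempts of one application. ApplicationId.newInstance
    // is the standard YARN record factory (clusterTimestamp, sequence number).
    ApplicationId appId = ApplicationId.newInstance(1390000000000L, 1);
    GetApplicationAttemptsRequest request =
        GetApplicationAttemptsRequest.newInstance(appId);
    // The PB implementation caches the ApplicationId locally and only merges it
    // into the protobuf builder when the wire form is actually needed:
    GetApplicationAttemptsRequestProto proto =
        ((GetApplicationAttemptsRequestPBImpl) request).getProto();
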
+ */
+
+package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
+
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsResponse;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport;
+import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationAttemptReportPBImpl;
+import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationAttemptReportProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationAttemptsResponseProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationAttemptsResponseProtoOrBuilder;
+
+import com.google.protobuf.TextFormat;
+
+@Private
+@Unstable
+public class GetApplicationAttemptsResponsePBImpl extends
+    GetApplicationAttemptsResponse {
+
+  GetApplicationAttemptsResponseProto proto =
+      GetApplicationAttemptsResponseProto.getDefaultInstance();
+  GetApplicationAttemptsResponseProto.Builder builder = null;
+  boolean viaProto = false;
+
+  List<ApplicationAttemptReport> applicationAttemptList;
+
+  public GetApplicationAttemptsResponsePBImpl() {
+    builder = GetApplicationAttemptsResponseProto.newBuilder();
+  }
+
+  public GetApplicationAttemptsResponsePBImpl(
+      GetApplicationAttemptsResponseProto proto) {
+    this.proto = proto;
+    viaProto = true;
+  }
+
+  @Override
+  public List<ApplicationAttemptReport> getApplicationAttemptList() {
+    initLocalApplicationAttemptsList();
+    return this.applicationAttemptList;
+  }
+
+  @Override
+  public void setApplicationAttemptList(
+      List<ApplicationAttemptReport> applicationAttempts) {
+    maybeInitBuilder();
+    if (applicationAttempts == null) {
+      builder.clearApplicationAttempts();
+    }
+    this.applicationAttemptList = applicationAttempts;
+  }
+
+  public GetApplicationAttemptsResponseProto getProto() {
+    mergeLocalToProto();
+    proto = viaProto ? proto : builder.build();
+    viaProto = true;
+    return proto;
+  }
+
+  @Override
+  public int hashCode() {
+    return getProto().hashCode();
+  }
+
+  @Override
+  public boolean equals(Object other) {
+    if (other == null) {
+      return false;
+    }
+    if (other.getClass().isAssignableFrom(this.getClass())) {
+      return this.getProto().equals(this.getClass().cast(other).getProto());
+    }
+    return false;
+  }
+
+  @Override
+  public String toString() {
+    return TextFormat.shortDebugString(getProto());
+  }
+
+  private void mergeLocalToBuilder() {
+    if (this.applicationAttemptList != null) {
+      addLocalApplicationAttemptsToProto();
+    }
+  }
+
+  private void mergeLocalToProto() {
+    if (viaProto) {
+      maybeInitBuilder();
+    }
+    mergeLocalToBuilder();
+    proto = builder.build();
+    viaProto = true;
+  }
+
+  private void maybeInitBuilder() {
+    if (viaProto || builder == null) {
+      builder = GetApplicationAttemptsResponseProto.newBuilder(proto);
+    }
+    viaProto = false;
+  }
+
+  // Once this is called, applicationAttemptList will never be null - until a
+  // getProto is called.
+  private void initLocalApplicationAttemptsList() {
+    if (this.applicationAttemptList != null) {
+      return;
+    }
+    GetApplicationAttemptsResponseProtoOrBuilder p = viaProto ?
proto : builder;
+    List<ApplicationAttemptReportProto> list = p.getApplicationAttemptsList();
+    applicationAttemptList = new ArrayList<ApplicationAttemptReport>();
+
+    for (ApplicationAttemptReportProto a : list) {
+      applicationAttemptList.add(convertFromProtoFormat(a));
+    }
+  }
+
+  private void addLocalApplicationAttemptsToProto() {
+    maybeInitBuilder();
+    builder.clearApplicationAttempts();
+    if (applicationAttemptList == null) {
+      return;
+    }
+    Iterable<ApplicationAttemptReportProto> iterable =
+        new Iterable<ApplicationAttemptReportProto>() {
+          @Override
+          public Iterator<ApplicationAttemptReportProto> iterator() {
+            return new Iterator<ApplicationAttemptReportProto>() {
+
+              Iterator<ApplicationAttemptReport> iter = applicationAttemptList
+                  .iterator();
+
+              @Override
+              public boolean hasNext() {
+                return iter.hasNext();
+              }
+
+              @Override
+              public ApplicationAttemptReportProto next() {
+                return convertToProtoFormat(iter.next());
+              }
+
+              @Override
+              public void remove() {
+                throw new UnsupportedOperationException();
+              }
+            };
+          }
+        };
+    builder.addAllApplicationAttempts(iterable);
+  }
+
+  private ApplicationAttemptReportPBImpl convertFromProtoFormat(
+      ApplicationAttemptReportProto p) {
+    return new ApplicationAttemptReportPBImpl(p);
+  }
+
+  private ApplicationAttemptReportProto convertToProtoFormat(
+      ApplicationAttemptReport t) {
+    return ((ApplicationAttemptReportPBImpl) t).getProto();
+  }
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetContainerReportRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetContainerReportRequestPBImpl.java
new file mode 100644
index 00000000000..494d666feb9
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetContainerReportRequestPBImpl.java
@@ -0,0 +1,129 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
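
A matching sketch for the response side (illustrative only; GetApplicationAttemptsResponse.newInstance(List) is used the same way by the history client service later in this patch):

    List<ApplicationAttemptReport> reports =
        new ArrayList<ApplicationAttemptReport>();
    GetApplicationAttemptsResponse response =
        GetApplicationAttemptsResponse.newInstance(reports);
    // The report list is cached locally; it is converted to protos only when
    // getProto() forces addLocalApplicationAttemptsToProto() to run.
    List<ApplicationAttemptReport> roundTripped =
        response.getApplicationAttemptList();
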
+ */ + +package org.apache.hadoop.yarn.api.protocolrecords.impl.pb; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportRequest; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.impl.pb.ContainerIdPBImpl; +import org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetContainerReportRequestProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetContainerReportRequestProtoOrBuilder; + +import com.google.protobuf.TextFormat; + +@Private +@Unstable +public class GetContainerReportRequestPBImpl extends GetContainerReportRequest { + GetContainerReportRequestProto proto = GetContainerReportRequestProto + .getDefaultInstance(); + GetContainerReportRequestProto.Builder builder = null; + boolean viaProto = false; + + private ContainerId containerId = null; + + public GetContainerReportRequestPBImpl() { + builder = GetContainerReportRequestProto.newBuilder(); + } + + public GetContainerReportRequestPBImpl(GetContainerReportRequestProto proto) { + this.proto = proto; + viaProto = true; + } + + public GetContainerReportRequestProto getProto() { + mergeLocalToProto(); + proto = viaProto ? proto : builder.build(); + viaProto = true; + return proto; + } + + @Override + public int hashCode() { + return getProto().hashCode(); + } + + @Override + public boolean equals(Object other) { + if (other == null) + return false; + if (other.getClass().isAssignableFrom(this.getClass())) { + return this.getProto().equals(this.getClass().cast(other).getProto()); + } + return false; + } + + @Override + public String toString() { + return TextFormat.shortDebugString(getProto()); + } + + private void mergeLocalToBuilder() { + if (containerId != null) { + builder.setContainerId(convertToProtoFormat(this.containerId)); + } + } + + private void mergeLocalToProto() { + if (viaProto) + maybeInitBuilder(); + mergeLocalToBuilder(); + proto = builder.build(); + viaProto = true; + } + + private void maybeInitBuilder() { + if (viaProto || builder == null) { + builder = GetContainerReportRequestProto.newBuilder(proto); + } + viaProto = false; + } + + @Override + public ContainerId getContainerId() { + if (this.containerId != null) { + return this.containerId; + } + GetContainerReportRequestProtoOrBuilder p = viaProto ? 
proto : builder; + if (!p.hasContainerId()) { + return null; + } + this.containerId = convertFromProtoFormat(p.getContainerId()); + return this.containerId; + } + + @Override + public void setContainerId(ContainerId containerId) { + maybeInitBuilder(); + if (containerId == null) { + builder.clearContainerId(); + } + this.containerId = containerId; + } + + private ContainerIdPBImpl convertFromProtoFormat(ContainerIdProto p) { + return new ContainerIdPBImpl(p); + } + + private ContainerIdProto convertToProtoFormat(ContainerId t) { + return ((ContainerIdPBImpl) t).getProto(); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetContainerReportResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetContainerReportResponsePBImpl.java new file mode 100644 index 00000000000..1ee8a3d3f3a --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetContainerReportResponsePBImpl.java @@ -0,0 +1,127 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.api.protocolrecords.impl.pb; + +import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportResponse; +import org.apache.hadoop.yarn.api.records.ContainerReport; +import org.apache.hadoop.yarn.api.records.impl.pb.ContainerReportPBImpl; +import org.apache.hadoop.yarn.proto.YarnProtos.ContainerReportProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetContainerReportResponseProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetContainerReportResponseProtoOrBuilder; + +import com.google.protobuf.TextFormat; + +public class GetContainerReportResponsePBImpl extends + GetContainerReportResponse { + + GetContainerReportResponseProto proto = GetContainerReportResponseProto + .getDefaultInstance(); + GetContainerReportResponseProto.Builder builder = null; + boolean viaProto = false; + + private ContainerReport containerReport = null; + + public GetContainerReportResponsePBImpl() { + builder = GetContainerReportResponseProto.newBuilder(); + } + + public GetContainerReportResponsePBImpl(GetContainerReportResponseProto proto) { + this.proto = proto; + viaProto = true; + } + + public GetContainerReportResponseProto getProto() { + mergeLocalToProto(); + proto = viaProto ? 
proto : builder.build(); + viaProto = true; + return proto; + } + + @Override + public int hashCode() { + return getProto().hashCode(); + } + + @Override + public boolean equals(Object other) { + if (other == null) + return false; + if (other.getClass().isAssignableFrom(this.getClass())) { + return this.getProto().equals(this.getClass().cast(other).getProto()); + } + return false; + } + + @Override + public String toString() { + return TextFormat.shortDebugString(getProto()); + } + + private void mergeLocalToBuilder() { + if (this.containerReport != null) { + builder.setContainerReport(convertToProtoFormat(this.containerReport)); + } + } + + private void mergeLocalToProto() { + if (viaProto) + maybeInitBuilder(); + mergeLocalToBuilder(); + proto = builder.build(); + viaProto = true; + } + + private void maybeInitBuilder() { + if (viaProto || builder == null) { + builder = GetContainerReportResponseProto.newBuilder(proto); + } + viaProto = false; + } + + @Override + public ContainerReport getContainerReport() { + if (this.containerReport != null) { + return this.containerReport; + } + GetContainerReportResponseProtoOrBuilder p = viaProto ? proto : builder; + if (!p.hasContainerReport()) { + return null; + } + this.containerReport = convertFromProtoFormat(p.getContainerReport()); + return this.containerReport; + } + + @Override + public void setContainerReport(ContainerReport containerReport) { + maybeInitBuilder(); + if (containerReport == null) { + builder.clearContainerReport(); + } + this.containerReport = containerReport; + } + + private ContainerReportPBImpl convertFromProtoFormat(ContainerReportProto p) { + return new ContainerReportPBImpl(p); + } + + private ContainerReportProto convertToProtoFormat(ContainerReport t) { + return ((ContainerReportPBImpl) t).getProto(); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetContainersRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetContainersRequestPBImpl.java new file mode 100644 index 00000000000..2b6a724245d --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetContainersRequestPBImpl.java @@ -0,0 +1,131 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
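
All of these PB records share the same copy-on-write life cycle around the viaProto flag; a condensed sketch of the state transitions (illustrative only, and it assumes the record factory can instantiate ContainerReport via Records.newRecord, as the YARN naming convention normally allows):

    GetContainerReportResponsePBImpl impl = new GetContainerReportResponsePBImpl();
    ContainerReport report = Records.newRecord(ContainerReport.class);
    impl.setContainerReport(report);   // viaProto == false: builder plus local cache win
    GetContainerReportResponseProto p = impl.getProto();
                                       // mergeLocalToProto(): viaProto == true,
                                       // the immutable proto is now authoritative
    impl.setContainerReport(null);     // maybeInitBuilder() copies the proto back
                                       // into a fresh builder; viaProto == false
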
+ */ + +package org.apache.hadoop.yarn.api.protocolrecords.impl.pb; + +import org.apache.hadoop.yarn.api.protocolrecords.GetContainersRequest; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationAttemptIdPBImpl; +import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationAttemptIdProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetContainersRequestProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetContainersRequestProtoOrBuilder; + +import com.google.protobuf.TextFormat; + +public class GetContainersRequestPBImpl extends GetContainersRequest { + GetContainersRequestProto proto = GetContainersRequestProto + .getDefaultInstance(); + GetContainersRequestProto.Builder builder = null; + boolean viaProto = false; + + private ApplicationAttemptId applicationAttemptId = null; + + public GetContainersRequestPBImpl() { + builder = GetContainersRequestProto.newBuilder(); + } + + public GetContainersRequestPBImpl(GetContainersRequestProto proto) { + this.proto = proto; + viaProto = true; + } + + public GetContainersRequestProto getProto() { + mergeLocalToProto(); + proto = viaProto ? proto : builder.build(); + viaProto = true; + return proto; + } + + @Override + public int hashCode() { + return getProto().hashCode(); + } + + @Override + public boolean equals(Object other) { + if (other == null) { + return false; + } + if (other.getClass().isAssignableFrom(this.getClass())) { + return this.getProto().equals(this.getClass().cast(other).getProto()); + } + return false; + } + + @Override + public String toString() { + return TextFormat.shortDebugString(getProto()); + } + + private void mergeLocalToBuilder() { + if (applicationAttemptId != null) { + builder + .setApplicationAttemptId(convertToProtoFormat(this.applicationAttemptId)); + } + } + + private void mergeLocalToProto() { + if (viaProto) { + maybeInitBuilder(); + } + mergeLocalToBuilder(); + proto = builder.build(); + viaProto = true; + } + + private void maybeInitBuilder() { + if (viaProto || builder == null) { + builder = GetContainersRequestProto.newBuilder(proto); + } + viaProto = false; + } + + @Override + public ApplicationAttemptId getApplicationAttemptId() { + if (this.applicationAttemptId != null) { + return this.applicationAttemptId; + } + GetContainersRequestProtoOrBuilder p = viaProto ? 
proto : builder; + if (!p.hasApplicationAttemptId()) { + return null; + } + this.applicationAttemptId = + convertFromProtoFormat(p.getApplicationAttemptId()); + return this.applicationAttemptId; + } + + @Override + public void + setApplicationAttemptId(ApplicationAttemptId applicationAttemptId) { + maybeInitBuilder(); + if (applicationAttemptId == null) { + builder.clearApplicationAttemptId(); + } + this.applicationAttemptId = applicationAttemptId; + } + + private ApplicationAttemptIdPBImpl convertFromProtoFormat( + ApplicationAttemptIdProto p) { + return new ApplicationAttemptIdPBImpl(p); + } + + private ApplicationAttemptIdProto + convertToProtoFormat(ApplicationAttemptId t) { + return ((ApplicationAttemptIdPBImpl) t).getProto(); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetContainersResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetContainersResponsePBImpl.java new file mode 100644 index 00000000000..6caec106261 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetContainersResponsePBImpl.java @@ -0,0 +1,180 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
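
A brief caller-side sketch for this request (hypothetical wiring; ApplicationId.newInstance and ApplicationAttemptId.newInstance are the standard record factories):

    ApplicationId appId = ApplicationId.newInstance(1390000000000L, 42);
    ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
    GetContainersRequest request = Records.newRecord(GetContainersRequest.class);
    request.setApplicationAttemptId(attemptId);   // cached until getProto() is called
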
+ */ + +package org.apache.hadoop.yarn.api.protocolrecords.impl.pb; + +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.protocolrecords.GetContainersResponse; +import org.apache.hadoop.yarn.api.records.ContainerReport; +import org.apache.hadoop.yarn.api.records.impl.pb.ContainerReportPBImpl; +import org.apache.hadoop.yarn.proto.YarnProtos.ContainerReportProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetContainersResponseProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetContainersResponseProtoOrBuilder; + +import com.google.protobuf.TextFormat; + +@Private +@Unstable +public class GetContainersResponsePBImpl extends GetContainersResponse { + + GetContainersResponseProto proto = GetContainersResponseProto + .getDefaultInstance(); + GetContainersResponseProto.Builder builder = null; + boolean viaProto = false; + + List containerList; + + public GetContainersResponsePBImpl() { + builder = GetContainersResponseProto.newBuilder(); + } + + public GetContainersResponsePBImpl(GetContainersResponseProto proto) { + this.proto = proto; + viaProto = true; + } + + @Override + public List getContainerList() { + initLocalContainerList(); + return this.containerList; + } + + @Override + public void setContainerList(List containers) { + maybeInitBuilder(); + if (containers == null) { + builder.clearContainers(); + } + this.containerList = containers; + } + + public GetContainersResponseProto getProto() { + mergeLocalToProto(); + proto = viaProto ? proto : builder.build(); + viaProto = true; + return proto; + } + + @Override + public int hashCode() { + return getProto().hashCode(); + } + + @Override + public boolean equals(Object other) { + if (other == null) { + return false; + } + if (other.getClass().isAssignableFrom(this.getClass())) { + return this.getProto().equals(this.getClass().cast(other).getProto()); + } + return false; + } + + @Override + public String toString() { + return TextFormat.shortDebugString(getProto()); + } + + private void mergeLocalToBuilder() { + if (this.containerList != null) { + addLocalContainersToProto(); + } + } + + private void mergeLocalToProto() { + if (viaProto) { + maybeInitBuilder(); + } + mergeLocalToBuilder(); + proto = builder.build(); + viaProto = true; + } + + private void maybeInitBuilder() { + if (viaProto || builder == null) { + builder = GetContainersResponseProto.newBuilder(proto); + } + viaProto = false; + } + + // Once this is called. containerList will never be null - until a getProto + // is called. + private void initLocalContainerList() { + if (this.containerList != null) { + return; + } + GetContainersResponseProtoOrBuilder p = viaProto ? 
proto : builder; + List list = p.getContainersList(); + containerList = new ArrayList(); + + for (ContainerReportProto c : list) { + containerList.add(convertFromProtoFormat(c)); + } + } + + private void addLocalContainersToProto() { + maybeInitBuilder(); + builder.clearContainers(); + if (containerList == null) { + return; + } + Iterable iterable = + new Iterable() { + @Override + public Iterator iterator() { + return new Iterator() { + + Iterator iter = containerList.iterator(); + + @Override + public boolean hasNext() { + return iter.hasNext(); + } + + @Override + public ContainerReportProto next() { + return convertToProtoFormat(iter.next()); + } + + @Override + public void remove() { + throw new UnsupportedOperationException(); + + } + }; + + } + }; + builder.addAllContainers(iterable); + } + + private ContainerReportPBImpl convertFromProtoFormat(ContainerReportProto p) { + return new ContainerReportPBImpl(p); + } + + private ContainerReportProto convertToProtoFormat(ContainerReport t) { + return ((ContainerReportPBImpl) t).getProto(); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationAttemptReportPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationAttemptReportPBImpl.java new file mode 100644 index 00000000000..89999872e0a --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationAttemptReportPBImpl.java @@ -0,0 +1,270 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
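
The anonymous Iterable in addLocalContainersToProto above is a deliberate design choice: builder.addAllContainers(...) walks the cached reports once and converts each to its proto form on the fly, so no second, fully converted list is ever materialized. For contrast, the eager version it avoids would look roughly like:

    // Eager conversion (avoided above): allocates a parallel list of protos.
    List<ContainerReportProto> protoList = new ArrayList<ContainerReportProto>();
    for (ContainerReport r : containerList) {
      protoList.add(convertToProtoFormat(r));
    }
    builder.addAllContainers(protoList);
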
+ */ + +package org.apache.hadoop.yarn.api.records.impl.pb; + +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState; +import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationAttemptIdProto; +import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationAttemptReportProto; +import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationAttemptReportProtoOrBuilder; +import org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto; +import org.apache.hadoop.yarn.proto.YarnProtos.YarnApplicationAttemptStateProto; + +import com.google.protobuf.TextFormat; + +public class ApplicationAttemptReportPBImpl extends ApplicationAttemptReport { + ApplicationAttemptReportProto proto = ApplicationAttemptReportProto + .getDefaultInstance(); + ApplicationAttemptReportProto.Builder builder = null; + boolean viaProto = false; + + private ApplicationAttemptId ApplicationAttemptId; + private ContainerId amContainerId; + + public ApplicationAttemptReportPBImpl() { + builder = ApplicationAttemptReportProto.newBuilder(); + } + + public ApplicationAttemptReportPBImpl(ApplicationAttemptReportProto proto) { + this.proto = proto; + viaProto = true; + } + + @Override + public ApplicationAttemptId getApplicationAttemptId() { + if (this.ApplicationAttemptId != null) { + return this.ApplicationAttemptId; + } + + ApplicationAttemptReportProtoOrBuilder p = viaProto ? proto : builder; + if (!p.hasApplicationAttemptId()) { + return null; + } + this.ApplicationAttemptId = + convertFromProtoFormat(p.getApplicationAttemptId()); + return this.ApplicationAttemptId; + } + + @Override + public String getHost() { + ApplicationAttemptReportProtoOrBuilder p = viaProto ? proto : builder; + if (!p.hasHost()) { + return null; + } + return p.getHost(); + } + + @Override + public int getRpcPort() { + ApplicationAttemptReportProtoOrBuilder p = viaProto ? proto : builder; + return p.getRpcPort(); + } + + @Override + public String getTrackingUrl() { + ApplicationAttemptReportProtoOrBuilder p = viaProto ? proto : builder; + if (!p.hasTrackingUrl()) { + return null; + } + return p.getTrackingUrl(); + } + + @Override + public String getDiagnostics() { + ApplicationAttemptReportProtoOrBuilder p = viaProto ? proto : builder; + if (!p.hasDiagnostics()) { + return null; + } + return p.getDiagnostics(); + } + + @Override + public YarnApplicationAttemptState getYarnApplicationAttemptState() { + ApplicationAttemptReportProtoOrBuilder p = viaProto ? 
proto : builder; + if (!p.hasYarnApplicationAttemptState()) { + return null; + } + return convertFromProtoFormat(p.getYarnApplicationAttemptState()); + } + + @Override + public void setYarnApplicationAttemptState(YarnApplicationAttemptState state) { + maybeInitBuilder(); + if (state == null) { + builder.clearYarnApplicationAttemptState(); + return; + } + builder.setYarnApplicationAttemptState(convertToProtoFormat(state)); + } + + private YarnApplicationAttemptStateProto convertToProtoFormat( + YarnApplicationAttemptState state) { + return ProtoUtils.convertToProtoFormat(state); + } + + private YarnApplicationAttemptState convertFromProtoFormat( + YarnApplicationAttemptStateProto yarnApplicationAttemptState) { + return ProtoUtils.convertFromProtoFormat(yarnApplicationAttemptState); + } + + @Override + public void + setApplicationAttemptId(ApplicationAttemptId applicationAttemptId) { + maybeInitBuilder(); + if (applicationAttemptId == null) + builder.clearApplicationAttemptId(); + this.ApplicationAttemptId = applicationAttemptId; + } + + @Override + public void setHost(String host) { + maybeInitBuilder(); + if (host == null) { + builder.clearHost(); + return; + } + builder.setHost(host); + } + + @Override + public void setRpcPort(int rpcPort) { + maybeInitBuilder(); + builder.setRpcPort(rpcPort); + } + + @Override + public void setTrackingUrl(String url) { + maybeInitBuilder(); + if (url == null) { + builder.clearTrackingUrl(); + return; + } + builder.setTrackingUrl(url); + } + + @Override + public void setDiagnostics(String diagnostics) { + maybeInitBuilder(); + if (diagnostics == null) { + builder.clearDiagnostics(); + return; + } + builder.setDiagnostics(diagnostics); + } + + public ApplicationAttemptReportProto getProto() { + mergeLocalToProto(); + proto = viaProto ? 
proto : builder.build(); + viaProto = true; + return proto; + } + + @Override + public int hashCode() { + return getProto().hashCode(); + } + + @Override + public boolean equals(Object other) { + if (other == null) + return false; + if (other.getClass().isAssignableFrom(this.getClass())) { + return this.getProto().equals(this.getClass().cast(other).getProto()); + } + return false; + } + + @Override + public String toString() { + return TextFormat.shortDebugString(getProto()); + } + + private void maybeInitBuilder() { + if (viaProto || builder == null) { + builder = ApplicationAttemptReportProto.newBuilder(proto); + } + viaProto = false; + } + + private void mergeLocalToProto() { + if (viaProto) + maybeInitBuilder(); + mergeLocalToBuilder(); + proto = builder.build(); + viaProto = true; + } + + private void mergeLocalToBuilder() { + if (this.ApplicationAttemptId != null + && !((ApplicationAttemptIdPBImpl) this.ApplicationAttemptId).getProto() + .equals(builder.getApplicationAttemptId())) { + builder + .setApplicationAttemptId(convertToProtoFormat(this.ApplicationAttemptId)); + } + + if (this.amContainerId != null + && !((ContainerIdPBImpl) this.amContainerId).getProto().equals( + builder.getAmContainerId())) { + builder.setAmContainerId(convertToProtoFormat(this.amContainerId)); + } + } + + private ContainerIdProto convertToProtoFormat(ContainerId amContainerId) { + return ((ContainerIdPBImpl) amContainerId).getProto(); + } + + private ContainerIdPBImpl convertFromProtoFormat( + ContainerIdProto amContainerId) { + return new ContainerIdPBImpl(amContainerId); + } + + private ApplicationAttemptIdProto + convertToProtoFormat(ApplicationAttemptId t) { + return ((ApplicationAttemptIdPBImpl) t).getProto(); + } + + private ApplicationAttemptIdPBImpl convertFromProtoFormat( + ApplicationAttemptIdProto applicationAttemptId) { + return new ApplicationAttemptIdPBImpl(applicationAttemptId); + } + + @Override + public ContainerId getAMContainerId() { + if (this.amContainerId != null) { + return this.amContainerId; + } + + ApplicationAttemptReportProtoOrBuilder p = viaProto ? proto : builder; + if (!p.hasAmContainerId()) { + return null; + } + this.amContainerId = convertFromProtoFormat(p.getAmContainerId()); + return this.amContainerId; + } + + @Override + public void setAMContainerId(ContainerId amContainerId) { + maybeInitBuilder(); + if (amContainerId == null) + builder.clearAmContainerId(); + this.amContainerId = amContainerId; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerReportPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerReportPBImpl.java new file mode 100644 index 00000000000..fa9639000ea --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerReportPBImpl.java @@ -0,0 +1,346 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.api.records.impl.pb; + +import org.apache.hadoop.security.proto.SecurityProtos.TokenProto; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.ContainerReport; +import org.apache.hadoop.yarn.api.records.ContainerState; +import org.apache.hadoop.yarn.api.records.ContainerStatus; +import org.apache.hadoop.yarn.api.records.NodeId; +import org.apache.hadoop.yarn.api.records.Priority; +import org.apache.hadoop.yarn.api.records.Resource; + +import org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto; +import org.apache.hadoop.yarn.proto.YarnProtos.ContainerReportProto; +import org.apache.hadoop.yarn.proto.YarnProtos.ContainerReportProtoOrBuilder; +import org.apache.hadoop.yarn.proto.YarnProtos.NodeIdProto; +import org.apache.hadoop.yarn.proto.YarnProtos.PriorityProto; +import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto; +import org.apache.hadoop.yarn.proto.YarnProtos.ContainerStateProto; + +import com.google.protobuf.TextFormat; + +public class ContainerReportPBImpl extends ContainerReport { + + ContainerReportProto proto = ContainerReportProto.getDefaultInstance(); + ContainerReportProto.Builder builder = null; + boolean viaProto = false; + + private ContainerId containerId = null; + private Resource resource = null; + private NodeId nodeId = null; + private Priority priority = null; + + public ContainerReportPBImpl() { + builder = ContainerReportProto.newBuilder(); + } + + public ContainerReportPBImpl(ContainerReportProto proto) { + this.proto = proto; + viaProto = true; + } + + @Override + public String toString() { + return TextFormat.shortDebugString(getProto()); + } + + @Override + public Resource getAllocatedResource() { + if (this.resource != null) { + return this.resource; + } + ContainerReportProtoOrBuilder p = viaProto ? proto : builder; + if (!p.hasResource()) { + return null; + } + this.resource = convertFromProtoFormat(p.getResource()); + return this.resource; + } + + @Override + public NodeId getAssignedNode() { + if (this.nodeId != null) { + return this.nodeId; + } + ContainerReportProtoOrBuilder p = viaProto ? proto : builder; + if (!p.hasNodeId()) { + return null; + } + this.nodeId = convertFromProtoFormat(p.getNodeId()); + return this.nodeId; + } + + @Override + public ContainerId getContainerId() { + if (this.containerId != null) { + return this.containerId; + } + ContainerReportProtoOrBuilder p = viaProto ? proto : builder; + if (!p.hasContainerId()) { + return null; + } + this.containerId = convertFromProtoFormat(p.getContainerId()); + return this.containerId; + } + + @Override + public String getDiagnosticsInfo() { + ContainerReportProtoOrBuilder p = viaProto ? proto : builder; + if (!p.hasDiagnosticsInfo()) { + return null; + } + return (p.getDiagnosticsInfo()); + } + + @Override + public ContainerState getContainerState() { + ContainerReportProtoOrBuilder p = viaProto ? 
proto : builder; + if (!p.hasContainerState()) { + return null; + } + return convertFromProtoFormat(p.getContainerState()); + } + + @Override + public long getFinishTime() { + ContainerReportProtoOrBuilder p = viaProto ? proto : builder; + return p.getFinishTime(); + } + + @Override + public String getLogUrl() { + ContainerReportProtoOrBuilder p = viaProto ? proto : builder; + if (!p.hasLogUrl()) { + return null; + } + return (p.getLogUrl()); + } + + @Override + public Priority getPriority() { + if (this.priority != null) { + return this.priority; + } + ContainerReportProtoOrBuilder p = viaProto ? proto : builder; + if (!p.hasPriority()) { + return null; + } + this.priority = convertFromProtoFormat(p.getPriority()); + return this.priority; + } + + @Override + public long getStartTime() { + ContainerReportProtoOrBuilder p = viaProto ? proto : builder; + return p.getStartTime(); + } + + @Override + public void setAllocatedResource(Resource resource) { + maybeInitBuilder(); + if (resource == null) + builder.clearResource(); + this.resource = resource; + } + + @Override + public void setAssignedNode(NodeId nodeId) { + maybeInitBuilder(); + if (nodeId == null) + builder.clearNodeId(); + this.nodeId = nodeId; + } + + @Override + public void setContainerId(ContainerId containerId) { + maybeInitBuilder(); + if (containerId == null) + builder.clearContainerId(); + this.containerId = containerId; + } + + @Override + public void setDiagnosticsInfo(String diagnosticsInfo) { + maybeInitBuilder(); + if (diagnosticsInfo == null) { + builder.clearDiagnosticsInfo(); + return; + } + builder.setDiagnosticsInfo(diagnosticsInfo); + } + + @Override + public void setContainerState(ContainerState containerState) { + maybeInitBuilder(); + if (containerState == null) { + builder.clearContainerState(); + return; + } + builder.setContainerState(convertToProtoFormat(containerState)); + } + + @Override + public int getContainerExitStatus() { + ContainerReportProtoOrBuilder p = viaProto ? proto : builder; + return p.getContainerExitStatus(); + } + + @Override + public void setContainerExitStatus(int containerExitStatus) { + maybeInitBuilder(); + builder.setContainerExitStatus(containerExitStatus); + } + + @Override + public void setFinishTime(long finishTime) { + maybeInitBuilder(); + builder.setFinishTime(finishTime); + } + + @Override + public void setLogUrl(String logUrl) { + maybeInitBuilder(); + if (logUrl == null) { + builder.clearLogUrl(); + return; + } + builder.setLogUrl(logUrl); + } + + @Override + public void setPriority(Priority priority) { + maybeInitBuilder(); + if (priority == null) { + builder.clearPriority(); + } + this.priority = priority; + } + + @Override + public void setStartTime(long startTime) { + maybeInitBuilder(); + builder.setStartTime(startTime); + } + + public ContainerReportProto getProto() { + + mergeLocalToProto(); + proto = viaProto ? 
proto : builder.build(); + viaProto = true; + return proto; + } + + @Override + public int hashCode() { + return this.getProto().hashCode(); + } + + @Override + public boolean equals(Object other) { + if (other == null) + return false; + if (other.getClass().isAssignableFrom(this.getClass())) { + return this.getProto().equals(this.getClass().cast(other).getProto()); + } + return false; + } + + private void mergeLocalToBuilder() { + if (this.containerId != null + && !((ContainerIdPBImpl) containerId).getProto().equals( + builder.getContainerId())) { + builder.setContainerId(convertToProtoFormat(this.containerId)); + } + if (this.nodeId != null + && !((NodeIdPBImpl) nodeId).getProto().equals(builder.getNodeId())) { + builder.setNodeId(convertToProtoFormat(this.nodeId)); + } + if (this.resource != null + && !((ResourcePBImpl) this.resource).getProto().equals( + builder.getResource())) { + builder.setResource(convertToProtoFormat(this.resource)); + } + if (this.priority != null + && !((PriorityPBImpl) this.priority).getProto().equals( + builder.getPriority())) { + builder.setPriority(convertToProtoFormat(this.priority)); + } + } + + private void mergeLocalToProto() { + if (viaProto) + maybeInitBuilder(); + mergeLocalToBuilder(); + proto = builder.build(); + viaProto = true; + } + + private void maybeInitBuilder() { + if (viaProto || builder == null) { + builder = ContainerReportProto.newBuilder(proto); + } + viaProto = false; + } + + private ContainerIdPBImpl convertFromProtoFormat(ContainerIdProto p) { + return new ContainerIdPBImpl(p); + } + + private NodeIdPBImpl convertFromProtoFormat(NodeIdProto p) { + return new NodeIdPBImpl(p); + } + + private ContainerIdProto convertToProtoFormat(ContainerId t) { + return ((ContainerIdPBImpl) t).getProto(); + } + + private NodeIdProto convertToProtoFormat(NodeId t) { + return ((NodeIdPBImpl) t).getProto(); + } + + private ResourcePBImpl convertFromProtoFormat(ResourceProto p) { + return new ResourcePBImpl(p); + } + + private ResourceProto convertToProtoFormat(Resource t) { + return ((ResourcePBImpl) t).getProto(); + } + + private PriorityPBImpl convertFromProtoFormat(PriorityProto p) { + return new PriorityPBImpl(p); + } + + private PriorityProto convertToProtoFormat(Priority p) { + return ((PriorityPBImpl) p).getProto(); + } + + private ContainerStateProto + convertToProtoFormat(ContainerState containerState) { + return ProtoUtils.convertToProtoFormat(containerState); + } + + private ContainerState convertFromProtoFormat( + ContainerStateProto containerState) { + return ProtoUtils.convertFromProtoFormat(containerState); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java index b660f7dd577..8d737448567 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java @@ -33,6 +33,7 @@ import org.apache.hadoop.yarn.api.records.NodeState; import org.apache.hadoop.yarn.api.records.QueueACL; import org.apache.hadoop.yarn.api.records.QueueState; +import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState; import org.apache.hadoop.yarn.api.records.YarnApplicationState; import org.apache.hadoop.yarn.proto.YarnProtos.AMCommandProto; 
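
Equality for every record in this patch is defined on the serialized proto, not on the cached Java objects; two instances compare equal as soon as their protos match, regardless of which fields are still held locally. A small sketch of that contract:

    // Both wrap the default proto instance, so they are equal even though
    // neither has materialized any local fields yet.
    ContainerReportPBImpl a = new ContainerReportPBImpl();
    ContainerReportPBImpl b = new ContainerReportPBImpl();
    assert a.equals(b) && a.hashCode() == b.hashCode();
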
import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationAccessTypeProto; @@ -45,6 +46,7 @@ import org.apache.hadoop.yarn.proto.YarnProtos.NodeStateProto; import org.apache.hadoop.yarn.proto.YarnProtos.QueueACLProto; import org.apache.hadoop.yarn.proto.YarnProtos.QueueStateProto; +import org.apache.hadoop.yarn.proto.YarnProtos.YarnApplicationAttemptStateProto; import org.apache.hadoop.yarn.proto.YarnProtos.YarnApplicationStateProto; import com.google.protobuf.ByteString; @@ -96,6 +98,21 @@ public static YarnApplicationState convertFromProtoFormat(YarnApplicationStatePr return YarnApplicationState.valueOf(e.name()); } + /* + * YarnApplicationAttemptState + */ + private static String YARN_APPLICATION_ATTEMPT_STATE_PREFIX = "APP_ATTEMPT_"; + public static YarnApplicationAttemptStateProto convertToProtoFormat( + YarnApplicationAttemptState e) { + return YarnApplicationAttemptStateProto + .valueOf(YARN_APPLICATION_ATTEMPT_STATE_PREFIX + e.name()); + } + public static YarnApplicationAttemptState convertFromProtoFormat( + YarnApplicationAttemptStateProto e) { + return YarnApplicationAttemptState.valueOf(e.name().replace( + YARN_APPLICATION_ATTEMPT_STATE_PREFIX, "")); + } + /* * ApplicationResourceUsageReport */ diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/AHSProxy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/AHSProxy.java new file mode 100644 index 00000000000..d52348747b2 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/AHSProxy.java @@ -0,0 +1,57 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
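
The ProtoUtils change above maps between the Java enum and the prefixed proto enum; a round-trip sketch (APP_ATTEMPT_RUNNING is the proto-side name of YarnApplicationAttemptState.RUNNING):

    YarnApplicationAttemptStateProto proto =
        ProtoUtils.convertToProtoFormat(YarnApplicationAttemptState.RUNNING);
    // proto.name() is "APP_ATTEMPT_RUNNING"; stripping the prefix restores RUNNING.
    YarnApplicationAttemptState state = ProtoUtils.convertFromProtoFormat(proto);
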
+ */
+
+package org.apache.hadoop.yarn.client;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.security.PrivilegedAction;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.ipc.YarnRPC;
+
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+@SuppressWarnings("unchecked")
+public class AHSProxy<T> {
+
+  private static final Log LOG = LogFactory.getLog(AHSProxy.class);
+
+  public static <T> T createAHSProxy(final Configuration conf,
+      final Class<T> protocol, InetSocketAddress ahsAddress) throws IOException {
+    LOG.info("Connecting to Application History server at " + ahsAddress);
+    return (T) getProxy(conf, protocol, ahsAddress);
+  }
+
+  protected static <T> T getProxy(final Configuration conf,
+      final Class<T> protocol, final InetSocketAddress rmAddress)
+      throws IOException {
+    return UserGroupInformation.getCurrentUser().doAs(
+      new PrivilegedAction<T>() {
+        @Override
+        public T run() {
+          return (T) YarnRPC.create(conf).getProxy(protocol, rmAddress, conf);
+        }
+      });
+  }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/StringHelper.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/StringHelper.java
index fd285b4141d..a48b3c0965b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/StringHelper.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/StringHelper.java
@@ -178,4 +178,8 @@ private static void uappend(StringBuilder sb, String part) {
   public static String percent(double value) {
     return String.format("%.2f", value * 100);
   }
+
+  public static String getPartUrl(String url, String part) {
+    return url.substring(url.indexOf(part));
+  }
 }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/YarnWebParams.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/YarnWebParams.java
index 1f59b87c927..91d2a2019ab 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/YarnWebParams.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/YarnWebParams.java
@@ -24,6 +24,7 @@ public interface YarnWebParams {
   String NM_NODENAME = "nm.id";
   String APPLICATION_ID = "app.id";
+  String APPLICATION_ATTEMPT_ID = "appattempt.id";
   String CONTAINER_ID = "container.id";
   String CONTAINER_LOG_TYPE= "log.type";
   String ENTITY_STRING = "entity.string";
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
index 065d6807647..4a288c44af9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.yarn.webapp.util;
 
+import static
org.apache.hadoop.yarn.util.StringHelper.join;
+
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.net.UnknownHostException;
@@ -27,7 +29,9 @@ import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.http.HttpConfig.Policy;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.util.ConverterUtils;
 
 import com.google.common.base.Joiner;
 
@@ -144,6 +148,16 @@ public static String getNMWebAppURLWithoutScheme(Configuration conf) {
         YarnConfiguration.DEFAULT_NM_WEBAPP_ADDRESS);
     }
   }
+
+  public static String getAHSWebAppURLWithoutScheme(Configuration conf) {
+    if (HttpConfig.isSecure()) {
+      return conf.get(YarnConfiguration.AHS_WEBAPP_HTTPS_ADDRESS,
+          YarnConfiguration.DEFAULT_AHS_WEBAPP_HTTPS_ADDRESS);
+    } else {
+      return conf.get(YarnConfiguration.AHS_WEBAPP_ADDRESS,
+          YarnConfiguration.DEFAULT_AHS_WEBAPP_ADDRESS);
+    }
+  }
 
   /**
    * if url has scheme then it will be returned as it is else it will return
@@ -160,4 +174,11 @@ public static String getURLWithScheme(String schemePrefix, String url) {
       return schemePrefix + url;
     }
   }
+
+  public static String getLogUrl(String nodeHttpAddress, String allocatedNode,
+      ContainerId containerId, String user) {
+    return join(HttpConfig.getSchemePrefix(), nodeHttpAddress, "/logs", "/",
+        allocatedNode, "/", ConverterUtils.toString(containerId), "/",
+        ConverterUtils.toString(containerId), "/", user);
+  }
 }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/applicationhistory/.keep b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/applicationhistory/.keep
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index ba6264e0ae3..c4937421f55 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -564,6 +564,30 @@
     <value>org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy</value>
   </property>
 
+  <property>
+    <description>Indicate to ResourceManager as well as clients whether
+    history-service is enabled or not. If enabled, ResourceManager starts
+    recording historical data that ApplicationHistory service can consume.
+    Similarly, clients can redirect to the history service when applications
+    finish if this is enabled.</description>
+    <name>yarn.ahs.enabled</name>
+    <value>false</value>
+  </property>
+
+  <property>
+    <description>Number of worker threads that write the history data.</description>
+    <name>yarn.resourcemanager.history-writer.multi-threaded-dispatcher.pool-size</name>
+    <value>10</value>
+  </property>
+
+  <property>
+    <description>The implementation class of ApplicationHistoryStore, which is
+    to be used by RMApplicationHistoryWriter.
+    </description>
+    <name>yarn.resourcemanager.history-writer.class</name>
+    <value>org.apache.hadoop.yarn.server.applicationhistoryservice.NullApplicationHistoryStore</value>
+  </property>
+
   <property>
     <description>The hostname of the NM.</description>
@@ -1041,6 +1065,61 @@
   </property>
 
+  <property>
+    <description>The hostname of the AHS.</description>
+    <name>yarn.ahs.hostname</name>
+    <value>0.0.0.0</value>
+  </property>
+
+  <property>
+    <description>The http address of the AHS web application.</description>
+    <name>yarn.ahs.webapp.address</name>
+    <value>${yarn.ahs.hostname}:8188</value>
+  </property>
+
+  <property>
+    <description>The https address of the AHS web application.</description>
+    <name>yarn.ahs.webapp.https.address</name>
+    <value>${yarn.ahs.hostname}:8190</value>
+  </property>
+
+  <property>
+    <description>URI pointing to the location of the FileSystem path where
+    the history will be persisted.
This must be supplied when using
+    org.apache.hadoop.yarn.server.applicationhistoryservice.FileSystemApplicationHistoryStore
+    as the value for yarn.resourcemanager.history-writer.store.class</description>
+    <name>yarn.ahs.fs-history-store.uri</name>
+    <value>${hadoop.log.dir}/yarn/system/ahstore</value>
+  </property>
+
+  <property>
+    <description>This is the default address for the Application History server
+    to start the RPC server.</description>
+    <name>yarn.ahs.address</name>
+    <value>0.0.0.0:10200</value>
+  </property>
+
+  <property>
+    <description>Client thread count to serve the client requests.</description>
+    <name>yarn.ahs.client.thread-count</name>
+    <value>10</value>
+  </property>
+
+  <property>
+    <description>T-file compression types used to compress history data.</description>
+    <name>yarn.ahs.fs-history-store.compression-type</name>
+    <value>none</value>
+  </property>
+
+  <property>
+    <description>Store class name for history store, defaulting to file
+    system store.</description>
+    <name>yarn.ahs.store.class</name>
+    <value>org.apache.hadoop.yarn.server.applicationhistoryservice.FileSystemApplicationHistoryStore</value>
+  </property>
+
   <property>
     <description>The interval that the yarn client library uses to poll the
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml
new file mode 100644
index 00000000000..d314d026e99
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml
@@ -0,0 +1,172 @@
+<?xml version="1.0"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+                      http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <parent>
+    <artifactId>hadoop-yarn-server</artifactId>
+    <groupId>org.apache.hadoop</groupId>
+    <version>3.0.0-SNAPSHOT</version>
+  </parent>
+  <modelVersion>4.0.0</modelVersion>
+  <groupId>org.apache.hadoop</groupId>
+  <artifactId>hadoop-yarn-server-applicationhistoryservice</artifactId>
+  <version>3.0.0-SNAPSHOT</version>
+  <name>hadoop-yarn-server-applicationhistoryservice</name>
+
+  <properties>
+    <yarn.basedir>${project.parent.parent.basedir}</yarn.basedir>
+  </properties>
+
+  <dependencies>
+    <dependency>
+      <groupId>javax.servlet</groupId>
+      <artifactId>servlet-api</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <scope>provided</scope>
+      <exclusions>
+        <exclusion>
+          <groupId>commons-el</groupId>
+          <artifactId>commons-el</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>tomcat</groupId>
+          <artifactId>jasper-runtime</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>tomcat</groupId>
+          <artifactId>jasper-compiler</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.mortbay.jetty</groupId>
+          <artifactId>jsp-2.1-jetty</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-annotations</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.mockito</groupId>
+      <artifactId>mockito-all</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>com.google.inject.extensions</groupId>
+      <artifactId>guice-servlet</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.google.protobuf</groupId>
+      <artifactId>protobuf-java</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>com.google.inject</groupId>
+      <artifactId>guice</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.sun.jersey.jersey-test-framework</groupId>
+      <artifactId>jersey-test-framework-core</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>com.sun.jersey</groupId>
+      <artifactId>jersey-json</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.sun.jersey.contribs</groupId>
+      <artifactId>jersey-guice</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-common</artifactId>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-common</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>javax.xml.bind</groupId>
+      <artifactId>jaxb-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.codehaus.jettison</groupId>
+      <artifactId>jettison</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.sun.jersey</groupId>
+      <artifactId>jersey-core</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.sun.jersey</groupId>
+      <artifactId>jersey-client</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.google.guava</groupId>
+      <artifactId>guava</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>commons-logging</groupId>
+      <artifactId>commons-logging</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-server-common</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>com.sun.jersey.jersey-test-framework</groupId>
+      <artifactId>jersey-test-framework-grizzly2</artifactId>
+      <scope>test</scope>
+    </dependency>
+  </dependencies>
+</project>
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryClientService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryClientService.java
new file mode 100644
index 00000000000..56558dabc0a
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryClientService.java
@@ -0,0 +1,211 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license
agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.applicationhistoryservice; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.ArrayList; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.ipc.Server; +import org.apache.hadoop.service.AbstractService; +import org.apache.hadoop.yarn.api.ApplicationHistoryProtocol; +import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenRequest; +import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetContainersRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetContainersResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenResponse; +import org.apache.hadoop.yarn.api.protocolrecords.RenewDelegationTokenRequest; +import org.apache.hadoop.yarn.api.protocolrecords.RenewDelegationTokenResponse; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ApplicationReport; +import org.apache.hadoop.yarn.api.records.ContainerReport; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.ApplicationAttemptNotFoundException; +import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException; +import org.apache.hadoop.yarn.exceptions.ContainerNotFoundException; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.ipc.YarnRPC; + +public class ApplicationHistoryClientService extends AbstractService { + private static final Log LOG = LogFactory + .getLog(ApplicationHistoryClientService.class); + private ApplicationHistoryManager history; + private ApplicationHistoryProtocol 
protocolHandler; + private Server server; + private InetSocketAddress bindAddress; + + public ApplicationHistoryClientService(ApplicationHistoryManager history) { + super("ApplicationHistoryClientService"); + this.history = history; + this.protocolHandler = new ApplicationHSClientProtocolHandler(); + } + + protected void serviceStart() throws Exception { + Configuration conf = getConfig(); + YarnRPC rpc = YarnRPC.create(conf); + InetSocketAddress address = + conf.getSocketAddr(YarnConfiguration.AHS_ADDRESS, + YarnConfiguration.DEFAULT_AHS_ADDRESS, + YarnConfiguration.DEFAULT_AHS_PORT); + + server = + rpc.getServer(ApplicationHistoryProtocol.class, protocolHandler, + address, conf, null, conf.getInt( + YarnConfiguration.AHS_CLIENT_THREAD_COUNT, + YarnConfiguration.DEFAULT_AHS_CLIENT_THREAD_COUNT)); + + server.start(); + this.bindAddress = + conf.updateConnectAddr(YarnConfiguration.AHS_ADDRESS, + server.getListenerAddress()); + LOG.info("Instantiated ApplicationHistoryClientService at " + + this.bindAddress); + + super.serviceStart(); + } + + @Override + protected void serviceStop() throws Exception { + if (server != null) { + server.stop(); + } + super.serviceStop(); + } + + @Private + public ApplicationHistoryProtocol getClientHandler() { + return this.protocolHandler; + } + + @Private + public InetSocketAddress getBindAddress() { + return this.bindAddress; + } + + private class ApplicationHSClientProtocolHandler implements + ApplicationHistoryProtocol { + + @Override + public CancelDelegationTokenResponse cancelDelegationToken( + CancelDelegationTokenRequest request) throws YarnException, IOException { + // TODO Auto-generated method stub + return null; + } + + @Override + public GetApplicationAttemptReportResponse getApplicationAttemptReport( + GetApplicationAttemptReportRequest request) throws YarnException, + IOException { + try { + GetApplicationAttemptReportResponse response = + GetApplicationAttemptReportResponse.newInstance(history + .getApplicationAttempt(request.getApplicationAttemptId())); + return response; + } catch (IOException e) { + throw new ApplicationAttemptNotFoundException(e.getMessage()); + } + } + + @Override + public GetApplicationAttemptsResponse getApplicationAttempts( + GetApplicationAttemptsRequest request) throws YarnException, + IOException { + GetApplicationAttemptsResponse response = + GetApplicationAttemptsResponse + .newInstance(new ArrayList(history + .getApplicationAttempts(request.getApplicationId()).values())); + return response; + } + + @Override + public GetApplicationReportResponse getApplicationReport( + GetApplicationReportRequest request) throws YarnException, IOException { + try { + ApplicationId applicationId = request.getApplicationId(); + GetApplicationReportResponse response = + GetApplicationReportResponse.newInstance(history + .getApplication(applicationId)); + return response; + } catch (IOException e) { + throw new ApplicationNotFoundException(e.getMessage()); + } + } + + @Override + public GetApplicationsResponse getApplications( + GetApplicationsRequest request) throws YarnException, IOException { + GetApplicationsResponse response = + GetApplicationsResponse.newInstance(new ArrayList( + history.getAllApplications().values())); + return response; + } + + @Override + public GetContainerReportResponse getContainerReport( + GetContainerReportRequest request) throws YarnException, IOException { + try { + GetContainerReportResponse response = + GetContainerReportResponse.newInstance(history.getContainer(request + .getContainerId())); + 
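+      // Editorial note (not part of the original patch): each lookup in this
+      // protocol handler follows the same shape -- delegate to the
+      // ApplicationHistoryManager, wrap the result in the matching *Response
+      // record, and translate the store's IOException into a typed
+      // YarnException (here, ContainerNotFoundException in the catch below)
+      // so RPC clients see a meaningful error rather than a raw I/O failure.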
return response; + } catch (IOException e) { + throw new ContainerNotFoundException(e.getMessage()); + } + } + + @Override + public GetContainersResponse getContainers(GetContainersRequest request) + throws YarnException, IOException { + GetContainersResponse response = + GetContainersResponse.newInstance(new ArrayList( + history.getContainers(request.getApplicationAttemptId()).values())); + return response; + } + + @Override + public GetDelegationTokenResponse getDelegationToken( + GetDelegationTokenRequest request) throws YarnException, IOException { + // TODO Auto-generated method stub + return null; + } + + @Override + public RenewDelegationTokenResponse renewDelegationToken( + RenewDelegationTokenRequest request) throws YarnException, IOException { + // TODO Auto-generated method stub + return null; + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManager.java new file mode 100644 index 00000000000..db25d298b3c --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManager.java @@ -0,0 +1,28 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.applicationhistoryservice; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.yarn.server.api.ApplicationContext; + +@InterfaceAudience.Public +@InterfaceStability.Unstable +public interface ApplicationHistoryManager extends ApplicationContext { +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerImpl.java new file mode 100644 index 00000000000..1e13a233629 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerImpl.java @@ -0,0 +1,222 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.applicationhistoryservice; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.Map.Entry; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.service.AbstractService; +import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ApplicationReport; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.ContainerReport; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptHistoryData; +import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationHistoryData; +import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerHistoryData; + +import com.google.common.annotations.VisibleForTesting; + +public class ApplicationHistoryManagerImpl extends AbstractService implements + ApplicationHistoryManager { + private static final Log LOG = LogFactory + .getLog(ApplicationHistoryManagerImpl.class); + private static final String UNAVAILABLE = "N/A"; + + private ApplicationHistoryStore historyStore; + + public ApplicationHistoryManagerImpl() { + super(ApplicationHistoryManagerImpl.class.getName()); + } + + @Override + protected void serviceInit(Configuration conf) throws Exception { + LOG.info("ApplicationHistory Init"); + historyStore = createApplicationHistoryStore(conf); + historyStore.init(conf); + super.serviceInit(conf); + } + + @Override + protected void serviceStart() throws Exception { + LOG.info("Starting ApplicationHistory"); + historyStore.start(); + super.serviceStart(); + } + + @Override + protected void serviceStop() throws Exception { + LOG.info("Stopping ApplicationHistory"); + historyStore.stop(); + super.serviceStop(); + } + + protected ApplicationHistoryStore createApplicationHistoryStore( + Configuration conf) { + return ReflectionUtils.newInstance(conf.getClass( + YarnConfiguration.AHS_STORE, FileSystemApplicationHistoryStore.class, + ApplicationHistoryStore.class), conf); + } + + @Override + public ContainerReport getAMContainer(ApplicationAttemptId appAttemptId) + throws IOException { + return convertToContainerReport(historyStore.getAMContainer(appAttemptId)); + } + + @Override + public Map getAllApplications() + throws IOException { + Map histData = + historyStore.getAllApplications(); + HashMap applicationsReport = + new HashMap(); + 
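+    // Editorial note: the generic type parameters in this file appear to have
+    // been stripped when the patch was extracted; these declarations
+    // presumably read Map<ApplicationId, ApplicationHistoryData> and
+    // HashMap<ApplicationId, ApplicationReport>. The loop below converts each
+    // store-level ApplicationHistoryData into the public ApplicationReport
+    // record that is served to clients.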
for (Entry entry : histData + .entrySet()) { + applicationsReport.put(entry.getKey(), + convertToApplicationReport(entry.getValue())); + } + return applicationsReport; + } + + @Override + public ApplicationReport getApplication(ApplicationId appId) + throws IOException { + return convertToApplicationReport(historyStore.getApplication(appId)); + } + + private ApplicationReport convertToApplicationReport( + ApplicationHistoryData appHistory) throws IOException { + ApplicationAttemptId currentApplicationAttemptId = null; + String trackingUrl = UNAVAILABLE; + String host = UNAVAILABLE; + int rpcPort = -1; + + ApplicationAttemptHistoryData lastAttempt = + getLastAttempt(appHistory.getApplicationId()); + if (lastAttempt != null) { + currentApplicationAttemptId = lastAttempt.getApplicationAttemptId(); + trackingUrl = lastAttempt.getTrackingURL(); + host = lastAttempt.getHost(); + rpcPort = lastAttempt.getRPCPort(); + } + return ApplicationReport.newInstance(appHistory.getApplicationId(), + currentApplicationAttemptId, appHistory.getUser(), appHistory.getQueue(), + appHistory.getApplicationName(), host, rpcPort, null, + appHistory.getYarnApplicationState(), appHistory.getDiagnosticsInfo(), + trackingUrl, appHistory.getStartTime(), appHistory.getFinishTime(), + appHistory.getFinalApplicationStatus(), null, "", 100, + appHistory.getApplicationType(), null); + } + + private ApplicationAttemptHistoryData getLastAttempt(ApplicationId appId) + throws IOException { + Map attempts = + historyStore.getApplicationAttempts(appId); + ApplicationAttemptId prevMaxAttemptId = null; + for (ApplicationAttemptId attemptId : attempts.keySet()) { + if (prevMaxAttemptId == null) { + prevMaxAttemptId = attemptId; + } else { + if (prevMaxAttemptId.getAttemptId() < attemptId.getAttemptId()) { + prevMaxAttemptId = attemptId; + } + } + } + return attempts.get(prevMaxAttemptId); + } + + private ApplicationAttemptReport convertToApplicationAttemptReport( + ApplicationAttemptHistoryData appAttemptHistory) { + return ApplicationAttemptReport.newInstance( + appAttemptHistory.getApplicationAttemptId(), appAttemptHistory.getHost(), + appAttemptHistory.getRPCPort(), appAttemptHistory.getTrackingURL(), + appAttemptHistory.getDiagnosticsInfo(), + appAttemptHistory.getYarnApplicationAttemptState(), + appAttemptHistory.getMasterContainerId()); + } + + @Override + public ApplicationAttemptReport getApplicationAttempt( + ApplicationAttemptId appAttemptId) throws IOException { + return convertToApplicationAttemptReport(historyStore + .getApplicationAttempt(appAttemptId)); + } + + @Override + public Map + getApplicationAttempts(ApplicationId appId) throws IOException { + Map histData = + historyStore.getApplicationAttempts(appId); + HashMap applicationAttemptsReport = + new HashMap(); + for (Entry entry : histData + .entrySet()) { + applicationAttemptsReport.put(entry.getKey(), + convertToApplicationAttemptReport(entry.getValue())); + } + return applicationAttemptsReport; + } + + @Override + public ContainerReport getContainer(ContainerId containerId) + throws IOException { + return convertToContainerReport(historyStore.getContainer(containerId)); + } + + private ContainerReport convertToContainerReport( + ContainerHistoryData containerHistory) { + return ContainerReport.newInstance(containerHistory.getContainerId(), + containerHistory.getAllocatedResource(), + containerHistory.getAssignedNode(), containerHistory.getPriority(), + containerHistory.getStartTime(), containerHistory.getFinishTime(), + containerHistory.getDiagnosticsInfo(), 
containerHistory.getLogURL(), + containerHistory.getContainerExitStatus(), + containerHistory.getContainerState()); + } + + @Override + public Map getContainers( + ApplicationAttemptId appAttemptId) throws IOException { + Map histData = + historyStore.getContainers(appAttemptId); + HashMap containersReport = + new HashMap(); + for (Entry entry : histData.entrySet()) { + containersReport.put(entry.getKey(), + convertToContainerReport(entry.getValue())); + } + return containersReport; + } + + @Private + @VisibleForTesting + public ApplicationHistoryStore getHistoryStore() { + return this.historyStore; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryReader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryReader.java new file mode 100644 index 00000000000..590853a3537 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryReader.java @@ -0,0 +1,117 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.applicationhistoryservice; + +import java.io.IOException; +import java.util.Map; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptHistoryData; +import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationHistoryData; +import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerHistoryData; + +@InterfaceAudience.Public +@InterfaceStability.Unstable +public interface ApplicationHistoryReader { + + /** + * This method returns Application {@link ApplicationHistoryData} for the + * specified {@link ApplicationId}. + * + * @param appId + * + * @return {@link ApplicationHistoryData} for the ApplicationId. + * @throws IOException + */ + ApplicationHistoryData getApplication(ApplicationId appId) throws IOException; + + /** + * This method returns all Application {@link ApplicationHistoryData}s + * + * @return map of {@link ApplicationId} to {@link ApplicationHistoryData}s. 
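+   *         (Editorial note: the generics were lost in extraction; the
+   *         return type is presumably Map<ApplicationId,
+   *         ApplicationHistoryData>.)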
+ * @throws IOException + */ + Map getAllApplications() + throws IOException; + + /** + * Application can have multiple application attempts + * {@link ApplicationAttemptHistoryData}. This method returns the all + * {@link ApplicationAttemptHistoryData}s for the Application. + * + * @param appId + * + * @return all {@link ApplicationAttemptHistoryData}s for the Application. + * @throws IOException + */ + Map + getApplicationAttempts(ApplicationId appId) throws IOException; + + /** + * This method returns {@link ApplicationAttemptHistoryData} for specified + * {@link ApplicationId}. + * + * @param appAttemptId + * {@link ApplicationAttemptId} + * @return {@link ApplicationAttemptHistoryData} for ApplicationAttemptId + * @throws IOException + */ + ApplicationAttemptHistoryData getApplicationAttempt( + ApplicationAttemptId appAttemptId) throws IOException; + + /** + * This method returns {@link ContainerHistoryData} for specified + * {@link ContainerId}. + * + * @param containerId + * {@link ContainerId} + * @return {@link ContainerHistoryData} for ContainerId + * @throws IOException + */ + ContainerHistoryData getContainer(ContainerId containerId) throws IOException; + + /** + * This method returns {@link ContainerHistoryData} for specified + * {@link ApplicationAttemptId}. + * + * @param appAttemptId + * {@link ApplicationAttemptId} + * @return {@link ContainerHistoryData} for ApplicationAttemptId + * @throws IOException + */ + ContainerHistoryData getAMContainer(ApplicationAttemptId appAttemptId) + throws IOException; + + /** + * This method returns Map{@link ContainerId} to {@link ContainerHistoryData} + * for specified {@link ApplicationAttemptId}. + * + * @param appAttemptId + * {@link ApplicationAttemptId} + * @return Map{@link ContainerId} to {@link ContainerHistoryData} for + * ApplicationAttemptId + * @throws IOException + */ + Map getContainers( + ApplicationAttemptId appAttemptId) throws IOException; +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java new file mode 100644 index 00000000000..3a864c80698 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java @@ -0,0 +1,159 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.applicationhistoryservice; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; +import org.apache.hadoop.metrics2.source.JvmMetrics; +import org.apache.hadoop.service.CompositeService; +import org.apache.hadoop.service.Service; +import org.apache.hadoop.util.ExitUtil; +import org.apache.hadoop.util.ShutdownHookManager; +import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.yarn.YarnUncaughtExceptionHandler; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; +import org.apache.hadoop.yarn.server.applicationhistoryservice.webapp.AHSWebApp; +import org.apache.hadoop.yarn.webapp.WebApp; +import org.apache.hadoop.yarn.webapp.WebApps; +import org.apache.hadoop.yarn.webapp.util.WebAppUtils; + +import com.google.common.annotations.VisibleForTesting; + +/** + * History server that keeps track of all types of history in the cluster. + * Application specific history to start with. + */ +public class ApplicationHistoryServer extends CompositeService { + + public static final int SHUTDOWN_HOOK_PRIORITY = 30; + private static final Log LOG = LogFactory + .getLog(ApplicationHistoryServer.class); + + ApplicationHistoryClientService ahsClientService; + ApplicationHistoryManager historyManager; + private WebApp webApp; + + public ApplicationHistoryServer() { + super(ApplicationHistoryServer.class.getName()); + } + + @Override + protected void serviceInit(Configuration conf) throws Exception { + historyManager = createApplicationHistory(); + ahsClientService = createApplicationHistoryClientService(historyManager); + addService(ahsClientService); + addService((Service) historyManager); + super.serviceInit(conf); + } + + @Override + protected void serviceStart() throws Exception { + DefaultMetricsSystem.initialize("ApplicationHistoryServer"); + JvmMetrics.initSingleton("ApplicationHistoryServer", null); + + startWebApp(); + super.serviceStart(); + } + + @Override + protected void serviceStop() throws Exception { + if (webApp != null) { + webApp.stop(); + } + + DefaultMetricsSystem.shutdown(); + super.serviceStop(); + } + + @Private + @VisibleForTesting + public ApplicationHistoryClientService getClientService() { + return this.ahsClientService; + } + + protected ApplicationHistoryClientService + createApplicationHistoryClientService( + ApplicationHistoryManager historyManager) { + return new ApplicationHistoryClientService(historyManager); + } + + protected ApplicationHistoryManager createApplicationHistory() { + return new ApplicationHistoryManagerImpl(); + } + + protected ApplicationHistoryManager getApplicationHistory() { + return this.historyManager; + } + + static ApplicationHistoryServer launchAppHistoryServer(String[] args) { + Thread + .setDefaultUncaughtExceptionHandler(new YarnUncaughtExceptionHandler()); + StringUtils.startupShutdownMessage(ApplicationHistoryServer.class, args, + LOG); + ApplicationHistoryServer appHistoryServer = null; + try { + appHistoryServer = new ApplicationHistoryServer(); + ShutdownHookManager.get().addShutdownHook( + new CompositeServiceShutdownHook(appHistoryServer), + SHUTDOWN_HOOK_PRIORITY); + YarnConfiguration conf = new YarnConfiguration(); + appHistoryServer.init(conf); + appHistoryServer.start(); + } catch (Throwable t) { + 
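+      // Editorial note: fail-fast startup -- any Throwable raised while
+      // wiring up or starting the services is logged below and the JVM is
+      // terminated via ExitUtil, mirroring how the other YARN daemons
+      // bootstrap.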
LOG.fatal("Error starting ApplicationHistoryServer", t); + ExitUtil.terminate(-1, "Error starting ApplicationHistoryServer"); + } + return appHistoryServer; + } + + public static void main(String[] args) { + launchAppHistoryServer(args); + } + + protected ApplicationHistoryManager createApplicationHistoryManager( + Configuration conf) { + return new ApplicationHistoryManagerImpl(); + } + + protected void startWebApp() { + String bindAddress = WebAppUtils.getAHSWebAppURLWithoutScheme(getConfig()); + LOG.info("Instantiating AHSWebApp at " + bindAddress); + try { + webApp = + WebApps + .$for("applicationhistory", ApplicationHistoryClientService.class, + ahsClientService, "ws") + .with(getConfig()) + .withHttpSpnegoPrincipalKey( + YarnConfiguration.AHS_WEBAPP_SPNEGO_USER_NAME_KEY) + .withHttpSpnegoKeytabKey( + YarnConfiguration.AHS_WEBAPP_SPNEGO_KEYTAB_FILE_KEY) + .at(bindAddress).start(new AHSWebApp(historyManager)); + } catch (Exception e) { + String msg = "AHSWebApp failed to start."; + LOG.error(msg, e); + throw new YarnRuntimeException(msg, e); + } + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryStore.java new file mode 100644 index 00000000000..c26faefb4f2 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryStore.java @@ -0,0 +1,37 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.applicationhistoryservice; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.service.Service; + +/** + * This class is the abstract of the storage of the application history data. It + * is a {@link Service}, such that the implementation of this class can make use + * of the service life cycle to initialize and cleanup the storage. Users can + * access the storage via {@link ApplicationHistoryReader} and + * {@link ApplicationHistoryWriter} interfaces. 
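+ * (Editorial note: per the yarn-default.xml hunks earlier in this patch,
+ * FileSystemApplicationHistoryStore is the default value of
+ * yarn.ahs.store.class, while the RM-side writer default,
+ * yarn.resourcemanager.history-writer.class, is NullApplicationHistoryStore.
+ * Those hunks lost their XML tags in extraction; each entry presumably used
+ * the stock <property><description>...</description><name>...</name>
+ * <value>...</value></property> layout.)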
+ * + */ +@InterfaceAudience.Public +@InterfaceStability.Unstable +public interface ApplicationHistoryStore extends Service, + ApplicationHistoryReader, ApplicationHistoryWriter { +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryWriter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryWriter.java new file mode 100644 index 00000000000..09ba36df964 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryWriter.java @@ -0,0 +1,112 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.applicationhistoryservice; + +import java.io.IOException; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptFinishData; +import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptStartData; +import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationFinishData; +import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationStartData; +import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerFinishData; +import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerStartData; + +/** + * It is the interface of writing the application history, exposing the methods + * of writing {@link ApplicationStartData}, {@link ApplicationFinishData} + * {@link ApplicationAttemptStartData}, {@link ApplicationAttemptFinishData}, + * {@link ContainerStartData} and {@link ContainerFinishData}. + */ +@Private +@Unstable +public interface ApplicationHistoryWriter { + + /** + * This method writes the information of RMApp that is available + * when it starts. + * + * @param appStart + * the record of the information of RMApp that is + * available when it starts + * @throws IOException + */ + void applicationStarted(ApplicationStartData appStart) throws IOException; + + /** + * This method writes the information of RMApp that is available + * when it finishes. 
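+   * (Editorial note: in the file-system store each application maps to a
+   * single history file; applicationStarted(...) opens it, and this call is
+   * expected to be the last write, after which the file is closed.)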
+ * + * @param appFinish + * the record of the information of RMApp that is + * available when it finishes + * @throws IOException + */ + void applicationFinished(ApplicationFinishData appFinish) throws IOException; + + /** + * This method writes the information of RMAppAttempt that is + * available when it starts. + * + * @param appAttemptStart + * the record of the information of RMAppAttempt that is + * available when it starts + * @throws IOException + */ + void applicationAttemptStarted(ApplicationAttemptStartData appAttemptStart) + throws IOException; + + /** + * This method writes the information of RMAppAttempt that is + * available when it finishes. + * + * @param appAttemptFinish + * the record of the information of RMAppAttempt that is + * available when it finishes + * @throws IOException + */ + void + applicationAttemptFinished(ApplicationAttemptFinishData appAttemptFinish) + throws IOException; + + /** + * This method writes the information of RMContainer that is + * available when it starts. + * + * @param containerStart + * the record of the information of RMContainer that is + * available when it starts + * @throws IOException + */ + void containerStarted(ContainerStartData containerStart) throws IOException; + + /** + * This method writes the information of RMContainer that is + * available when it finishes. + * + * @param containerFinish + * the record of the information of RMContainer that is + * available when it finishes + * @throws IOException + */ + void containerFinished(ContainerFinishData containerFinish) + throws IOException; + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/FileSystemApplicationHistoryStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/FileSystemApplicationHistoryStore.java new file mode 100644 index 00000000000..9109dfccb15 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/FileSystemApplicationHistoryStore.java @@ -0,0 +1,841 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.applicationhistoryservice; + +import java.io.DataInput; +import java.io.DataInputStream; +import java.io.DataOutput; +import java.io.DataOutputStream; +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.Map.Entry; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.file.tfile.TFile; +import org.apache.hadoop.service.AbstractService; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.proto.ApplicationHistoryServerProtos.ApplicationAttemptFinishDataProto; +import org.apache.hadoop.yarn.proto.ApplicationHistoryServerProtos.ApplicationAttemptStartDataProto; +import org.apache.hadoop.yarn.proto.ApplicationHistoryServerProtos.ApplicationFinishDataProto; +import org.apache.hadoop.yarn.proto.ApplicationHistoryServerProtos.ApplicationStartDataProto; +import org.apache.hadoop.yarn.proto.ApplicationHistoryServerProtos.ContainerFinishDataProto; +import org.apache.hadoop.yarn.proto.ApplicationHistoryServerProtos.ContainerStartDataProto; +import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptFinishData; +import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptHistoryData; +import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptStartData; +import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationFinishData; +import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationHistoryData; +import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationStartData; +import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerFinishData; +import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerHistoryData; +import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerStartData; +import org.apache.hadoop.yarn.server.applicationhistoryservice.records.impl.pb.ApplicationAttemptFinishDataPBImpl; +import org.apache.hadoop.yarn.server.applicationhistoryservice.records.impl.pb.ApplicationAttemptStartDataPBImpl; +import org.apache.hadoop.yarn.server.applicationhistoryservice.records.impl.pb.ApplicationFinishDataPBImpl; +import org.apache.hadoop.yarn.server.applicationhistoryservice.records.impl.pb.ApplicationStartDataPBImpl; +import org.apache.hadoop.yarn.server.applicationhistoryservice.records.impl.pb.ContainerFinishDataPBImpl; +import org.apache.hadoop.yarn.server.applicationhistoryservice.records.impl.pb.ContainerStartDataPBImpl; +import 
org.apache.hadoop.yarn.util.ConverterUtils; + +import com.google.protobuf.InvalidProtocolBufferException; + +/** + * File system implementation of {@link ApplicationHistoryStore}. In this + * implementation, one application will have just one file in the file system, + * which contains all the history data of one application, and its attempts and + * containers. {@link #applicationStarted(ApplicationStartData)} is supposed to + * be invoked first when writing any history data of one application and it will + * open a file, while {@link #applicationFinished(ApplicationFinishData)} is + * supposed to be last writing operation and will close the file. + */ +@Public +@Unstable +public class FileSystemApplicationHistoryStore extends AbstractService + implements ApplicationHistoryStore { + + private static final Log LOG = LogFactory + .getLog(FileSystemApplicationHistoryStore.class); + + private static final String ROOT_DIR_NAME = "ApplicationHistoryDataRoot"; + private static final int MIN_BLOCK_SIZE = 256 * 1024; + private static final String START_DATA_SUFFIX = "_start"; + private static final String FINISH_DATA_SUFFIX = "_finish"; + private static final FsPermission ROOT_DIR_UMASK = FsPermission + .createImmutable((short) 0740); + private static final FsPermission HISTORY_FILE_UMASK = FsPermission + .createImmutable((short) 0640); + + private FileSystem fs; + private Path rootDirPath; + + private ConcurrentMap outstandingWriters = + new ConcurrentHashMap(); + + public FileSystemApplicationHistoryStore() { + super(FileSystemApplicationHistoryStore.class.getName()); + } + + @Override + public void serviceInit(Configuration conf) throws Exception { + Path fsWorkingPath = + new Path(conf.get(YarnConfiguration.FS_HISTORY_STORE_URI)); + rootDirPath = new Path(fsWorkingPath, ROOT_DIR_NAME); + try { + fs = fsWorkingPath.getFileSystem(conf); + fs.mkdirs(rootDirPath); + fs.setPermission(rootDirPath, ROOT_DIR_UMASK); + } catch (IOException e) { + LOG.error("Error when initializing FileSystemHistoryStorage", e); + throw e; + } + super.serviceInit(conf); + } + + @Override + public void serviceStop() throws Exception { + try { + for (Entry entry : outstandingWriters + .entrySet()) { + entry.getValue().close(); + } + outstandingWriters.clear(); + } finally { + IOUtils.cleanup(LOG, fs); + } + super.serviceStop(); + } + + @Override + public ApplicationHistoryData getApplication(ApplicationId appId) + throws IOException { + HistoryFileReader hfReader = getHistoryFileReader(appId); + try { + boolean readStartData = false; + boolean readFinishData = false; + ApplicationHistoryData historyData = + ApplicationHistoryData.newInstance(appId, null, null, null, null, + Long.MIN_VALUE, Long.MIN_VALUE, Long.MAX_VALUE, null, + FinalApplicationStatus.UNDEFINED, null); + while ((!readStartData || !readFinishData) && hfReader.hasNext()) { + HistoryFileReader.Entry entry = hfReader.next(); + if (entry.key.id.equals(appId.toString())) { + if (entry.key.suffix.equals(START_DATA_SUFFIX)) { + ApplicationStartData startData = + parseApplicationStartData(entry.value); + mergeApplicationHistoryData(historyData, startData); + readStartData = true; + } else if (entry.key.suffix.equals(FINISH_DATA_SUFFIX)) { + ApplicationFinishData finishData = + parseApplicationFinishData(entry.value); + mergeApplicationHistoryData(historyData, finishData); + readFinishData = true; + } + } + } + if (!readStartData && !readFinishData) { + return null; + } + if (!readStartData) { + LOG.warn("Start information is missing for application " + appId); + } 
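+      // Editorial note: reads tolerate half-written history. Every entity is
+      // stored as up to two TFile entries, "<id>_start" and "<id>_finish";
+      // whichever halves exist are merged into the mutable record, and a
+      // missing half only triggers the warnings here rather than a failure.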
+ if (!readFinishData) { + LOG.warn("Finish information is missing for application " + appId); + } + LOG.info("Completed reading history information of application " + appId); + return historyData; + } catch (IOException e) { + LOG.error("Error when reading history file of application " + appId); + throw e; + } finally { + hfReader.close(); + } + } + + @Override + public Map getAllApplications() + throws IOException { + Map historyDataMap = + new HashMap(); + FileStatus[] files = fs.listStatus(rootDirPath); + for (FileStatus file : files) { + ApplicationId appId = + ConverterUtils.toApplicationId(file.getPath().getName()); + try { + ApplicationHistoryData historyData = getApplication(appId); + if (historyData != null) { + historyDataMap.put(appId, historyData); + } + } catch (IOException e) { + // Eat the exception not to disturb the getting the next + // ApplicationHistoryData + LOG.error("History information of application " + appId + + " is not included into the result due to the exception", e); + } + } + return historyDataMap; + } + + @Override + public Map + getApplicationAttempts(ApplicationId appId) throws IOException { + Map historyDataMap = + new HashMap(); + Map> startFinshDataMap = + new HashMap>(); + HistoryFileReader hfReader = getHistoryFileReader(appId); + try { + while (hfReader.hasNext()) { + HistoryFileReader.Entry entry = hfReader.next(); + if (entry.key.id.startsWith(ConverterUtils.APPLICATION_ATTEMPT_PREFIX)) { + if (entry.key.suffix.equals(START_DATA_SUFFIX)) { + retrieveStartFinishData(appId, entry, startFinshDataMap, true); + } else if (entry.key.suffix.equals(FINISH_DATA_SUFFIX)) { + retrieveStartFinishData(appId, entry, startFinshDataMap, false); + } + } + } + LOG.info("Completed reading history information of all application" + + " attempts of application " + appId); + } catch (IOException e) { + LOG.info("Error when reading history information of some application" + + " attempts of application " + appId); + } finally { + hfReader.close(); + } + for (Map.Entry> entry : startFinshDataMap + .entrySet()) { + ApplicationAttemptHistoryData historyData = + ApplicationAttemptHistoryData.newInstance(entry.getKey(), null, -1, + null, null, null, FinalApplicationStatus.UNDEFINED, null); + mergeApplicationAttemptHistoryData(historyData, + entry.getValue().startData); + mergeApplicationAttemptHistoryData(historyData, + entry.getValue().finishData); + historyDataMap.put(entry.getKey(), historyData); + } + return historyDataMap; + } + + private + void + retrieveStartFinishData( + ApplicationId appId, + HistoryFileReader.Entry entry, + Map> startFinshDataMap, + boolean start) throws IOException { + ApplicationAttemptId appAttemptId = + ConverterUtils.toApplicationAttemptId(entry.key.id); + if (appAttemptId.getApplicationId().equals(appId)) { + StartFinishDataPair pair = + startFinshDataMap.get(appAttemptId); + if (pair == null) { + pair = + new StartFinishDataPair(); + startFinshDataMap.put(appAttemptId, pair); + } + if (start) { + pair.startData = parseApplicationAttemptStartData(entry.value); + } else { + pair.finishData = parseApplicationAttemptFinishData(entry.value); + } + } + } + + @Override + public ApplicationAttemptHistoryData getApplicationAttempt( + ApplicationAttemptId appAttemptId) throws IOException { + HistoryFileReader hfReader = + getHistoryFileReader(appAttemptId.getApplicationId()); + try { + boolean readStartData = false; + boolean readFinishData = false; + ApplicationAttemptHistoryData historyData = + ApplicationAttemptHistoryData.newInstance(appAttemptId, 
null, -1, + null, null, null, FinalApplicationStatus.UNDEFINED, null); + while ((!readStartData || !readFinishData) && hfReader.hasNext()) { + HistoryFileReader.Entry entry = hfReader.next(); + if (entry.key.id.equals(appAttemptId.toString())) { + if (entry.key.suffix.equals(START_DATA_SUFFIX)) { + ApplicationAttemptStartData startData = + parseApplicationAttemptStartData(entry.value); + mergeApplicationAttemptHistoryData(historyData, startData); + readStartData = true; + } else if (entry.key.suffix.equals(FINISH_DATA_SUFFIX)) { + ApplicationAttemptFinishData finishData = + parseApplicationAttemptFinishData(entry.value); + mergeApplicationAttemptHistoryData(historyData, finishData); + readFinishData = true; + } + } + } + if (!readStartData && !readFinishData) { + return null; + } + if (!readStartData) { + LOG.warn("Start information is missing for application attempt " + + appAttemptId); + } + if (!readFinishData) { + LOG.warn("Finish information is missing for application attempt " + + appAttemptId); + } + LOG.info("Completed reading history information of application attempt " + + appAttemptId); + return historyData; + } catch (IOException e) { + LOG.error("Error when reading history file of application attempt" + + appAttemptId); + throw e; + } finally { + hfReader.close(); + } + } + + @Override + public ContainerHistoryData getContainer(ContainerId containerId) + throws IOException { + HistoryFileReader hfReader = + getHistoryFileReader(containerId.getApplicationAttemptId() + .getApplicationId()); + try { + boolean readStartData = false; + boolean readFinishData = false; + ContainerHistoryData historyData = + ContainerHistoryData + .newInstance(containerId, null, null, null, Long.MIN_VALUE, + Long.MAX_VALUE, null, null, Integer.MAX_VALUE, null); + while ((!readStartData || !readFinishData) && hfReader.hasNext()) { + HistoryFileReader.Entry entry = hfReader.next(); + if (entry.key.id.equals(containerId.toString())) { + if (entry.key.suffix.equals(START_DATA_SUFFIX)) { + ContainerStartData startData = parseContainerStartData(entry.value); + mergeContainerHistoryData(historyData, startData); + readStartData = true; + } else if (entry.key.suffix.equals(FINISH_DATA_SUFFIX)) { + ContainerFinishData finishData = + parseContainerFinishData(entry.value); + mergeContainerHistoryData(historyData, finishData); + readFinishData = true; + } + } + } + if (!readStartData && !readFinishData) { + return null; + } + if (!readStartData) { + LOG.warn("Start information is missing for container " + containerId); + } + if (!readFinishData) { + LOG.warn("Finish information is missing for container " + containerId); + } + LOG.info("Completed reading history information of container " + + containerId); + return historyData; + } catch (IOException e) { + LOG.error("Error when reading history file of container " + containerId); + throw e; + } finally { + hfReader.close(); + } + } + + @Override + public ContainerHistoryData getAMContainer(ApplicationAttemptId appAttemptId) + throws IOException { + ApplicationAttemptHistoryData attemptHistoryData = + getApplicationAttempt(appAttemptId); + if (attemptHistoryData == null + || attemptHistoryData.getMasterContainerId() == null) { + return null; + } + return getContainer(attemptHistoryData.getMasterContainerId()); + } + + @Override + public Map getContainers( + ApplicationAttemptId appAttemptId) throws IOException { + Map historyDataMap = + new HashMap(); + Map> startFinshDataMap = + new HashMap>(); + HistoryFileReader hfReader = + 
getHistoryFileReader(appAttemptId.getApplicationId()); + try { + while (hfReader.hasNext()) { + HistoryFileReader.Entry entry = hfReader.next(); + if (entry.key.id.startsWith(ConverterUtils.CONTAINER_PREFIX)) { + if (entry.key.suffix.equals(START_DATA_SUFFIX)) { + retrieveStartFinishData(appAttemptId, entry, startFinshDataMap, + true); + } else if (entry.key.suffix.equals(FINISH_DATA_SUFFIX)) { + retrieveStartFinishData(appAttemptId, entry, startFinshDataMap, + false); + } + } + } + LOG.info("Completed reading history information of all conatiners" + + " of application attempt " + appAttemptId); + } catch (IOException e) { + LOG.info("Error when reading history information of some containers" + + " of application attempt " + appAttemptId); + } finally { + hfReader.close(); + } + for (Map.Entry> entry : startFinshDataMap + .entrySet()) { + ContainerHistoryData historyData = + ContainerHistoryData + .newInstance(entry.getKey(), null, null, null, Long.MIN_VALUE, + Long.MAX_VALUE, null, null, Integer.MAX_VALUE, null); + mergeContainerHistoryData(historyData, entry.getValue().startData); + mergeContainerHistoryData(historyData, entry.getValue().finishData); + historyDataMap.put(entry.getKey(), historyData); + } + return historyDataMap; + } + + private + void + retrieveStartFinishData( + ApplicationAttemptId appAttemptId, + HistoryFileReader.Entry entry, + Map> startFinshDataMap, + boolean start) throws IOException { + ContainerId containerId = ConverterUtils.toContainerId(entry.key.id); + if (containerId.getApplicationAttemptId().equals(appAttemptId)) { + StartFinishDataPair pair = + startFinshDataMap.get(containerId); + if (pair == null) { + pair = + new StartFinishDataPair(); + startFinshDataMap.put(containerId, pair); + } + if (start) { + pair.startData = parseContainerStartData(entry.value); + } else { + pair.finishData = parseContainerFinishData(entry.value); + } + } + } + + @Override + public void applicationStarted(ApplicationStartData appStart) + throws IOException { + HistoryFileWriter hfWriter = + outstandingWriters.get(appStart.getApplicationId()); + if (hfWriter == null) { + Path applicationHistoryFile = + new Path(rootDirPath, appStart.getApplicationId().toString()); + try { + hfWriter = new HistoryFileWriter(applicationHistoryFile); + LOG.info("Opened history file of application " + + appStart.getApplicationId()); + } catch (IOException e) { + LOG.error("Error when openning history file of application " + + appStart.getApplicationId()); + throw e; + } + outstandingWriters.put(appStart.getApplicationId(), hfWriter); + } else { + throw new IOException("History file of application " + + appStart.getApplicationId() + " is already opened"); + } + assert appStart instanceof ApplicationStartDataPBImpl; + try { + hfWriter.writeHistoryData(new HistoryDataKey(appStart.getApplicationId() + .toString(), START_DATA_SUFFIX), + ((ApplicationStartDataPBImpl) appStart).getProto().toByteArray()); + LOG.info("Start information of application " + + appStart.getApplicationId() + " is written"); + } catch (IOException e) { + LOG.error("Error when writing start information of application " + + appStart.getApplicationId()); + throw e; + } + } + + @Override + public void applicationFinished(ApplicationFinishData appFinish) + throws IOException { + HistoryFileWriter hfWriter = + getHistoryFileWriter(appFinish.getApplicationId()); + assert appFinish instanceof ApplicationFinishDataPBImpl; + try { + hfWriter.writeHistoryData(new HistoryDataKey(appFinish.getApplicationId() + .toString(), FINISH_DATA_SUFFIX), 
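+          // Editorial note: entries are keyed by entity id plus a
+          // "_start"/"_finish" suffix; the value (next argument) is the raw
+          // protobuf serialization of the corresponding record.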
+ ((ApplicationFinishDataPBImpl) appFinish).getProto().toByteArray()); + LOG.info("Finish information of application " + + appFinish.getApplicationId() + " is written"); + } catch (IOException e) { + LOG.error("Error when writing finish information of application " + + appFinish.getApplicationId()); + throw e; + } finally { + hfWriter.close(); + outstandingWriters.remove(appFinish.getApplicationId()); + } + } + + @Override + public void applicationAttemptStarted( + ApplicationAttemptStartData appAttemptStart) throws IOException { + HistoryFileWriter hfWriter = + getHistoryFileWriter(appAttemptStart.getApplicationAttemptId() + .getApplicationId()); + assert appAttemptStart instanceof ApplicationAttemptStartDataPBImpl; + try { + hfWriter.writeHistoryData(new HistoryDataKey(appAttemptStart + .getApplicationAttemptId().toString(), START_DATA_SUFFIX), + ((ApplicationAttemptStartDataPBImpl) appAttemptStart).getProto() + .toByteArray()); + LOG.info("Start information of application attempt " + + appAttemptStart.getApplicationAttemptId() + " is written"); + } catch (IOException e) { + LOG.error("Error when writing start information of application attempt " + + appAttemptStart.getApplicationAttemptId()); + throw e; + } + } + + @Override + public void applicationAttemptFinished( + ApplicationAttemptFinishData appAttemptFinish) throws IOException { + HistoryFileWriter hfWriter = + getHistoryFileWriter(appAttemptFinish.getApplicationAttemptId() + .getApplicationId()); + assert appAttemptFinish instanceof ApplicationAttemptFinishDataPBImpl; + try { + hfWriter.writeHistoryData(new HistoryDataKey(appAttemptFinish + .getApplicationAttemptId().toString(), FINISH_DATA_SUFFIX), + ((ApplicationAttemptFinishDataPBImpl) appAttemptFinish).getProto() + .toByteArray()); + LOG.info("Finish information of application attempt " + + appAttemptFinish.getApplicationAttemptId() + " is written"); + } catch (IOException e) { + LOG.error("Error when writing finish information of application attempt " + + appAttemptFinish.getApplicationAttemptId()); + throw e; + } + } + + @Override + public void containerStarted(ContainerStartData containerStart) + throws IOException { + HistoryFileWriter hfWriter = + getHistoryFileWriter(containerStart.getContainerId() + .getApplicationAttemptId().getApplicationId()); + assert containerStart instanceof ContainerStartDataPBImpl; + try { + hfWriter.writeHistoryData(new HistoryDataKey(containerStart + .getContainerId().toString(), START_DATA_SUFFIX), + ((ContainerStartDataPBImpl) containerStart).getProto().toByteArray()); + LOG.info("Start information of container " + + containerStart.getContainerId() + " is written"); + } catch (IOException e) { + LOG.error("Error when writing start information of container " + + containerStart.getContainerId()); + throw e; + } + } + + @Override + public void containerFinished(ContainerFinishData containerFinish) + throws IOException { + HistoryFileWriter hfWriter = + getHistoryFileWriter(containerFinish.getContainerId() + .getApplicationAttemptId().getApplicationId()); + assert containerFinish instanceof ContainerFinishDataPBImpl; + try { + hfWriter.writeHistoryData(new HistoryDataKey(containerFinish + .getContainerId().toString(), FINISH_DATA_SUFFIX), + ((ContainerFinishDataPBImpl) containerFinish).getProto().toByteArray()); + LOG.info("Finish information of container " + + containerFinish.getContainerId() + " is written"); + } catch (IOException e) { + LOG.error("Error when writing finish information of container " + + containerFinish.getContainerId()); + 
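+      // Editorial note: every other write path in this class rethrows after
+      // logging ("throw e;"). Its absence here may be a line lost from this
+      // copy of the patch rather than an intentional swallow; treat that as
+      // unverified.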
} + } + + private static ApplicationStartData parseApplicationStartData(byte[] value) + throws InvalidProtocolBufferException { + return new ApplicationStartDataPBImpl( + ApplicationStartDataProto.parseFrom(value)); + } + + private static ApplicationFinishData parseApplicationFinishData(byte[] value) + throws InvalidProtocolBufferException { + return new ApplicationFinishDataPBImpl( + ApplicationFinishDataProto.parseFrom(value)); + } + + private static ApplicationAttemptStartData parseApplicationAttemptStartData( + byte[] value) throws InvalidProtocolBufferException { + return new ApplicationAttemptStartDataPBImpl( + ApplicationAttemptStartDataProto.parseFrom(value)); + } + + private static ApplicationAttemptFinishData + parseApplicationAttemptFinishData(byte[] value) + throws InvalidProtocolBufferException { + return new ApplicationAttemptFinishDataPBImpl( + ApplicationAttemptFinishDataProto.parseFrom(value)); + } + + private static ContainerStartData parseContainerStartData(byte[] value) + throws InvalidProtocolBufferException { + return new ContainerStartDataPBImpl( + ContainerStartDataProto.parseFrom(value)); + } + + private static ContainerFinishData parseContainerFinishData(byte[] value) + throws InvalidProtocolBufferException { + return new ContainerFinishDataPBImpl( + ContainerFinishDataProto.parseFrom(value)); + } + + private static void mergeApplicationHistoryData( + ApplicationHistoryData historyData, ApplicationStartData startData) { + historyData.setApplicationName(startData.getApplicationName()); + historyData.setApplicationType(startData.getApplicationType()); + historyData.setQueue(startData.getQueue()); + historyData.setUser(startData.getUser()); + historyData.setSubmitTime(startData.getSubmitTime()); + historyData.setStartTime(startData.getStartTime()); + } + + private static void mergeApplicationHistoryData( + ApplicationHistoryData historyData, ApplicationFinishData finishData) { + historyData.setFinishTime(finishData.getFinishTime()); + historyData.setDiagnosticsInfo(finishData.getDiagnosticsInfo()); + historyData.setFinalApplicationStatus(finishData + .getFinalApplicationStatus()); + historyData.setYarnApplicationState(finishData.getYarnApplicationState()); + } + + private static void mergeApplicationAttemptHistoryData( + ApplicationAttemptHistoryData historyData, + ApplicationAttemptStartData startData) { + historyData.setHost(startData.getHost()); + historyData.setRPCPort(startData.getRPCPort()); + historyData.setMasterContainerId(startData.getMasterContainerId()); + } + + private static void mergeApplicationAttemptHistoryData( + ApplicationAttemptHistoryData historyData, + ApplicationAttemptFinishData finishData) { + historyData.setDiagnosticsInfo(finishData.getDiagnosticsInfo()); + historyData.setTrackingURL(finishData.getTrackingURL()); + historyData.setFinalApplicationStatus(finishData + .getFinalApplicationStatus()); + historyData.setYarnApplicationAttemptState(finishData + .getYarnApplicationAttemptState()); + } + + private static void mergeContainerHistoryData( + ContainerHistoryData historyData, ContainerStartData startData) { + historyData.setAllocatedResource(startData.getAllocatedResource()); + historyData.setAssignedNode(startData.getAssignedNode()); + historyData.setPriority(startData.getPriority()); + historyData.setStartTime(startData.getStartTime()); + } + + private static void mergeContainerHistoryData( + ContainerHistoryData historyData, ContainerFinishData finishData) { + historyData.setFinishTime(finishData.getFinishTime()); + 
historyData.setDiagnosticsInfo(finishData.getDiagnosticsInfo()); + historyData.setLogURL(finishData.getLogURL()); + historyData.setContainerExitStatus(finishData.getContainerExitStatus()); + historyData.setContainerState(finishData.getContainerState()); + } + + private HistoryFileWriter getHistoryFileWriter(ApplicationId appId) + throws IOException { + HistoryFileWriter hfWriter = outstandingWriters.get(appId); + if (hfWriter == null) { + throw new IOException("History file of application " + appId + + " is not opened"); + } + return hfWriter; + } + + private HistoryFileReader getHistoryFileReader(ApplicationId appId) + throws IOException { + Path applicationHistoryFile = new Path(rootDirPath, appId.toString()); + if (!fs.exists(applicationHistoryFile)) { + throw new IOException("History file for application " + appId + + " is not found"); + } + // The history file is still under writing + if (outstandingWriters.containsKey(appId)) { + throw new IOException("History file for application " + appId + + " is under writing"); + } + return new HistoryFileReader(applicationHistoryFile); + } + + private class HistoryFileReader { + + private class Entry { + + private HistoryDataKey key; + private byte[] value; + + public Entry(HistoryDataKey key, byte[] value) { + this.key = key; + this.value = value; + } + } + + private TFile.Reader reader; + private TFile.Reader.Scanner scanner; + + public HistoryFileReader(Path historyFile) throws IOException { + FSDataInputStream fsdis = fs.open(historyFile); + reader = + new TFile.Reader(fsdis, fs.getFileStatus(historyFile).getLen(), + getConfig()); + reset(); + } + + public boolean hasNext() { + return !scanner.atEnd(); + } + + public Entry next() throws IOException { + TFile.Reader.Scanner.Entry entry = scanner.entry(); + DataInputStream dis = entry.getKeyStream(); + HistoryDataKey key = new HistoryDataKey(); + key.readFields(dis); + dis = entry.getValueStream(); + byte[] value = new byte[entry.getValueLength()]; + dis.read(value); + scanner.advance(); + return new Entry(key, value); + } + + public void reset() throws IOException { + IOUtils.cleanup(LOG, scanner); + scanner = reader.createScanner(); + } + + public void close() { + IOUtils.cleanup(LOG, scanner, reader); + } + + } + + private class HistoryFileWriter { + + private FSDataOutputStream fsdos; + private TFile.Writer writer; + + public HistoryFileWriter(Path historyFile) throws IOException { + if (fs.exists(historyFile)) { + fsdos = fs.append(historyFile); + } else { + fsdos = fs.create(historyFile); + } + fs.setPermission(historyFile, HISTORY_FILE_UMASK); + writer = + new TFile.Writer(fsdos, MIN_BLOCK_SIZE, getConfig().get( + YarnConfiguration.FS_HISTORY_STORE_COMPRESSION_TYPE, + YarnConfiguration.DEFAULT_FS_HISTORY_STORE_COMPRESSION_TYPE), null, + getConfig()); + } + + public synchronized void close() { + IOUtils.cleanup(LOG, writer, fsdos); + } + + public synchronized void writeHistoryData(HistoryDataKey key, byte[] value) + throws IOException { + DataOutputStream dos = null; + try { + dos = writer.prepareAppendKey(-1); + key.write(dos); + } finally { + IOUtils.cleanup(LOG, dos); + } + try { + dos = writer.prepareAppendValue(value.length); + dos.write(value); + } finally { + IOUtils.cleanup(LOG, dos); + } + } + + } + + private static class HistoryDataKey implements Writable { + + private String id; + + private String suffix; + + public HistoryDataKey() { + this(null, null); + } + + public HistoryDataKey(String id, String suffix) { + this.id = id; + this.suffix = suffix; + } + + @Override + 
public void write(DataOutput out) throws IOException {
+      out.writeUTF(id);
+      out.writeUTF(suffix);
+    }
+
+    @Override
+    public void readFields(DataInput in) throws IOException {
+      id = in.readUTF();
+      suffix = in.readUTF();
+    }
+
+  }
+
+  private static class StartFinishDataPair<S, F> {
+
+    private S startData;
+    private F finishData;
+
+  }
+
+}
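The TFile-backed store above keeps one open writer per running application: applicationStarted() creates it, later attempt and container events look it up through getHistoryFileWriter() and append entries keyed by the entity id plus START_DATA_SUFFIX or FINISH_DATA_SUFFIX, and applicationFinished() closes it and removes it from outstandingWriters. A minimal sketch of that write-path contract follows; it is not part of this commit, it assumes the class sits in the same package as the store so the type is visible, and it assumes the default configuration yields a usable root history directory. The job name, queue, and user are made-up sample values.

    import org.apache.hadoop.yarn.api.records.ApplicationId;
    import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
    import org.apache.hadoop.yarn.api.records.YarnApplicationState;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;
    import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationFinishData;
    import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationStartData;

    public class FsHistoryStoreSketch {
      public static void main(String[] args) throws Exception {
        FileSystemApplicationHistoryStore store =
            new FileSystemApplicationHistoryStore();
        store.init(new YarnConfiguration()); // reads fs + compression settings
        store.start();

        ApplicationId appId =
            ApplicationId.newInstance(System.currentTimeMillis(), 1);
        // Opens the per-application history file; all attempt/container writes
        // for appId must happen after this, or getHistoryFileWriter() throws.
        store.applicationStarted(ApplicationStartData.newInstance(
            appId, "sample-job", "MAPREDUCE", "default", "alice",
            System.currentTimeMillis(), System.currentTimeMillis()));
        // ... applicationAttemptStarted()/containerStarted() events go here ...
        // Writes the finish record, closes the TFile writer, and removes it
        // from outstandingWriters, making the file readable afterwards.
        store.applicationFinished(ApplicationFinishData.newInstance(
            appId, System.currentTimeMillis(), "",
            FinalApplicationStatus.SUCCEEDED, YarnApplicationState.FINISHED));
        store.stop();
      }
    }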
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/MemoryApplicationHistoryStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/MemoryApplicationHistoryStore.java
new file mode 100644
index 00000000000..916335e8e20
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/MemoryApplicationHistoryStore.java
@@ -0,0 +1,275 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptFinishData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptHistoryData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptStartData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationFinishData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationHistoryData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationStartData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerFinishData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerHistoryData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerStartData;
+
+/**
+ * In-memory implementation of {@link ApplicationHistoryStore}. This
+ * implementation is for test purposes only. If it is instantiated more than
+ * once, readers and writers may end up operating on different in-memory
+ * stores.
+ */
+@Private
+@Unstable
+public class MemoryApplicationHistoryStore extends AbstractService implements
+    ApplicationHistoryStore {
+
+  private final ConcurrentMap<ApplicationId, ApplicationHistoryData> applicationData =
+      new ConcurrentHashMap<ApplicationId, ApplicationHistoryData>();
+  private final ConcurrentMap<ApplicationId, ConcurrentMap<ApplicationAttemptId, ApplicationAttemptHistoryData>> applicationAttemptData =
+      new ConcurrentHashMap<ApplicationId, ConcurrentMap<ApplicationAttemptId, ApplicationAttemptHistoryData>>();
+  private final ConcurrentMap<ApplicationAttemptId, ConcurrentMap<ContainerId, ContainerHistoryData>> containerData =
+      new ConcurrentHashMap<ApplicationAttemptId, ConcurrentMap<ContainerId, ContainerHistoryData>>();
+
+  public MemoryApplicationHistoryStore() {
+    super(MemoryApplicationHistoryStore.class.getName());
+  }
+
+  @Override
+  public Map<ApplicationId, ApplicationHistoryData> getAllApplications() {
+    return new HashMap<ApplicationId, ApplicationHistoryData>(applicationData);
+  }
+
+  @Override
+  public ApplicationHistoryData getApplication(ApplicationId appId) {
+    return applicationData.get(appId);
+  }
+
+  @Override
+  public Map<ApplicationAttemptId, ApplicationAttemptHistoryData>
+      getApplicationAttempts(ApplicationId appId) {
+    ConcurrentMap<ApplicationAttemptId, ApplicationAttemptHistoryData> subMap =
+        applicationAttemptData.get(appId);
+    if (subMap == null) {
+      return Collections
+        .<ApplicationAttemptId, ApplicationAttemptHistoryData> emptyMap();
+    } else {
+      return new HashMap<ApplicationAttemptId, ApplicationAttemptHistoryData>(
+        subMap);
+    }
+  }
+
+  @Override
+  public ApplicationAttemptHistoryData getApplicationAttempt(
+      ApplicationAttemptId appAttemptId) {
+    ConcurrentMap<ApplicationAttemptId, ApplicationAttemptHistoryData> subMap =
+        applicationAttemptData.get(appAttemptId.getApplicationId());
+    if (subMap == null) {
+      return null;
+    } else {
+      return subMap.get(appAttemptId);
+    }
+  }
+
+  @Override
+  public ContainerHistoryData getAMContainer(ApplicationAttemptId appAttemptId) {
+    ApplicationAttemptHistoryData appAttempt =
+        getApplicationAttempt(appAttemptId);
+    if (appAttempt == null || appAttempt.getMasterContainerId() == null) {
+      return null;
+    } else {
+      return getContainer(appAttempt.getMasterContainerId());
+    }
+  }
+
+  @Override
+  public ContainerHistoryData getContainer(ContainerId containerId) {
+    Map<ContainerId, ContainerHistoryData> subMap =
+        containerData.get(containerId.getApplicationAttemptId());
+    if (subMap == null) {
+      return null;
+    } else {
+      return subMap.get(containerId);
+    }
+  }
+
+  @Override
+  public Map<ContainerId, ContainerHistoryData> getContainers(
+      ApplicationAttemptId appAttemptId) throws IOException {
+    ConcurrentMap<ContainerId, ContainerHistoryData> subMap =
+        containerData.get(appAttemptId);
+    if (subMap == null) {
+      return Collections.<ContainerId, ContainerHistoryData> emptyMap();
+    } else {
+      return new HashMap<ContainerId, ContainerHistoryData>(subMap);
+    }
+  }
+
+  @Override
+  public void applicationStarted(ApplicationStartData appStart)
+      throws IOException {
+    ApplicationHistoryData oldData =
+        applicationData.putIfAbsent(appStart.getApplicationId(),
+          ApplicationHistoryData.newInstance(appStart.getApplicationId(),
+            appStart.getApplicationName(), appStart.getApplicationType(),
+            appStart.getQueue(), appStart.getUser(), appStart.getSubmitTime(),
+            appStart.getStartTime(), Long.MAX_VALUE, null, null, null));
+    if (oldData != null) {
+      throw new IOException("The start information of application "
+          + appStart.getApplicationId() + " is already stored.");
+    }
+  }
+
+  @Override
+  public void applicationFinished(ApplicationFinishData appFinish)
+      throws IOException {
+    ApplicationHistoryData data =
+        applicationData.get(appFinish.getApplicationId());
+    if (data == null) {
+      throw new IOException("The finish information of application "
+          + appFinish.getApplicationId() + " is stored before the start"
+          + " information.");
+    }
+    // Make the assumption that YarnApplicationState should not be null if
+    // the finish information is already recorded
+    if (data.getYarnApplicationState() != null) {
+      throw new IOException("The finish information of application "
+          + appFinish.getApplicationId() + " is already stored.");
+    }
+    data.setFinishTime(appFinish.getFinishTime());
+    data.setDiagnosticsInfo(appFinish.getDiagnosticsInfo());
+    data.setFinalApplicationStatus(appFinish.getFinalApplicationStatus());
+    data.setYarnApplicationState(appFinish.getYarnApplicationState());
+  }
+
+  @Override
+  public void applicationAttemptStarted(
+      ApplicationAttemptStartData appAttemptStart) throws IOException {
+    ConcurrentMap<ApplicationAttemptId, ApplicationAttemptHistoryData> subMap =
+        getSubMap(appAttemptStart.getApplicationAttemptId().getApplicationId());
+    ApplicationAttemptHistoryData oldData =
+        subMap.putIfAbsent(appAttemptStart.getApplicationAttemptId(),
+          ApplicationAttemptHistoryData.newInstance(
+            appAttemptStart.getApplicationAttemptId(),
+            appAttemptStart.getHost(), appAttemptStart.getRPCPort(),
+            appAttemptStart.getMasterContainerId(), null, null, null, null));
+    if (oldData != null) {
+      throw new IOException("The start information of application attempt "
+          + appAttemptStart.getApplicationAttemptId() + " is already stored.");
+    }
+  }
+
+  @Override
+  public void applicationAttemptFinished(
+      ApplicationAttemptFinishData appAttemptFinish) throws IOException {
+    ConcurrentMap<ApplicationAttemptId, ApplicationAttemptHistoryData> subMap =
+        getSubMap(appAttemptFinish.getApplicationAttemptId().getApplicationId());
+    ApplicationAttemptHistoryData data =
+        subMap.get(appAttemptFinish.getApplicationAttemptId());
+    if (data == null) {
+      throw new IOException("The finish information of application attempt "
+          + appAttemptFinish.getApplicationAttemptId() + " is stored before"
+          + " the start information.");
+    }
+    // Make the assumption that YarnApplicationAttemptState should not be null
+    // if the finish information is already recorded
+    if (data.getYarnApplicationAttemptState() != null) {
+      throw new IOException("The finish information of application attempt "
+          + appAttemptFinish.getApplicationAttemptId() + " is already stored.");
+    }
+    data.setTrackingURL(appAttemptFinish.getTrackingURL());
+    data.setDiagnosticsInfo(appAttemptFinish.getDiagnosticsInfo());
+    data
+      .setFinalApplicationStatus(appAttemptFinish.getFinalApplicationStatus());
+    data.setYarnApplicationAttemptState(appAttemptFinish
+      .getYarnApplicationAttemptState());
+  }
+
+  private ConcurrentMap<ApplicationAttemptId, ApplicationAttemptHistoryData>
+      getSubMap(ApplicationId appId) {
+    applicationAttemptData
+      .putIfAbsent(
+        appId,
+        new ConcurrentHashMap<ApplicationAttemptId, ApplicationAttemptHistoryData>());
+    return applicationAttemptData.get(appId);
+  }
+
+  @Override
+  public void containerStarted(ContainerStartData containerStart)
+      throws IOException {
+    ConcurrentMap<ContainerId, ContainerHistoryData> subMap =
+        getSubMap(containerStart.getContainerId().getApplicationAttemptId());
+    ContainerHistoryData oldData =
+        subMap.putIfAbsent(containerStart.getContainerId(),
+          ContainerHistoryData.newInstance(containerStart.getContainerId(),
+            containerStart.getAllocatedResource(),
+            containerStart.getAssignedNode(), containerStart.getPriority(),
+            containerStart.getStartTime(), Long.MAX_VALUE, null, null,
+            Integer.MAX_VALUE, null));
+    if (oldData != null) {
+      throw new IOException("The start information of container "
+          + containerStart.getContainerId() + " is already stored.");
+    }
+  }
+
+  @Override
+  public void containerFinished(ContainerFinishData containerFinish)
+      throws IOException {
+    ConcurrentMap<ContainerId, ContainerHistoryData> subMap =
+        getSubMap(containerFinish.getContainerId().getApplicationAttemptId());
+    ContainerHistoryData data = subMap.get(containerFinish.getContainerId());
+    if (data == null) {
+      throw new IOException("The finish information of container "
+          + containerFinish.getContainerId() + " is stored before"
+          + " the start information.");
+    }
+    // Make the assumption that ContainerState should not be null if
+    // the finish information is already recorded
+    if (data.getContainerState() != null) {
+      throw new IOException("The finish information of container "
+          + containerFinish.getContainerId() + " is already stored.");
+    }
+    data.setFinishTime(containerFinish.getFinishTime());
+    data.setDiagnosticsInfo(containerFinish.getDiagnosticsInfo());
+    data.setLogURL(containerFinish.getLogURL());
+    data.setContainerExitStatus(containerFinish.getContainerExitStatus());
+    data.setContainerState(containerFinish.getContainerState());
+  }
+
+  private ConcurrentMap<ContainerId, ContainerHistoryData> getSubMap(
+      ApplicationAttemptId appAttemptId) {
+    containerData.putIfAbsent(appAttemptId,
+      new ConcurrentHashMap<ContainerId, ContainerHistoryData>());
+    return containerData.get(appAttemptId);
+  }
+
+}
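Because the in-memory store is nothing but the three concurrent maps declared above, a start/finish round trip is a pair of calls. A short sketch of how a test might drive it, not part of this commit; it reuses the imports from the previous sketch, and the ids and field values are arbitrary:

    MemoryApplicationHistoryStore store = new MemoryApplicationHistoryStore();
    store.init(new YarnConfiguration());
    store.start();

    ApplicationId appId = ApplicationId.newInstance(0L, 1);
    store.applicationStarted(ApplicationStartData.newInstance(
        appId, "test-app", "YARN", "default", "bob", 0L, 1L));
    // A second applicationStarted() for the same id would fail: putIfAbsent()
    // returns the existing record and the store throws IOException.
    store.applicationFinished(ApplicationFinishData.newInstance(
        appId, 2L, "", FinalApplicationStatus.SUCCEEDED,
        YarnApplicationState.FINISHED));

    // The stored record now carries both the start and the finish fields.
    ApplicationHistoryData data = store.getApplication(appId);
    assert data.getSubmitTime() == 0L && data.getFinishTime() == 2L;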
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/NullApplicationHistoryStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/NullApplicationHistoryStore.java
new file mode 100644
index 00000000000..3660c10befd
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/NullApplicationHistoryStore.java
@@ -0,0 +1,127 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.Map;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptFinishData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptHistoryData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptStartData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationFinishData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationHistoryData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationStartData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerFinishData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerHistoryData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerStartData;
+
+/**
+ * Dummy implementation of {@link ApplicationHistoryStore}. If this
+ * implementation is used, no history data will be persisted.
+ */
+@Unstable
+@Private
+public class NullApplicationHistoryStore extends AbstractService implements
+    ApplicationHistoryStore {
+
+  public NullApplicationHistoryStore() {
+    super(NullApplicationHistoryStore.class.getName());
+  }
+
+  @Override
+  public void applicationStarted(ApplicationStartData appStart)
+      throws IOException {
+  }
+
+  @Override
+  public void applicationFinished(ApplicationFinishData appFinish)
+      throws IOException {
+  }
+
+  @Override
+  public void applicationAttemptStarted(
+      ApplicationAttemptStartData appAttemptStart) throws IOException {
+  }
+
+  @Override
+  public void applicationAttemptFinished(
+      ApplicationAttemptFinishData appAttemptFinish) throws IOException {
+  }
+
+  @Override
+  public void containerStarted(ContainerStartData containerStart)
+      throws IOException {
+  }
+
+  @Override
+  public void containerFinished(ContainerFinishData containerFinish)
+      throws IOException {
+  }
+
+  @Override
+  public ApplicationHistoryData getApplication(ApplicationId appId)
+      throws IOException {
+    return null;
+  }
+
+  @Override
+  public Map<ApplicationId, ApplicationHistoryData> getAllApplications()
+      throws IOException {
+    return Collections.emptyMap();
+  }
+
+  @Override
+  public Map<ApplicationAttemptId, ApplicationAttemptHistoryData>
+      getApplicationAttempts(ApplicationId appId) throws IOException {
+    return Collections.emptyMap();
+  }
+
+  @Override
+  public ApplicationAttemptHistoryData getApplicationAttempt(
+      ApplicationAttemptId appAttemptId) throws IOException {
+    return null;
+  }
+
+  @Override
+  public ContainerHistoryData getContainer(ContainerId containerId)
+      throws IOException {
+    return null;
+  }
+
+  @Override
+  public ContainerHistoryData getAMContainer(ApplicationAttemptId appAttemptId)
+      throws IOException {
+    return null;
+  }
+
+  @Override
+  public Map<ContainerId, ContainerHistoryData> getContainers(
+      ApplicationAttemptId appAttemptId) throws IOException {
+    return Collections.emptyMap();
+  }
+
+}
\ No newline at end of file
diff --git
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationAttemptFinishData.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationAttemptFinishData.java new file mode 100644 index 00000000000..7ba51af800a --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationAttemptFinishData.java @@ -0,0 +1,95 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.applicationhistoryservice.records; + +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; +import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState; +import org.apache.hadoop.yarn.util.Records; + +/** + * The class contains the fields that can be determined when + * RMAppAttempt finishes, and that need to be stored persistently. 
+ */ +@Public +@Unstable +public abstract class ApplicationAttemptFinishData { + + @Public + @Unstable + public static ApplicationAttemptFinishData newInstance( + ApplicationAttemptId appAttemptId, String diagnosticsInfo, + String trackingURL, FinalApplicationStatus finalApplicationStatus, + YarnApplicationAttemptState yarnApplicationAttemptState) { + ApplicationAttemptFinishData appAttemptFD = + Records.newRecord(ApplicationAttemptFinishData.class); + appAttemptFD.setApplicationAttemptId(appAttemptId); + appAttemptFD.setDiagnosticsInfo(diagnosticsInfo); + appAttemptFD.setTrackingURL(trackingURL); + appAttemptFD.setFinalApplicationStatus(finalApplicationStatus); + appAttemptFD.setYarnApplicationAttemptState(yarnApplicationAttemptState); + return appAttemptFD; + } + + @Public + @Unstable + public abstract ApplicationAttemptId getApplicationAttemptId(); + + @Public + @Unstable + public abstract void setApplicationAttemptId( + ApplicationAttemptId applicationAttemptId); + + @Public + @Unstable + public abstract String getTrackingURL(); + + @Public + @Unstable + public abstract void setTrackingURL(String trackingURL); + + @Public + @Unstable + public abstract String getDiagnosticsInfo(); + + @Public + @Unstable + public abstract void setDiagnosticsInfo(String diagnosticsInfo); + + @Public + @Unstable + public abstract FinalApplicationStatus getFinalApplicationStatus(); + + @Public + @Unstable + public abstract void setFinalApplicationStatus( + FinalApplicationStatus finalApplicationStatus); + + @Public + @Unstable + public abstract YarnApplicationAttemptState getYarnApplicationAttemptState(); + + @Public + @Unstable + public abstract void setYarnApplicationAttemptState( + YarnApplicationAttemptState yarnApplicationAttemptState); + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationAttemptHistoryData.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationAttemptHistoryData.java new file mode 100644 index 00000000000..b759ab18035 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationAttemptHistoryData.java @@ -0,0 +1,171 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.applicationhistoryservice.records; + +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; +import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState; + +/** + * The class contains all the fields that are stored persistently for + * RMAppAttempt. + */ +@Public +@Unstable +public class ApplicationAttemptHistoryData { + + private ApplicationAttemptId applicationAttemptId; + + private String host; + + private int rpcPort; + + private String trackingURL; + + private String diagnosticsInfo; + + private FinalApplicationStatus finalApplicationStatus; + + private ContainerId masterContainerId; + + private YarnApplicationAttemptState yarnApplicationAttemptState; + + @Public + @Unstable + public static ApplicationAttemptHistoryData newInstance( + ApplicationAttemptId appAttemptId, String host, int rpcPort, + ContainerId masterContainerId, String diagnosticsInfo, + String trackingURL, FinalApplicationStatus finalApplicationStatus, + YarnApplicationAttemptState yarnApplicationAttemptState) { + ApplicationAttemptHistoryData appAttemptHD = + new ApplicationAttemptHistoryData(); + appAttemptHD.setApplicationAttemptId(appAttemptId); + appAttemptHD.setHost(host); + appAttemptHD.setRPCPort(rpcPort); + appAttemptHD.setMasterContainerId(masterContainerId); + appAttemptHD.setDiagnosticsInfo(diagnosticsInfo); + appAttemptHD.setTrackingURL(trackingURL); + appAttemptHD.setFinalApplicationStatus(finalApplicationStatus); + appAttemptHD.setYarnApplicationAttemptState(yarnApplicationAttemptState); + return appAttemptHD; + } + + @Public + @Unstable + public ApplicationAttemptId getApplicationAttemptId() { + return applicationAttemptId; + } + + @Public + @Unstable + public void + setApplicationAttemptId(ApplicationAttemptId applicationAttemptId) { + this.applicationAttemptId = applicationAttemptId; + } + + @Public + @Unstable + public String getHost() { + return host; + } + + @Public + @Unstable + public void setHost(String host) { + this.host = host; + } + + @Public + @Unstable + public int getRPCPort() { + return rpcPort; + } + + @Public + @Unstable + public void setRPCPort(int rpcPort) { + this.rpcPort = rpcPort; + } + + @Public + @Unstable + public String getTrackingURL() { + return trackingURL; + } + + @Public + @Unstable + public void setTrackingURL(String trackingURL) { + this.trackingURL = trackingURL; + } + + @Public + @Unstable + public String getDiagnosticsInfo() { + return diagnosticsInfo; + } + + @Public + @Unstable + public void setDiagnosticsInfo(String diagnosticsInfo) { + this.diagnosticsInfo = diagnosticsInfo; + } + + @Public + @Unstable + public FinalApplicationStatus getFinalApplicationStatus() { + return finalApplicationStatus; + } + + @Public + @Unstable + public void setFinalApplicationStatus( + FinalApplicationStatus finalApplicationStatus) { + this.finalApplicationStatus = finalApplicationStatus; + } + + @Public + @Unstable + public ContainerId getMasterContainerId() { + return masterContainerId; + } + + @Public + @Unstable + public void setMasterContainerId(ContainerId masterContainerId) { + this.masterContainerId = masterContainerId; + } + + @Public + @Unstable + public YarnApplicationAttemptState getYarnApplicationAttemptState() { + return yarnApplicationAttemptState; 
+ } + + @Public + @Unstable + public void setYarnApplicationAttemptState( + YarnApplicationAttemptState yarnApplicationAttemptState) { + this.yarnApplicationAttemptState = yarnApplicationAttemptState; + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationAttemptStartData.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationAttemptStartData.java new file mode 100644 index 00000000000..7ca43fa8ea6 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationAttemptStartData.java @@ -0,0 +1,82 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.applicationhistoryservice.records; + +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.util.Records; + +/** + * The class contains the fields that can be determined when + * RMAppAttempt starts, and that need to be stored persistently. 
+ */ +@Public +@Unstable +public abstract class ApplicationAttemptStartData { + + @Public + @Unstable + public static ApplicationAttemptStartData newInstance( + ApplicationAttemptId appAttemptId, String host, int rpcPort, + ContainerId masterContainerId) { + ApplicationAttemptStartData appAttemptSD = + Records.newRecord(ApplicationAttemptStartData.class); + appAttemptSD.setApplicationAttemptId(appAttemptId); + appAttemptSD.setHost(host); + appAttemptSD.setRPCPort(rpcPort); + appAttemptSD.setMasterContainerId(masterContainerId); + return appAttemptSD; + } + + @Public + @Unstable + public abstract ApplicationAttemptId getApplicationAttemptId(); + + @Public + @Unstable + public abstract void setApplicationAttemptId( + ApplicationAttemptId applicationAttemptId); + + @Public + @Unstable + public abstract String getHost(); + + @Public + @Unstable + public abstract void setHost(String host); + + @Public + @Unstable + public abstract int getRPCPort(); + + @Public + @Unstable + public abstract void setRPCPort(int rpcPort); + + @Public + @Unstable + public abstract ContainerId getMasterContainerId(); + + @Public + @Unstable + public abstract void setMasterContainerId(ContainerId masterContainerId); + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationFinishData.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationFinishData.java new file mode 100644 index 00000000000..997fa6cbfd8 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationFinishData.java @@ -0,0 +1,94 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.applicationhistoryservice.records; + +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; +import org.apache.hadoop.yarn.api.records.YarnApplicationState; +import org.apache.hadoop.yarn.util.Records; + +/** + * The class contains the fields that can be determined when RMApp + * finishes, and that need to be stored persistently. 
+ */ +@Public +@Unstable +public abstract class ApplicationFinishData { + + @Public + @Unstable + public static ApplicationFinishData newInstance(ApplicationId applicationId, + long finishTime, String diagnosticsInfo, + FinalApplicationStatus finalApplicationStatus, + YarnApplicationState yarnApplicationState) { + ApplicationFinishData appFD = + Records.newRecord(ApplicationFinishData.class); + appFD.setApplicationId(applicationId); + appFD.setFinishTime(finishTime); + appFD.setDiagnosticsInfo(diagnosticsInfo); + appFD.setFinalApplicationStatus(finalApplicationStatus); + appFD.setYarnApplicationState(yarnApplicationState); + return appFD; + } + + @Public + @Unstable + public abstract ApplicationId getApplicationId(); + + @Public + @Unstable + public abstract void setApplicationId(ApplicationId applicationId); + + @Public + @Unstable + public abstract long getFinishTime(); + + @Public + @Unstable + public abstract void setFinishTime(long finishTime); + + @Public + @Unstable + public abstract String getDiagnosticsInfo(); + + @Public + @Unstable + public abstract void setDiagnosticsInfo(String diagnosticsInfo); + + @Public + @Unstable + public abstract FinalApplicationStatus getFinalApplicationStatus(); + + @Public + @Unstable + public abstract void setFinalApplicationStatus( + FinalApplicationStatus finalApplicationStatus); + + @Public + @Unstable + public abstract YarnApplicationState getYarnApplicationState(); + + @Public + @Unstable + public abstract void setYarnApplicationState( + YarnApplicationState yarnApplicationState); + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationHistoryData.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationHistoryData.java new file mode 100644 index 00000000000..b7d16f3c412 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationHistoryData.java @@ -0,0 +1,213 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.applicationhistoryservice.records; + +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; +import org.apache.hadoop.yarn.api.records.YarnApplicationState; + +/** + * The class contains all the fields that are stored persistently for + * RMApp. + */ +@Public +@Unstable +public class ApplicationHistoryData { + + private ApplicationId applicationId; + + private String applicationName; + + private String applicationType; + + private String user; + + private String queue; + + private long submitTime; + + private long startTime; + + private long finishTime; + + private String diagnosticsInfo; + + private FinalApplicationStatus finalApplicationStatus; + + private YarnApplicationState yarnApplicationState; + + @Public + @Unstable + public static ApplicationHistoryData newInstance(ApplicationId applicationId, + String applicationName, String applicationType, String queue, + String user, long submitTime, long startTime, long finishTime, + String diagnosticsInfo, FinalApplicationStatus finalApplicationStatus, + YarnApplicationState yarnApplicationState) { + ApplicationHistoryData appHD = new ApplicationHistoryData(); + appHD.setApplicationId(applicationId); + appHD.setApplicationName(applicationName); + appHD.setApplicationType(applicationType); + appHD.setQueue(queue); + appHD.setUser(user); + appHD.setSubmitTime(submitTime); + appHD.setStartTime(startTime); + appHD.setFinishTime(finishTime); + appHD.setDiagnosticsInfo(diagnosticsInfo); + appHD.setFinalApplicationStatus(finalApplicationStatus); + appHD.setYarnApplicationState(yarnApplicationState); + return appHD; + } + + @Public + @Unstable + public ApplicationId getApplicationId() { + return applicationId; + } + + @Public + @Unstable + public void setApplicationId(ApplicationId applicationId) { + this.applicationId = applicationId; + } + + @Public + @Unstable + public String getApplicationName() { + return applicationName; + } + + @Public + @Unstable + public void setApplicationName(String applicationName) { + this.applicationName = applicationName; + } + + @Public + @Unstable + public String getApplicationType() { + return applicationType; + } + + @Public + @Unstable + public void setApplicationType(String applicationType) { + this.applicationType = applicationType; + } + + @Public + @Unstable + public String getUser() { + return user; + } + + @Public + @Unstable + public void setUser(String user) { + this.user = user; + } + + @Public + @Unstable + public String getQueue() { + return queue; + } + + @Public + @Unstable + public void setQueue(String queue) { + this.queue = queue; + } + + @Public + @Unstable + public long getSubmitTime() { + return submitTime; + } + + @Public + @Unstable + public void setSubmitTime(long submitTime) { + this.submitTime = submitTime; + } + + @Public + @Unstable + public long getStartTime() { + return startTime; + } + + @Public + @Unstable + public void setStartTime(long startTime) { + this.startTime = startTime; + } + + @Public + @Unstable + public long getFinishTime() { + return finishTime; + } + + @Public + @Unstable + public void setFinishTime(long finishTime) { + this.finishTime = finishTime; + } + + @Public + @Unstable + public String getDiagnosticsInfo() { + return diagnosticsInfo; + } + + @Public + @Unstable + public void setDiagnosticsInfo(String 
diagnosticsInfo) { + this.diagnosticsInfo = diagnosticsInfo; + } + + @Public + @Unstable + public FinalApplicationStatus getFinalApplicationStatus() { + return finalApplicationStatus; + } + + @Public + @Unstable + public void setFinalApplicationStatus( + FinalApplicationStatus finalApplicationStatus) { + this.finalApplicationStatus = finalApplicationStatus; + } + + @Public + @Unstable + public YarnApplicationState getYarnApplicationState() { + return this.yarnApplicationState; + } + + @Public + @Unstable + public void + setYarnApplicationState(YarnApplicationState yarnApplicationState) { + this.yarnApplicationState = yarnApplicationState; + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationStartData.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationStartData.java new file mode 100644 index 00000000000..6bc13239203 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationStartData.java @@ -0,0 +1,106 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.applicationhistoryservice.records; + +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.util.Records; + +/** + * The class contains the fields that can be determined when RMApp + * starts, and that need to be stored persistently. 
+ */ +@Public +@Unstable +public abstract class ApplicationStartData { + + @Public + @Unstable + public static ApplicationStartData newInstance(ApplicationId applicationId, + String applicationName, String applicationType, String queue, + String user, long submitTime, long startTime) { + ApplicationStartData appSD = Records.newRecord(ApplicationStartData.class); + appSD.setApplicationId(applicationId); + appSD.setApplicationName(applicationName); + appSD.setApplicationType(applicationType); + appSD.setQueue(queue); + appSD.setUser(user); + appSD.setSubmitTime(submitTime); + appSD.setStartTime(startTime); + return appSD; + } + + @Public + @Unstable + public abstract ApplicationId getApplicationId(); + + @Public + @Unstable + public abstract void setApplicationId(ApplicationId applicationId); + + @Public + @Unstable + public abstract String getApplicationName(); + + @Public + @Unstable + public abstract void setApplicationName(String applicationName); + + @Public + @Unstable + public abstract String getApplicationType(); + + @Public + @Unstable + public abstract void setApplicationType(String applicationType); + + @Public + @Unstable + public abstract String getUser(); + + @Public + @Unstable + public abstract void setUser(String user); + + @Public + @Unstable + public abstract String getQueue(); + + @Public + @Unstable + public abstract void setQueue(String queue); + + @Public + @Unstable + public abstract long getSubmitTime(); + + @Public + @Unstable + public abstract void setSubmitTime(long submitTime); + + @Public + @Unstable + public abstract long getStartTime(); + + @Public + @Unstable + public abstract void setStartTime(long startTime); + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ContainerFinishData.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ContainerFinishData.java new file mode 100644 index 00000000000..1c449e3f6b0 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ContainerFinishData.java @@ -0,0 +1,99 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.applicationhistoryservice.records; + +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.ContainerState; +import org.apache.hadoop.yarn.util.Records; + +/** + * The class contains the fields that can be determined when + * RMContainer finishes, and that need to be stored persistently. + */ +@Public +@Unstable +public abstract class ContainerFinishData { + + @Public + @Unstable + public static ContainerFinishData newInstance(ContainerId containerId, + long finishTime, String diagnosticsInfo, String logURL, + int containerExitCode, ContainerState containerState) { + ContainerFinishData containerFD = + Records.newRecord(ContainerFinishData.class); + containerFD.setContainerId(containerId); + containerFD.setFinishTime(finishTime); + containerFD.setDiagnosticsInfo(diagnosticsInfo); + containerFD.setLogURL(logURL); + containerFD.setContainerExitStatus(containerExitCode); + containerFD.setContainerState(containerState); + return containerFD; + } + + @Public + @Unstable + public abstract ContainerId getContainerId(); + + @Public + @Unstable + public abstract void setContainerId(ContainerId containerId); + + @Public + @Unstable + public abstract long getFinishTime(); + + @Public + @Unstable + public abstract void setFinishTime(long finishTime); + + @Public + @Unstable + public abstract String getDiagnosticsInfo(); + + @Public + @Unstable + public abstract void setDiagnosticsInfo(String diagnosticsInfo); + + @Public + @Unstable + public abstract String getLogURL(); + + @Public + @Unstable + public abstract void setLogURL(String logURL); + + @Public + @Unstable + public abstract int getContainerExitStatus(); + + @Public + @Unstable + public abstract void setContainerExitStatus(int containerExitStatus); + + @Public + @Unstable + public abstract ContainerState getContainerState(); + + @Public + @Unstable + public abstract void setContainerState(ContainerState containerState); + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ContainerHistoryData.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ContainerHistoryData.java new file mode 100644 index 00000000000..f7660b21930 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ContainerHistoryData.java @@ -0,0 +1,197 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.applicationhistoryservice.records; + +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.ContainerState; +import org.apache.hadoop.yarn.api.records.NodeId; +import org.apache.hadoop.yarn.api.records.Priority; +import org.apache.hadoop.yarn.api.records.Resource; + +/** + * The class contains all the fields that are stored persistently for + * RMContainer. + */ +@Public +@Unstable +public class ContainerHistoryData { + + private ContainerId containerId; + + private Resource allocatedResource; + + private NodeId assignedNode; + + private Priority priority; + + private long startTime; + + private long finishTime; + + private String diagnosticsInfo; + + private String logURL; + + private int containerExitStatus; + + private ContainerState containerState; + + @Public + @Unstable + public static ContainerHistoryData newInstance(ContainerId containerId, + Resource allocatedResource, NodeId assignedNode, Priority priority, + long startTime, long finishTime, String diagnosticsInfo, String logURL, + int containerExitCode, ContainerState containerState) { + ContainerHistoryData containerHD = new ContainerHistoryData(); + containerHD.setContainerId(containerId); + containerHD.setAllocatedResource(allocatedResource); + containerHD.setAssignedNode(assignedNode); + containerHD.setPriority(priority); + containerHD.setStartTime(startTime); + containerHD.setFinishTime(finishTime); + containerHD.setDiagnosticsInfo(diagnosticsInfo); + containerHD.setLogURL(logURL); + containerHD.setContainerExitStatus(containerExitCode); + containerHD.setContainerState(containerState); + return containerHD; + } + + @Public + @Unstable + public ContainerId getContainerId() { + return containerId; + } + + @Public + @Unstable + public void setContainerId(ContainerId containerId) { + this.containerId = containerId; + } + + @Public + @Unstable + public Resource getAllocatedResource() { + return allocatedResource; + } + + @Public + @Unstable + public void setAllocatedResource(Resource resource) { + this.allocatedResource = resource; + } + + @Public + @Unstable + public NodeId getAssignedNode() { + return assignedNode; + } + + @Public + @Unstable + public void setAssignedNode(NodeId nodeId) { + this.assignedNode = nodeId; + } + + @Public + @Unstable + public Priority getPriority() { + return priority; + } + + @Public + @Unstable + public void setPriority(Priority priority) { + this.priority = priority; + } + + @Public + @Unstable + public long getStartTime() { + return startTime; + } + + @Public + @Unstable + public void setStartTime(long startTime) { + this.startTime = startTime; + } + + @Public + @Unstable + public long getFinishTime() { + return finishTime; + } + + @Public + @Unstable + public void setFinishTime(long finishTime) { + this.finishTime = finishTime; + } + + @Public + @Unstable + public String getDiagnosticsInfo() { + return diagnosticsInfo; + } + + @Public + @Unstable + public void setDiagnosticsInfo(String diagnosticsInfo) { + this.diagnosticsInfo = diagnosticsInfo; + } + + @Public + @Unstable + public String getLogURL() { + return logURL; + } + + @Public + @Unstable + public void setLogURL(String logURL) { + this.logURL = logURL; + } + + @Public + @Unstable + public int 
getContainerExitStatus() { + return containerExitStatus; + } + + @Public + @Unstable + public void setContainerExitStatus(int containerExitStatus) { + this.containerExitStatus = containerExitStatus; + } + + @Public + @Unstable + public ContainerState getContainerState() { + return containerState; + } + + @Public + @Unstable + public void setContainerState(ContainerState containerState) { + this.containerState = containerState; + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ContainerStartData.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ContainerStartData.java new file mode 100644 index 00000000000..0c6dd81a63f --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ContainerStartData.java @@ -0,0 +1,92 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.applicationhistoryservice.records; + +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.NodeId; +import org.apache.hadoop.yarn.api.records.Priority; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.util.Records; + +/** + * The class contains the fields that can be determined when + * RMContainer starts, and that need to be stored persistently. 
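For illustration only (all values made up): the aggregated ContainerHistoryData record above carries the union of the start-side and finish-side fields, e.g.:

    ContainerHistoryData history = ContainerHistoryData.newInstance(
        containerId,                                  // as in the sketch above
        Resource.newInstance(1024, 1),                // 1024 MB, 1 vcore
        NodeId.newInstance("nm.example.com", 8041),
        Priority.newInstance(0),
        1390000000000L,                               // start time
        1390000060000L,                               // finish time
        "Container exited normally",
        "http://nm.example.com/logs",                 // hypothetical log URL
        0,                                            // exit status
        ContainerState.COMPLETE);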
+ */ +@Public +@Unstable +public abstract class ContainerStartData { + + @Public + @Unstable + public static ContainerStartData newInstance(ContainerId containerId, + Resource allocatedResource, NodeId assignedNode, Priority priority, + long startTime) { + ContainerStartData containerSD = + Records.newRecord(ContainerStartData.class); + containerSD.setContainerId(containerId); + containerSD.setAllocatedResource(allocatedResource); + containerSD.setAssignedNode(assignedNode); + containerSD.setPriority(priority); + containerSD.setStartTime(startTime); + return containerSD; + } + + @Public + @Unstable + public abstract ContainerId getContainerId(); + + @Public + @Unstable + public abstract void setContainerId(ContainerId containerId); + + @Public + @Unstable + public abstract Resource getAllocatedResource(); + + @Public + @Unstable + public abstract void setAllocatedResource(Resource resource); + + @Public + @Unstable + public abstract NodeId getAssignedNode(); + + @Public + @Unstable + public abstract void setAssignedNode(NodeId nodeId); + + @Public + @Unstable + public abstract Priority getPriority(); + + @Public + @Unstable + public abstract void setPriority(Priority priority); + + @Public + @Unstable + public abstract long getStartTime(); + + @Public + @Unstable + public abstract void setStartTime(long startTime); + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/impl/pb/ApplicationAttemptFinishDataPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/impl/pb/ApplicationAttemptFinishDataPBImpl.java new file mode 100644 index 00000000000..945c12f1ca5 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/impl/pb/ApplicationAttemptFinishDataPBImpl.java @@ -0,0 +1,239 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
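A matching start-side sketch with illustrative values. Note that Records.newRecord in the factory method above resolves the concrete class by the usual YARN naming convention, which the records.impl.pb.ContainerStartDataPBImpl added later in this patch satisfies:

    ContainerStartData startData = ContainerStartData.newInstance(
        containerId,                                  // as in the sketches above
        Resource.newInstance(1024, 1),
        NodeId.newInstance("nm.example.com", 8041),
        Priority.newInstance(0),
        System.currentTimeMillis());                  // start time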
+ */ + +package org.apache.hadoop.yarn.server.applicationhistoryservice.records.impl.pb; + +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; +import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState; +import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationAttemptIdPBImpl; +import org.apache.hadoop.yarn.api.records.impl.pb.ProtoUtils; +import org.apache.hadoop.yarn.proto.ApplicationHistoryServerProtos.ApplicationAttemptFinishDataProto; +import org.apache.hadoop.yarn.proto.ApplicationHistoryServerProtos.ApplicationAttemptFinishDataProtoOrBuilder; +import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationAttemptIdProto; +import org.apache.hadoop.yarn.proto.YarnProtos.FinalApplicationStatusProto; +import org.apache.hadoop.yarn.proto.YarnProtos.YarnApplicationAttemptStateProto; +import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptFinishData; + +import com.google.protobuf.TextFormat; + +public class ApplicationAttemptFinishDataPBImpl extends + ApplicationAttemptFinishData { + + ApplicationAttemptFinishDataProto proto = ApplicationAttemptFinishDataProto + .getDefaultInstance(); + ApplicationAttemptFinishDataProto.Builder builder = null; + boolean viaProto = false; + + public ApplicationAttemptFinishDataPBImpl() { + builder = ApplicationAttemptFinishDataProto.newBuilder(); + } + + public ApplicationAttemptFinishDataPBImpl( + ApplicationAttemptFinishDataProto proto) { + this.proto = proto; + viaProto = true; + } + + private ApplicationAttemptId applicationAttemptId; + + @Override + public ApplicationAttemptId getApplicationAttemptId() { + if (this.applicationAttemptId != null) { + return this.applicationAttemptId; + } + ApplicationAttemptFinishDataProtoOrBuilder p = viaProto ? proto : builder; + if (!p.hasApplicationAttemptId()) { + return null; + } + this.applicationAttemptId = + convertFromProtoFormat(p.getApplicationAttemptId()); + return this.applicationAttemptId; + } + + @Override + public void + setApplicationAttemptId(ApplicationAttemptId applicationAttemptId) { + maybeInitBuilder(); + if (applicationAttemptId == null) { + builder.clearApplicationAttemptId(); + } + this.applicationAttemptId = applicationAttemptId; + } + + @Override + public String getTrackingURL() { + ApplicationAttemptFinishDataProtoOrBuilder p = viaProto ? proto : builder; + if (!p.hasTrackingUrl()) { + return null; + } + return p.getTrackingUrl(); + } + + @Override + public void setTrackingURL(String trackingURL) { + maybeInitBuilder(); + if (trackingURL == null) { + builder.clearTrackingUrl(); + return; + } + builder.setTrackingUrl(trackingURL); + } + + @Override + public String getDiagnosticsInfo() { + ApplicationAttemptFinishDataProtoOrBuilder p = viaProto ? proto : builder; + if (!p.hasDiagnosticsInfo()) { + return null; + } + return p.getDiagnosticsInfo(); + } + + @Override + public void setDiagnosticsInfo(String diagnosticsInfo) { + maybeInitBuilder(); + if (diagnosticsInfo == null) { + builder.clearDiagnosticsInfo(); + return; + } + builder.setDiagnosticsInfo(diagnosticsInfo); + } + + @Override + public FinalApplicationStatus getFinalApplicationStatus() { + ApplicationAttemptFinishDataProtoOrBuilder p = viaProto ? 
proto : builder; + if (!p.hasFinalApplicationStatus()) { + return null; + } + return convertFromProtoFormat(p.getFinalApplicationStatus()); + } + + @Override + public void setFinalApplicationStatus( + FinalApplicationStatus finalApplicationStatus) { + maybeInitBuilder(); + if (finalApplicationStatus == null) { + builder.clearFinalApplicationStatus(); + return; + } + builder + .setFinalApplicationStatus(convertToProtoFormat(finalApplicationStatus)); + } + + @Override + public YarnApplicationAttemptState getYarnApplicationAttemptState() { + ApplicationAttemptFinishDataProtoOrBuilder p = viaProto ? proto : builder; + if (!p.hasYarnApplicationAttemptState()) { + return null; + } + return convertFromProtoFormat(p.getYarnApplicationAttemptState()); + } + + @Override + public void setYarnApplicationAttemptState(YarnApplicationAttemptState state) { + maybeInitBuilder(); + if (state == null) { + builder.clearYarnApplicationAttemptState(); + return; + } + builder.setYarnApplicationAttemptState(convertToProtoFormat(state)); + } + + public ApplicationAttemptFinishDataProto getProto() { + mergeLocalToProto(); + proto = viaProto ? proto : builder.build(); + viaProto = true; + return proto; + } + + @Override + public int hashCode() { + return getProto().hashCode(); + } + + @Override + public boolean equals(Object other) { + if (other == null) + return false; + if (other.getClass().isAssignableFrom(this.getClass())) { + return this.getProto().equals(this.getClass().cast(other).getProto()); + } + return false; + } + + @Override + public String toString() { + return TextFormat.shortDebugString(getProto()); + } + + private void mergeLocalToBuilder() { + if (this.applicationAttemptId != null + && !((ApplicationAttemptIdPBImpl) this.applicationAttemptId).getProto() + .equals(builder.getApplicationAttemptId())) { + builder + .setApplicationAttemptId(convertToProtoFormat(this.applicationAttemptId)); + } + } + + private void mergeLocalToProto() { + if (viaProto) { + maybeInitBuilder(); + } + mergeLocalToBuilder(); + proto = builder.build(); + viaProto = true; + } + + private void maybeInitBuilder() { + if (viaProto || builder == null) { + builder = ApplicationAttemptFinishDataProto.newBuilder(proto); + } + viaProto = false; + } + + private ApplicationAttemptIdPBImpl convertFromProtoFormat( + ApplicationAttemptIdProto applicationAttemptId) { + return new ApplicationAttemptIdPBImpl(applicationAttemptId); + } + + private ApplicationAttemptIdProto convertToProtoFormat( + ApplicationAttemptId applicationAttemptId) { + return ((ApplicationAttemptIdPBImpl) applicationAttemptId).getProto(); + } + + private FinalApplicationStatus convertFromProtoFormat( + FinalApplicationStatusProto finalApplicationStatus) { + return ProtoUtils.convertFromProtoFormat(finalApplicationStatus); + } + + private FinalApplicationStatusProto convertToProtoFormat( + FinalApplicationStatus finalApplicationStatus) { + return ProtoUtils.convertToProtoFormat(finalApplicationStatus); + } + + private YarnApplicationAttemptStateProto convertToProtoFormat( + YarnApplicationAttemptState state) { + return ProtoUtils.convertToProtoFormat(state); + } + + private YarnApplicationAttemptState convertFromProtoFormat( + YarnApplicationAttemptStateProto yarnApplicationAttemptState) { + return ProtoUtils.convertFromProtoFormat(yarnApplicationAttemptState); + } + +} diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/impl/pb/ApplicationAttemptStartDataPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/impl/pb/ApplicationAttemptStartDataPBImpl.java new file mode 100644 index 00000000000..1f67fc7f061 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/impl/pb/ApplicationAttemptStartDataPBImpl.java @@ -0,0 +1,208 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.applicationhistoryservice.records.impl.pb; + +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationAttemptIdPBImpl; +import org.apache.hadoop.yarn.api.records.impl.pb.ContainerIdPBImpl; +import org.apache.hadoop.yarn.proto.ApplicationHistoryServerProtos.ApplicationAttemptStartDataProto; +import org.apache.hadoop.yarn.proto.ApplicationHistoryServerProtos.ApplicationAttemptStartDataProtoOrBuilder; +import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationAttemptIdProto; +import org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto; +import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptStartData; + +import com.google.protobuf.TextFormat; + +public class ApplicationAttemptStartDataPBImpl extends + ApplicationAttemptStartData { + + ApplicationAttemptStartDataProto proto = ApplicationAttemptStartDataProto + .getDefaultInstance(); + ApplicationAttemptStartDataProto.Builder builder = null; + boolean viaProto = false; + + public ApplicationAttemptStartDataPBImpl() { + builder = ApplicationAttemptStartDataProto.newBuilder(); + } + + public ApplicationAttemptStartDataPBImpl( + ApplicationAttemptStartDataProto proto) { + this.proto = proto; + viaProto = true; + } + + private ApplicationAttemptId applicationAttemptId; + private ContainerId masterContainerId; + + @Override + public ApplicationAttemptId getApplicationAttemptId() { + if (this.applicationAttemptId != null) { + return this.applicationAttemptId; + } + ApplicationAttemptStartDataProtoOrBuilder p = viaProto ? 
proto : builder;
+    if (!p.hasApplicationAttemptId()) {
+      return null;
+    }
+    this.applicationAttemptId =
+        convertFromProtoFormat(p.getApplicationAttemptId());
+    return this.applicationAttemptId;
+  }
+
+  @Override
+  public void
+      setApplicationAttemptId(ApplicationAttemptId applicationAttemptId) {
+    maybeInitBuilder();
+    if (applicationAttemptId == null) {
+      builder.clearApplicationAttemptId();
+    }
+    this.applicationAttemptId = applicationAttemptId;
+  }
+
+  @Override
+  public String getHost() {
+    ApplicationAttemptStartDataProtoOrBuilder p = viaProto ? proto : builder;
+    if (!p.hasHost()) {
+      return null;
+    }
+    return p.getHost();
+  }
+
+  @Override
+  public void setHost(String host) {
+    maybeInitBuilder();
+    if (host == null) {
+      builder.clearHost();
+      return;
+    }
+    builder.setHost(host);
+  }
+
+  @Override
+  public int getRPCPort() {
+    ApplicationAttemptStartDataProtoOrBuilder p = viaProto ? proto : builder;
+    return p.getRpcPort();
+  }
+
+  @Override
+  public void setRPCPort(int rpcPort) {
+    maybeInitBuilder();
+    builder.setRpcPort(rpcPort);
+  }
+
+  @Override
+  public ContainerId getMasterContainerId() {
+    if (this.masterContainerId != null) {
+      return this.masterContainerId;
+    }
+    ApplicationAttemptStartDataProtoOrBuilder p = viaProto ? proto : builder;
+    if (!p.hasMasterContainerId()) {
+      return null;
+    }
+    this.masterContainerId = convertFromProtoFormat(p.getMasterContainerId());
+    return this.masterContainerId;
+  }
+
+  @Override
+  public void setMasterContainerId(ContainerId masterContainerId) {
+    maybeInitBuilder();
+    if (masterContainerId == null) {
+      builder.clearMasterContainerId();
+    }
+    this.masterContainerId = masterContainerId;
+  }
+
+  public ApplicationAttemptStartDataProto getProto() {
+    mergeLocalToProto();
+    proto = viaProto ? proto : builder.build();
+    viaProto = true;
+    return proto;
+  }
+
+  @Override
+  public int hashCode() {
+    return getProto().hashCode();
+  }
+
+  @Override
+  public boolean equals(Object other) {
+    if (other == null)
+      return false;
+    if (other.getClass().isAssignableFrom(this.getClass())) {
+      return this.getProto().equals(this.getClass().cast(other).getProto());
+    }
+    return false;
+  }
+
+  @Override
+  public String toString() {
+    return TextFormat.shortDebugString(getProto());
+  }
+
+  private void mergeLocalToBuilder() {
+    if (this.applicationAttemptId != null
+        && !((ApplicationAttemptIdPBImpl) this.applicationAttemptId).getProto()
+          .equals(builder.getApplicationAttemptId())) {
+      builder
+        .setApplicationAttemptId(convertToProtoFormat(this.applicationAttemptId));
+    }
+    if (this.masterContainerId != null
+        && !((ContainerIdPBImpl) this.masterContainerId).getProto().equals(
+          builder.getMasterContainerId())) {
+      builder
+        .setMasterContainerId(convertToProtoFormat(this.masterContainerId));
+    }
+  }
+
+  private void mergeLocalToProto() {
+    if (viaProto) {
+      maybeInitBuilder();
+    }
+    mergeLocalToBuilder();
+    proto = builder.build();
+    viaProto = true;
+  }
+
+  private void maybeInitBuilder() {
+    if (viaProto || builder == null) {
+      builder = ApplicationAttemptStartDataProto.newBuilder(proto);
+    }
+    viaProto = false;
+  }
+
+  private ApplicationAttemptIdPBImpl convertFromProtoFormat(
+      ApplicationAttemptIdProto applicationAttemptId) {
+    return new ApplicationAttemptIdPBImpl(applicationAttemptId);
+  }
+
+  private ApplicationAttemptIdProto convertToProtoFormat(
+      ApplicationAttemptId applicationAttemptId) {
+    return ((ApplicationAttemptIdPBImpl) applicationAttemptId).getProto();
+  }
+
+  private ContainerIdPBImpl
+      convertFromProtoFormat(ContainerIdProto
containerId) { + return new ContainerIdPBImpl(containerId); + } + + private ContainerIdProto convertToProtoFormat(ContainerId masterContainerId) { + return ((ContainerIdPBImpl) masterContainerId).getProto(); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/impl/pb/ApplicationFinishDataPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/impl/pb/ApplicationFinishDataPBImpl.java new file mode 100644 index 00000000000..337426d0284 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/impl/pb/ApplicationFinishDataPBImpl.java @@ -0,0 +1,226 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.applicationhistoryservice.records.impl.pb; + +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; +import org.apache.hadoop.yarn.api.records.YarnApplicationState; +import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl; +import org.apache.hadoop.yarn.api.records.impl.pb.ProtoUtils; +import org.apache.hadoop.yarn.proto.ApplicationHistoryServerProtos.ApplicationFinishDataProto; +import org.apache.hadoop.yarn.proto.ApplicationHistoryServerProtos.ApplicationFinishDataProtoOrBuilder; +import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto; +import org.apache.hadoop.yarn.proto.YarnProtos.FinalApplicationStatusProto; +import org.apache.hadoop.yarn.proto.YarnProtos.YarnApplicationStateProto; +import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationFinishData; + +import com.google.protobuf.TextFormat; + +public class ApplicationFinishDataPBImpl extends ApplicationFinishData { + + ApplicationFinishDataProto proto = ApplicationFinishDataProto + .getDefaultInstance(); + ApplicationFinishDataProto.Builder builder = null; + boolean viaProto = false; + + private ApplicationId applicationId; + + public ApplicationFinishDataPBImpl() { + builder = ApplicationFinishDataProto.newBuilder(); + } + + public ApplicationFinishDataPBImpl(ApplicationFinishDataProto proto) { + this.proto = proto; + viaProto = true; + } + + @Override + public ApplicationId getApplicationId() { + if (this.applicationId != null) { + return this.applicationId; + } + ApplicationFinishDataProtoOrBuilder p = viaProto ? 
proto : builder; + if (!p.hasApplicationId()) { + return null; + } + this.applicationId = convertFromProtoFormat(p.getApplicationId()); + return this.applicationId; + } + + @Override + public void setApplicationId(ApplicationId applicationId) { + maybeInitBuilder(); + if (applicationId == null) { + builder.clearApplicationId(); + } + this.applicationId = applicationId; + } + + @Override + public long getFinishTime() { + ApplicationFinishDataProtoOrBuilder p = viaProto ? proto : builder; + return p.getFinishTime(); + } + + @Override + public void setFinishTime(long finishTime) { + maybeInitBuilder(); + builder.setFinishTime(finishTime); + } + + @Override + public String getDiagnosticsInfo() { + ApplicationFinishDataProtoOrBuilder p = viaProto ? proto : builder; + if (!p.hasDiagnosticsInfo()) { + return null; + } + return p.getDiagnosticsInfo(); + } + + @Override + public void setDiagnosticsInfo(String diagnosticsInfo) { + maybeInitBuilder(); + if (diagnosticsInfo == null) { + builder.clearDiagnosticsInfo(); + return; + } + builder.setDiagnosticsInfo(diagnosticsInfo); + } + + @Override + public FinalApplicationStatus getFinalApplicationStatus() { + ApplicationFinishDataProtoOrBuilder p = viaProto ? proto : builder; + if (!p.hasFinalApplicationStatus()) { + return null; + } + return convertFromProtoFormat(p.getFinalApplicationStatus()); + } + + @Override + public void setFinalApplicationStatus( + FinalApplicationStatus finalApplicationStatus) { + maybeInitBuilder(); + if (finalApplicationStatus == null) { + builder.clearFinalApplicationStatus(); + return; + } + builder + .setFinalApplicationStatus(convertToProtoFormat(finalApplicationStatus)); + } + + @Override + public YarnApplicationState getYarnApplicationState() { + ApplicationFinishDataProtoOrBuilder p = viaProto ? proto : builder; + if (!p.hasYarnApplicationState()) { + return null; + } + return convertFromProtoFormat(p.getYarnApplicationState()); + } + + @Override + public void setYarnApplicationState(YarnApplicationState state) { + maybeInitBuilder(); + if (state == null) { + builder.clearYarnApplicationState(); + return; + } + builder.setYarnApplicationState(convertToProtoFormat(state)); + } + + public ApplicationFinishDataProto getProto() { + mergeLocalToProto(); + proto = viaProto ? 
proto : builder.build(); + viaProto = true; + return proto; + } + + @Override + public int hashCode() { + return getProto().hashCode(); + } + + @Override + public boolean equals(Object other) { + if (other == null) + return false; + if (other.getClass().isAssignableFrom(this.getClass())) { + return this.getProto().equals(this.getClass().cast(other).getProto()); + } + return false; + } + + @Override + public String toString() { + return TextFormat.shortDebugString(getProto()); + } + + private void mergeLocalToBuilder() { + if (this.applicationId != null + && !((ApplicationIdPBImpl) this.applicationId).getProto().equals( + builder.getApplicationId())) { + builder.setApplicationId(convertToProtoFormat(this.applicationId)); + } + } + + private void mergeLocalToProto() { + if (viaProto) { + maybeInitBuilder(); + } + mergeLocalToBuilder(); + proto = builder.build(); + viaProto = true; + } + + private void maybeInitBuilder() { + if (viaProto || builder == null) { + builder = ApplicationFinishDataProto.newBuilder(proto); + } + viaProto = false; + } + + private ApplicationIdProto convertToProtoFormat(ApplicationId applicationId) { + return ((ApplicationIdPBImpl) applicationId).getProto(); + } + + private ApplicationIdPBImpl convertFromProtoFormat( + ApplicationIdProto applicationId) { + return new ApplicationIdPBImpl(applicationId); + } + + private FinalApplicationStatus convertFromProtoFormat( + FinalApplicationStatusProto finalApplicationStatus) { + return ProtoUtils.convertFromProtoFormat(finalApplicationStatus); + } + + private FinalApplicationStatusProto convertToProtoFormat( + FinalApplicationStatus finalApplicationStatus) { + return ProtoUtils.convertToProtoFormat(finalApplicationStatus); + } + + private YarnApplicationStateProto convertToProtoFormat( + YarnApplicationState state) { + return ProtoUtils.convertToProtoFormat(state); + } + + private YarnApplicationState convertFromProtoFormat( + YarnApplicationStateProto yarnApplicationState) { + return ProtoUtils.convertFromProtoFormat(yarnApplicationState); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/impl/pb/ApplicationStartDataPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/impl/pb/ApplicationStartDataPBImpl.java new file mode 100644 index 00000000000..56f7aff26c0 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/impl/pb/ApplicationStartDataPBImpl.java @@ -0,0 +1,229 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
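A hedged sketch of the write/read cycle a history store would drive through the class above; roundTrip is a hypothetical helper, not patch code:

    static ApplicationFinishData roundTrip(ApplicationFinishData in)
        throws InvalidProtocolBufferException {
      // Serialize for the store, then rebuild on the read path.
      byte[] bytes = ((ApplicationFinishDataPBImpl) in).getProto().toByteArray();
      return new ApplicationFinishDataPBImpl(
          ApplicationFinishDataProto.parseFrom(bytes));
    }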
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.applicationhistoryservice.records.impl.pb; + +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl; +import org.apache.hadoop.yarn.proto.ApplicationHistoryServerProtos.ApplicationStartDataProto; +import org.apache.hadoop.yarn.proto.ApplicationHistoryServerProtos.ApplicationStartDataProtoOrBuilder; +import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto; +import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationStartData; + +import com.google.protobuf.TextFormat; + +public class ApplicationStartDataPBImpl extends ApplicationStartData { + + ApplicationStartDataProto proto = ApplicationStartDataProto + .getDefaultInstance(); + ApplicationStartDataProto.Builder builder = null; + boolean viaProto = false; + + private ApplicationId applicationId; + + public ApplicationStartDataPBImpl() { + builder = ApplicationStartDataProto.newBuilder(); + } + + public ApplicationStartDataPBImpl(ApplicationStartDataProto proto) { + this.proto = proto; + viaProto = true; + } + + @Override + public ApplicationId getApplicationId() { + if (this.applicationId != null) { + return this.applicationId; + } + ApplicationStartDataProtoOrBuilder p = viaProto ? proto : builder; + if (!p.hasApplicationId()) { + return null; + } + this.applicationId = convertFromProtoFormat(p.getApplicationId()); + return this.applicationId; + } + + @Override + public void setApplicationId(ApplicationId applicationId) { + maybeInitBuilder(); + if (applicationId == null) { + builder.clearApplicationId(); + } + this.applicationId = applicationId; + } + + @Override + public String getApplicationName() { + ApplicationStartDataProtoOrBuilder p = viaProto ? proto : builder; + if (!p.hasApplicationName()) { + return null; + } + return p.getApplicationName(); + } + + @Override + public void setApplicationName(String applicationName) { + maybeInitBuilder(); + if (applicationName == null) { + builder.clearApplicationName(); + return; + } + builder.setApplicationName(applicationName); + } + + @Override + public String getApplicationType() { + ApplicationStartDataProtoOrBuilder p = viaProto ? proto : builder; + if (!p.hasApplicationType()) { + return null; + } + return p.getApplicationType(); + } + + @Override + public void setApplicationType(String applicationType) { + maybeInitBuilder(); + if (applicationType == null) { + builder.clearApplicationType(); + return; + } + builder.setApplicationType(applicationType); + } + + @Override + public String getUser() { + ApplicationStartDataProtoOrBuilder p = viaProto ? proto : builder; + if (!p.hasUser()) { + return null; + } + return p.getUser(); + } + + @Override + public void setUser(String user) { + maybeInitBuilder(); + if (user == null) { + builder.clearUser(); + return; + } + builder.setUser(user); + } + + @Override + public String getQueue() { + ApplicationStartDataProtoOrBuilder p = viaProto ? proto : builder; + if (!p.hasQueue()) { + return null; + } + return p.getQueue(); + } + + @Override + public void setQueue(String queue) { + maybeInitBuilder(); + if (queue == null) { + builder.clearQueue(); + return; + } + builder.setQueue(queue); + } + + @Override + public long getSubmitTime() { + ApplicationStartDataProtoOrBuilder p = viaProto ? 
proto : builder; + return p.getSubmitTime(); + } + + @Override + public void setSubmitTime(long submitTime) { + maybeInitBuilder(); + builder.setSubmitTime(submitTime); + } + + @Override + public long getStartTime() { + ApplicationStartDataProtoOrBuilder p = viaProto ? proto : builder; + return p.getStartTime(); + } + + @Override + public void setStartTime(long startTime) { + maybeInitBuilder(); + builder.setStartTime(startTime); + } + + public ApplicationStartDataProto getProto() { + mergeLocalToProto(); + proto = viaProto ? proto : builder.build(); + viaProto = true; + return proto; + } + + @Override + public int hashCode() { + return getProto().hashCode(); + } + + @Override + public boolean equals(Object other) { + if (other == null) + return false; + if (other.getClass().isAssignableFrom(this.getClass())) { + return this.getProto().equals(this.getClass().cast(other).getProto()); + } + return false; + } + + @Override + public String toString() { + return TextFormat.shortDebugString(getProto()); + } + + private void mergeLocalToBuilder() { + if (this.applicationId != null + && !((ApplicationIdPBImpl) this.applicationId).getProto().equals( + builder.getApplicationId())) { + builder.setApplicationId(convertToProtoFormat(this.applicationId)); + } + } + + private void mergeLocalToProto() { + if (viaProto) { + maybeInitBuilder(); + } + mergeLocalToBuilder(); + proto = builder.build(); + viaProto = true; + } + + private void maybeInitBuilder() { + if (viaProto || builder == null) { + builder = ApplicationStartDataProto.newBuilder(proto); + } + viaProto = false; + } + + private ApplicationIdProto convertToProtoFormat(ApplicationId applicationId) { + return ((ApplicationIdPBImpl) applicationId).getProto(); + } + + private ApplicationIdPBImpl convertFromProtoFormat( + ApplicationIdProto applicationId) { + return new ApplicationIdPBImpl(applicationId); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/impl/pb/ContainerFinishDataPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/impl/pb/ContainerFinishDataPBImpl.java new file mode 100644 index 00000000000..225d53c322d --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/impl/pb/ContainerFinishDataPBImpl.java @@ -0,0 +1,223 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
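Populating the start record through the PB-backed setters above, as a sketch with illustrative values:

    ApplicationStartData appStart = new ApplicationStartDataPBImpl();
    appStart.setApplicationId(ApplicationId.newInstance(1390000000000L, 1));
    appStart.setApplicationName("word count");
    appStart.setApplicationType("MAPREDUCE");
    appStart.setUser("user1");
    appStart.setQueue("default");
    appStart.setSubmitTime(1390000000000L);
    appStart.setStartTime(1390000001000L);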
+ */ + +package org.apache.hadoop.yarn.server.applicationhistoryservice.records.impl.pb; + +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.ContainerState; +import org.apache.hadoop.yarn.api.records.impl.pb.ContainerIdPBImpl; +import org.apache.hadoop.yarn.api.records.impl.pb.ProtoUtils; +import org.apache.hadoop.yarn.proto.ApplicationHistoryServerProtos.ContainerFinishDataProto; +import org.apache.hadoop.yarn.proto.ApplicationHistoryServerProtos.ContainerFinishDataProtoOrBuilder; +import org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto; +import org.apache.hadoop.yarn.proto.YarnProtos.ContainerStateProto; +import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerFinishData; + +import com.google.protobuf.TextFormat; + +public class ContainerFinishDataPBImpl extends ContainerFinishData { + + ContainerFinishDataProto proto = ContainerFinishDataProto + .getDefaultInstance(); + ContainerFinishDataProto.Builder builder = null; + boolean viaProto = false; + + private ContainerId containerId; + + public ContainerFinishDataPBImpl() { + builder = ContainerFinishDataProto.newBuilder(); + } + + public ContainerFinishDataPBImpl(ContainerFinishDataProto proto) { + this.proto = proto; + viaProto = true; + } + + @Override + public ContainerId getContainerId() { + if (this.containerId != null) { + return this.containerId; + } + ContainerFinishDataProtoOrBuilder p = viaProto ? proto : builder; + if (!p.hasContainerId()) { + return null; + } + this.containerId = convertFromProtoFormat(p.getContainerId()); + return this.containerId; + } + + @Override + public void setContainerId(ContainerId containerId) { + maybeInitBuilder(); + if (containerId == null) { + builder.clearContainerId(); + } + this.containerId = containerId; + } + + @Override + public long getFinishTime() { + ContainerFinishDataProtoOrBuilder p = viaProto ? proto : builder; + return p.getFinishTime(); + } + + @Override + public void setFinishTime(long finishTime) { + maybeInitBuilder(); + builder.setFinishTime(finishTime); + } + + @Override + public String getDiagnosticsInfo() { + ContainerFinishDataProtoOrBuilder p = viaProto ? proto : builder; + if (!p.hasDiagnosticsInfo()) { + return null; + } + return p.getDiagnosticsInfo(); + } + + @Override + public void setDiagnosticsInfo(String diagnosticsInfo) { + maybeInitBuilder(); + if (diagnosticsInfo == null) { + builder.clearDiagnosticsInfo(); + return; + } + builder.setDiagnosticsInfo(diagnosticsInfo); + } + + @Override + public String getLogURL() { + ContainerFinishDataProtoOrBuilder p = viaProto ? proto : builder; + if (!p.hasLogUrl()) { + return null; + } + return p.getLogUrl(); + } + + @Override + public void setLogURL(String logURL) { + maybeInitBuilder(); + if (logURL == null) { + builder.clearLogUrl(); + return; + } + builder.setLogUrl(logURL); + } + + @Override + public int getContainerExitStatus() { + ContainerFinishDataProtoOrBuilder p = viaProto ? proto : builder; + return p.getContainerExitStatus(); + } + + @Override + public ContainerState getContainerState() { + ContainerFinishDataProtoOrBuilder p = viaProto ? 
proto : builder; + if (!p.hasContainerState()) { + return null; + } + return convertFromProtoFormat(p.getContainerState()); + } + + @Override + public void setContainerState(ContainerState state) { + maybeInitBuilder(); + if (state == null) { + builder.clearContainerState(); + return; + } + builder.setContainerState(convertToProtoFormat(state)); + } + + @Override + public void setContainerExitStatus(int containerExitStatus) { + maybeInitBuilder(); + builder.setContainerExitStatus(containerExitStatus); + } + + public ContainerFinishDataProto getProto() { + mergeLocalToProto(); + proto = viaProto ? proto : builder.build(); + viaProto = true; + return proto; + } + + @Override + public int hashCode() { + return getProto().hashCode(); + } + + @Override + public boolean equals(Object other) { + if (other == null) + return false; + if (other.getClass().isAssignableFrom(this.getClass())) { + return this.getProto().equals(this.getClass().cast(other).getProto()); + } + return false; + } + + @Override + public String toString() { + return TextFormat.shortDebugString(getProto()); + } + + private void mergeLocalToBuilder() { + if (this.containerId != null + && !((ContainerIdPBImpl) this.containerId).getProto().equals( + builder.getContainerId())) { + builder.setContainerId(convertToProtoFormat(this.containerId)); + } + } + + private void mergeLocalToProto() { + if (viaProto) { + maybeInitBuilder(); + } + mergeLocalToBuilder(); + proto = builder.build(); + viaProto = true; + } + + private void maybeInitBuilder() { + if (viaProto || builder == null) { + builder = ContainerFinishDataProto.newBuilder(proto); + } + viaProto = false; + } + + private ContainerIdProto convertToProtoFormat(ContainerId containerId) { + return ((ContainerIdPBImpl) containerId).getProto(); + } + + private ContainerIdPBImpl + convertFromProtoFormat(ContainerIdProto containerId) { + return new ContainerIdPBImpl(containerId); + } + + private ContainerStateProto convertToProtoFormat(ContainerState state) { + return ProtoUtils.convertToProtoFormat(state); + } + + private ContainerState convertFromProtoFormat( + ContainerStateProto containerState) { + return ProtoUtils.convertFromProtoFormat(containerState); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/impl/pb/ContainerStartDataPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/impl/pb/ContainerStartDataPBImpl.java new file mode 100644 index 00000000000..6d248b2d9e8 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/impl/pb/ContainerStartDataPBImpl.java @@ -0,0 +1,258 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
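One detail worth noting in the class above: the string and enum setters treat null as "clear the proto field", so an unset value reads back as null rather than as a protobuf default. A sketch:

    ContainerFinishDataPBImpl finish = new ContainerFinishDataPBImpl();
    finish.setDiagnosticsInfo("killed by user");
    finish.setDiagnosticsInfo(null);   // clears the field in the builder
    // hasDiagnosticsInfo() is false again, so the getter returns null.
    assert finish.getDiagnosticsInfo() == null;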
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.applicationhistoryservice.records.impl.pb; + +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.NodeId; +import org.apache.hadoop.yarn.api.records.Priority; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.impl.pb.ContainerIdPBImpl; +import org.apache.hadoop.yarn.api.records.impl.pb.NodeIdPBImpl; +import org.apache.hadoop.yarn.api.records.impl.pb.PriorityPBImpl; +import org.apache.hadoop.yarn.api.records.impl.pb.ResourcePBImpl; +import org.apache.hadoop.yarn.proto.ApplicationHistoryServerProtos.ContainerStartDataProto; +import org.apache.hadoop.yarn.proto.ApplicationHistoryServerProtos.ContainerStartDataProtoOrBuilder; +import org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto; +import org.apache.hadoop.yarn.proto.YarnProtos.NodeIdProto; +import org.apache.hadoop.yarn.proto.YarnProtos.PriorityProto; +import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto; +import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerStartData; + +import com.google.protobuf.TextFormat; + +public class ContainerStartDataPBImpl extends ContainerStartData { + + ContainerStartDataProto proto = ContainerStartDataProto.getDefaultInstance(); + ContainerStartDataProto.Builder builder = null; + boolean viaProto = false; + + private ContainerId containerId; + private Resource resource; + private NodeId nodeId; + private Priority priority; + + public ContainerStartDataPBImpl() { + builder = ContainerStartDataProto.newBuilder(); + } + + public ContainerStartDataPBImpl(ContainerStartDataProto proto) { + this.proto = proto; + viaProto = true; + } + + @Override + public ContainerId getContainerId() { + if (this.containerId != null) { + return this.containerId; + } + ContainerStartDataProtoOrBuilder p = viaProto ? proto : builder; + if (!p.hasContainerId()) { + return null; + } + this.containerId = convertFromProtoFormat(p.getContainerId()); + return this.containerId; + } + + @Override + public void setContainerId(ContainerId containerId) { + maybeInitBuilder(); + if (containerId == null) { + builder.clearContainerId(); + } + this.containerId = containerId; + } + + @Override + public Resource getAllocatedResource() { + if (this.resource != null) { + return this.resource; + } + ContainerStartDataProtoOrBuilder p = viaProto ? proto : builder; + if (!p.hasAllocatedResource()) { + return null; + } + this.resource = convertFromProtoFormat(p.getAllocatedResource()); + return this.resource; + } + + @Override + public void setAllocatedResource(Resource resource) { + maybeInitBuilder(); + if (resource == null) { + builder.clearAllocatedResource(); + } + this.resource = resource; + } + + @Override + public NodeId getAssignedNode() { + if (this.nodeId != null) { + return this.nodeId; + } + ContainerStartDataProtoOrBuilder p = viaProto ? 
proto : builder; + if (!p.hasAssignedNodeId()) { + return null; + } + this.nodeId = convertFromProtoFormat(p.getAssignedNodeId()); + return this.nodeId; + } + + @Override + public void setAssignedNode(NodeId nodeId) { + maybeInitBuilder(); + if (nodeId == null) { + builder.clearAssignedNodeId(); + } + this.nodeId = nodeId; + } + + @Override + public Priority getPriority() { + if (this.priority != null) { + return this.priority; + } + ContainerStartDataProtoOrBuilder p = viaProto ? proto : builder; + if (!p.hasPriority()) { + return null; + } + this.priority = convertFromProtoFormat(p.getPriority()); + return this.priority; + } + + @Override + public void setPriority(Priority priority) { + maybeInitBuilder(); + if (priority == null) { + builder.clearPriority(); + } + this.priority = priority; + } + + @Override + public long getStartTime() { + ContainerStartDataProtoOrBuilder p = viaProto ? proto : builder; + return p.getStartTime(); + } + + @Override + public void setStartTime(long startTime) { + maybeInitBuilder(); + builder.setStartTime(startTime); + } + + public ContainerStartDataProto getProto() { + mergeLocalToProto(); + proto = viaProto ? proto : builder.build(); + viaProto = true; + return proto; + } + + @Override + public int hashCode() { + return getProto().hashCode(); + } + + @Override + public boolean equals(Object other) { + if (other == null) + return false; + if (other.getClass().isAssignableFrom(this.getClass())) { + return this.getProto().equals(this.getClass().cast(other).getProto()); + } + return false; + } + + @Override + public String toString() { + return TextFormat.shortDebugString(getProto()); + } + + private void mergeLocalToBuilder() { + if (this.containerId != null + && !((ContainerIdPBImpl) this.containerId).getProto().equals( + builder.getContainerId())) { + builder.setContainerId(convertToProtoFormat(this.containerId)); + } + if (this.resource != null + && !((ResourcePBImpl) this.resource).getProto().equals( + builder.getAllocatedResource())) { + builder.setAllocatedResource(convertToProtoFormat(this.resource)); + } + if (this.nodeId != null + && !((NodeIdPBImpl) this.nodeId).getProto().equals( + builder.getAssignedNodeId())) { + builder.setAssignedNodeId(convertToProtoFormat(this.nodeId)); + } + if (this.priority != null + && !((PriorityPBImpl) this.priority).getProto().equals( + builder.getPriority())) { + builder.setPriority(convertToProtoFormat(this.priority)); + } + } + + private void mergeLocalToProto() { + if (viaProto) { + maybeInitBuilder(); + } + mergeLocalToBuilder(); + proto = builder.build(); + viaProto = true; + } + + private void maybeInitBuilder() { + if (viaProto || builder == null) { + builder = ContainerStartDataProto.newBuilder(proto); + } + viaProto = false; + } + + private ContainerIdProto convertToProtoFormat(ContainerId containerId) { + return ((ContainerIdPBImpl) containerId).getProto(); + } + + private ContainerIdPBImpl + convertFromProtoFormat(ContainerIdProto containerId) { + return new ContainerIdPBImpl(containerId); + } + + private ResourceProto convertToProtoFormat(Resource resource) { + return ((ResourcePBImpl) resource).getProto(); + } + + private ResourcePBImpl convertFromProtoFormat(ResourceProto resource) { + return new ResourcePBImpl(resource); + } + + private NodeIdProto convertToProtoFormat(NodeId nodeId) { + return ((NodeIdPBImpl) nodeId).getProto(); + } + + private NodeIdPBImpl convertFromProtoFormat(NodeIdProto nodeId) { + return new NodeIdPBImpl(nodeId); + } + + private PriorityProto convertToProtoFormat(Priority 
priority) { + return ((PriorityPBImpl) priority).getProto(); + } + + private PriorityPBImpl convertFromProtoFormat(PriorityProto priority) { + return new PriorityPBImpl(priority); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSController.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSController.java new file mode 100644 index 00000000000..4e00bc8fe0b --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSController.java @@ -0,0 +1,55 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp; + +import org.apache.hadoop.yarn.webapp.Controller; + +import com.google.inject.Inject; + +public class AHSController extends Controller { + + @Inject + AHSController(RequestContext ctx) { + super(ctx); + } + + @Override + public void index() { + setTitle("Application History"); + } + + public void app() { + render(AppPage.class); + } + + public void appattempt() { + render(AppAttemptPage.class); + } + + public void container() { + render(ContainerPage.class); + } + + /** + * Render the logs page. + */ + public void logs() { + render(AHSLogsPage.class); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSLogsPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSLogsPage.java new file mode 100644 index 00000000000..8821bc02dcb --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSLogsPage.java @@ -0,0 +1,55 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
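All of the PBImpl classes above share the same copy-on-write lifecycle; the sketch below spells out the assumed state transitions using ContainerStartDataPBImpl:

    ContainerStartDataPBImpl data = new ContainerStartDataPBImpl();
    data.setPriority(Priority.newInstance(0)); // record types are cached locally
    data.setStartTime(1390000000000L);         // scalars go straight to the builder
    // getProto() runs mergeLocalToProto(): cached records are folded into the
    // builder, an immutable proto is built, and viaProto flips to true.
    ContainerStartDataProto p1 = data.getProto();
    // The next setter calls maybeInitBuilder(), which re-seeds a fresh builder
    // from the frozen proto before mutating it, so p1 stays unchanged.
    data.setStartTime(1390000001000L);
    ContainerStartDataProto p2 = data.getProto();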
You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp;
+
+import static org.apache.hadoop.yarn.webapp.YarnWebParams.CONTAINER_ID;
+import static org.apache.hadoop.yarn.webapp.YarnWebParams.ENTITY_STRING;
+
+import org.apache.hadoop.yarn.webapp.SubView;
+import org.apache.hadoop.yarn.webapp.log.AggregatedLogsBlock;
+
+public class AHSLogsPage extends AHSView {
+  /*
+   * (non-Javadoc)
+   *
+   * @see
+   * org.apache.hadoop.yarn.server.applicationhistoryservice.webapp.AHSView#
+   * preHead(org.apache.hadoop.yarn.webapp.hamlet.Hamlet.HTML)
+   */
+  @Override
+  protected void preHead(Page.HTML<_> html) {
+    String logEntity = $(ENTITY_STRING);
+    if (logEntity == null || logEntity.isEmpty()) {
+      logEntity = $(CONTAINER_ID);
+    }
+    if (logEntity == null || logEntity.isEmpty()) {
+      logEntity = "UNKNOWN";
+    }
+    commonPreHead(html);
+  }
+
+  /**
+   * The content of this page is the AggregatedLogsBlock
+   *
+   * @return AggregatedLogsBlock.class
+   */
+  @Override
+  protected Class<? extends SubView> content() {
+    return AggregatedLogsBlock.class;
+  }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSView.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSView.java
new file mode 100644
index 00000000000..4baa75d1bdd
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSView.java
@@ -0,0 +1,90 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
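For orientation, with a hypothetical host and ids: the "/logs" route that AHSWebApp registers further below maps a request like the following onto the AHSLogsPage above, binding the path segments to NM_NODENAME, CONTAINER_ID, ENTITY_STRING, and APP_OWNER:

    String logsUri = "http://ahs.example.com:8188/applicationhistory/logs/"
        + "nm.example.com:45454/"                    // NM_NODENAME
        + "container_1390000000000_0001_01_000001/"  // CONTAINER_ID
        + "container_1390000000000_0001_01_000001/"  // ENTITY_STRING
        + "user1";                                   // APP_OWNER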
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp;
+
+import static org.apache.hadoop.yarn.util.StringHelper.sjoin;
+import static org.apache.hadoop.yarn.webapp.YarnWebParams.APP_STATE;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.ACCORDION;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.ACCORDION_ID;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES_ID;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initID;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.tableInit;
+
+import org.apache.hadoop.yarn.server.webapp.AppsBlock;
+import org.apache.hadoop.yarn.webapp.SubView;
+import org.apache.hadoop.yarn.webapp.view.TwoColumnLayout;
+
+// Do NOT rename/refactor this to AHSView as it will wreak havoc
+// on Mac OS HFS
+public class AHSView extends TwoColumnLayout {
+  static final int MAX_DISPLAY_ROWS = 100; // direct table rendering
+  static final int MAX_FAST_ROWS = 1000; // inline js array
+
+  @Override
+  protected void preHead(Page.HTML<_> html) {
+    commonPreHead(html);
+    set(DATATABLES_ID, "apps");
+    set(initID(DATATABLES, "apps"), appsTableInit());
+    setTableStyles(html, "apps", ".queue {width:6em}", ".ui {width:8em}");
+
+    // Set the correct title.
+    String reqState = $(APP_STATE);
+    reqState = (reqState == null || reqState.isEmpty() ? "All" : reqState);
+    setTitle(sjoin(reqState, "Applications"));
+  }
+
+  protected void commonPreHead(Page.HTML<_> html) {
+    set(ACCORDION_ID, "nav");
+    set(initID(ACCORDION, "nav"), "{autoHeight:false, active:0}");
+  }
+
+  @Override
+  protected Class<? extends SubView> nav() {
+    return NavBlock.class;
+  }
+
+  @Override
+  protected Class<? extends SubView> content() {
+    return AppsBlock.class;
+  }
+
+  private String appsTableInit() {
+    // id, user, name, queue, starttime, finishtime, state, status, progress, ui
+    return tableInit().append(", 'aaData': appsTableData")
+      .append(", bDeferRender: true").append(", bProcessing: true")
+
+      .append("\n, aoColumnDefs: ").append(getAppsTableColumnDefs())
+
+      // Sort by id upon page load
+      .append(", aaSorting: [[0, 'desc']]}").toString();
+  }
+
+  protected String getAppsTableColumnDefs() {
+    StringBuilder sb = new StringBuilder();
+    return sb.append("[\n").append("{'sType':'numeric', 'aTargets': [0]")
+      .append(", 'mRender': parseHadoopID }")
+
+      .append("\n, {'sType':'numeric', 'aTargets': [5, 6]")
+      .append(", 'mRender': renderHadoopDate }")
+
+      .append("\n, {'sType':'numeric', bSearchable:false, 'aTargets': [9]")
+      .append(", 'mRender': parseHadoopProgress }]").toString();
+  }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebApp.java
new file mode 100644
index 00000000000..81f838396d7
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebApp.java
@@ -0,0 +1,52 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.
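To make the string assembly above concrete, getAppsTableColumnDefs() evaluates to the following DataTables fragment (the mRender callbacks are defined in the shared YARN webapp JavaScript):

    String columnDefs =
        "[\n{'sType':'numeric', 'aTargets': [0], 'mRender': parseHadoopID }"
      + "\n, {'sType':'numeric', 'aTargets': [5, 6], 'mRender': renderHadoopDate }"
      + "\n, {'sType':'numeric', bSearchable:false, 'aTargets': [9]"
      + ", 'mRender': parseHadoopProgress }]";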
The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp;
+
+import static org.apache.hadoop.yarn.util.StringHelper.pajoin;
+
+import org.apache.hadoop.yarn.server.api.ApplicationContext;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryManager;
+import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
+import org.apache.hadoop.yarn.webapp.WebApp;
+import org.apache.hadoop.yarn.webapp.YarnWebParams;
+
+public class AHSWebApp extends WebApp implements YarnWebParams {
+
+  private final ApplicationHistoryManager applicationHistoryManager;
+
+  public AHSWebApp(ApplicationHistoryManager applicationHistoryManager) {
+    this.applicationHistoryManager = applicationHistoryManager;
+  }
+
+  @Override
+  public void setup() {
+    bind(JAXBContextResolver.class);
+    bind(AHSWebServices.class);
+    bind(GenericExceptionHandler.class);
+    bind(ApplicationContext.class).toInstance(applicationHistoryManager);
+    route("/", AHSController.class);
+    route(pajoin("/apps", APP_STATE), AHSController.class);
+    route(pajoin("/app", APPLICATION_ID), AHSController.class, "app");
+    route(pajoin("/appattempt", APPLICATION_ATTEMPT_ID), AHSController.class,
+      "appattempt");
+    route(pajoin("/container", CONTAINER_ID), AHSController.class, "container");
+    route(
+      pajoin("/logs", NM_NODENAME, CONTAINER_ID, ENTITY_STRING, APP_OWNER,
+        CONTAINER_LOG_TYPE), AHSController.class, "logs");
+  }
+}
\ No newline at end of file
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
new file mode 100644
index 00000000000..2040f575141
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
@@ -0,0 +1,162 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp;
+
+import java.util.Collections;
+import java.util.Set;
+
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+import javax.ws.rs.GET;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
+import javax.ws.rs.QueryParam;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.MediaType;
+
+import org.apache.hadoop.yarn.api.records.YarnApplicationState;
+import org.apache.hadoop.yarn.server.api.ApplicationContext;
+import org.apache.hadoop.yarn.server.webapp.WebServices;
+import org.apache.hadoop.yarn.server.webapp.dao.AppAttemptInfo;
+import org.apache.hadoop.yarn.server.webapp.dao.AppAttemptsInfo;
+import org.apache.hadoop.yarn.server.webapp.dao.AppInfo;
+import org.apache.hadoop.yarn.server.webapp.dao.AppsInfo;
+import org.apache.hadoop.yarn.server.webapp.dao.ContainerInfo;
+import org.apache.hadoop.yarn.server.webapp.dao.ContainersInfo;
+import org.apache.hadoop.yarn.webapp.BadRequestException;
+
+import com.google.inject.Inject;
+import com.google.inject.Singleton;
+
+@Singleton
+@Path("/ws/v1/applicationhistory")
+public class AHSWebServices extends WebServices {
+
+  @Inject
+  public AHSWebServices(ApplicationContext appContext) {
+    super(appContext);
+  }
+
+  @GET
+  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
+  public AppsInfo get(@Context HttpServletRequest req,
+      @Context HttpServletResponse res) {
+    return getApps(req, res, null, Collections.<String> emptySet(), null,
+      null, null, null, null, null, null, null,
+      Collections.<String> emptySet());
+  }
+
+  @GET
+  @Path("/apps")
+  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
+  @Override
+  public AppsInfo getApps(@Context HttpServletRequest req,
+      @Context HttpServletResponse res, @QueryParam("state") String stateQuery,
+      @QueryParam("states") Set<String> statesQuery,
+      @QueryParam("finalStatus") String finalStatusQuery,
+      @QueryParam("user") String userQuery,
+      @QueryParam("queue") String queueQuery,
+      @QueryParam("limit") String count,
+      @QueryParam("startedTimeBegin") String startedBegin,
+      @QueryParam("startedTimeEnd") String startedEnd,
+      @QueryParam("finishedTimeBegin") String finishBegin,
+      @QueryParam("finishedTimeEnd") String finishEnd,
+      @QueryParam("applicationTypes") Set<String> applicationTypes) {
+    init(res);
+    validateStates(stateQuery, statesQuery);
+    return super.getApps(req, res, stateQuery, statesQuery, finalStatusQuery,
+      userQuery, queueQuery, count, startedBegin, startedEnd, finishBegin,
+      finishEnd, applicationTypes);
+  }
+
+  @GET
+  @Path("/apps/{appid}")
+  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
+  @Override
+  public AppInfo getApp(@Context HttpServletRequest req,
+      @Context HttpServletResponse res, @PathParam("appid") String appId) {
+    init(res);
+    return super.getApp(req, res, appId);
+  }
+
+  @GET
+  @Path("/apps/{appid}/appattempts")
+  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
+  @Override
+  public AppAttemptsInfo getAppAttempts(@Context HttpServletRequest req,
+      @Context HttpServletResponse res, @PathParam("appid") String appId) {
+    init(res);
+    return super.getAppAttempts(req, res, appId);
+  }
+
+  @GET
+  @Path("/apps/{appid}/appattempts/{appattemptid}")
+  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
+  @Override
+  public AppAttemptInfo getAppAttempt(@Context HttpServletRequest req,
+      @Context HttpServletResponse res, @PathParam("appid") String appId,
+      @PathParam("appattemptid") String appAttemptId) {
+    init(res);
+    return super.getAppAttempt(req, res, appId, appAttemptId);
+  }
+
+  @GET
+  @Path("/apps/{appid}/appattempts/{appattemptid}/containers")
+  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
+  @Override
+  public ContainersInfo getContainers(@Context HttpServletRequest req,
+      @Context HttpServletResponse res, @PathParam("appid") String appId,
+      @PathParam("appattemptid") String appAttemptId) {
+    init(res);
+    return super.getContainers(req, res, appId, appAttemptId);
+  }
+
+  @GET
+  @Path("/apps/{appid}/appattempts/{appattemptid}/containers/{containerid}")
+  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
+  @Override
+  public ContainerInfo getContainer(@Context HttpServletRequest req,
+      @Context HttpServletResponse res, @PathParam("appid") String appId,
+      @PathParam("appattemptid") String appAttemptId,
+      @PathParam("containerid") String containerId) {
+    init(res);
+    return super.getContainer(req, res, appId, appAttemptId, containerId);
+  }
+
+  private static void
+      validateStates(String stateQuery, Set<String> statesQuery) {
+    // stateQuery is deprecated.
+    if (stateQuery != null && !stateQuery.isEmpty()) {
+      statesQuery.add(stateQuery);
+    }
+    Set<String> appStates = parseQueries(statesQuery, true);
+    for (String appState : appStates) {
+      switch (YarnApplicationState.valueOf(appState.toUpperCase())) {
+        case FINISHED:
+        case FAILED:
+        case KILLED:
+          continue;
+        default:
+          throw new BadRequestException("Invalid application-state " + appState
+            + " specified. It should be a final state");
+      }
+    }
+  }
+
+}
\ No newline at end of file
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppAttemptPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppAttemptPage.java
new file mode 100644
index 00000000000..63b44bde663
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppAttemptPage.java
@@ -0,0 +1,69 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp;
+
+import static org.apache.hadoop.yarn.util.StringHelper.join;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES_ID;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initID;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.tableInit;
+
+import org.apache.hadoop.yarn.server.webapp.AppAttemptBlock;
+import org.apache.hadoop.yarn.webapp.SubView;
+import org.apache.hadoop.yarn.webapp.YarnWebParams;
+
+public class AppAttemptPage extends AHSView {
+
+  @Override
+  protected void preHead(Page.HTML<_> html) {
+    commonPreHead(html);
+
+    String appAttemptId = $(YarnWebParams.APPLICATION_ATTEMPT_ID);
+    set(
+      TITLE,
+      appAttemptId.isEmpty() ? "Bad request: missing application attempt ID"
+          : join("Application Attempt ",
+            $(YarnWebParams.APPLICATION_ATTEMPT_ID)));
+
+    set(DATATABLES_ID, "containers");
+    set(initID(DATATABLES, "containers"), containersTableInit());
+    setTableStyles(html, "containers", ".queue {width:6em}", ".ui {width:8em}");
+  }
+
+  @Override
+  protected Class<? extends SubView> content() {
+    return AppAttemptBlock.class;
+  }
+
+  private String containersTableInit() {
+    return tableInit().append(", 'aaData': containersTableData")
+      .append(", bDeferRender: true").append(", bProcessing: true")
+
+      .append("\n, aoColumnDefs: ").append(getContainersTableColumnDefs())
+
+      // Sort by id upon page load
+      .append(", aaSorting: [[0, 'desc']]}").toString();
+  }
+
+  protected String getContainersTableColumnDefs() {
+    StringBuilder sb = new StringBuilder();
+    return sb.append("[\n").append("{'sType':'numeric', 'aTargets': [0]")
+      .append(", 'mRender': parseHadoopID }]").toString();
+  }
+
+}
\ No newline at end of file
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppPage.java
new file mode 100644
index 00000000000..96ca65918b0
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppPage.java
@@ -0,0 +1,71 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp;
+
+import static org.apache.hadoop.yarn.util.StringHelper.join;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES_ID;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initID;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.tableInit;
+
+import org.apache.hadoop.yarn.server.webapp.AppBlock;
+import org.apache.hadoop.yarn.webapp.SubView;
+import org.apache.hadoop.yarn.webapp.YarnWebParams;
+
+public class AppPage extends AHSView {
+
+  @Override
+  protected void preHead(Page.HTML<_> html) {
+    commonPreHead(html);
+
+    String appId = $(YarnWebParams.APPLICATION_ID);
+    set(
+      TITLE,
+      appId.isEmpty() ? "Bad request: missing application ID" : join(
+        "Application ", $(YarnWebParams.APPLICATION_ID)));
+
+    set(DATATABLES_ID, "attempts");
+    set(initID(DATATABLES, "attempts"), attemptsTableInit());
+    setTableStyles(html, "attempts", ".queue {width:6em}", ".ui {width:8em}");
+  }
+
+  @Override
+  protected Class<? extends SubView> content() {
+    return AppBlock.class;
+  }
+
+  private String attemptsTableInit() {
+    return tableInit().append(", 'aaData': attemptsTableData")
+      .append(", bDeferRender: true").append(", bProcessing: true")
+
+      .append("\n, aoColumnDefs: ").append(getAttemptsTableColumnDefs())
+
+      // Sort by id upon page load
+      .append(", aaSorting: [[0, 'desc']]}").toString();
+  }
+
+  protected String getAttemptsTableColumnDefs() {
+    StringBuilder sb = new StringBuilder();
+    return sb.append("[\n").append("{'sType':'numeric', 'aTargets': [0]")
+      .append(", 'mRender': parseHadoopID }")
+
+      .append("\n, {'sType':'numeric', 'aTargets': [1]")
+      .append(", 'mRender': renderHadoopDate }]").toString();
+  }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/ContainerPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/ContainerPage.java
new file mode 100644
index 00000000000..1be8a26136d
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/ContainerPage.java
@@ -0,0 +1,41 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp;
+
+import static org.apache.hadoop.yarn.util.StringHelper.join;
+
+import org.apache.hadoop.yarn.server.webapp.ContainerBlock;
+import org.apache.hadoop.yarn.webapp.SubView;
+import org.apache.hadoop.yarn.webapp.YarnWebParams;
+
+public class ContainerPage extends AHSView {
+
+  @Override
+  protected void preHead(Page.HTML<_> html) {
+    commonPreHead(html);
+
+    String containerId = $(YarnWebParams.CONTAINER_ID);
+    set(TITLE, containerId.isEmpty() ? "Bad request: missing container ID"
+        : join("Container ", $(YarnWebParams.CONTAINER_ID)));
+  }
+
+  @Override
+  protected Class<? extends SubView> content() {
+    return ContainerBlock.class;
+  }
+}
\ No newline at end of file
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/JAXBContextResolver.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/JAXBContextResolver.java
new file mode 100644
index 00000000000..5fd01244f73
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/JAXBContextResolver.java
@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp;
+
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Set;
+
+import javax.ws.rs.ext.ContextResolver;
+import javax.ws.rs.ext.Provider;
+import javax.xml.bind.JAXBContext;
+
+import org.apache.hadoop.yarn.server.webapp.dao.AppAttemptInfo;
+import org.apache.hadoop.yarn.server.webapp.dao.AppAttemptsInfo;
+import org.apache.hadoop.yarn.server.webapp.dao.AppInfo;
+import org.apache.hadoop.yarn.server.webapp.dao.AppsInfo;
+import org.apache.hadoop.yarn.server.webapp.dao.ContainerInfo;
+import org.apache.hadoop.yarn.server.webapp.dao.ContainersInfo;
+
+import com.google.inject.Singleton;
+import com.sun.jersey.api.json.JSONConfiguration;
+import com.sun.jersey.api.json.JSONJAXBContext;
+
+@Singleton
+@Provider
+@SuppressWarnings("rawtypes")
+public class JAXBContextResolver implements ContextResolver<JAXBContext> {
+
+  private JAXBContext context;
+  private final Set<Class> types;
+
+  // you have to specify all the dao classes here
+  private final Class[] cTypes = { AppInfo.class, AppsInfo.class,
+      AppAttemptInfo.class, AppAttemptsInfo.class, ContainerInfo.class,
+      ContainersInfo.class };
+
+  public JAXBContextResolver() throws Exception {
+    this.types = new HashSet<Class>(Arrays.asList(cTypes));
+    this.context =
+        new JSONJAXBContext(JSONConfiguration.natural().rootUnwrapping(false)
+          .build(), cTypes);
+  }
+
+  @Override
+  public JAXBContext getContext(Class<?> objectType) {
+    return (types.contains(objectType)) ? context : null;
+  }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/NavBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/NavBlock.java
new file mode 100644
index 00000000000..e84ddec2207
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/NavBlock.java
@@ -0,0 +1,51 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp;
+
+import org.apache.hadoop.yarn.api.records.YarnApplicationState;
+import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
+
+public class NavBlock extends HtmlBlock {
+
+  @Override
+  public void render(Block html) {
+    html.
+      div("#nav").
+        h3("Application History").
+        ul().
+          li().a(url("apps"), "Applications").
+            ul().
+              li().a(url("apps",
+                  YarnApplicationState.FINISHED.toString()),
+                  YarnApplicationState.FINISHED.toString()).
+              _().
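+              // only the terminal states FINISHED, FAILED and KILLED are
+              // linked here: the history server serves applications that
+              // have already completed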
+              li().a(url("apps",
+                  YarnApplicationState.FAILED.toString()),
+                  YarnApplicationState.FAILED.toString()).
+              _().
+              li().a(url("apps",
+                  YarnApplicationState.KILLED.toString()),
+                  YarnApplicationState.KILLED.toString()).
+              _().
+            _().
+          _().
+        _().
+      _();
+  }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryStoreTestUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryStoreTestUtils.java
new file mode 100644
index 00000000000..81e52d9e680
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryStoreTestUtils.java
@@ -0,0 +1,85 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice;
+
+import java.io.IOException;
+
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerState;
+import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState;
+import org.apache.hadoop.yarn.api.records.YarnApplicationState;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptFinishData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptStartData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationFinishData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationStartData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerFinishData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerStartData;
+
+public class ApplicationHistoryStoreTestUtils {
+
+  protected ApplicationHistoryStore store;
+
+  protected void writeApplicationStartData(ApplicationId appId)
+      throws IOException {
+    store.applicationStarted(ApplicationStartData.newInstance(appId,
+      appId.toString(), "test type", "test queue", "test user", 0, 0));
+  }
+
+  protected void writeApplicationFinishData(ApplicationId appId)
+      throws IOException {
+    store.applicationFinished(ApplicationFinishData.newInstance(appId, 0,
+      appId.toString(), FinalApplicationStatus.UNDEFINED,
+      YarnApplicationState.FINISHED));
+  }
+
+  protected void writeApplicationAttemptStartData(
+      ApplicationAttemptId appAttemptId) throws IOException {
+    store.applicationAttemptStarted(ApplicationAttemptStartData.newInstance(
+      appAttemptId, appAttemptId.toString(), 0,
+      ContainerId.newInstance(appAttemptId, 1)));
+  }
+
+  protected void writeApplicationAttemptFinishData(
+      ApplicationAttemptId appAttemptId) throws IOException {
+    store.applicationAttemptFinished(ApplicationAttemptFinishData.newInstance(
+      appAttemptId, appAttemptId.toString(), "test tracking url",
+      FinalApplicationStatus.UNDEFINED, YarnApplicationAttemptState.FINISHED));
+  }
+
+  protected void writeContainerStartData(ContainerId containerId)
+      throws IOException {
+    store.containerStarted(ContainerStartData.newInstance(containerId,
+      Resource.newInstance(0, 0), NodeId.newInstance("localhost", 0),
+      Priority.newInstance(containerId.getId()), 0));
+  }
+
+  protected void writeContainerFinishData(ContainerId containerId)
+      throws IOException {
+    store.containerFinished(ContainerFinishData.newInstance(containerId, 0,
+      containerId.toString(), "http://localhost:0/log", 0,
+      ContainerState.COMPLETE));
+  }
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryClientService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryClientService.java
new file mode 100644
index 00000000000..ceeb909c9aa
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryClientService.java
@@ -0,0 +1,195 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice;
+
+import java.io.IOException;
+import java.util.List;
+
+import junit.framework.Assert;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainersRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainersResponse;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationReport;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerReport;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestApplicationHistoryClientService extends
+    ApplicationHistoryStoreTestUtils {
+
+  ApplicationHistoryServer historyServer = null;
+
+  @Before
+  public void setup() {
+    historyServer = new ApplicationHistoryServer();
+    Configuration config = new YarnConfiguration();
+    config.setClass(YarnConfiguration.AHS_STORE,
+      MemoryApplicationHistoryStore.class, ApplicationHistoryStore.class);
+    historyServer.init(config);
+    historyServer.start();
+    store =
+        ((ApplicationHistoryManagerImpl) historyServer.getApplicationHistory())
+          .getHistoryStore();
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    historyServer.stop();
+  }
+
+  @Test
+  public void testApplicationReport() throws IOException, YarnException {
+    ApplicationId appId = null;
+    appId =
+        ApplicationId.newInstance(0, 1);
+    writeApplicationStartData(appId);
+    writeApplicationFinishData(appId);
+    GetApplicationReportRequest request =
+        GetApplicationReportRequest.newInstance(appId);
+    GetApplicationReportResponse response =
+        historyServer.getClientService().getClientHandler()
+          .getApplicationReport(request);
+    ApplicationReport appReport = response.getApplicationReport();
+    Assert.assertNotNull(appReport);
+    Assert.assertEquals("application_0_0001", appReport.getApplicationId()
+      .toString());
+    Assert.assertEquals("test type", appReport.getApplicationType().toString());
+    Assert.assertEquals("test queue", appReport.getQueue().toString());
+  }
+
+  @Test
+  public void testApplications() throws IOException, YarnException {
+    ApplicationId appId = null;
+    appId = ApplicationId.newInstance(0, 1);
+    writeApplicationStartData(appId);
+    writeApplicationFinishData(appId);
+    ApplicationId appId1 = ApplicationId.newInstance(0, 2);
+    writeApplicationStartData(appId1);
+    writeApplicationFinishData(appId1);
+    GetApplicationsRequest request = GetApplicationsRequest.newInstance();
+    GetApplicationsResponse response =
+        historyServer.getClientService().getClientHandler()
+          .getApplications(request);
+    List<ApplicationReport> appReport = response.getApplicationList();
+    Assert.assertNotNull(appReport);
+    Assert.assertEquals(appId, appReport.get(0).getApplicationId());
+    Assert.assertEquals(appId1, appReport.get(1).getApplicationId());
+  }
+
+  @Test
+  public void testApplicationAttemptReport() throws IOException, YarnException {
+    ApplicationId appId = ApplicationId.newInstance(0, 1);
+    ApplicationAttemptId appAttemptId =
+        ApplicationAttemptId.newInstance(appId, 1);
+    writeApplicationAttemptStartData(appAttemptId);
+    writeApplicationAttemptFinishData(appAttemptId);
+    GetApplicationAttemptReportRequest request =
+        GetApplicationAttemptReportRequest.newInstance(appAttemptId);
+    GetApplicationAttemptReportResponse response =
+        historyServer.getClientService().getClientHandler()
+          .getApplicationAttemptReport(request);
+    ApplicationAttemptReport attemptReport =
+        response.getApplicationAttemptReport();
+    Assert.assertNotNull(attemptReport);
+    Assert.assertEquals("appattempt_0_0001_000001", attemptReport
+      .getApplicationAttemptId().toString());
+  }
+
+  @Test
+  public void testApplicationAttempts() throws IOException, YarnException {
+    ApplicationId appId = ApplicationId.newInstance(0, 1);
+    ApplicationAttemptId appAttemptId =
+        ApplicationAttemptId.newInstance(appId, 1);
+    ApplicationAttemptId appAttemptId1 =
+        ApplicationAttemptId.newInstance(appId, 2);
+    writeApplicationAttemptStartData(appAttemptId);
+    writeApplicationAttemptFinishData(appAttemptId);
+    writeApplicationAttemptStartData(appAttemptId1);
+    writeApplicationAttemptFinishData(appAttemptId1);
+    GetApplicationAttemptsRequest request =
+        GetApplicationAttemptsRequest.newInstance(appId);
+    GetApplicationAttemptsResponse response =
+        historyServer.getClientService().getClientHandler()
+          .getApplicationAttempts(request);
+    List<ApplicationAttemptReport> attemptReports =
+        response.getApplicationAttemptList();
+    Assert.assertNotNull(attemptReports);
+    Assert.assertEquals(appAttemptId, attemptReports.get(0)
+      .getApplicationAttemptId());
+    Assert.assertEquals(appAttemptId1, attemptReports.get(1)
+      .getApplicationAttemptId());
+  }
+
+  @Test
+  public void testContainerReport() throws IOException, YarnException {
+    ApplicationId appId = ApplicationId.newInstance(0, 1);
+    ApplicationAttemptId appAttemptId =
+        ApplicationAttemptId.newInstance(appId, 1);
+    ContainerId containerId = ContainerId.newInstance(appAttemptId, 1);
+    writeContainerStartData(containerId);
+    writeContainerFinishData(containerId);
+    GetContainerReportRequest request =
+        GetContainerReportRequest.newInstance(containerId);
+    GetContainerReportResponse response =
+        historyServer.getClientService().getClientHandler()
+          .getContainerReport(request);
+    ContainerReport container = response.getContainerReport();
+    Assert.assertNotNull(container);
+    Assert.assertEquals(containerId, container.getContainerId());
+  }
+
+  @Test
+  public void testContainers() throws IOException, YarnException {
+    ApplicationId appId = ApplicationId.newInstance(0, 1);
+    ApplicationAttemptId appAttemptId =
+        ApplicationAttemptId.newInstance(appId, 1);
+    ContainerId containerId = ContainerId.newInstance(appAttemptId, 1);
+    ContainerId containerId1 = ContainerId.newInstance(appAttemptId, 2);
+    writeContainerStartData(containerId);
+    writeContainerFinishData(containerId);
+    writeContainerStartData(containerId1);
+    writeContainerFinishData(containerId1);
+    GetContainersRequest request =
+        GetContainersRequest.newInstance(appAttemptId);
+    GetContainersResponse response =
+        historyServer.getClientService().getClientHandler()
+          .getContainers(request);
+    List<ContainerReport> containers = response.getContainerList();
+    Assert.assertNotNull(containers);
+    Assert.assertEquals(containerId, containers.get(1).getContainerId());
+    Assert.assertEquals(containerId1, containers.get(0).getContainerId());
+  }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryManagerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryManagerImpl.java
new file mode 100644
index 00000000000..bab85c62d92
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryManagerImpl.java
@@ -0,0 +1,74 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package org.apache.hadoop.yarn.server.applicationhistoryservice; + +import java.io.IOException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ApplicationReport; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +public class TestApplicationHistoryManagerImpl extends + ApplicationHistoryStoreTestUtils { + ApplicationHistoryManagerImpl applicationHistoryManagerImpl = null; + + @Before + public void setup() throws Exception { + Configuration config = new Configuration(); + config.setClass(YarnConfiguration.AHS_STORE, + MemoryApplicationHistoryStore.class, ApplicationHistoryStore.class); + applicationHistoryManagerImpl = new ApplicationHistoryManagerImpl(); + applicationHistoryManagerImpl.init(config); + applicationHistoryManagerImpl.start(); + store = applicationHistoryManagerImpl.getHistoryStore(); + } + + @After + public void tearDown() throws Exception { + applicationHistoryManagerImpl.stop(); + } + + @Test + public void testApplicationReport() throws IOException, YarnException { + ApplicationId appId = null; + appId = ApplicationId.newInstance(0, 1); + writeApplicationStartData(appId); + writeApplicationFinishData(appId); + ApplicationAttemptId appAttemptId = + ApplicationAttemptId.newInstance(appId, 1); + writeApplicationAttemptStartData(appAttemptId); + writeApplicationAttemptFinishData(appAttemptId); + ApplicationReport appReport = + applicationHistoryManagerImpl.getApplication(appId); + Assert.assertNotNull(appReport); + Assert.assertEquals(appId, appReport.getApplicationId()); + Assert.assertEquals(appAttemptId, + appReport.getCurrentApplicationAttemptId()); + Assert.assertEquals(appAttemptId.toString(), appReport.getHost()); + Assert.assertEquals("test type", appReport.getApplicationType().toString()); + Assert.assertEquals("test queue", appReport.getQueue().toString()); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryServer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryServer.java new file mode 100644 index 00000000000..8bd515b6abe --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryServer.java @@ -0,0 +1,77 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.applicationhistoryservice; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.fail; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.service.Service.STATE; +import org.apache.hadoop.util.ExitUtil; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.junit.After; +import org.junit.Test; + +public class TestApplicationHistoryServer { + + ApplicationHistoryServer historyServer = null; + + // simple test init/start/stop ApplicationHistoryServer. Status should change. + @Test(timeout = 50000) + public void testStartStopServer() throws Exception { + historyServer = new ApplicationHistoryServer(); + Configuration config = new YarnConfiguration(); + historyServer.init(config); + assertEquals(STATE.INITED, historyServer.getServiceState()); + assertEquals(2, historyServer.getServices().size()); + ApplicationHistoryClientService historyService = + historyServer.getClientService(); + assertNotNull(historyServer.getClientService()); + assertEquals(STATE.INITED, historyService.getServiceState()); + + historyServer.start(); + assertEquals(STATE.STARTED, historyServer.getServiceState()); + assertEquals(STATE.STARTED, historyService.getServiceState()); + historyServer.stop(); + assertEquals(STATE.STOPPED, historyServer.getServiceState()); + } + + // test launch method + @Test(timeout = 60000) + public void testLaunch() throws Exception { + + ExitUtil.disableSystemExit(); + try { + historyServer = + ApplicationHistoryServer.launchAppHistoryServer(new String[0]); + } catch (ExitUtil.ExitException e) { + assertEquals(0, e.status); + ExitUtil.resetFirstExitException(); + fail(); + } + } + + @After + public void stop() { + if (historyServer != null) { + historyServer.stop(); + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java new file mode 100644 index 00000000000..c31efab1bb6 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java @@ -0,0 +1,196 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.applicationhistoryservice; + +import java.io.IOException; +import java.net.URI; + +import junit.framework.Assert; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.RawLocalFileSystem; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.Priority; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptHistoryData; +import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationHistoryData; +import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerHistoryData; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +public class TestFileSystemApplicationHistoryStore extends + ApplicationHistoryStoreTestUtils { + + private FileSystem fs; + private Path fsWorkingPath; + + @Before + public void setup() throws Exception { + fs = new RawLocalFileSystem(); + Configuration conf = new Configuration(); + fs.initialize(new URI("/"), conf); + fsWorkingPath = new Path("Test"); + fs.delete(fsWorkingPath, true); + conf.set(YarnConfiguration.FS_HISTORY_STORE_URI, fsWorkingPath.toString()); + store = new FileSystemApplicationHistoryStore(); + store.init(conf); + store.start(); + } + + @After + public void tearDown() throws Exception { + store.stop(); + fs.delete(fsWorkingPath, true); + fs.close(); + } + + @Test + public void testReadWriteHistoryData() throws IOException { + testWriteHistoryData(5); + testReadHistoryData(5); + } + + private void testWriteHistoryData(int num) throws IOException { + // write application history data + for (int i = 1; i <= num; ++i) { + ApplicationId appId = ApplicationId.newInstance(0, i); + writeApplicationStartData(appId); + + // write application attempt history data + for (int j = 1; j <= num; ++j) { + ApplicationAttemptId appAttemptId = + ApplicationAttemptId.newInstance(appId, j); + writeApplicationAttemptStartData(appAttemptId); + + // write container history data + for (int k = 1; k <= num; ++k) { + ContainerId containerId = ContainerId.newInstance(appAttemptId, k); + writeContainerStartData(containerId); + writeContainerFinishData(containerId); + + writeApplicationAttemptFinishData(appAttemptId); + } + } + + writeApplicationFinishData(appId); + } + } + + private void testReadHistoryData(int num) throws IOException { + // read application history data + Assert.assertEquals(num, store.getAllApplications().size()); + for (int i = 1; i <= num; ++i) { + ApplicationId appId = ApplicationId.newInstance(0, i); + ApplicationHistoryData appData = store.getApplication(appId); + Assert.assertNotNull(appData); + Assert.assertEquals(appId.toString(), appData.getApplicationName()); + Assert.assertEquals(appId.toString(), appData.getDiagnosticsInfo()); + + // read application attempt history data + 
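+      // (mirrors testWriteHistoryData: num attempts were written for each
+      // application and num containers for each attempt)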
Assert.assertEquals(num, store.getApplicationAttempts(appId).size()); + for (int j = 1; j <= num; ++j) { + ApplicationAttemptId appAttemptId = + ApplicationAttemptId.newInstance(appId, j); + ApplicationAttemptHistoryData attemptData = + store.getApplicationAttempt(appAttemptId); + Assert.assertNotNull(attemptData); + Assert.assertEquals(appAttemptId.toString(), attemptData.getHost()); + Assert.assertEquals(appAttemptId.toString(), + attemptData.getDiagnosticsInfo()); + + // read container history data + Assert.assertEquals(num, store.getContainers(appAttemptId).size()); + for (int k = 1; k <= num; ++k) { + ContainerId containerId = ContainerId.newInstance(appAttemptId, k); + ContainerHistoryData containerData = store.getContainer(containerId); + Assert.assertNotNull(containerData); + Assert.assertEquals(Priority.newInstance(containerId.getId()), + containerData.getPriority()); + Assert.assertEquals(containerId.toString(), + containerData.getDiagnosticsInfo()); + } + ContainerHistoryData masterContainer = + store.getAMContainer(appAttemptId); + Assert.assertNotNull(masterContainer); + Assert.assertEquals(ContainerId.newInstance(appAttemptId, 1), + masterContainer.getContainerId()); + } + } + } + + @Test + public void testWriteAfterApplicationFinish() throws IOException { + ApplicationId appId = ApplicationId.newInstance(0, 1); + writeApplicationStartData(appId); + writeApplicationFinishData(appId); + // write application attempt history data + ApplicationAttemptId appAttemptId = + ApplicationAttemptId.newInstance(appId, 1); + try { + writeApplicationAttemptStartData(appAttemptId); + Assert.fail(); + } catch (IOException e) { + Assert.assertTrue(e.getMessage().contains("is not opened")); + } + try { + writeApplicationAttemptFinishData(appAttemptId); + Assert.fail(); + } catch (IOException e) { + Assert.assertTrue(e.getMessage().contains("is not opened")); + } + // write container history data + ContainerId containerId = ContainerId.newInstance(appAttemptId, 1); + try { + writeContainerStartData(containerId); + Assert.fail(); + } catch (IOException e) { + Assert.assertTrue(e.getMessage().contains("is not opened")); + } + try { + writeContainerFinishData(containerId); + Assert.fail(); + } catch (IOException e) { + Assert.assertTrue(e.getMessage().contains("is not opened")); + } + } + + @Test + public void testMassiveWriteContainerHistoryData() throws IOException { + long mb = 1024 * 1024; + long usedDiskBefore = fs.getContentSummary(fsWorkingPath).getLength() / mb; + ApplicationId appId = ApplicationId.newInstance(0, 1); + writeApplicationStartData(appId); + ApplicationAttemptId appAttemptId = + ApplicationAttemptId.newInstance(appId, 1); + for (int i = 1; i <= 100000; ++i) { + ContainerId containerId = ContainerId.newInstance(appAttemptId, i); + writeContainerStartData(containerId); + writeContainerFinishData(containerId); + } + writeApplicationFinishData(appId); + long usedDiskAfter = fs.getContentSummary(fsWorkingPath).getLength() / mb; + Assert.assertTrue((usedDiskAfter - usedDiskBefore) < 20); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestMemoryApplicationHistoryStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestMemoryApplicationHistoryStore.java new file mode 100644 index 00000000000..7a454053932 --- /dev/null +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestMemoryApplicationHistoryStore.java @@ -0,0 +1,204 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.applicationhistoryservice; + +import java.io.IOException; + +import junit.framework.Assert; + +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.Priority; +import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptHistoryData; +import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationHistoryData; +import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerHistoryData; +import org.junit.Before; +import org.junit.Test; + +public class TestMemoryApplicationHistoryStore extends + ApplicationHistoryStoreTestUtils { + + @Before + public void setup() { + store = new MemoryApplicationHistoryStore(); + } + + @Test + public void testReadWriteApplicationHistory() throws Exception { + // Out of order + ApplicationId appId = ApplicationId.newInstance(0, 1); + try { + writeApplicationFinishData(appId); + Assert.fail(); + } catch (IOException e) { + Assert.assertTrue(e.getMessage().contains( + "is stored before the start information")); + } + // Normal + int numApps = 5; + for (int i = 1; i <= numApps; ++i) { + appId = ApplicationId.newInstance(0, i); + writeApplicationStartData(appId); + writeApplicationFinishData(appId); + } + Assert.assertEquals(numApps, store.getAllApplications().size()); + for (int i = 1; i <= numApps; ++i) { + appId = ApplicationId.newInstance(0, i); + ApplicationHistoryData data = store.getApplication(appId); + Assert.assertNotNull(data); + Assert.assertEquals(appId.toString(), data.getApplicationName()); + Assert.assertEquals(appId.toString(), data.getDiagnosticsInfo()); + } + // Write again + appId = ApplicationId.newInstance(0, 1); + try { + writeApplicationStartData(appId); + Assert.fail(); + } catch (IOException e) { + Assert.assertTrue(e.getMessage().contains("is already stored")); + } + try { + writeApplicationFinishData(appId); + Assert.fail(); + } catch (IOException e) { + Assert.assertTrue(e.getMessage().contains("is already stored")); + } + } + + @Test + public void testReadWriteApplicationAttemptHistory() throws Exception { + // Out of order + ApplicationId appId = ApplicationId.newInstance(0, 1); + ApplicationAttemptId appAttemptId = + ApplicationAttemptId.newInstance(appId, 1); + try { + writeApplicationAttemptFinishData(appAttemptId); + Assert.fail(); + } catch 
(IOException e) { + Assert.assertTrue(e.getMessage().contains( + "is stored before the start information")); + } + // Normal + int numAppAttempts = 5; + writeApplicationStartData(appId); + for (int i = 1; i <= numAppAttempts; ++i) { + appAttemptId = ApplicationAttemptId.newInstance(appId, i); + writeApplicationAttemptStartData(appAttemptId); + writeApplicationAttemptFinishData(appAttemptId); + } + Assert.assertEquals(numAppAttempts, store.getApplicationAttempts(appId) + .size()); + for (int i = 1; i <= numAppAttempts; ++i) { + appAttemptId = ApplicationAttemptId.newInstance(appId, i); + ApplicationAttemptHistoryData data = + store.getApplicationAttempt(appAttemptId); + Assert.assertNotNull(data); + Assert.assertEquals(appAttemptId.toString(), data.getHost()); + Assert.assertEquals(appAttemptId.toString(), data.getDiagnosticsInfo()); + } + writeApplicationFinishData(appId); + // Write again + appAttemptId = ApplicationAttemptId.newInstance(appId, 1); + try { + writeApplicationAttemptStartData(appAttemptId); + Assert.fail(); + } catch (IOException e) { + Assert.assertTrue(e.getMessage().contains("is already stored")); + } + try { + writeApplicationAttemptFinishData(appAttemptId); + Assert.fail(); + } catch (IOException e) { + Assert.assertTrue(e.getMessage().contains("is already stored")); + } + } + + @Test + public void testReadWriteContainerHistory() throws Exception { + // Out of order + ApplicationId appId = ApplicationId.newInstance(0, 1); + ApplicationAttemptId appAttemptId = + ApplicationAttemptId.newInstance(appId, 1); + ContainerId containerId = ContainerId.newInstance(appAttemptId, 1); + try { + writeContainerFinishData(containerId); + Assert.fail(); + } catch (IOException e) { + Assert.assertTrue(e.getMessage().contains( + "is stored before the start information")); + } + // Normal + writeApplicationAttemptStartData(appAttemptId); + int numContainers = 5; + for (int i = 1; i <= numContainers; ++i) { + containerId = ContainerId.newInstance(appAttemptId, i); + writeContainerStartData(containerId); + writeContainerFinishData(containerId); + } + Assert + .assertEquals(numContainers, store.getContainers(appAttemptId).size()); + for (int i = 1; i <= numContainers; ++i) { + containerId = ContainerId.newInstance(appAttemptId, i); + ContainerHistoryData data = store.getContainer(containerId); + Assert.assertNotNull(data); + Assert.assertEquals(Priority.newInstance(containerId.getId()), + data.getPriority()); + Assert.assertEquals(containerId.toString(), data.getDiagnosticsInfo()); + } + ContainerHistoryData masterContainer = store.getAMContainer(appAttemptId); + Assert.assertNotNull(masterContainer); + Assert.assertEquals(ContainerId.newInstance(appAttemptId, 1), + masterContainer.getContainerId()); + writeApplicationAttemptFinishData(appAttemptId); + // Write again + containerId = ContainerId.newInstance(appAttemptId, 1); + try { + writeContainerStartData(containerId); + Assert.fail(); + } catch (IOException e) { + Assert.assertTrue(e.getMessage().contains("is already stored")); + } + try { + writeContainerFinishData(containerId); + Assert.fail(); + } catch (IOException e) { + Assert.assertTrue(e.getMessage().contains("is already stored")); + } + } + + @Test + public void testMassiveWriteContainerHistory() throws IOException { + long mb = 1024 * 1024; + Runtime runtime = Runtime.getRuntime(); + long usedMemoryBefore = (runtime.totalMemory() - runtime.freeMemory()) / mb; + int numContainers = 100000; + ApplicationId appId = ApplicationId.newInstance(0, 1); + ApplicationAttemptId 
appAttemptId = + ApplicationAttemptId.newInstance(appId, 1); + for (int i = 1; i <= numContainers; ++i) { + ContainerId containerId = ContainerId.newInstance(appAttemptId, i); + writeContainerStartData(containerId); + writeContainerFinishData(containerId); + } + long usedMemoryAfter = (runtime.totalMemory() - runtime.freeMemory()) / mb; + Assert.assertTrue((usedMemoryAfter - usedMemoryBefore) < 200); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebApp.java new file mode 100644 index 00000000000..0e65a509b3e --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebApp.java @@ -0,0 +1,182 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp; + +import static org.apache.hadoop.yarn.webapp.Params.TITLE; +import static org.mockito.Mockito.mock; +import junit.framework.Assert; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.YarnApplicationState; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.server.api.ApplicationContext; +import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryManager; +import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryManagerImpl; +import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryStore; +import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryStoreTestUtils; +import org.apache.hadoop.yarn.server.applicationhistoryservice.MemoryApplicationHistoryStore; +import org.apache.hadoop.yarn.util.StringHelper; +import org.apache.hadoop.yarn.webapp.YarnWebParams; +import org.apache.hadoop.yarn.webapp.test.WebAppTests; +import org.junit.Before; +import org.junit.Test; + +import com.google.inject.Injector; + +public class TestAHSWebApp extends ApplicationHistoryStoreTestUtils { + + public void setApplicationHistoryStore(ApplicationHistoryStore store) { + this.store = store; + } + + @Before + public void setup() { + store = new MemoryApplicationHistoryStore(); + } + + @Test + public void testAppControllerIndex() throws Exception { + ApplicationHistoryManager ahManager = mock(ApplicationHistoryManager.class); + Injector injector = + WebAppTests.createMockInjector(ApplicationHistoryManager.class, + ahManager); + AHSController controller = injector.getInstance(AHSController.class); + controller.index(); + Assert + .assertEquals("Application History", controller.get(TITLE, "unknown")); + } + + @Test + public void testView() throws Exception { + Injector injector = + WebAppTests.createMockInjector(ApplicationContext.class, + mockApplicationHistoryManager(5, 1, 1)); + AHSView ahsViewInstance = injector.getInstance(AHSView.class); + + ahsViewInstance.render(); + WebAppTests.flushOutput(injector); + + ahsViewInstance.set(YarnWebParams.APP_STATE, + YarnApplicationState.FAILED.toString()); + ahsViewInstance.render(); + WebAppTests.flushOutput(injector); + + ahsViewInstance.set(YarnWebParams.APP_STATE, StringHelper.cjoin( + YarnApplicationState.FAILED.toString(), YarnApplicationState.KILLED)); + ahsViewInstance.render(); + WebAppTests.flushOutput(injector); + } + + @Test + public void testAppPage() throws Exception { + Injector injector = + WebAppTests.createMockInjector(ApplicationContext.class, + mockApplicationHistoryManager(1, 5, 1)); + AppPage appPageInstance = injector.getInstance(AppPage.class); + + appPageInstance.render(); + WebAppTests.flushOutput(injector); + + appPageInstance.set(YarnWebParams.APPLICATION_ID, ApplicationId + .newInstance(0, 1).toString()); + appPageInstance.render(); + WebAppTests.flushOutput(injector); + } + + @Test + public void testAppAttemptPage() throws Exception { + Injector injector = + WebAppTests.createMockInjector(ApplicationContext.class, + mockApplicationHistoryManager(1, 1, 5)); + AppAttemptPage appAttemptPageInstance = + injector.getInstance(AppAttemptPage.class); + + appAttemptPageInstance.render(); + WebAppTests.flushOutput(injector); + + 
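+      // The render()/flushOutput() pass above exercises the attempt page with no
+      // attempt selected; the set() call below injects a concrete
+      // ApplicationAttemptId so the detail view is rendered and flushed as well.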
appAttemptPageInstance.set(YarnWebParams.APPLICATION_ATTEMPT_ID, + ApplicationAttemptId.newInstance(ApplicationId.newInstance(0, 1), 1) + .toString()); + appAttemptPageInstance.render(); + WebAppTests.flushOutput(injector); + } + + @Test + public void testContainerPage() throws Exception { + Injector injector = + WebAppTests.createMockInjector(ApplicationContext.class, + mockApplicationHistoryManager(1, 1, 1)); + ContainerPage containerPageInstance = + injector.getInstance(ContainerPage.class); + + containerPageInstance.render(); + WebAppTests.flushOutput(injector); + + containerPageInstance.set( + YarnWebParams.CONTAINER_ID, + ContainerId + .newInstance( + ApplicationAttemptId.newInstance(ApplicationId.newInstance(0, 1), 1), + 1).toString()); + containerPageInstance.render(); + WebAppTests.flushOutput(injector); + } + + ApplicationHistoryManager mockApplicationHistoryManager(int numApps, + int numAppAttempts, int numContainers) throws Exception { + ApplicationHistoryManager ahManager = + new MockApplicationHistoryManagerImpl(store); + for (int i = 1; i <= numApps; ++i) { + ApplicationId appId = ApplicationId.newInstance(0, i); + writeApplicationStartData(appId); + for (int j = 1; j <= numAppAttempts; ++j) { + ApplicationAttemptId appAttemptId = + ApplicationAttemptId.newInstance(appId, j); + writeApplicationAttemptStartData(appAttemptId); + for (int k = 1; k <= numContainers; ++k) { + ContainerId containerId = ContainerId.newInstance(appAttemptId, k); + writeContainerStartData(containerId); + writeContainerFinishData(containerId); + } + writeApplicationAttemptFinishData(appAttemptId); + } + writeApplicationFinishData(appId); + } + return ahManager; + } + + class MockApplicationHistoryManagerImpl extends ApplicationHistoryManagerImpl { + + public MockApplicationHistoryManagerImpl(ApplicationHistoryStore store) { + super(); + init(new YarnConfiguration()); + start(); + } + + @Override + protected ApplicationHistoryStore createApplicationHistoryStore( + Configuration conf) { + return store; + } + }; + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebServices.java new file mode 100644 index 00000000000..fa759bf78f8 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebServices.java @@ -0,0 +1,295 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; + +import javax.ws.rs.core.MediaType; + +import junit.framework.Assert; + +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.ContainerState; +import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; +import org.apache.hadoop.yarn.api.records.NodeId; +import org.apache.hadoop.yarn.api.records.Priority; +import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState; +import org.apache.hadoop.yarn.api.records.YarnApplicationState; +import org.apache.hadoop.yarn.server.api.ApplicationContext; +import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryManager; +import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryStore; +import org.apache.hadoop.yarn.server.applicationhistoryservice.MemoryApplicationHistoryStore; +import org.apache.hadoop.yarn.webapp.GenericExceptionHandler; +import org.apache.hadoop.yarn.webapp.WebServicesTestUtils; +import org.codehaus.jettison.json.JSONArray; +import org.codehaus.jettison.json.JSONException; +import org.codehaus.jettison.json.JSONObject; +import org.junit.Before; +import org.junit.Test; + +import com.google.inject.Guice; +import com.google.inject.Injector; +import com.google.inject.servlet.GuiceServletContextListener; +import com.google.inject.servlet.ServletModule; +import com.sun.jersey.api.client.ClientResponse; +import com.sun.jersey.api.client.ClientResponse.Status; +import com.sun.jersey.api.client.UniformInterfaceException; +import com.sun.jersey.api.client.WebResource; +import com.sun.jersey.guice.spi.container.servlet.GuiceContainer; +import com.sun.jersey.test.framework.JerseyTest; +import com.sun.jersey.test.framework.WebAppDescriptor; + +public class TestAHSWebServices extends JerseyTest { + + private static ApplicationHistoryManager ahManager; + + private Injector injector = Guice.createInjector(new ServletModule() { + + @Override + protected void configureServlets() { + bind(JAXBContextResolver.class); + bind(AHSWebServices.class); + bind(GenericExceptionHandler.class); + try { + ahManager = mockApplicationHistoryManager(); + } catch (Exception e) { + Assert.fail(); + } + bind(ApplicationContext.class).toInstance(ahManager); + serve("/*").with(GuiceContainer.class); + } + }); + + public class GuiceServletConfig extends GuiceServletContextListener { + + @Override + protected Injector getInjector() { + return injector; + } + } + + private ApplicationHistoryManager mockApplicationHistoryManager() + throws Exception { + ApplicationHistoryStore store = new MemoryApplicationHistoryStore(); + TestAHSWebApp testAHSWebApp = new TestAHSWebApp(); + testAHSWebApp.setApplicationHistoryStore(store); + ApplicationHistoryManager ahManager = + testAHSWebApp.mockApplicationHistoryManager(5, 5, 5); + return ahManager; + } + + public TestAHSWebServices() { + super(new WebAppDescriptor.Builder( + "org.apache.hadoop.yarn.server.applicationhistoryservice.webapp") + .contextListenerClass(GuiceServletConfig.class) + .filterClass(com.google.inject.servlet.GuiceFilter.class) + .contextPath("jersey-guice-filter").servletPath("/").build()); + } + + @Before + @Override + public void setUp() throws Exception { + super.setUp(); + } + + @Test + public void testInvalidUri() throws 
JSONException, Exception { + WebResource r = resource(); + String responseStr = ""; + try { + responseStr = + r.path("ws").path("v1").path("applicationhistory").path("bogus") + .accept(MediaType.APPLICATION_JSON).get(String.class); + fail("should have thrown exception on invalid uri"); + } catch (UniformInterfaceException ue) { + ClientResponse response = ue.getResponse(); + assertEquals(Status.NOT_FOUND, response.getClientResponseStatus()); + + WebServicesTestUtils.checkStringMatch( + "error string exists and shouldn't", "", responseStr); + } + } + + @Test + public void testInvalidUri2() throws JSONException, Exception { + WebResource r = resource(); + String responseStr = ""; + try { + responseStr = r.accept(MediaType.APPLICATION_JSON).get(String.class); + fail("should have thrown exception on invalid uri"); + } catch (UniformInterfaceException ue) { + ClientResponse response = ue.getResponse(); + assertEquals(Status.NOT_FOUND, response.getClientResponseStatus()); + WebServicesTestUtils.checkStringMatch( + "error string exists and shouldn't", "", responseStr); + } + } + + @Test + public void testInvalidAccept() throws JSONException, Exception { + WebResource r = resource(); + String responseStr = ""; + try { + responseStr = + r.path("ws").path("v1").path("applicationhistory") + .accept(MediaType.TEXT_PLAIN).get(String.class); + fail("should have thrown exception on invalid uri"); + } catch (UniformInterfaceException ue) { + ClientResponse response = ue.getResponse(); + assertEquals(Status.INTERNAL_SERVER_ERROR, + response.getClientResponseStatus()); + WebServicesTestUtils.checkStringMatch( + "error string exists and shouldn't", "", responseStr); + } + } + + @Test + public void testAppsQuery() throws Exception { + WebResource r = resource(); + ClientResponse response = + r.path("ws").path("v1").path("applicationhistory").path("apps") + .queryParam("state", YarnApplicationState.FINISHED.toString()) + .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); + assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType()); + JSONObject json = response.getEntity(JSONObject.class); + assertEquals("incorrect number of elements", 1, json.length()); + JSONObject apps = json.getJSONObject("apps"); + assertEquals("incorrect number of elements", 1, apps.length()); + JSONArray array = apps.getJSONArray("app"); + assertEquals("incorrect number of elements", 5, array.length()); + } + + @Test + public void testSingleApp() throws Exception { + ApplicationId appId = ApplicationId.newInstance(0, 1); + WebResource r = resource(); + ClientResponse response = + r.path("ws").path("v1").path("applicationhistory").path("apps") + .path(appId.toString()).accept(MediaType.APPLICATION_JSON) + .get(ClientResponse.class); + assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType()); + JSONObject json = response.getEntity(JSONObject.class); + assertEquals("incorrect number of elements", 1, json.length()); + JSONObject app = json.getJSONObject("app"); + assertEquals(appId.toString(), app.getString("appId")); + assertEquals(appId.toString(), app.get("name")); + assertEquals(appId.toString(), app.get("diagnosticsInfo")); + assertEquals("test queue", app.get("queue")); + assertEquals("test user", app.get("user")); + assertEquals("test type", app.get("type")); + assertEquals(FinalApplicationStatus.UNDEFINED.toString(), + app.get("finalAppStatus")); + assertEquals(YarnApplicationState.FINISHED.toString(), app.get("appState")); + } + + @Test + public void testMultipleAttempts() throws Exception { + 
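+    // The store behind this test was seeded via mockApplicationHistoryManager(5, 5, 5),
+    // i.e. five attempts per application, so the appattempts listing below is
+    // expected to return a JSON array of five entries.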
ApplicationId appId = ApplicationId.newInstance(0, 1); + WebResource r = resource(); + ClientResponse response = + r.path("ws").path("v1").path("applicationhistory").path("apps") + .path(appId.toString()).path("appattempts") + .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); + assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType()); + JSONObject json = response.getEntity(JSONObject.class); + assertEquals("incorrect number of elements", 1, json.length()); + JSONObject appAttempts = json.getJSONObject("appAttempts"); + assertEquals("incorrect number of elements", 1, appAttempts.length()); + JSONArray array = appAttempts.getJSONArray("appAttempt"); + assertEquals("incorrect number of elements", 5, array.length()); + } + + @Test + public void testSingleAttempt() throws Exception { + ApplicationId appId = ApplicationId.newInstance(0, 1); + ApplicationAttemptId appAttemptId = + ApplicationAttemptId.newInstance(appId, 1); + WebResource r = resource(); + ClientResponse response = + r.path("ws").path("v1").path("applicationhistory").path("apps") + .path(appId.toString()).path("appattempts") + .path(appAttemptId.toString()).accept(MediaType.APPLICATION_JSON) + .get(ClientResponse.class); + assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType()); + JSONObject json = response.getEntity(JSONObject.class); + assertEquals("incorrect number of elements", 1, json.length()); + JSONObject appAttempt = json.getJSONObject("appAttempt"); + assertEquals(appAttemptId.toString(), appAttempt.getString("appAttemptId")); + assertEquals(appAttemptId.toString(), appAttempt.getString("host")); + assertEquals(appAttemptId.toString(), + appAttempt.getString("diagnosticsInfo")); + assertEquals("test tracking url", appAttempt.getString("trackingUrl")); + assertEquals(YarnApplicationAttemptState.FINISHED.toString(), + appAttempt.get("appAttemptState")); + } + + @Test + public void testMultipleContainers() throws Exception { + ApplicationId appId = ApplicationId.newInstance(0, 1); + ApplicationAttemptId appAttemptId = + ApplicationAttemptId.newInstance(appId, 1); + WebResource r = resource(); + ClientResponse response = + r.path("ws").path("v1").path("applicationhistory").path("apps") + .path(appId.toString()).path("appattempts") + .path(appAttemptId.toString()).path("containers") + .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); + assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType()); + JSONObject json = response.getEntity(JSONObject.class); + assertEquals("incorrect number of elements", 1, json.length()); + JSONObject containers = json.getJSONObject("containers"); + assertEquals("incorrect number of elements", 1, containers.length()); + JSONArray array = containers.getJSONArray("container"); + assertEquals("incorrect number of elements", 5, array.length()); + } + + @Test + public void testSingleContainer() throws Exception { + ApplicationId appId = ApplicationId.newInstance(0, 1); + ApplicationAttemptId appAttemptId = + ApplicationAttemptId.newInstance(appId, 1); + ContainerId containerId = ContainerId.newInstance(appAttemptId, 1); + WebResource r = resource(); + ClientResponse response = + r.path("ws").path("v1").path("applicationhistory").path("apps") + .path(appId.toString()).path("appattempts") + .path(appAttemptId.toString()).path("containers") + .path(containerId.toString()).accept(MediaType.APPLICATION_JSON) + .get(ClientResponse.class); + assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType()); + JSONObject json = 
response.getEntity(JSONObject.class); + assertEquals("incorrect number of elements", 1, json.length()); + JSONObject container = json.getJSONObject("container"); + assertEquals(containerId.toString(), container.getString("containerId")); + assertEquals(containerId.toString(), container.getString("diagnosticsInfo")); + assertEquals("0", container.getString("allocatedMB")); + assertEquals("0", container.getString("allocatedVCores")); + assertEquals(NodeId.newInstance("localhost", 0).toString(), + container.getString("assignedNodeId")); + assertEquals(Priority.newInstance(containerId.getId()).toString(), + container.getString("priority")); + assertEquals("http://localhost:0/log", container.getString("logUrl")); + assertEquals(ContainerState.COMPLETE.toString(), + container.getString("containerState")); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/ApplicationContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/ApplicationContext.java new file mode 100644 index 00000000000..78ae0dd935a --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/ApplicationContext.java @@ -0,0 +1,129 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.api; + +import java.io.IOException; +import java.util.Map; + +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ApplicationReport; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.ContainerReport; + +@Public +@Unstable +public interface ApplicationContext { + /** + * This method returns the {@link ApplicationReport} for the specified + * {@link ApplicationId}. + * + * @param appId + * + * @return {@link ApplicationReport} for the ApplicationId. + * @throws IOException + */ + @Public + @Unstable + ApplicationReport getApplication(ApplicationId appId) throws IOException; + + /** + * This method returns all Application {@link ApplicationReport}s. + * + * @return map of {@link ApplicationId} to {@link ApplicationReport}s. + * @throws IOException + */ + @Public + @Unstable + Map<ApplicationId, ApplicationReport> getAllApplications() throws IOException; + + /** + * An Application can have multiple application attempts + * {@link ApplicationAttemptReport}. This method returns all + * {@link ApplicationAttemptReport}s for the Application. + * + * @param appId + * + * @return all {@link ApplicationAttemptReport}s for the Application. + * @throws IOException + */ + @Public + @Unstable + Map<ApplicationAttemptId, ApplicationAttemptReport> getApplicationAttempts( + ApplicationId appId) throws IOException; + + /** + * This method returns the {@link ApplicationAttemptReport} for the specified + * {@link ApplicationAttemptId}. + * + * @param appAttemptId + * {@link ApplicationAttemptId} + * @return {@link ApplicationAttemptReport} for ApplicationAttemptId + * @throws IOException + */ + @Public + @Unstable + ApplicationAttemptReport getApplicationAttempt( + ApplicationAttemptId appAttemptId) throws IOException; + + /** + * This method returns the {@link ContainerReport} for the specified + * {@link ContainerId}. + * + * @param containerId + * {@link ContainerId} + * @return {@link ContainerReport} for ContainerId + * @throws IOException + */ + @Public + @Unstable + ContainerReport getContainer(ContainerId containerId) throws IOException; + + /** + * This method returns the {@link ContainerReport} for the AM container of the + * specified {@link ApplicationAttemptId}. + * + * @param appAttemptId + * {@link ApplicationAttemptId} + * @return {@link ContainerReport} for ApplicationAttemptId + * @throws IOException + */ + @Public + @Unstable + ContainerReport getAMContainer(ApplicationAttemptId appAttemptId) + throws IOException; + + /** + * This method returns a Map of {@link ContainerId} to {@link ContainerReport} + * for the specified {@link ApplicationAttemptId}. + * + * @param appAttemptId + * {@link ApplicationAttemptId} + * @return Map of {@link ContainerId} to {@link ContainerReport} for + * ApplicationAttemptId + * @throws IOException + */ + @Public + @Unstable + Map<ContainerId, ContainerReport> getContainers( + ApplicationAttemptId appAttemptId) throws IOException; +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java new file mode 100644 index 00000000000..4bde1a37749 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java @@ -0,0 +1,160 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.yarn.server.webapp; + +import static org.apache.hadoop.yarn.util.StringHelper.join; +import static org.apache.hadoop.yarn.util.StringHelper.getPartUrl; +import static org.apache.hadoop.yarn.webapp.YarnWebParams.APPLICATION_ATTEMPT_ID; + +import java.io.IOException; +import java.util.Collection; + +import org.apache.commons.lang.StringEscapeUtils; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport; +import org.apache.hadoop.yarn.api.records.ContainerReport; +import org.apache.hadoop.yarn.server.api.ApplicationContext; +import org.apache.hadoop.yarn.server.webapp.dao.AppAttemptInfo; +import org.apache.hadoop.yarn.server.webapp.dao.ContainerInfo; +import org.apache.hadoop.yarn.util.ConverterUtils; +import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; +import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE; +import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY; +import org.apache.hadoop.yarn.webapp.view.HtmlBlock; +import org.apache.hadoop.yarn.webapp.view.InfoBlock; + +import com.google.inject.Inject; + +public class AppAttemptBlock extends HtmlBlock { + + private static final Log LOG = LogFactory.getLog(AppAttemptBlock.class); + private final ApplicationContext appContext; + + @Inject + public AppAttemptBlock(ApplicationContext appContext) { + this.appContext = appContext; + } + + @Override + protected void render(Block html) { + String attemptid = $(APPLICATION_ATTEMPT_ID); + if (attemptid.isEmpty()) { + puts("Bad request: requires application attempt ID"); + return; + } + + ApplicationAttemptId appAttemptId = null; + try { + appAttemptId = ConverterUtils.toApplicationAttemptId(attemptid); + } catch (IllegalArgumentException e) { + puts("Invalid application attempt ID: " + attemptid); + return; + } + + ApplicationAttemptReport appAttemptReport; + try { + appAttemptReport = appContext.getApplicationAttempt(appAttemptId); + } catch (IOException e) { + String message = + "Failed to read the application attempt " + appAttemptId + "."; + LOG.error(message, e); + html.p()._(message)._(); + return; + } + if (appAttemptReport == null) { + puts("Application Attempt not found: " + attemptid); + return; + } + AppAttemptInfo appAttempt = new AppAttemptInfo(appAttemptReport); + + setTitle(join("Application Attempt ", attemptid)); + + String node = "N/A"; + if (appAttempt.getHost() != null && appAttempt.getRpcPort() >= 0 + && appAttempt.getRpcPort() < 65536) { + node = appAttempt.getHost() + ":" + appAttempt.getRpcPort(); + } + info("Application Attempt Overview") + ._("State", appAttempt.getAppAttemptState()) + ._( + "Master Container", + appAttempt.getAmContainerId() == null ? "#" : root_url("container", + appAttempt.getAmContainerId()), + String.valueOf(appAttempt.getAmContainerId())) + ._("Node:", node) + ._( + "Tracking URL:", + appAttempt.getTrackingUrl() == null ? 
"#" : root_url(appAttempt + .getTrackingUrl()), "History") + ._("Diagnostics Info:", appAttempt.getDiagnosticsInfo()); + + html._(InfoBlock.class); + + Collection containers; + try { + containers = appContext.getContainers(appAttemptId).values(); + } catch (IOException e) { + html + .p() + ._( + "Sorry, Failed to get containers for application attempt" + attemptid + + ".")._(); + return; + } + + // Container Table + TBODY> tbody = + html.table("#containers").thead().tr().th(".id", "Container ID") + .th(".node", "Node").th(".exitstatus", "Container Exit Status") + .th(".logs", "Logs")._()._().tbody(); + + StringBuilder containersTableData = new StringBuilder("[\n"); + for (ContainerReport containerReport : containers) { + String logURL = containerReport.getLogUrl(); + logURL = getPartUrl(logURL, "log"); + ContainerInfo container = new ContainerInfo(containerReport); + // ConatinerID numerical value parsed by parseHadoopID in + // yarn.dt.plugins.js + containersTableData + .append("[\"") + .append(container.getContainerId()) + .append("\",\"") + .append( + StringEscapeUtils.escapeJavaScript(StringEscapeUtils + .escapeHtml(container.getAssignedNodeId()))).append("\",\"") + .append(container.getContainerExitStatus()).append("\",\"") + .append(logURL == null ? "N/A" : "Logs").append("\"],\n"); + } + if (containersTableData.charAt(containersTableData.length() - 2) == ',') { + containersTableData.delete(containersTableData.length() - 2, + containersTableData.length() - 1); + } + containersTableData.append("]"); + html.script().$type("text/javascript") + ._("var containersTableData=" + containersTableData)._(); + + tbody._()._(); + } +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java new file mode 100644 index 00000000000..9a15fe78369 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java @@ -0,0 +1,185 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.webapp; + +import static org.apache.hadoop.yarn.util.StringHelper.join; +import static org.apache.hadoop.yarn.util.StringHelper.getPartUrl; +import static org.apache.hadoop.yarn.webapp.YarnWebParams.APPLICATION_ID; + +import java.io.IOException; +import java.util.Collection; + +import org.apache.commons.lang.StringEscapeUtils; +import org.apache.hadoop.http.HttpConfig; +import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ApplicationReport; +import org.apache.hadoop.yarn.api.records.ContainerReport; +import org.apache.hadoop.yarn.server.api.ApplicationContext; +import org.apache.hadoop.yarn.server.webapp.dao.AppAttemptInfo; +import org.apache.hadoop.yarn.server.webapp.dao.AppInfo; +import org.apache.hadoop.yarn.server.webapp.dao.ContainerInfo; +import org.apache.hadoop.yarn.util.Apps; +import org.apache.hadoop.yarn.util.Times; +import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; +import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE; +import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY; +import org.apache.hadoop.yarn.webapp.view.HtmlBlock; +import org.apache.hadoop.yarn.webapp.view.InfoBlock; + +import com.google.inject.Inject; + +public class AppBlock extends HtmlBlock { + + protected ApplicationContext appContext; + + @Inject + AppBlock(ApplicationContext appContext, ViewContext ctx) { + super(ctx); + this.appContext = appContext; + } + + @Override + protected void render(Block html) { + String aid = $(APPLICATION_ID); + if (aid.isEmpty()) { + puts("Bad request: requires Application ID"); + return; + } + + ApplicationId appID = null; + try { + appID = Apps.toAppID(aid); + } catch (Exception e) { + puts("Invalid Application ID: " + aid); + return; + } + + ApplicationReport appReport; + try { + appReport = appContext.getApplication(appID); + } catch (IOException e) { + String message = "Failed to read the application " + appID + "."; + LOG.error(message, e); + html.p()._(message)._(); + return; + } + if (appReport == null) { + puts("Application not found: " + aid); + return; + } + AppInfo app = new AppInfo(appReport); + + setTitle(join("Application ", aid)); + + info("Application Overview") + ._("User:", app.getUser()) + ._("Name:", app.getName()) + ._("Application Type:", app.getType()) + ._("State:", app.getAppState()) + ._("FinalStatus:", app.getFinalAppStatus()) + ._("Started:", Times.format(app.getStartedTime())) + ._( + "Elapsed:", + StringUtils.formatTime(Times.elapsed(app.getStartedTime(), + app.getFinishedTime()))) + ._("Tracking URL:", + app.getTrackingUrl() == null ? 
"#" : root_url(app.getTrackingUrl()), + "History")._("Diagnostics:", app.getDiagnosticsInfo()); + + html._(InfoBlock.class); + + Collection attempts; + try { + attempts = appContext.getApplicationAttempts(appID).values(); + } catch (IOException e) { + String message = + "Failed to read the attempts of the application " + appID + "."; + LOG.error(message, e); + html.p()._(message)._(); + return; + } + + // Application Attempt Table + TBODY> tbody = + html.table("#attempts").thead().tr().th(".id", "Attempt ID") + .th(".started", "Started").th(".node", "Node").th(".logs", "Logs") + ._()._().tbody(); + + StringBuilder attemptsTableData = new StringBuilder("[\n"); + for (ApplicationAttemptReport appAttemptReport : attempts) { + AppAttemptInfo appAttempt = new AppAttemptInfo(appAttemptReport); + ContainerReport containerReport; + try { + containerReport = + appContext.getAMContainer(appAttemptReport + .getApplicationAttemptId()); + } catch (IOException e) { + String message = + "Failed to read the AM container of the application attempt " + + appAttemptReport.getApplicationAttemptId() + "."; + LOG.error(message, e); + html.p()._(message)._(); + return; + } + long startTime = Long.MAX_VALUE; + String logsLink = null; + if (containerReport != null) { + ContainerInfo container = new ContainerInfo(containerReport); + startTime = container.getStartedTime(); + logsLink = containerReport.getLogUrl(); + logsLink = getPartUrl(logsLink, "log"); + } + String nodeLink = null; + if (appAttempt.getHost() != null && appAttempt.getRpcPort() >= 0 + && appAttempt.getRpcPort() < 65536) { + nodeLink = appAttempt.getHost() + ":" + appAttempt.getRpcPort(); + } + // AppAttemptID numerical value parsed by parseHadoopID in + // yarn.dt.plugins.js + attemptsTableData + .append("[\"") + .append(appAttempt.getAppAttemptId()) + .append("\",\"") + .append(startTime) + .append("\",\"") + .append( + nodeLink == null ? "N/A" : StringEscapeUtils + .escapeJavaScript(StringEscapeUtils.escapeHtml(nodeLink))) + .append("\",\"") + .append(nodeLink == null ? "N/A" : "Logs").append("\"],\n"); + } + if (attemptsTableData.charAt(attemptsTableData.length() - 2) == ',') { + attemptsTableData.delete(attemptsTableData.length() - 2, + attemptsTableData.length() - 1); + } + attemptsTableData.append("]"); + html.script().$type("text/javascript") + ._("var attemptsTableData=" + attemptsTableData)._(); + + tbody._()._(); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppsBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppsBlock.java new file mode 100644 index 00000000000..d4a77a8af6c --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppsBlock.java @@ -0,0 +1,142 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.webapp; + +import static org.apache.hadoop.yarn.util.StringHelper.join; +import static org.apache.hadoop.yarn.webapp.YarnWebParams.APP_STATE; +import static org.apache.hadoop.yarn.webapp.view.JQueryUI.C_PROGRESSBAR; +import static org.apache.hadoop.yarn.webapp.view.JQueryUI.C_PROGRESSBAR_VALUE; + +import java.io.IOException; +import java.util.Collection; +import java.util.HashSet; + +import org.apache.commons.lang.StringEscapeUtils; +import org.apache.hadoop.yarn.api.records.ApplicationReport; +import org.apache.hadoop.yarn.api.records.YarnApplicationState; +import org.apache.hadoop.yarn.server.api.ApplicationContext; +import org.apache.hadoop.yarn.server.webapp.dao.AppInfo; +import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; +import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE; +import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY; +import org.apache.hadoop.yarn.webapp.view.HtmlBlock; + +import com.google.inject.Inject; + +public class AppsBlock extends HtmlBlock { + + protected ApplicationContext appContext; + + @Inject + AppsBlock(ApplicationContext appContext, ViewContext ctx) { + super(ctx); + this.appContext = appContext; + } + + @Override + public void render(Block html) { + setTitle("Applications"); + + TBODY> tbody = + html.table("#apps").thead().tr().th(".id", "ID").th(".user", "User") + .th(".name", "Name").th(".type", "Application Type") + .th(".queue", "Queue").th(".starttime", "StartTime") + .th(".finishtime", "FinishTime").th(".state", "State") + .th(".finalstatus", "FinalStatus").th(".progress", "Progress") + .th(".ui", "Tracking UI")._()._().tbody(); + Collection reqAppStates = null; + String reqStateString = $(APP_STATE); + if (reqStateString != null && !reqStateString.isEmpty()) { + String[] appStateStrings = reqStateString.split(","); + reqAppStates = new HashSet(appStateStrings.length); + for (String stateString : appStateStrings) { + reqAppStates.add(YarnApplicationState.valueOf(stateString)); + } + } + + Collection appReports; + try { + appReports = appContext.getAllApplications().values(); + } catch (IOException e) { + String message = "Failed to read the applications."; + LOG.error(message, e); + html.p()._(message)._(); + return; + } + StringBuilder appsTableData = new StringBuilder("[\n"); + for (ApplicationReport appReport : appReports) { + if (reqAppStates != null + && !reqAppStates.contains(appReport.getYarnApplicationState())) { + continue; + } + AppInfo app = new AppInfo(appReport); + String percent = String.format("%.1f", app.getProgress()); + // AppID numerical value parsed by parseHadoopID in yarn.dt.plugins.js + appsTableData + .append("[\"") + .append(app.getAppId()) + .append("\",\"") + .append( + StringEscapeUtils.escapeJavaScript(StringEscapeUtils.escapeHtml(app + .getUser()))) + .append("\",\"") + .append( + StringEscapeUtils.escapeJavaScript(StringEscapeUtils.escapeHtml(app + .getName()))) + .append("\",\"") + .append( + StringEscapeUtils.escapeJavaScript(StringEscapeUtils.escapeHtml(app + .getType()))) + .append("\",\"") + .append( + 
StringEscapeUtils.escapeJavaScript(StringEscapeUtils.escapeHtml(app + .getQueue()))).append("\",\"").append(app.getStartedTime()) + .append("\",\"").append(app.getFinishedTime()) + .append("\",\"") + .append(app.getAppState()) + .append("\",\"") + .append(app.getFinalAppStatus()) + .append("\",\"") + // Progress bar + .append("
").append("
").append("\",\"").append("History") + .append("\"],\n"); + + } + if (appsTableData.charAt(appsTableData.length() - 2) == ',') { + appsTableData.delete(appsTableData.length() - 2, + appsTableData.length() - 1); + } + appsTableData.append("]"); + html.script().$type("text/javascript") + ._("var appsTableData=" + appsTableData)._(); + + tbody._()._(); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/ContainerBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/ContainerBlock.java new file mode 100644 index 00000000000..0b1d27defcf --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/ContainerBlock.java @@ -0,0 +1,105 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.server.webapp; + +import static org.apache.hadoop.yarn.util.StringHelper.join; +import static org.apache.hadoop.yarn.util.StringHelper.getPartUrl; +import static org.apache.hadoop.yarn.webapp.YarnWebParams.CONTAINER_ID; + +import java.io.IOException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.ContainerReport; +import org.apache.hadoop.yarn.server.api.ApplicationContext; +import org.apache.hadoop.yarn.server.webapp.dao.ContainerInfo; +import org.apache.hadoop.yarn.util.ConverterUtils; +import org.apache.hadoop.yarn.util.Times; +import org.apache.hadoop.yarn.webapp.view.HtmlBlock; +import org.apache.hadoop.yarn.webapp.view.InfoBlock; + +import com.google.inject.Inject; + +public class ContainerBlock extends HtmlBlock { + + private static final Log LOG = LogFactory.getLog(ContainerBlock.class); + private final ApplicationContext appContext; + + @Inject + public ContainerBlock(ApplicationContext appContext, ViewContext ctx) { + super(ctx); + this.appContext = appContext; + } + + @Override + protected void render(Block html) { + String containerid = $(CONTAINER_ID); + if (containerid.isEmpty()) { + puts("Bad request: requires container ID"); + return; + } + + ContainerId containerId = null; + try { + containerId = ConverterUtils.toContainerId(containerid); + } catch (IllegalArgumentException e) { + puts("Invalid container ID: " + containerid); + return; + } + + ContainerReport containerReport; + try { + containerReport = appContext.getContainer(containerId); + } catch (IOException e) { + String message = "Failed to read the container " + containerid + "."; + LOG.error(message, e); + 
html.p()._(message)._(); + return; + } + if (containerReport == null) { + puts("Container not found: " + containerid); + return; + } + + ContainerInfo container = new ContainerInfo(containerReport); + String logURL = containerReport.getLogUrl(); + logURL = getPartUrl(logURL, "log"); + setTitle(join("Container ", containerid)); + + info("Container Overview") + ._("State:", container.getContainerState()) + ._("Exit Status:", container.getContainerExitStatus()) + ._("Node:", container.getAssignedNodeId()) + ._("Priority:", container.getPriority()) + ._("Started:", Times.format(container.getStartedTime())) + ._( + "Elapsed:", + StringUtils.formatTime(Times.elapsed(container.getStartedTime(), + container.getFinishedTime()))) + ._( + "Resource:", + container.getAllocatedMB() + " Memory, " + + container.getAllocatedVCores() + " VCores") + ._("Logs:", logURL == null ? "#" : url(logURL), "Logs") + ._("Diagnostics:", container.getDiagnosticsInfo()); + + html._(InfoBlock.class); + } +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebServices.java new file mode 100644 index 00000000000..5a825514704 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebServices.java @@ -0,0 +1,367 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.webapp; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; +import java.util.HashSet; +import java.util.Set; + +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; +import javax.ws.rs.WebApplicationException; + +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ApplicationReport; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.ContainerReport; +import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; +import org.apache.hadoop.yarn.api.records.YarnApplicationState; +import org.apache.hadoop.yarn.server.api.ApplicationContext; +import org.apache.hadoop.yarn.server.webapp.dao.AppAttemptInfo; +import org.apache.hadoop.yarn.server.webapp.dao.AppAttemptsInfo; +import org.apache.hadoop.yarn.server.webapp.dao.AppInfo; +import org.apache.hadoop.yarn.server.webapp.dao.AppsInfo; +import org.apache.hadoop.yarn.server.webapp.dao.ContainerInfo; +import org.apache.hadoop.yarn.server.webapp.dao.ContainersInfo; +import org.apache.hadoop.yarn.util.ConverterUtils; +import org.apache.hadoop.yarn.webapp.BadRequestException; +import org.apache.hadoop.yarn.webapp.NotFoundException; + +public class WebServices { + + protected ApplicationContext appContext; + + public WebServices(ApplicationContext appContext) { + this.appContext = appContext; + } + + public AppsInfo getApps(HttpServletRequest req, HttpServletResponse res, + String stateQuery, Set<String> statesQuery, String finalStatusQuery, + String userQuery, String queueQuery, String count, String startedBegin, + String startedEnd, String finishBegin, String finishEnd, + Set<String> applicationTypes) { + long num = 0; + boolean checkCount = false; + boolean checkStart = false; + boolean checkEnd = false; + boolean checkAppTypes = false; + boolean checkAppStates = false; + long countNum = 0; + + // set values suitable in case both of begin/end not specified + long sBegin = 0; + long sEnd = Long.MAX_VALUE; + long fBegin = 0; + long fEnd = Long.MAX_VALUE; + + if (count != null && !count.isEmpty()) { + checkCount = true; + countNum = Long.parseLong(count); + if (countNum <= 0) { + throw new BadRequestException("limit value must be greater than 0"); + } + } + + if (startedBegin != null && !startedBegin.isEmpty()) { + checkStart = true; + sBegin = Long.parseLong(startedBegin); + if (sBegin < 0) { + throw new BadRequestException("startedTimeBegin must be greater than 0"); + } + } + if (startedEnd != null && !startedEnd.isEmpty()) { + checkStart = true; + sEnd = Long.parseLong(startedEnd); + if (sEnd < 0) { + throw new BadRequestException("startedTimeEnd must be greater than 0"); + } + } + if (sBegin > sEnd) { + throw new BadRequestException( + "startedTimeEnd must be greater than startedTimeBegin"); + } + + if (finishBegin != null && !finishBegin.isEmpty()) { + checkEnd = true; + fBegin = Long.parseLong(finishBegin); + if (fBegin < 0) { + throw new BadRequestException("finishTimeBegin must be greater than 0"); + } + } + if (finishEnd != null && !finishEnd.isEmpty()) { + checkEnd = true; + fEnd = Long.parseLong(finishEnd); + if (fEnd < 0) { + throw new BadRequestException("finishTimeEnd must be greater than 0"); + } + } + if (fBegin > fEnd) { + throw new BadRequestException( + "finishTimeEnd must be greater than finishTimeBegin"); + } + + Set<String> appTypes = parseQueries(applicationTypes, false); + if (!appTypes.isEmpty()) { + checkAppTypes = true; + } + + // stateQuery is deprecated. + if (stateQuery != null && !stateQuery.isEmpty()) { + statesQuery.add(stateQuery); + } + Set<String> appStates = parseQueries(statesQuery, true); + if (!appStates.isEmpty()) { + checkAppStates = true; + } + + AppsInfo allApps = new AppsInfo(); + Collection<ApplicationReport> appReports = null; + try { + appReports = appContext.getAllApplications().values(); + } catch (IOException e) { + throw new WebApplicationException(e); + } + for (ApplicationReport appReport : appReports) { + + if (checkCount && num == countNum) { + break; + } + + if (checkAppStates + && !appStates.contains(appReport.getYarnApplicationState().toString() + .toLowerCase())) { + continue; + } + if (finalStatusQuery != null && !finalStatusQuery.isEmpty()) { + FinalApplicationStatus.valueOf(finalStatusQuery); + if (!appReport.getFinalApplicationStatus().toString() + .equalsIgnoreCase(finalStatusQuery)) { + continue; + } + } + if (userQuery != null && !userQuery.isEmpty()) { + if (!appReport.getUser().equals(userQuery)) { + continue; + } + } + if (queueQuery != null && !queueQuery.isEmpty()) { + if (!appReport.getQueue().equals(queueQuery)) { + continue; + } + } + if (checkAppTypes + && !appTypes.contains(appReport.getApplicationType().trim() + .toLowerCase())) { + continue; + } + + if (checkStart + && (appReport.getStartTime() < sBegin || appReport.getStartTime() > sEnd)) { + continue; + } + if (checkEnd + && (appReport.getFinishTime() < fBegin || appReport.getFinishTime() > fEnd)) { + continue; + } + AppInfo app = new AppInfo(appReport); + + allApps.add(app); + num++; + } + return allApps; + } + + public AppInfo getApp(HttpServletRequest req, HttpServletResponse res, + String appId) { + ApplicationId id = parseApplicationId(appId); + ApplicationReport app = null; + try { + app = appContext.getApplication(id); + } catch (IOException e) { + throw new WebApplicationException(e); + } + if (app == null) { + throw new NotFoundException("app with id: " + appId + " not found"); + } + return new AppInfo(app); + } + + public AppAttemptsInfo getAppAttempts(HttpServletRequest req, + HttpServletResponse res, String appId) { + ApplicationId id = parseApplicationId(appId); + Collection<ApplicationAttemptReport> appAttemptReports = null; + try { + appAttemptReports = appContext.getApplicationAttempts(id).values(); + } catch (IOException e) { + throw new WebApplicationException(e); + } + AppAttemptsInfo appAttemptsInfo = new AppAttemptsInfo(); + for (ApplicationAttemptReport appAttemptReport : appAttemptReports) { + AppAttemptInfo appAttemptInfo = new AppAttemptInfo(appAttemptReport); + appAttemptsInfo.add(appAttemptInfo); + } + + return appAttemptsInfo; + } + + public AppAttemptInfo getAppAttempt(HttpServletRequest req, + HttpServletResponse res, String appId, String appAttemptId) { + ApplicationId aid = parseApplicationId(appId); + ApplicationAttemptId aaid = parseApplicationAttemptId(appAttemptId); + validateIds(aid, aaid, null); + ApplicationAttemptReport appAttempt = null; + try { + appAttempt = appContext.getApplicationAttempt(aaid); + } catch (IOException e) { + throw new WebApplicationException(e); + } + if (appAttempt == null) { + throw new NotFoundException("app attempt with id: " + appAttemptId + + " not found"); + } + return new AppAttemptInfo(appAttempt); + } + + public ContainersInfo getContainers(HttpServletRequest req, + HttpServletResponse res, String appId, String appAttemptId) { + ApplicationId aid = parseApplicationId(appId); + ApplicationAttemptId aaid = parseApplicationAttemptId(appAttemptId); + validateIds(aid, aaid, null); + Collection<ContainerReport> containerReports = null; + try { + containerReports = appContext.getContainers(aaid).values(); + } catch (IOException e) { + throw new WebApplicationException(e); + } + ContainersInfo containersInfo = new ContainersInfo(); + for (ContainerReport containerReport : containerReports) { + ContainerInfo containerInfo = new ContainerInfo(containerReport); + containersInfo.add(containerInfo); + } + return containersInfo; + } + + public ContainerInfo getContainer(HttpServletRequest req, + HttpServletResponse res, String appId, String appAttemptId, + String containerId) { + ApplicationId aid = parseApplicationId(appId); + ApplicationAttemptId aaid = parseApplicationAttemptId(appAttemptId); + ContainerId cid = parseContainerId(containerId); + validateIds(aid, aaid, cid); + ContainerReport container = null; + try { + container = appContext.getContainer(cid); + } catch (IOException e) { + throw new WebApplicationException(e); + } + if (container == null) { + throw new NotFoundException("container with id: " + containerId + + " not found"); + } + return new ContainerInfo(container); + } + + protected void init(HttpServletResponse response) { + // clear content type + response.setContentType(null); + } + + protected static Set<String> + parseQueries(Set<String> queries, boolean isState) { + Set<String> params = new HashSet<String>(); + if (!queries.isEmpty()) { + for (String query : queries) { + if (query != null && !query.trim().isEmpty()) { + String[] paramStrs = query.split(","); + for (String paramStr : paramStrs) { + if (paramStr != null && !paramStr.trim().isEmpty()) { + if (isState) { + try { + // enum string is in the uppercase + YarnApplicationState.valueOf(paramStr.trim().toUpperCase()); + } catch (RuntimeException e) { + YarnApplicationState[] stateArray = + YarnApplicationState.values(); + String allAppStates = Arrays.toString(stateArray); + throw new BadRequestException("Invalid application-state " + + paramStr.trim() + " specified. 
It should be one of "
+                      + allAppStates);
+                }
+              }
+              params.add(paramStr.trim().toLowerCase());
+            }
+          }
+        }
+      }
+    }
+    return params;
+  }
+
+  protected static ApplicationId parseApplicationId(String appId) {
+    if (appId == null || appId.isEmpty()) {
+      throw new NotFoundException("appId, " + appId + ", is empty or null");
+    }
+    ApplicationId aid = ConverterUtils.toApplicationId(appId);
+    if (aid == null) {
+      throw new NotFoundException("appId is null");
+    }
+    return aid;
+  }
+
+  protected static ApplicationAttemptId parseApplicationAttemptId(
+      String appAttemptId) {
+    if (appAttemptId == null || appAttemptId.isEmpty()) {
+      throw new NotFoundException("appAttemptId, " + appAttemptId
+          + ", is empty or null");
+    }
+    ApplicationAttemptId aaid =
+        ConverterUtils.toApplicationAttemptId(appAttemptId);
+    if (aaid == null) {
+      throw new NotFoundException("appAttemptId is null");
+    }
+    return aaid;
+  }
+
+  protected static ContainerId parseContainerId(String containerId) {
+    if (containerId == null || containerId.isEmpty()) {
+      throw new NotFoundException("containerId, " + containerId
+          + ", is empty or null");
+    }
+    ContainerId cid = ConverterUtils.toContainerId(containerId);
+    if (cid == null) {
+      throw new NotFoundException("containerId is null");
+    }
+    return cid;
+  }
+
+  protected void validateIds(ApplicationId appId,
+      ApplicationAttemptId appAttemptId, ContainerId containerId) {
+    if (!appAttemptId.getApplicationId().equals(appId)) {
+      throw new NotFoundException("appId and appAttemptId don't match");
+    }
+    if (containerId != null
+        && !containerId.getApplicationAttemptId().equals(appAttemptId)) {
+      throw new NotFoundException("appAttemptId and containerId don't match");
+    }
+  }
+}
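validateIds only checks containment: the attempt must embed the application id from the URL, and the container must embed the attempt id; anything else is answered with 404 Not Found. A standalone sketch of those equality checks using the yarn-api record factories (the cluster timestamp and ids are made up for illustration):

import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;

public class ValidateIdsSketch {
  public static void main(String[] args) {
    ApplicationId app = ApplicationId.newInstance(1390000000000L, 1);
    ApplicationAttemptId attempt = ApplicationAttemptId.newInstance(app, 1);
    ContainerId container = ContainerId.newInstance(attempt, 1);
    // These are the same comparisons validateIds performs; a mismatch on
    // either line would make the web services throw NotFoundException.
    System.out.println(attempt.getApplicationId().equals(app));              // true
    System.out.println(container.getApplicationAttemptId().equals(attempt)); // true
  }
}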
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppAttemptInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppAttemptInfo.java
new file mode 100644
index 00000000000..014ae8b70d1
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppAttemptInfo.java
@@ -0,0 +1,84 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.webapp.dao;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlRootElement;
+
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport;
+import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState;
+
+@XmlRootElement(name = "appAttempt")
+@XmlAccessorType(XmlAccessType.FIELD)
+public class AppAttemptInfo {
+
+  protected String appAttemptId;
+  protected String host;
+  protected int rpcPort;
+  protected String trackingUrl;
+  protected String diagnosticsInfo;
+  protected YarnApplicationAttemptState appAttemptState;
+  protected String amContainerId;
+
+  public AppAttemptInfo() {
+    // JAXB needs this
+  }
+
+  public AppAttemptInfo(ApplicationAttemptReport appAttempt) {
+    appAttemptId = appAttempt.getApplicationAttemptId().toString();
+    host = appAttempt.getHost();
+    rpcPort = appAttempt.getRpcPort();
+    trackingUrl = appAttempt.getTrackingUrl();
+    diagnosticsInfo = appAttempt.getDiagnostics();
+    appAttemptState = appAttempt.getYarnApplicationAttemptState();
+    if (appAttempt.getAMContainerId() != null) {
+      amContainerId = appAttempt.getAMContainerId().toString();
+    }
+  }
+
+  public String getAppAttemptId() {
+    return appAttemptId;
+  }
+
+  public String getHost() {
+    return host;
+  }
+
+  public int getRpcPort() {
+    return rpcPort;
+  }
+
+  public String getTrackingUrl() {
+    return trackingUrl;
+  }
+
+  public String getDiagnosticsInfo() {
+    return diagnosticsInfo;
+  }
+
+  public YarnApplicationAttemptState getAppAttemptState() {
+    return appAttemptState;
+  }
+
+  public String getAmContainerId() {
+    return amContainerId;
+  }
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppAttemptsInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppAttemptsInfo.java
new file mode 100644
index 00000000000..d62bbe7cf4e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppAttemptsInfo.java
@@ -0,0 +1,46 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +package org.apache.hadoop.yarn.server.webapp.dao; + +import java.util.ArrayList; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlElement; +import javax.xml.bind.annotation.XmlRootElement; + +@XmlRootElement(name = "appAttempts") +@XmlAccessorType(XmlAccessType.FIELD) +public class AppAttemptsInfo { + + @XmlElement(name = "appAttempt") + protected ArrayList attempt = new ArrayList(); + + public AppAttemptsInfo() { + // JAXB needs this + } + + public void add(AppAttemptInfo info) { + this.attempt.add(info); + } + + public ArrayList getAttempts() { + return this.attempt; + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppInfo.java new file mode 100644 index 00000000000..aedf0d31609 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppInfo.java @@ -0,0 +1,169 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.webapp.dao; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlRootElement; + +import org.apache.hadoop.yarn.api.records.ApplicationReport; +import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport; +import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; +import org.apache.hadoop.yarn.api.records.YarnApplicationState; +import org.apache.hadoop.yarn.util.Times; + +@XmlRootElement(name = "app") +@XmlAccessorType(XmlAccessType.FIELD) +public class AppInfo { + + protected String appId; + protected String currentAppAttemptId; + protected String user; + protected String name; + protected String queue; + protected String type; + protected String host; + protected int rpcPort; + protected YarnApplicationState appState; + protected float progress; + protected String diagnosticsInfo; + protected String originalTrackingUrl; + protected String trackingUrl; + protected FinalApplicationStatus finalAppStatus; + protected long submittedTime; + protected long startedTime; + protected long finishedTime; + protected long elapsedTime; + protected int allocatedMB; + protected int allocatedVCores; + + public AppInfo() { + // JAXB needs this + } + + public AppInfo(ApplicationReport app) { + appId = app.getApplicationId().toString(); + if (app.getCurrentApplicationAttemptId() != null) { + currentAppAttemptId = app.getCurrentApplicationAttemptId().toString(); + } + user = app.getUser(); + queue = app.getQueue(); + name = app.getName(); + type = app.getApplicationType(); + host = app.getHost(); + rpcPort = app.getRpcPort(); + appState = app.getYarnApplicationState(); + diagnosticsInfo = app.getDiagnostics(); + trackingUrl = app.getTrackingUrl(); + originalTrackingUrl = app.getOriginalTrackingUrl(); + submittedTime = app.getStartTime(); + startedTime = app.getStartTime(); + finishedTime = app.getFinishTime(); + elapsedTime = Times.elapsed(startedTime, finishedTime); + finalAppStatus = app.getFinalApplicationStatus(); + ApplicationResourceUsageReport usage = + app.getApplicationResourceUsageReport(); + if (usage != null) { + allocatedMB = usage.getUsedResources().getMemory(); + allocatedVCores = usage.getUsedResources().getVirtualCores(); + } + progress = app.getProgress(); + } + + public String getAppId() { + return appId; + } + + public String getCurrentAppAttemptId() { + return currentAppAttemptId; + } + + public String getUser() { + return user; + } + + public String getName() { + return name; + } + + public String getQueue() { + return queue; + } + + public String getType() { + return type; + } + + public String getHost() { + return host; + } + + public int getRpcPort() { + return rpcPort; + } + + public YarnApplicationState getAppState() { + return appState; + } + + public float getProgress() { + return progress; + } + + public String getDiagnosticsInfo() { + return diagnosticsInfo; + } + + public String getOriginalTrackingUrl() { + return originalTrackingUrl; + } + + public String getTrackingUrl() { + return trackingUrl; + } + + public FinalApplicationStatus getFinalAppStatus() { + return finalAppStatus; + } + + public long getSubmittedTime() { + return submittedTime; + } + + public long getStartedTime() { + return startedTime; + } + + public long getFinishedTime() { + return finishedTime; + } + + public long getElapsedTime() { + return elapsedTime; + } + + public int getAllocatedMB() { + return allocatedMB; + } + + public int getAllocatedVCores() { + 
return allocatedVCores; + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppsInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppsInfo.java new file mode 100644 index 00000000000..f98b2ea49e6 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppsInfo.java @@ -0,0 +1,44 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.server.webapp.dao; + +import java.util.ArrayList; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlRootElement; + +@XmlRootElement(name = "apps") +@XmlAccessorType(XmlAccessType.FIELD) +public class AppsInfo { + + protected ArrayList app = new ArrayList(); + + public AppsInfo() { + // JAXB needs this + } + + public void add(AppInfo appinfo) { + app.add(appinfo); + } + + public ArrayList getApps() { + return app; + } + +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/ContainerInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/ContainerInfo.java new file mode 100644 index 00000000000..2e4436e1466 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/ContainerInfo.java @@ -0,0 +1,117 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.webapp.dao; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlRootElement; + +import org.apache.hadoop.yarn.api.records.ContainerReport; +import org.apache.hadoop.yarn.api.records.ContainerState; +import org.apache.hadoop.yarn.util.Times; + +@XmlRootElement(name = "container") +@XmlAccessorType(XmlAccessType.FIELD) +public class ContainerInfo { + + protected String containerId; + protected int allocatedMB; + protected int allocatedVCores; + protected String assignedNodeId; + protected int priority; + protected long startedTime; + protected long finishedTime; + protected long elapsedTime; + protected String diagnosticsInfo; + protected String logUrl; + protected int containerExitStatus; + protected ContainerState containerState; + + public ContainerInfo() { + // JAXB needs this + } + + public ContainerInfo(ContainerReport container) { + containerId = container.getContainerId().toString(); + if (container.getAllocatedResource() != null) { + allocatedMB = container.getAllocatedResource().getMemory(); + allocatedVCores = container.getAllocatedResource().getVirtualCores(); + } + if (container.getAssignedNode() != null) { + assignedNodeId = container.getAssignedNode().toString(); + } + priority = container.getPriority().getPriority(); + startedTime = container.getStartTime(); + finishedTime = container.getFinishTime(); + elapsedTime = Times.elapsed(startedTime, finishedTime); + diagnosticsInfo = container.getDiagnosticsInfo(); + logUrl = container.getLogUrl(); + containerExitStatus = container.getContainerExitStatus(); + containerState = container.getContainerState(); + } + + public String getContainerId() { + return containerId; + } + + public int getAllocatedMB() { + return allocatedMB; + } + + public int getAllocatedVCores() { + return allocatedVCores; + } + + public String getAssignedNodeId() { + return assignedNodeId; + } + + public int getPriority() { + return priority; + } + + public long getStartedTime() { + return startedTime; + } + + public long getFinishedTime() { + return finishedTime; + } + + public long getElapsedTime() { + return elapsedTime; + } + + public String getDiagnosticsInfo() { + return diagnosticsInfo; + } + + public String getLogUrl() { + return logUrl; + } + + public int getContainerExitStatus() { + return containerExitStatus; + } + + public ContainerState getContainerState() { + return containerState; + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/ContainersInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/ContainersInfo.java new file mode 100644 index 00000000000..49767e02324 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/ContainersInfo.java @@ -0,0 +1,44 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.server.webapp.dao; + +import java.util.ArrayList; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlRootElement; + +@XmlRootElement(name = "containers") +@XmlAccessorType(XmlAccessType.FIELD) +public class ContainersInfo { + + protected ArrayList container = new ArrayList(); + + public ContainersInfo() { + // JAXB needs this + } + + public void add(ContainerInfo containerInfo) { + container.add(containerInfo); + } + + public ArrayList getContainers() { + return container; + } + +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml index ddc57d03cca..dd23392e966 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml @@ -183,6 +183,11 @@ hadoop-yarn-server-common + + org.apache.hadoop + hadoop-yarn-server-applicationhistoryservice + ${project.version} + org.apache.hadoop hadoop-yarn-server-web-proxy diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java index 1ddb1b48a52..64a4165feb4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java @@ -24,6 +24,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.event.Dispatcher; +import org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWriter; import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.AMLivelinessMonitor; @@ -33,8 +34,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.security.AMRMTokenSecretManager; import org.apache.hadoop.yarn.server.resourcemanager.security.ClientToAMTokenSecretManagerInRM; import org.apache.hadoop.yarn.server.resourcemanager.security.DelegationTokenRenewer; -import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager; import org.apache.hadoop.yarn.server.resourcemanager.security.NMTokenSecretManagerInRM; +import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager; import org.apache.hadoop.yarn.server.resourcemanager.security.RMDelegationTokenSecretManager; /** @@ -90,4 
+91,10 @@ public interface RMContext { void setRMDelegationTokenSecretManager( RMDelegationTokenSecretManager delegationTokenSecretManager); + + RMApplicationHistoryWriter getRMApplicationHistoryWriter(); + + void setRMApplicationHistoryWriter( + RMApplicationHistoryWriter rmApplicationHistoryWriter); + } \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java index ec90b4a27d2..79e59831e9d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java @@ -27,6 +27,7 @@ import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.event.Dispatcher; +import org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWriter; import org.apache.hadoop.yarn.server.resourcemanager.recovery.NullRMStateStore; import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; @@ -76,6 +77,7 @@ public class RMContextImpl implements RMContext { private NodesListManager nodesListManager; private ResourceTrackerService resourceTrackerService; private ApplicationMasterService applicationMasterService; + private RMApplicationHistoryWriter rmApplicationHistoryWriter; /** * Default constructor. 
To be used in conjunction with setter methods for @@ -95,7 +97,8 @@ public RMContextImpl(Dispatcher rmDispatcher, AMRMTokenSecretManager appTokenSecretManager, RMContainerTokenSecretManager containerTokenSecretManager, NMTokenSecretManagerInRM nmTokenSecretManager, - ClientToAMTokenSecretManagerInRM clientToAMTokenSecretManager) { + ClientToAMTokenSecretManagerInRM clientToAMTokenSecretManager, + RMApplicationHistoryWriter rmApplicationHistoryWriter) { this(); this.setDispatcher(rmDispatcher); this.setContainerAllocationExpirer(containerAllocationExpirer); @@ -106,6 +109,7 @@ public RMContextImpl(Dispatcher rmDispatcher, this.setContainerTokenSecretManager(containerTokenSecretManager); this.setNMTokenSecretManager(nmTokenSecretManager); this.setClientToAMTokenSecretManager(clientToAMTokenSecretManager); + this.setRMApplicationHistoryWriter(rmApplicationHistoryWriter); RMStateStore nullStore = new NullRMStateStore(); nullStore.setRMDispatcher(rmDispatcher); @@ -318,4 +322,16 @@ public HAServiceState getHAServiceState() { return haServiceState; } } + + @Override + public RMApplicationHistoryWriter getRMApplicationHistoryWriter() { + return rmApplicationHistoryWriter; + } + + @Override + public void setRMApplicationHistoryWriter( + RMApplicationHistoryWriter rmApplicationHistoryWriter) { + this.rmApplicationHistoryWriter = rmApplicationHistoryWriter; + } + } \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java index 8c734435a30..5556255352c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java @@ -33,9 +33,14 @@ import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest; import org.apache.hadoop.yarn.api.records.ResourceRequest; +import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState; +import org.apache.hadoop.yarn.api.records.YarnApplicationState; import org.apache.hadoop.yarn.exceptions.InvalidContainerReleaseException; import org.apache.hadoop.yarn.exceptions.InvalidResourceBlacklistRequestException; import org.apache.hadoop.yarn.exceptions.InvalidResourceRequestException; +import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils; @@ -43,6 +48,7 @@ * Utility methods to aid serving RM data through the REST and RPC APIs */ public class RMServerUtils { + public static List queryRMNodes(RMContext context, EnumSet acceptedStates) { // nodes contains nodes that are NEW, RUNNING OR UNHEALTHY @@ -56,7 +62,7 @@ public static List queryRMNodes(RMContext context, } } } - + // inactiveNodes contains nodes that are DECOMMISSIONED, LOST, OR REBOOTED if (acceptedStates.contains(NodeState.DECOMMISSIONED) || 
acceptedStates.contains(NodeState.LOST) || @@ -69,7 +75,7 @@ public static List queryRMNodes(RMContext context, } return results; } - + /** * Utility method to validate a list resource requests, by insuring that the * requested memory/vcore is non-negative and not greater than max @@ -85,8 +91,9 @@ public static void validateResourceRequests(List ask, * @throw InvalidResourceBlacklistRequestException if the * resource is not able to be added to the blacklist. */ - public static void validateBlacklistRequest(ResourceBlacklistRequest blacklistRequest) - throws InvalidResourceBlacklistRequestException { + public static void validateBlacklistRequest( + ResourceBlacklistRequest blacklistRequest) + throws InvalidResourceBlacklistRequestException { if (blacklistRequest != null) { List plus = blacklistRequest.getBlacklistAdditions(); if (plus != null && plus.contains(ResourceRequest.ANY)) { @@ -100,10 +107,12 @@ public static void validateBlacklistRequest(ResourceBlacklistRequest blacklistRe * It will validate to make sure all the containers belong to correct * application attempt id. If not then it will throw * {@link InvalidContainerReleaseException} - * @param containerReleaseList containers to be released as requested by - * application master. - * @param appAttemptId Application attempt Id - * @throws InvalidContainerReleaseException + * + * @param containerReleaseList + * containers to be released as requested by application master. + * @param appAttemptId + * Application attempt Id + * @throws InvalidContainerReleaseException */ public static void validateContainerReleaseRequest(List containerReleaseList, @@ -111,9 +120,11 @@ public static void validateBlacklistRequest(ResourceBlacklistRequest blacklistRe throws InvalidContainerReleaseException { for (ContainerId cId : containerReleaseList) { if (!appAttemptId.equals(cId.getApplicationAttemptId())) { - throw new InvalidContainerReleaseException("Cannot release container : " - + cId.toString() + " not belonging to this application attempt : " - + appAttemptId); + throw new InvalidContainerReleaseException( + "Cannot release container : " + + cId.toString() + + " not belonging to this application attempt : " + + appAttemptId); } } } @@ -157,4 +168,63 @@ public static UserGroupInformation verifyAccess( } return user; } + + public static YarnApplicationState createApplicationState( + RMAppState rmAppState) { + switch (rmAppState) { + case NEW: + return YarnApplicationState.NEW; + case NEW_SAVING: + return YarnApplicationState.NEW_SAVING; + case SUBMITTED: + return YarnApplicationState.SUBMITTED; + case ACCEPTED: + return YarnApplicationState.ACCEPTED; + case RUNNING: + return YarnApplicationState.RUNNING; + case FINISHING: + case FINISHED: + return YarnApplicationState.FINISHED; + case KILLED: + return YarnApplicationState.KILLED; + case FAILED: + return YarnApplicationState.FAILED; + default: + throw new YarnRuntimeException("Unknown state passed!"); + } + } + + public static YarnApplicationAttemptState createApplicationAttemptState( + RMAppAttemptState rmAppAttemptState) { + switch (rmAppAttemptState) { + case NEW: + return YarnApplicationAttemptState.NEW; + case SUBMITTED: + return YarnApplicationAttemptState.SUBMITTED; + case SCHEDULED: + return YarnApplicationAttemptState.SCHEDULED; + case ALLOCATED: + return YarnApplicationAttemptState.ALLOCATED; + case LAUNCHED: + return YarnApplicationAttemptState.LAUNCHED; + case ALLOCATED_SAVING: + case LAUNCHED_UNMANAGED_SAVING: + return YarnApplicationAttemptState.ALLOCATED_SAVING; + case RUNNING: + 
return YarnApplicationAttemptState.RUNNING; + case FINISHING: + return YarnApplicationAttemptState.FINISHING; + case FINAL_SAVING: + return YarnApplicationAttemptState.FINAL_SAVING; + case FINISHED: + return YarnApplicationAttemptState.FINISHED; + case KILLED: + return YarnApplicationAttemptState.KILLED; + case FAILED: + return YarnApplicationAttemptState.FAILED; + default: + throw new YarnRuntimeException("Unknown state passed!"); + } + } + } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java index a98be13e3bb..8575cd57d65 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java @@ -52,6 +52,7 @@ import org.apache.hadoop.yarn.event.Dispatcher; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; +import org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWriter; import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEventType; import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher; import org.apache.hadoop.yarn.server.resourcemanager.monitor.SchedulingEditPolicy; @@ -261,6 +262,10 @@ protected RMAppManager createRMAppManager() { this.applicationACLsManager, this.conf); } + protected RMApplicationHistoryWriter createRMApplicationHistoryWriter() { + return new RMApplicationHistoryWriter(); + } + // sanity check for configurations protected static void validateConfigs(Configuration conf) { // validate max-attempts @@ -345,6 +350,11 @@ protected void serviceInit(Configuration configuration) throws Exception { rmContext.setDelegationTokenRenewer(delegationTokenRenewer); } + RMApplicationHistoryWriter rmApplicationHistoryWriter = + createRMApplicationHistoryWriter(); + addService(rmApplicationHistoryWriter); + rmContext.setRMApplicationHistoryWriter(rmApplicationHistoryWriter); + // Register event handler for NodesListManager nodesListManager = new NodesListManager(rmContext); rmDispatcher.register(NodesListManagerEventType.class, nodesListManager); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ahs/RMApplicationHistoryWriter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ahs/RMApplicationHistoryWriter.java new file mode 100644 index 00000000000..ffc4a4fbdcf --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ahs/RMApplicationHistoryWriter.java @@ -0,0 +1,345 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.resourcemanager.ahs; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.service.CompositeService; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.event.AsyncDispatcher; +import org.apache.hadoop.yarn.event.Dispatcher; +import org.apache.hadoop.yarn.event.Event; +import org.apache.hadoop.yarn.event.EventHandler; +import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; +import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryStore; +import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryWriter; +import org.apache.hadoop.yarn.server.applicationhistoryservice.NullApplicationHistoryStore; +import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptFinishData; +import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptStartData; +import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationFinishData; +import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationStartData; +import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerFinishData; +import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerStartData; +import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt; +import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer; + +/** + *

+ * {@link ResourceManager} uses this class to write the information of
+ * {@link RMApp}, {@link RMAppAttempt} and {@link RMContainer}. These APIs are
+ * non-blocking, and just schedule a writing history event. A self-contained
+ * dispatcher vector will handle the event in separate threads, and extract the
+ * required fields that are going to be persisted. Then, the extracted
+ * information will be persisted via the implementation of
+ * {@link ApplicationHistoryStore}.
+ *
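+ * <p>
+ * A minimal usage sketch (illustrative only; in practice the writer is
+ * created and wired up by the ResourceManager itself, and conf and app
+ * stand in for a populated Configuration and a live RMApp):
+ *
+ * <pre>
+ * RMApplicationHistoryWriter writer = new RMApplicationHistoryWriter();
+ * writer.init(conf);  // reads the store class and dispatcher pool size
+ * writer.start();
+ * writer.applicationStarted(app);  // non-blocking; schedules a writing event
+ * writer.stop();  // drains the outstanding writing events before stopping
+ * </pre>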

+ */ +@Private +@Unstable +public class RMApplicationHistoryWriter extends CompositeService { + + public static final Log LOG = LogFactory + .getLog(RMApplicationHistoryWriter.class); + + private Dispatcher dispatcher; + private ApplicationHistoryWriter writer; + private boolean historyServiceEnabled; + + public RMApplicationHistoryWriter() { + super(RMApplicationHistoryWriter.class.getName()); + } + + @Override + protected synchronized void serviceInit(Configuration conf) throws Exception { + + historyServiceEnabled = + conf.getBoolean(YarnConfiguration.YARN_HISTORY_SERVICE_ENABLED, + YarnConfiguration.DEFAULT_YARN_HISTORY_SERVICE_ENABLED); + + writer = createApplicationHistoryStore(conf); + addIfService(writer); + + dispatcher = createDispatcher(conf); + dispatcher.register(WritingHistoryEventType.class, + new ForwardingEventHandler()); + addIfService(dispatcher); + super.serviceInit(conf); + } + + protected Dispatcher createDispatcher(Configuration conf) { + MultiThreadedDispatcher dispatcher = + new MultiThreadedDispatcher( + conf + .getInt( + YarnConfiguration.RM_HISTORY_WRITER_MULTI_THREADED_DISPATCHER_POOL_SIZE, + YarnConfiguration.DEFAULT_RM_HISTORY_WRITER_MULTI_THREADED_DISPATCHER_POOL_SIZE)); + dispatcher.setDrainEventsOnStop(); + return dispatcher; + } + + protected ApplicationHistoryStore createApplicationHistoryStore( + Configuration conf) { + // If the history writer is not enabled, a dummy store will be used to + // write nothing + if (historyServiceEnabled) { + try { + Class storeClass = + conf.getClass(YarnConfiguration.RM_HISTORY_WRITER_CLASS, + NullApplicationHistoryStore.class, ApplicationHistoryStore.class); + return storeClass.newInstance(); + } catch (Exception e) { + String msg = + "Could not instantiate ApplicationHistoryWriter: " + + conf.get(YarnConfiguration.RM_HISTORY_WRITER_CLASS, + NullApplicationHistoryStore.class.getName()); + LOG.error(msg, e); + throw new YarnRuntimeException(msg, e); + } + } else { + return new NullApplicationHistoryStore(); + } + } + + protected void handleWritingApplicationHistoryEvent( + WritingApplicationHistoryEvent event) { + switch (event.getType()) { + case APP_START: + WritingApplicationStartEvent wasEvent = + (WritingApplicationStartEvent) event; + try { + writer.applicationStarted(wasEvent.getApplicationStartData()); + LOG.info("Stored the start data of application " + + wasEvent.getApplicationId()); + } catch (IOException e) { + LOG.error("Error when storing the start data of application " + + wasEvent.getApplicationId()); + } + break; + case APP_FINISH: + WritingApplicationFinishEvent wafEvent = + (WritingApplicationFinishEvent) event; + try { + writer.applicationFinished(wafEvent.getApplicationFinishData()); + LOG.info("Stored the finish data of application " + + wafEvent.getApplicationId()); + } catch (IOException e) { + LOG.error("Error when storing the finish data of application " + + wafEvent.getApplicationId()); + } + break; + case APP_ATTEMPT_START: + WritingApplicationAttemptStartEvent waasEvent = + (WritingApplicationAttemptStartEvent) event; + try { + writer.applicationAttemptStarted(waasEvent + .getApplicationAttemptStartData()); + LOG.info("Stored the start data of application attempt " + + waasEvent.getApplicationAttemptId()); + } catch (IOException e) { + LOG.error("Error when storing the start data of application attempt " + + waasEvent.getApplicationAttemptId()); + } + break; + case APP_ATTEMPT_FINISH: + WritingApplicationAttemptFinishEvent waafEvent = + (WritingApplicationAttemptFinishEvent) event; + try { + 
writer.applicationAttemptFinished(waafEvent + .getApplicationAttemptFinishData()); + LOG.info("Stored the finish data of application attempt " + + waafEvent.getApplicationAttemptId()); + } catch (IOException e) { + LOG + .error("Error when storing the finish data of application attempt " + + waafEvent.getApplicationAttemptId()); + } + break; + case CONTAINER_START: + WritingContainerStartEvent wcsEvent = + (WritingContainerStartEvent) event; + try { + writer.containerStarted(wcsEvent.getContainerStartData()); + LOG.info("Stored the start data of container " + + wcsEvent.getContainerId()); + } catch (IOException e) { + LOG.error("Error when storing the start data of container " + + wcsEvent.getContainerId()); + } + break; + case CONTAINER_FINISH: + WritingContainerFinishEvent wcfEvent = + (WritingContainerFinishEvent) event; + try { + writer.containerFinished(wcfEvent.getContainerFinishData()); + LOG.info("Stored the finish data of container " + + wcfEvent.getContainerId()); + } catch (IOException e) { + LOG.error("Error when storing the finish data of container " + + wcfEvent.getContainerId()); + } + break; + default: + LOG.error("Unknown WritingApplicationHistoryEvent type: " + + event.getType()); + } + } + + @SuppressWarnings("unchecked") + public void applicationStarted(RMApp app) { + dispatcher.getEventHandler().handle( + new WritingApplicationStartEvent(app.getApplicationId(), + ApplicationStartData.newInstance(app.getApplicationId(), app.getName(), + app.getApplicationType(), app.getQueue(), app.getUser(), + app.getSubmitTime(), app.getStartTime()))); + } + + @SuppressWarnings("unchecked") + public void applicationFinished(RMApp app) { + dispatcher.getEventHandler().handle( + new WritingApplicationFinishEvent(app.getApplicationId(), + ApplicationFinishData.newInstance(app.getApplicationId(), + app.getFinishTime(), app.getDiagnostics().toString(), + app.getFinalApplicationStatus(), app.createApplicationState()))); + } + + @SuppressWarnings("unchecked") + public void applicationAttemptStarted(RMAppAttempt appAttempt) { + if (historyServiceEnabled) { + dispatcher.getEventHandler().handle( + new WritingApplicationAttemptStartEvent(appAttempt.getAppAttemptId(), + ApplicationAttemptStartData.newInstance(appAttempt.getAppAttemptId(), + appAttempt.getHost(), appAttempt.getRpcPort(), appAttempt + .getMasterContainer().getId()))); + } + } + + @SuppressWarnings("unchecked") + public void applicationAttemptFinished(RMAppAttempt appAttempt) { + if (historyServiceEnabled) { + dispatcher.getEventHandler().handle( + new WritingApplicationAttemptFinishEvent(appAttempt.getAppAttemptId(), + ApplicationAttemptFinishData.newInstance( + appAttempt.getAppAttemptId(), appAttempt.getDiagnostics() + .toString(), appAttempt.getTrackingUrl(), appAttempt + .getFinalApplicationStatus(), appAttempt + .createApplicationAttemptState()))); + } + } + + @SuppressWarnings("unchecked") + public void containerStarted(RMContainer container) { + if (historyServiceEnabled) { + dispatcher.getEventHandler().handle( + new WritingContainerStartEvent(container.getContainerId(), + ContainerStartData.newInstance(container.getContainerId(), + container.getAllocatedResource(), container.getAllocatedNode(), + container.getAllocatedPriority(), container.getStartTime()))); + } + } + + @SuppressWarnings("unchecked") + public void containerFinished(RMContainer container) { + if (historyServiceEnabled) { + dispatcher.getEventHandler().handle( + new WritingContainerFinishEvent(container.getContainerId(), + 
ContainerFinishData.newInstance(container.getContainerId(),
+            container.getFinishTime(), container.getDiagnosticsInfo(),
+            container.getLogURL(), container.getContainerExitStatus(),
+            container.getContainerState())));
+    }
+  }
+
+  /**
+   * EventHandler implementation which forwards events to HistoryWriter.
+   * Making use of it, HistoryWriter can avoid having a public handle method.
+   */
+  private final class ForwardingEventHandler implements
+      EventHandler<WritingApplicationHistoryEvent> {
+
+    @Override
+    public void handle(WritingApplicationHistoryEvent event) {
+      handleWritingApplicationHistoryEvent(event);
+    }
+
+  }
+
+  @SuppressWarnings({ "rawtypes", "unchecked" })
+  protected static class MultiThreadedDispatcher extends CompositeService
+      implements Dispatcher {
+
+    private List<AsyncDispatcher> dispatchers =
+        new ArrayList<AsyncDispatcher>();
+
+    public MultiThreadedDispatcher(int num) {
+      super(MultiThreadedDispatcher.class.getName());
+      for (int i = 0; i < num; ++i) {
+        AsyncDispatcher dispatcher = createDispatcher();
+        dispatchers.add(dispatcher);
+        addIfService(dispatcher);
+      }
+    }
+
+    @Override
+    public EventHandler getEventHandler() {
+      return new CompositEventHandler();
+    }
+
+    @Override
+    public void register(Class<? extends Enum> eventType, EventHandler handler) {
+      for (AsyncDispatcher dispatcher : dispatchers) {
+        dispatcher.register(eventType, handler);
+      }
+    }
+
+    public void setDrainEventsOnStop() {
+      for (AsyncDispatcher dispatcher : dispatchers) {
+        dispatcher.setDrainEventsOnStop();
+      }
+    }
+
+    private class CompositEventHandler implements EventHandler<Event> {
+
+      @Override
+      public void handle(Event event) {
+        // Use hashCode (of ApplicationId) to dispatch the event to the child
+        // dispatcher, such that all the writing events of one application will
+        // be handled by one thread, and the scheduled order of these events
+        // will be preserved
+        int index = (event.hashCode() & Integer.MAX_VALUE) % dispatchers.size();
+        dispatchers.get(index).getEventHandler().handle(event);
+      }
+
+    }
+
+    protected AsyncDispatcher createDispatcher() {
+      return new AsyncDispatcher();
+    }
+
+  }
+
+}
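The masked modulo in CompositEventHandler is the entire routing scheme: each Writing*Event overrides hashCode() to return its ApplicationId's hash, so every event of a given application lands on the same child dispatcher and keeps its scheduled order. A standalone sketch of the index computation (the pool size and application id are made up):

public class DispatchIndexSketch {
  public static void main(String[] args) {
    int poolSize = 10; // hypothetical dispatcher pool size
    // Stands in for event.hashCode(), which returns the ApplicationId's hash
    // for every writing event of the same application.
    int hash = "application_1390000000000_0001".hashCode();
    // Mask the sign bit so the index is non-negative, then pick a child.
    int index = (hash & Integer.MAX_VALUE) % poolSize;
    System.out.println("events for this application go to dispatcher " + index);
  }
}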
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ahs/WritingApplicationAttemptFinishEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ahs/WritingApplicationAttemptFinishEvent.java
new file mode 100644
index 00000000000..3f6a6203555
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ahs/WritingApplicationAttemptFinishEvent.java
@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.ahs;
+
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptFinishData;
+
+public class WritingApplicationAttemptFinishEvent extends
+    WritingApplicationHistoryEvent {
+
+  private ApplicationAttemptId appAttemptId;
+  private ApplicationAttemptFinishData appAttemptFinish;
+
+  public WritingApplicationAttemptFinishEvent(
+      ApplicationAttemptId appAttemptId,
+      ApplicationAttemptFinishData appAttemptFinish) {
+    super(WritingHistoryEventType.APP_ATTEMPT_FINISH);
+    this.appAttemptId = appAttemptId;
+    this.appAttemptFinish = appAttemptFinish;
+  }
+
+  @Override
+  public int hashCode() {
+    return appAttemptId.getApplicationId().hashCode();
+  }
+
+  public ApplicationAttemptId getApplicationAttemptId() {
+    return appAttemptId;
+  }
+
+  public ApplicationAttemptFinishData getApplicationAttemptFinishData() {
+    return appAttemptFinish;
+  }
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ahs/WritingApplicationAttemptStartEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ahs/WritingApplicationAttemptStartEvent.java
new file mode 100644
index 00000000000..7e092d3455b
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ahs/WritingApplicationAttemptStartEvent.java
@@ -0,0 +1,50 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package org.apache.hadoop.yarn.server.resourcemanager.ahs; + +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptStartData; + +public class WritingApplicationAttemptStartEvent extends + WritingApplicationHistoryEvent { + + private ApplicationAttemptId appAttemptId; + private ApplicationAttemptStartData appAttemptStart; + + public WritingApplicationAttemptStartEvent(ApplicationAttemptId appAttemptId, + ApplicationAttemptStartData appAttemptStart) { + super(WritingHistoryEventType.APP_ATTEMPT_START); + this.appAttemptId = appAttemptId; + this.appAttemptStart = appAttemptStart; + } + + @Override + public int hashCode() { + return appAttemptId.getApplicationId().hashCode(); + } + + public ApplicationAttemptId getApplicationAttemptId() { + return appAttemptId; + } + + public ApplicationAttemptStartData getApplicationAttemptStartData() { + return appAttemptStart; + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ahs/WritingApplicationFinishEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ahs/WritingApplicationFinishEvent.java new file mode 100644 index 00000000000..7a202144de1 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ahs/WritingApplicationFinishEvent.java @@ -0,0 +1,50 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.resourcemanager.ahs; + +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationFinishData; + +public class WritingApplicationFinishEvent extends + WritingApplicationHistoryEvent { + + private ApplicationId appId; + private ApplicationFinishData appFinish; + + public WritingApplicationFinishEvent(ApplicationId appId, + ApplicationFinishData appFinish) { + super(WritingHistoryEventType.APP_FINISH); + this.appId = appId; + this.appFinish = appFinish; + } + + @Override + public int hashCode() { + return appId.hashCode(); + } + + public ApplicationId getApplicationId() { + return appId; + } + + public ApplicationFinishData getApplicationFinishData() { + return appFinish; + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ahs/WritingApplicationHistoryEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ahs/WritingApplicationHistoryEvent.java new file mode 100644 index 00000000000..bc17edc62b4 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ahs/WritingApplicationHistoryEvent.java @@ -0,0 +1,30 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.resourcemanager.ahs; + +import org.apache.hadoop.yarn.event.AbstractEvent; + +public class WritingApplicationHistoryEvent extends + AbstractEvent { + + public WritingApplicationHistoryEvent(WritingHistoryEventType type) { + super(type); + } + +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ahs/WritingApplicationStartEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ahs/WritingApplicationStartEvent.java new file mode 100644 index 00000000000..1b5dc784c3b --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ahs/WritingApplicationStartEvent.java @@ -0,0 +1,50 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.resourcemanager.ahs; + +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationStartData; + +public class WritingApplicationStartEvent extends + WritingApplicationHistoryEvent { + + private ApplicationId appId; + private ApplicationStartData appStart; + + public WritingApplicationStartEvent(ApplicationId appId, + ApplicationStartData appStart) { + super(WritingHistoryEventType.APP_START); + this.appId = appId; + this.appStart = appStart; + } + + @Override + public int hashCode() { + return appId.hashCode(); + } + + public ApplicationId getApplicationId() { + return appId; + } + + public ApplicationStartData getApplicationStartData() { + return appStart; + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ahs/WritingContainerFinishEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ahs/WritingContainerFinishEvent.java new file mode 100644 index 00000000000..6b271669f56 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ahs/WritingContainerFinishEvent.java @@ -0,0 +1,49 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.resourcemanager.ahs; + +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerFinishData; + +public class WritingContainerFinishEvent extends WritingApplicationHistoryEvent { + + private ContainerId containerId; + private ContainerFinishData containerFinish; + + public WritingContainerFinishEvent(ContainerId containerId, + ContainerFinishData containerFinish) { + super(WritingHistoryEventType.CONTAINER_FINISH); + this.containerId = containerId; + this.containerFinish = containerFinish; + } + + @Override + public int hashCode() { + return containerId.getApplicationAttemptId().getApplicationId().hashCode(); + } + + public ContainerId getContainerId() { + return containerId; + } + + public ContainerFinishData getContainerFinishData() { + return containerFinish; + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ahs/WritingContainerStartEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ahs/WritingContainerStartEvent.java new file mode 100644 index 00000000000..f6df6691c74 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ahs/WritingContainerStartEvent.java @@ -0,0 +1,49 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.resourcemanager.ahs; + +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerStartData; + +public class WritingContainerStartEvent extends WritingApplicationHistoryEvent { + + private ContainerId containerId; + private ContainerStartData containerStart; + + public WritingContainerStartEvent(ContainerId containerId, + ContainerStartData containerStart) { + super(WritingHistoryEventType.CONTAINER_START); + this.containerId = containerId; + this.containerStart = containerStart; + } + + @Override + public int hashCode() { + return containerId.getApplicationAttemptId().getApplicationId().hashCode(); + } + + public ContainerId getContainerId() { + return containerId; + } + + public ContainerStartData getContainerStartData() { + return containerStart; + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ahs/WritingHistoryEventType.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ahs/WritingHistoryEventType.java new file mode 100644 index 00000000000..2f05428d87b --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ahs/WritingHistoryEventType.java @@ -0,0 +1,24 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.resourcemanager.ahs; + +public enum WritingHistoryEventType { + APP_START, APP_FINISH, APP_ATTEMPT_START, APP_ATTEMPT_FINISH, + CONTAINER_START, CONTAINER_FINISH +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java index 2a90ec92910..55882b30356 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java @@ -54,6 +54,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.RMAppManagerEvent; import org.apache.hadoop.yarn.server.resourcemanager.RMAppManagerEventType; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; +import org.apache.hadoop.yarn.server.resourcemanager.RMServerUtils; import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.ApplicationState; import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.RMState; import org.apache.hadoop.yarn.server.resourcemanager.recovery.Recoverable; @@ -335,6 +336,8 @@ public RMAppImpl(ApplicationId applicationId, RMContext rmContext, this.writeLock = lock.writeLock(); this.stateMachine = stateMachineFactory.make(this); + + rmContext.getRMApplicationHistoryWriter().applicationStarted(this); } @Override @@ -1002,6 +1005,11 @@ public void transition(RMAppImpl app, RMAppEvent event) { app.handler.handle( new RMAppManagerEvent(app.applicationId, RMAppManagerEventType.APP_COMPLETED)); + + // TODO: We need to fix for the problem that RMApp enters the final state + // after RMAppAttempt in the killing case + app.rmContext.getRMApplicationHistoryWriter() + .applicationFinished(app); }; } @@ -1069,27 +1077,7 @@ public YarnApplicationState createApplicationState() { if (rmAppState.equals(RMAppState.KILLING)) { rmAppState = stateBeforeKilling; } - switch (rmAppState) { - case NEW: - return YarnApplicationState.NEW; - case NEW_SAVING: - return YarnApplicationState.NEW_SAVING; - case SUBMITTED: - return YarnApplicationState.SUBMITTED; - case ACCEPTED: - return YarnApplicationState.ACCEPTED; - case RUNNING: - return YarnApplicationState.RUNNING; - case FINISHING: - case FINISHED: - return YarnApplicationState.FINISHED; - case KILLED: - return YarnApplicationState.KILLED; - case FAILED: - return YarnApplicationState.FAILED; - default: - throw new YarnRuntimeException("Unknown state passed!"); - } + return RMServerUtils.createApplicationState(rmAppState); } public static boolean isAppInFinalState(RMApp rmApp) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttempt.java index 335dbda65e6..3a666dd0710 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttempt.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttempt.java @@ -32,6 +32,7 @@ import org.apache.hadoop.yarn.api.records.ContainerStatus; import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; import org.apache.hadoop.yarn.api.records.NodeId; +import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.security.AMRMTokenIdentifier; @@ -178,4 +179,21 @@ public interface RMAppAttempt extends EventHandler { * @return the start time of the application. */ long getStartTime(); + + /** + * The current state of the {@link RMAppAttempt}. + * + * @return the current state {@link RMAppAttemptState} for this application + * attempt. + */ + RMAppAttemptState getState(); + + /** + * Create the external user-facing state of the attempt of ApplicationMaster + * from the current state of the {@link RMAppAttempt}. + * + * @return the external user-facing state of the attempt ApplicationMaster. + */ + YarnApplicationAttemptState createApplicationAttemptState(); + } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java index cd5c9d3bf25..ce246db7ba3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java @@ -55,6 +55,7 @@ import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceRequest; +import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; @@ -62,6 +63,7 @@ import org.apache.hadoop.yarn.security.client.ClientToAMTokenIdentifier; import org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; +import org.apache.hadoop.yarn.server.resourcemanager.RMServerUtils; import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEvent; import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEventType; import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore; @@ -1046,6 +1048,9 @@ public void transition(RMAppAttemptImpl appAttempt, appAttempt.eventHandler.handle(new AppAttemptRemovedSchedulerEvent( appAttemptId, finalAttemptState, keepContainersAcrossAppAttempts)); appAttempt.removeCredentials(appAttempt); + + appAttempt.rmContext.getRMApplicationHistoryWriter() + .applicationAttemptFinished(appAttempt); } } @@ -1143,6 
+1148,9 @@ public void transition(RMAppAttemptImpl appAttempt, // write at AM launch time, so we don't save the AM's tracking URL anywhere // as that would mean an extra state-store write. For now, we hope that in // work-preserving restart, AMs are forced to reregister. + + appAttempt.rmContext.getRMApplicationHistoryWriter() + .applicationAttemptStarted(appAttempt); } } @@ -1514,6 +1522,23 @@ public long getStartTime() { } } + @Override + public RMAppAttemptState getState() { + this.readLock.lock(); + + try { + return this.stateMachine.getCurrentState(); + } finally { + this.readLock.unlock(); + } + } + + @Override + public YarnApplicationAttemptState createApplicationAttemptState() { + RMAppAttemptState state = getState(); + return RMServerUtils.createApplicationAttemptState(state); + } + private void launchAttempt(){ // Send event to launch the AM Container eventHandler.handle(new AMLauncherEvent(AMLauncherEventType.LAUNCH, this)); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainer.java index 0dfdf20b52d..bda8580e06c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainer.java @@ -21,6 +21,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.ContainerState; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.Resource; @@ -50,4 +51,22 @@ public interface RMContainer extends EventHandler { Priority getReservedPriority(); + Resource getAllocatedResource(); + + NodeId getAllocatedNode(); + + Priority getAllocatedPriority(); + + long getStartTime(); + + long getFinishTime(); + + String getDiagnosticsInfo(); + + String getLogURL(); + + int getContainerExitStatus(); + + ContainerState getContainerState(); + } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java index d44fd3f32c2..057c9ace7e3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java @@ -28,10 +28,13 @@ import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.ContainerState; +import 
org.apache.hadoop.yarn.api.records.ContainerStatus; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.event.EventHandler; +import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptContainerAcquiredEvent; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptContainerAllocatedEvent; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptContainerFinishedEvent; @@ -40,6 +43,7 @@ import org.apache.hadoop.yarn.state.SingleArcTransition; import org.apache.hadoop.yarn.state.StateMachine; import org.apache.hadoop.yarn.state.StateMachineFactory; +import org.apache.hadoop.yarn.webapp.util.WebAppUtils; @SuppressWarnings({"unchecked", "rawtypes"}) public class RMContainerImpl implements RMContainer { @@ -133,28 +137,39 @@ RMContainerEventType.RELEASED, new KillTransition()) private final ApplicationAttemptId appAttemptId; private final NodeId nodeId; private final Container container; + private final RMContext rmContext; private final EventHandler eventHandler; private final ContainerAllocationExpirer containerAllocationExpirer; + private final String user; private Resource reservedResource; private NodeId reservedNode; private Priority reservedPriority; + private long startTime; + private long finishTime; + private String logURL; + private ContainerStatus finishedStatus; + public RMContainerImpl(Container container, ApplicationAttemptId appAttemptId, NodeId nodeId, - EventHandler handler, - ContainerAllocationExpirer containerAllocationExpirer) { + String user, RMContext rmContext) { this.stateMachine = stateMachineFactory.make(this); this.containerId = container.getId(); this.nodeId = nodeId; this.container = container; this.appAttemptId = appAttemptId; - this.eventHandler = handler; - this.containerAllocationExpirer = containerAllocationExpirer; + this.user = user; + this.startTime = System.currentTimeMillis(); + this.rmContext = rmContext; + this.eventHandler = rmContext.getDispatcher().getEventHandler(); + this.containerAllocationExpirer = rmContext.getContainerAllocationExpirer(); ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); this.readLock = lock.readLock(); this.writeLock = lock.writeLock(); + + rmContext.getRMApplicationHistoryWriter().containerStarted(this); } @Override @@ -197,7 +212,77 @@ public NodeId getReservedNode() { public Priority getReservedPriority() { return reservedPriority; } - + + @Override + public Resource getAllocatedResource() { + return container.getResource(); + } + + @Override + public NodeId getAllocatedNode() { + return container.getNodeId(); + } + + @Override + public Priority getAllocatedPriority() { + return container.getPriority(); + } + + @Override + public long getStartTime() { + return startTime; + } + + @Override + public long getFinishTime() { + try { + readLock.lock(); + return finishTime; + } finally { + readLock.unlock(); + } + } + + @Override + public String getDiagnosticsInfo() { + try { + readLock.lock(); + return finishedStatus.getDiagnostics(); + } finally { + readLock.unlock(); + } + } + + @Override + public String getLogURL() { + try { + readLock.lock(); + return logURL; + } finally { + readLock.unlock(); + } + } + + @Override + public int getContainerExitStatus() { + try { + readLock.lock(); + return finishedStatus.getExitStatus(); + } finally { + 
readLock.unlock(); + } + } + + @Override + public ContainerState getContainerState() { + try { + readLock.lock(); + return finishedStatus.getState(); + } finally { + readLock.unlock(); + } + } + @Override public String toString() { return containerId.toString(); @@ -276,6 +361,11 @@ private static final class LaunchedTransition extends BaseTransition { @Override public void transition(RMContainerImpl container, RMContainerEvent event) { + // The logs of running containers should be found on NM webUI + // The logs should be accessible after the container is launched + container.logURL = WebAppUtils.getLogUrl(container.container + .getNodeHttpAddress(), container.getAllocatedNode().toString(), + container.containerId, container.user); // Unregister from containerAllocationExpirer. container.containerAllocationExpirer.unregister(container .getContainerId()); @@ -288,9 +378,17 @@ private static class FinishedTransition extends BaseTransition { public void transition(RMContainerImpl container, RMContainerEvent event) { RMContainerFinishedEvent finishedEvent = (RMContainerFinishedEvent) event; + container.finishTime = System.currentTimeMillis(); + container.finishedStatus = finishedEvent.getRemoteContainerStatus(); + // TODO: when AHS webUI is ready, logURL should be updated to point to + // the web page that will show the aggregated logs + // Inform AppAttempt container.eventHandler.handle(new RMAppAttemptContainerFinishedEvent( container.appAttemptId, finishedEvent.getRemoteContainerStatus())); + + container.rmContext.getRMApplicationHistoryWriter() + .containerFinished(container); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java index 446fe843bf8..38753cbbeda 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java @@ -233,8 +233,7 @@ public synchronized RMContainer reserve(SchedulerNode node, Priority priority, if (rmContainer == null) { rmContainer = new RMContainerImpl(container, getApplicationAttemptId(), - node.getNodeID(), rmContext.getDispatcher().getEventHandler(), - rmContext.getContainerAllocationExpirer()); + node.getNodeID(), appSchedulingInfo.getUser(), rmContext); Resources.addTo(currentReservation, container.getResource()); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java index dcbc5ad7a46..9c34f2f5995 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
@@ -121,9 +121,8 @@ synchronized public RMContainer allocate(NodeType type, FiCaSchedulerNode node,
 
     // Create RMContainer
     RMContainer rmContainer = new RMContainerImpl(container, this
-        .getApplicationAttemptId(), node.getNodeID(), this.rmContext
-        .getDispatcher().getEventHandler(), this.rmContext
-        .getContainerAllocationExpirer());
+        .getApplicationAttemptId(), node.getNodeID(),
+        appSchedulingInfo.getUser(), this.rmContext);
 
     // Add it to allContainers list.
     newlyAllocatedContainers.add(rmContainer);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSSchedulerApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSSchedulerApp.java
index 0bdac8c3652..adabfefaee1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSSchedulerApp.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSSchedulerApp.java
@@ -271,9 +271,8 @@ else if (allowed.equals(NodeType.RACK_LOCAL) &&
 
     // Create RMContainer
     RMContainer rmContainer = new RMContainerImpl(container,
-        getApplicationAttemptId(), node.getNodeID(), rmContext
-        .getDispatcher().getEventHandler(), rmContext
-        .getContainerAllocationExpirer());
+        getApplicationAttemptId(), node.getNodeID(),
+        appSchedulingInfo.getUser(), rmContext);
 
     // Add it to allContainers list.
     newlyAllocatedContainers.add(rmContainer);
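The scheduler hunks above (SchedulerApplicationAttempt, FiCaSchedulerApp, FSSchedulerApp) complete a single refactoring: every call site now builds RMContainerImpl from (container, attemptId, nodeId, user, rmContext), and the container resolves its event handler and allocation expirer from the context, stamps its own start time, and registers with the history writer inside its constructor. A compact sketch of that wiring under hypothetical, simplified types (Context, HistoryWriter and TrackedContainer stand in for RMContext, RMApplicationHistoryWriter and RMContainerImpl):

public class ContainerWiringDemo {

  static class HistoryWriter {
    void containerStarted(TrackedContainer c) {
      System.out.println("history: container for " + c.user
          + " started at " + c.startTime);
    }
  }

  static class Context {
    // The real RMContext also supplies the dispatcher and the
    // ContainerAllocationExpirer; one collaborator is enough for the sketch.
    final HistoryWriter historyWriter = new HistoryWriter();
  }

  static class TrackedContainer {
    final String user;
    final long startTime;

    TrackedContainer(String user, Context context) {
      this.user = user;
      this.startTime = System.currentTimeMillis();
      // As in RMContainerImpl: record the start as a side effect of
      // construction. Publishing 'this' from a constructor is tolerable
      // here only because the writer merely enqueues the reference.
      context.historyWriter.containerStarted(this);
    }
  }

  public static void main(String[] args) {
    new TrackedContainer("alice", new Context());
  }
}

The design trade is a mild service-locator style: one context parameter instead of a growing list of collaborators, at the cost of a construction-time side effect.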
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java
index 0a3738200e0..94db331faf0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java
@@ -22,9 +22,9 @@
 import static org.mockito.Matchers.isA;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
-import static org.mockito.Mockito.times;
 
 import java.util.HashMap;
 import java.util.List;
@@ -47,6 +47,7 @@
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWriter;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.MockRMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
@@ -104,9 +105,10 @@ public static RMContext mockRMContext(int n, long time) {
         rmDispatcher);
     AMLivelinessMonitor amFinishingMonitor = new AMLivelinessMonitor(
         rmDispatcher);
+    RMApplicationHistoryWriter writer = mock(RMApplicationHistoryWriter.class);
     RMContext context = new RMContextImpl(rmDispatcher,
         containerAllocationExpirer, amLivelinessMonitor, amFinishingMonitor,
-        null, null, null, null, null) {
+        null, null, null, null, null, writer) {
       @Override
       public ConcurrentMap<ApplicationId, RMApp> getRMApps() {
         return map;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
index 77398a7a832..7e3d5fe073b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
@@ -19,21 +19,21 @@
 package org.apache.hadoop.yarn.server.resourcemanager;
 
 import static org.junit.Assert.assertEquals;
+import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.anyBoolean;
 import static org.mockito.Matchers.anyString;
-import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.eq;
 import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
 import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.when;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.HashSet;
 import java.util.EnumSet;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -51,9 +51,9 @@
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.yarn.MockApps;
 import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoResponse;
@@ -81,6 +81,7 @@
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.ipc.YarnRPC;
 import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier;
+import org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWriter;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.NullRMStateStore;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
@@ -599,6 +600,8 @@ private void mockRMContext(YarnScheduler yarnScheduler, RMContext rmContext)
         .thenReturn(queInfo);
     when(yarnScheduler.getQueueInfo(eq("nonexistentqueue"), anyBoolean(), anyBoolean()))
         .thenThrow(new IOException("queue does not exist"));
+    RMApplicationHistoryWriter writer = mock(RMApplicationHistoryWriter.class);
+    when(rmContext.getRMApplicationHistoryWriter()).thenReturn(writer);
     ConcurrentHashMap<ApplicationId, RMApp> apps = getRMApps(
         rmContext, yarnScheduler);
     when(rmContext.getRMApps()).thenReturn(apps);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java
index 82046c7a9de..a966efdc18f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java
@@ -101,7 +101,7 @@ public void setUp() throws Exception {
 
     rmContext =
         new RMContextImpl(rmDispatcher, null, null, null,
-            mock(DelegationTokenRenewer.class), null, null, null, null);
+            mock(DelegationTokenRenewer.class), null, null, null, null, null);
     scheduler = mock(YarnScheduler.class);
     doAnswer(
         new Answer<Void>() {
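From here on the patch touches tests, and they share one mechanical change: RMContextImpl has grown a trailing RMApplicationHistoryWriter parameter, so every construction gains either one more null or a mock, and the behavioral tests verify the new callbacks with Mockito. A minimal, self-contained version of that verification pattern (HistoryWriter and TrackedApp are hypothetical stand-ins; JUnit 4 and Mockito are assumed on the classpath, as in the tests themselves):

import static org.mockito.Mockito.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;

import org.junit.Test;

public class HistoryWriterCallbackTest {

  // Hypothetical stand-ins for RMApplicationHistoryWriter and RMApp.
  interface HistoryWriter {
    void applicationStarted(App app);
  }

  interface App {
  }

  // Like RMAppImpl after this patch: the constructor reports the start.
  static class TrackedApp implements App {
    TrackedApp(HistoryWriter writer) {
      writer.applicationStarted(this);
    }
  }

  @Test
  public void constructorReportsStart() {
    HistoryWriter writer = mock(HistoryWriter.class);
    new TrackedApp(writer);
    // Same idiom as verify(writer).applicationStarted(any(RMApp.class))
    // in TestRMAppTransitions further down.
    verify(writer).applicationStarted(any(App.class));
  }
}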
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ahs/TestRMApplicationHistoryWriter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ahs/TestRMApplicationHistoryWriter.java
new file mode 100644
index 00000000000..6e063b584eb
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ahs/TestRMApplicationHistoryWriter.java
@@ -0,0 +1,517 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.ahs;
+
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+
+import junit.framework.Assert;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerState;
+import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState;
+import org.apache.hadoop.yarn.api.records.YarnApplicationState;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.event.AsyncDispatcher;
+import org.apache.hadoop.yarn.event.Dispatcher;
+import org.apache.hadoop.yarn.event.Event;
+import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryStore;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.MemoryApplicationHistoryStore;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptHistoryData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationHistoryData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerHistoryData;
+import org.apache.hadoop.yarn.server.resourcemanager.MockAM;
+import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
+import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
+import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestRMApplicationHistoryWriter {
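  // Test design, in brief: setup() points the writer under test at an
  // in-memory ApplicationHistoryStore and replaces each AsyncDispatcher
  // inside the writer's MultiThreadedDispatcher with a CounterDispatcher
  // (defined at the bottom of this class). The store lets the assertions
  // poll for data that is written asynchronously; the per-dispatcher
  // counters let testParallelWrite assert that all events of one
  // application were handled by exactly one dispatcher thread.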
+
+  private static int MAX_RETRIES = 10;
+
+  private RMApplicationHistoryWriter writer;
+  private ApplicationHistoryStore store;
+  private List<CounterDispatcher> dispatchers =
+      new ArrayList<CounterDispatcher>();
+
+  @Before
+  public void setup() {
+    store = new MemoryApplicationHistoryStore();
+    Configuration conf = new Configuration();
+    conf.setBoolean(YarnConfiguration.YARN_HISTORY_SERVICE_ENABLED, true);
+    writer = new RMApplicationHistoryWriter() {
+
+      @Override
+      protected ApplicationHistoryStore createApplicationHistoryStore(
+          Configuration conf) {
+        return store;
+      }
+
+      @Override
+      protected Dispatcher createDispatcher(Configuration conf) {
+        MultiThreadedDispatcher dispatcher =
+            new MultiThreadedDispatcher(
+                conf
+                  .getInt(
+                    YarnConfiguration.RM_HISTORY_WRITER_MULTI_THREADED_DISPATCHER_POOL_SIZE,
+                    YarnConfiguration.DEFAULT_RM_HISTORY_WRITER_MULTI_THREADED_DISPATCHER_POOL_SIZE));
+        dispatcher.setDrainEventsOnStop();
+        return dispatcher;
+      }
+
+      class MultiThreadedDispatcher extends
+          RMApplicationHistoryWriter.MultiThreadedDispatcher {
+
+        public MultiThreadedDispatcher(int num) {
+          super(num);
+        }
+
+        @Override
+        protected AsyncDispatcher createDispatcher() {
+          CounterDispatcher dispatcher = new CounterDispatcher();
+          dispatchers.add(dispatcher);
+          return dispatcher;
+        }
+
+      }
+    };
+    writer.init(conf);
+    writer.start();
+  }
+
+  @After
+  public void tearDown() {
+    writer.stop();
+  }
+
+  private static RMApp createRMApp(ApplicationId appId) {
+    RMApp app = mock(RMApp.class);
+    when(app.getApplicationId()).thenReturn(appId);
+    when(app.getName()).thenReturn("test app");
+    when(app.getApplicationType()).thenReturn("test app type");
+    when(app.getUser()).thenReturn("test user");
+    when(app.getQueue()).thenReturn("test queue");
+    when(app.getSubmitTime()).thenReturn(0L);
+    when(app.getStartTime()).thenReturn(1L);
+    when(app.getFinishTime()).thenReturn(2L);
+    when(app.getDiagnostics()).thenReturn(
+        new StringBuilder("test diagnostics info"));
+    when(app.getFinalApplicationStatus()).thenReturn(
+        FinalApplicationStatus.UNDEFINED);
+    when(app.createApplicationState())
+        .thenReturn(YarnApplicationState.FINISHED);
+    return app;
+  }
+
+  private static RMAppAttempt createRMAppAttempt(
+      ApplicationAttemptId appAttemptId) {
+    RMAppAttempt appAttempt = mock(RMAppAttempt.class);
+    when(appAttempt.getAppAttemptId()).thenReturn(appAttemptId);
+    when(appAttempt.getHost()).thenReturn("test host");
+    when(appAttempt.getRpcPort()).thenReturn(-100);
+    Container container = mock(Container.class);
+    when(container.getId())
+        .thenReturn(ContainerId.newInstance(appAttemptId, 1));
+    when(appAttempt.getMasterContainer()).thenReturn(container);
+    when(appAttempt.getDiagnostics()).thenReturn("test diagnostics info");
+    when(appAttempt.getTrackingUrl()).thenReturn("test url");
+    when(appAttempt.getFinalApplicationStatus()).thenReturn(
+        FinalApplicationStatus.UNDEFINED);
+    when(appAttempt.createApplicationAttemptState()).thenReturn(
+        YarnApplicationAttemptState.FINISHED);
+    return appAttempt;
+  }
+
+  private static RMContainer createRMContainer(ContainerId containerId) {
+    RMContainer container = mock(RMContainer.class);
+    when(container.getContainerId()).thenReturn(containerId);
+    when(container.getAllocatedNode()).thenReturn(
+        NodeId.newInstance("test host", -100));
+    when(container.getAllocatedResource()).thenReturn(
+        Resource.newInstance(-1, -1));
+    when(container.getAllocatedPriority()).thenReturn(Priority.UNDEFINED);
+    when(container.getStartTime()).thenReturn(0L);
+    when(container.getFinishTime()).thenReturn(1L);
when(container.getDiagnosticsInfo()).thenReturn("test diagnostics info"); + when(container.getLogURL()).thenReturn("test log url"); + when(container.getContainerExitStatus()).thenReturn(-1); + when(container.getContainerState()).thenReturn(ContainerState.COMPLETE); + return container; + } + + @Test + public void testWriteApplication() throws Exception { + RMApp app = createRMApp(ApplicationId.newInstance(0, 1)); + + writer.applicationStarted(app); + ApplicationHistoryData appHD = null; + for (int i = 0; i < MAX_RETRIES; ++i) { + appHD = store.getApplication(ApplicationId.newInstance(0, 1)); + if (appHD != null) { + break; + } else { + Thread.sleep(100); + } + } + Assert.assertNotNull(appHD); + Assert.assertEquals("test app", appHD.getApplicationName()); + Assert.assertEquals("test app type", appHD.getApplicationType()); + Assert.assertEquals("test user", appHD.getUser()); + Assert.assertEquals("test queue", appHD.getQueue()); + Assert.assertEquals(0L, appHD.getSubmitTime()); + Assert.assertEquals(1L, appHD.getStartTime()); + + writer.applicationFinished(app); + for (int i = 0; i < MAX_RETRIES; ++i) { + appHD = store.getApplication(ApplicationId.newInstance(0, 1)); + if (appHD.getYarnApplicationState() != null) { + break; + } else { + Thread.sleep(100); + } + } + Assert.assertEquals(2L, appHD.getFinishTime()); + Assert.assertEquals("test diagnostics info", appHD.getDiagnosticsInfo()); + Assert.assertEquals(FinalApplicationStatus.UNDEFINED, + appHD.getFinalApplicationStatus()); + Assert.assertEquals(YarnApplicationState.FINISHED, + appHD.getYarnApplicationState()); + } + + @Test + public void testWriteApplicationAttempt() throws Exception { + RMAppAttempt appAttempt = + createRMAppAttempt(ApplicationAttemptId.newInstance( + ApplicationId.newInstance(0, 1), 1)); + writer.applicationAttemptStarted(appAttempt); + ApplicationAttemptHistoryData appAttemptHD = null; + for (int i = 0; i < MAX_RETRIES; ++i) { + appAttemptHD = + store.getApplicationAttempt(ApplicationAttemptId.newInstance( + ApplicationId.newInstance(0, 1), 1)); + if (appAttemptHD != null) { + break; + } else { + Thread.sleep(100); + } + } + Assert.assertNotNull(appAttemptHD); + Assert.assertEquals("test host", appAttemptHD.getHost()); + Assert.assertEquals(-100, appAttemptHD.getRPCPort()); + Assert.assertEquals(ContainerId.newInstance( + ApplicationAttemptId.newInstance(ApplicationId.newInstance(0, 1), 1), 1), + appAttemptHD.getMasterContainerId()); + + writer.applicationAttemptFinished(appAttempt); + for (int i = 0; i < MAX_RETRIES; ++i) { + appAttemptHD = + store.getApplicationAttempt(ApplicationAttemptId.newInstance( + ApplicationId.newInstance(0, 1), 1)); + if (appAttemptHD.getYarnApplicationAttemptState() != null) { + break; + } else { + Thread.sleep(100); + } + } + Assert.assertEquals("test diagnostics info", + appAttemptHD.getDiagnosticsInfo()); + Assert.assertEquals("test url", appAttemptHD.getTrackingURL()); + Assert.assertEquals(FinalApplicationStatus.UNDEFINED, + appAttemptHD.getFinalApplicationStatus()); + Assert.assertEquals(YarnApplicationAttemptState.FINISHED, + appAttemptHD.getYarnApplicationAttemptState()); + } + + @Test + public void testWriteContainer() throws Exception { + RMContainer container = + createRMContainer(ContainerId.newInstance( + ApplicationAttemptId.newInstance(ApplicationId.newInstance(0, 1), 1), + 1)); + writer.containerStarted(container); + ContainerHistoryData containerHD = null; + for (int i = 0; i < MAX_RETRIES; ++i) { + containerHD = + 
store.getContainer(ContainerId.newInstance(ApplicationAttemptId
+            .newInstance(ApplicationId.newInstance(0, 1), 1), 1));
+      if (containerHD != null) {
+        break;
+      } else {
+        Thread.sleep(100);
+      }
+    }
+    Assert.assertNotNull(containerHD);
+    Assert.assertEquals(NodeId.newInstance("test host", -100),
+        containerHD.getAssignedNode());
+    Assert.assertEquals(Resource.newInstance(-1, -1),
+        containerHD.getAllocatedResource());
+    Assert.assertEquals(Priority.UNDEFINED, containerHD.getPriority());
+    Assert.assertEquals(0L, container.getStartTime());
+
+    writer.containerFinished(container);
+    for (int i = 0; i < MAX_RETRIES; ++i) {
+      containerHD =
+          store.getContainer(ContainerId.newInstance(ApplicationAttemptId
+            .newInstance(ApplicationId.newInstance(0, 1), 1), 1));
+      if (containerHD.getContainerState() != null) {
+        break;
+      } else {
+        Thread.sleep(100);
+      }
+    }
+    Assert.assertEquals("test diagnostics info",
+        containerHD.getDiagnosticsInfo());
+    Assert.assertEquals("test log url", containerHD.getLogURL());
+    Assert.assertEquals(-1, containerHD.getContainerExitStatus());
+    Assert.assertEquals(ContainerState.COMPLETE,
+        containerHD.getContainerState());
+  }
+
+  @Test
+  public void testParallelWrite() throws Exception {
+    List<ApplicationId> appIds = new ArrayList<ApplicationId>();
+    for (int i = 0; i < 10; ++i) {
+      Random rand = new Random(i);
+      ApplicationId appId = ApplicationId.newInstance(0, rand.nextInt());
+      appIds.add(appId);
+      RMApp app = createRMApp(appId);
+      writer.applicationStarted(app);
+      for (int j = 1; j <= 10; ++j) {
+        ApplicationAttemptId appAttemptId =
+            ApplicationAttemptId.newInstance(appId, j);
+        RMAppAttempt appAttempt = createRMAppAttempt(appAttemptId);
+        writer.applicationAttemptStarted(appAttempt);
+        for (int k = 1; k <= 10; ++k) {
+          ContainerId containerId = ContainerId.newInstance(appAttemptId, k);
+          RMContainer container = createRMContainer(containerId);
+          writer.containerStarted(container);
+          writer.containerFinished(container);
+        }
+        writer.applicationAttemptFinished(appAttempt);
+      }
+      writer.applicationFinished(app);
+    }
+    for (int i = 0; i < MAX_RETRIES; ++i) {
+      if (allEventsHandled(20 * 10 * 10 + 20 * 10 + 20)) {
+        break;
+      } else {
+        Thread.sleep(500);
+      }
+    }
+    Assert.assertTrue(allEventsHandled(20 * 10 * 10 + 20 * 10 + 20));
+    // Validate all events of one application are handled by one dispatcher
+    for (ApplicationId appId : appIds) {
+      Assert.assertTrue(handledByOne(appId));
+    }
+  }
+
+  private boolean allEventsHandled(int expected) {
+    int actual = 0;
+    for (CounterDispatcher dispatcher : dispatchers) {
+      for (Integer count : dispatcher.counts.values()) {
+        actual += count;
+      }
+    }
+    return actual == expected;
+  }
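  // Where the literal 20 * 10 * 10 + 20 * 10 + 20 above comes from: the
  // loops create 10 apps x 10 attempts x 10 containers, and every app,
  // attempt and container produces one start and one finish event, so the
  // total is 2*(10*10*10) + 2*(10*10) + 2*10 = 2220. A hypothetical helper
  // (not part of the patch) spelling out the same arithmetic:
  private static int expectedEvents(int apps, int attempts, int containers) {
    return 2 * apps * attempts * containers // container start + finish
        + 2 * apps * attempts               // attempt start + finish
        + 2 * apps;                         // app start + finish
  }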
+
+  @Test
+  public void testRMWritingMassiveHistory() throws Exception {
+    // 1. Show RM can run with writing history data
+    // 2. Test additional workload of processing history events
+    YarnConfiguration conf = new YarnConfiguration();
+    // don't process history events
+    MockRM rm = new MockRM(conf) {
+      @Override
+      protected RMApplicationHistoryWriter createRMApplicationHistoryWriter() {
+        return new RMApplicationHistoryWriter() {
+          @Override
+          public void applicationStarted(RMApp app) {
+          }
+
+          @Override
+          public void applicationFinished(RMApp app) {
+          }
+
+          @Override
+          public void applicationAttemptStarted(RMAppAttempt appAttempt) {
+          }
+
+          @Override
+          public void applicationAttemptFinished(RMAppAttempt appAttempt) {
+          }
+
+          @Override
+          public void containerStarted(RMContainer container) {
+          }
+
+          @Override
+          public void containerFinished(RMContainer container) {
+          }
+        };
+      }
+    };
+    long startTime1 = System.currentTimeMillis();
+    testRMWritingMassiveHistory(rm);
+    long finishTime1 = System.currentTimeMillis();
+    long elapsedTime1 = finishTime1 - startTime1;
+    rm = new MockRM(conf);
+    long startTime2 = System.currentTimeMillis();
+    testRMWritingMassiveHistory(rm);
+    long finishTime2 = System.currentTimeMillis();
+    long elapsedTime2 = finishTime2 - startTime2;
+    // No more than 10% additional workload
+    // Should be much less, but computation time is fluctuated
+    Assert.assertTrue(elapsedTime2 - elapsedTime1 < elapsedTime1 / 10);
+  }
+
+  private void testRMWritingMassiveHistory(MockRM rm) throws Exception {
+    rm.start();
+    MockNM nm = rm.registerNode("127.0.0.1:1234", 1024 * 10100);
+
+    RMApp app = rm.submitApp(1024);
+    nm.nodeHeartbeat(true);
+    RMAppAttempt attempt = app.getCurrentAppAttempt();
+    MockAM am = rm.sendAMLaunched(attempt.getAppAttemptId());
+    am.registerAppAttempt();
+
+    int request = 10000;
+    am.allocate("127.0.0.1", 1024, request, new ArrayList<ContainerId>());
+    nm.nodeHeartbeat(true);
+    List<Container> allocated =
+        am.allocate(new ArrayList<ResourceRequest>(),
+            new ArrayList<ContainerId>()).getAllocatedContainers();
+    int waitCount = 0;
+    int allocatedSize = allocated.size();
+    while (allocatedSize < request && waitCount++ < 200) {
+      Thread.sleep(100);
+      allocated =
+          am.allocate(new ArrayList<ResourceRequest>(),
+              new ArrayList<ContainerId>()).getAllocatedContainers();
+      allocatedSize += allocated.size();
+      nm.nodeHeartbeat(true);
+    }
+    Assert.assertEquals(request, allocatedSize);
+
+    am.unregisterAppAttempt();
+    am.waitForState(RMAppAttemptState.FINISHING);
+    nm.nodeHeartbeat(am.getApplicationAttemptId(), 1, ContainerState.COMPLETE);
+    am.waitForState(RMAppAttemptState.FINISHED);
+
+    NodeHeartbeatResponse resp = nm.nodeHeartbeat(true);
+    List<ContainerId> cleaned = resp.getContainersToCleanup();
+    int cleanedSize = cleaned.size();
+    waitCount = 0;
+    while (cleanedSize < allocatedSize && waitCount++ < 200) {
+      Thread.sleep(100);
+      resp = nm.nodeHeartbeat(true);
+      cleaned = resp.getContainersToCleanup();
+      cleanedSize += cleaned.size();
+    }
+    Assert.assertEquals(allocatedSize, cleanedSize);
+    rm.waitForState(app.getApplicationId(), RMAppState.FINISHED);
+
+    rm.stop();
+  }
+
+  private boolean handledByOne(ApplicationId appId) {
+    int count = 0;
+    for (CounterDispatcher dispatcher : dispatchers) {
+      if (dispatcher.counts.containsKey(appId)) {
+        ++count;
+      }
+    }
+    return count == 1;
+  }
+
+  private static class CounterDispatcher extends AsyncDispatcher {
+
+    private Map<ApplicationId, Integer> counts =
+        new HashMap<ApplicationId, Integer>();
+
+    @SuppressWarnings("rawtypes")
+    @Override
+    protected void dispatch(Event event) {
+      if (event instanceof WritingApplicationHistoryEvent) {
+        WritingApplicationHistoryEvent ashEvent =
+            (WritingApplicationHistoryEvent) event;
+        switch (ashEvent.getType()) {
+          case APP_START:
incrementCounts(((WritingApplicationStartEvent) event) + .getApplicationId()); + break; + case APP_FINISH: + incrementCounts(((WritingApplicationFinishEvent) event) + .getApplicationId()); + break; + case APP_ATTEMPT_START: + incrementCounts(((WritingApplicationAttemptStartEvent) event) + .getApplicationAttemptId().getApplicationId()); + break; + case APP_ATTEMPT_FINISH: + incrementCounts(((WritingApplicationAttemptFinishEvent) event) + .getApplicationAttemptId().getApplicationId()); + break; + case CONTAINER_START: + incrementCounts(((WritingContainerStartEvent) event) + .getContainerId().getApplicationAttemptId().getApplicationId()); + break; + case CONTAINER_FINISH: + incrementCounts(((WritingContainerFinishEvent) event) + .getContainerId().getApplicationAttemptId().getApplicationId()); + break; + } + } + super.dispatch(event); + } + + private void incrementCounts(ApplicationId appId) { + Integer val = counts.get(appId); + if (val == null) { + counts.put(appId, 1); + } else { + counts.put(appId, val + 1); + } + } + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMExpiry.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMExpiry.java index a884552bf0a..756bf45d77d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMExpiry.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMExpiry.java @@ -72,7 +72,7 @@ public void setUp() { // Dispatcher that processes events inline Dispatcher dispatcher = new InlineDispatcher(); RMContext context = new RMContextImpl(dispatcher, null, - null, null, null, null, null, null, null); + null, null, null, null, null, null, null, null); dispatcher.register(SchedulerEventType.class, new InlineDispatcher.EmptyEventHandler()); dispatcher.register(RMNodeEventType.class, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMReconnect.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMReconnect.java index cbb23740de4..455fa785758 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMReconnect.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMReconnect.java @@ -71,7 +71,7 @@ public void setUp() { new TestRMNodeEventDispatcher()); RMContext context = new RMContextImpl(dispatcher, null, - null, null, null, null, null, null, null); + null, null, null, null, null, null, null, null); dispatcher.register(SchedulerEventType.class, new InlineDispatcher.EmptyEventHandler()); dispatcher.register(RMNodeEventType.class, diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestRMNMRPCResponseId.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestRMNMRPCResponseId.java
index ddb7a90a592..4f9469548ae 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestRMNMRPCResponseId.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestRMNMRPCResponseId.java
@@ -71,7 +71,7 @@ public void handle(Event event) {
     RMContext context = new RMContextImpl(dispatcher, null, null, null, null,
         null, new RMContainerTokenSecretManager(conf),
-        new NMTokenSecretManagerInRM(conf), null);
+        new NMTokenSecretManagerInRM(conf), null, null);
     dispatcher.register(RMNodeEventType.class,
         new ResourceManager.NodeEventDispatcher(context));
     NodesListManager nodesListManager = new NodesListManager(context);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java
index 1e5733b49eb..d50bc896ef7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java
@@ -20,6 +20,7 @@
 import static org.mockito.Matchers.any;
 import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.reset;
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 
@@ -49,6 +50,7 @@
 import org.apache.hadoop.yarn.server.resourcemanager.RMAppManagerEventType;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWriter;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.ApplicationState;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.AMLivelinessMonitor;
@@ -83,6 +85,7 @@ public class TestRMAppTransitions {
   private static int appId = 1;
   private DrainDispatcher rmDispatcher;
   private RMStateStore store;
+  private RMApplicationHistoryWriter writer;
   private YarnScheduler scheduler;
 
   // ignore all the RM application attempt events
@@ -178,13 +181,15 @@ public void setUp() throws Exception {
     AMLivelinessMonitor amLivelinessMonitor = mock(AMLivelinessMonitor.class);
     AMLivelinessMonitor amFinishingMonitor = mock(AMLivelinessMonitor.class);
     store = mock(RMStateStore.class);
+    writer = mock(RMApplicationHistoryWriter.class);
     this.rmContext =
         new RMContextImpl(rmDispatcher,
           containerAllocationExpirer, amLivelinessMonitor, amFinishingMonitor,
           null, new AMRMTokenSecretManager(conf),
           new RMContainerTokenSecretManager(conf),
           new NMTokenSecretManagerInRM(conf),
-          new ClientToAMTokenSecretManagerInRM());
+          new ClientToAMTokenSecretManagerInRM(),
+          writer);
     ((RMContextImpl)rmContext).setStateStore(store);
 
     rmDispatcher.register(RMAppAttemptEventType.class,
@@ -335,6 +340,7 @@ private void sendAttemptUpdateSavedEvent(RMApp application) {
   protected RMApp testCreateAppNewSaving(
       ApplicationSubmissionContext submissionContext) throws IOException {
     RMApp application = createNewTestApp(submissionContext);
+    verify(writer).applicationStarted(any(RMApp.class));
     // NEW => NEW_SAVING event RMAppEventType.START
     RMAppEvent event =
         new RMAppEvent(application.getApplicationId(), RMAppEventType.START);
@@ -456,6 +462,9 @@ public void testUnmanagedApp() throws IOException {
     Assert.assertTrue("Finished app missing diagnostics",
         application.getDiagnostics().indexOf(diagMsg) != -1);
 
+    // reset the counter of Mockito.verify
+    reset(writer);
+
     // test app fails after 1 app attempt failure
     LOG.info("--- START: testUnmanagedAppFailPath ---");
     application = testCreateAppRunning(subContext);
@@ -497,6 +506,7 @@ public void testAppNewKill() throws IOException {
     rmDispatcher.await();
     sendAppUpdateSavedEvent(application);
     assertKilled(application);
+    verify(writer).applicationFinished(any(RMApp.class));
   }
 
   @Test
@@ -512,6 +522,7 @@ public void testAppNewReject() throws IOException {
     rmDispatcher.await();
     sendAppUpdateSavedEvent(application);
     assertFailed(application, rejectedText);
+    verify(writer).applicationFinished(any(RMApp.class));
   }
 
   @Test (timeout = 30000)
@@ -526,6 +537,7 @@ public void testAppNewSavingKill() throws IOException {
     rmDispatcher.await();
     sendAppUpdateSavedEvent(application);
     assertKilled(application);
+    verify(writer).applicationFinished(any(RMApp.class));
   }
 
   @Test (timeout = 30000)
@@ -541,6 +553,7 @@ public void testAppNewSavingReject() throws IOException {
     rmDispatcher.await();
     sendAppUpdateSavedEvent(application);
     assertFailed(application, rejectedText);
+    verify(writer).applicationFinished(any(RMApp.class));
   }
 
   @Test (timeout = 30000)
@@ -556,6 +569,7 @@ public void testAppSubmittedRejected() throws IOException {
     rmDispatcher.await();
     sendAppUpdateSavedEvent(application);
     assertFailed(application, rejectedText);
+    verify(writer).applicationFinished(any(RMApp.class));
   }
 
   @Test
@@ -570,6 +584,7 @@ public void testAppSubmittedKill() throws IOException, InterruptedException {
     sendAppUpdateSavedEvent(application);
     assertKilled(application);
     assertAppFinalStateSaved(application);
+    verify(writer).applicationFinished(any(RMApp.class));
   }
 
   @Test
@@ -603,6 +618,7 @@ public void testAppAcceptedFailed() throws IOException {
     rmDispatcher.await();
     sendAppUpdateSavedEvent(application);
     assertFailed(application, ".*" + message + ".*Failing the application.*");
+    verify(writer).applicationFinished(any(RMApp.class));
   }
 
   @Test
@@ -617,6 +633,7 @@ public void testAppAcceptedKill() throws IOException, InterruptedException {
     sendAppUpdateSavedEvent(application);
     assertKilled(application);
     assertAppFinalStateSaved(application);
+    verify(writer).applicationFinished(any(RMApp.class));
   }
 
   @Test
@@ -639,6 +656,7 @@ public void testAppRunningKill() throws IOException {
     sendAttemptUpdateSavedEvent(application);
     sendAppUpdateSavedEvent(application);
     assertKilled(application);
+    verify(writer).applicationFinished(any(RMApp.class));
   }
 
   @Test
@@ -691,6 +709,7 @@ public void testAppRunningFailed() throws IOException {
     application.handle(event);
     rmDispatcher.await();
     assertFailed(application, ".*Failing the application.*");
+    verify(writer).applicationFinished(any(RMApp.class));
   }
 
   @Test
@@ -748,6 +767,7 @@ public void testAppFinishedFinished() throws IOException {
     StringBuilder diag = application.getDiagnostics();
     Assert.assertEquals("application diagnostics is not correct", "",
         diag.toString());
+    verify(writer).applicationFinished(any(RMApp.class));
   }
 
   @Test (timeout = 30000)
@@ -775,6 +795,7 @@ public void testAppFailedFailed() throws IOException {
 
     assertTimesAtFinish(application);
     assertAppState(RMAppState.FAILED, application);
+    verify(writer).applicationFinished(any(RMApp.class));
   }
 
   @Test (timeout = 30000)
@@ -820,6 +841,7 @@ public void testAppKilledKilled() throws IOException {
 
     assertTimesAtFinish(application);
     assertAppState(RMAppState.KILLED, application);
+    verify(writer).applicationFinished(any(RMApp.class));
   }
 
   @Test
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java
index 4286950c3c6..954a4845c32 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java
@@ -64,6 +64,7 @@
 import org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWriter;
 import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEventType;
 import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher;
@@ -119,6 +120,8 @@ public class TestRMAppAttemptTransitions {
   private ApplicationMasterLauncher applicationMasterLauncher;
   private AMLivelinessMonitor amLivelinessMonitor;
   private AMLivelinessMonitor amFinishingMonitor;
+  private RMApplicationHistoryWriter writer;
+
   private RMStateStore store;
 
   private RMAppImpl application;
@@ -213,13 +216,15 @@ public void setUp() throws Exception {
         mock(ContainerAllocationExpirer.class);
     amLivelinessMonitor = mock(AMLivelinessMonitor.class);
     amFinishingMonitor = mock(AMLivelinessMonitor.class);
+    writer = mock(RMApplicationHistoryWriter.class);
     rmContext =
         new RMContextImpl(rmDispatcher,
           containerAllocationExpirer, amLivelinessMonitor, amFinishingMonitor,
           null, amRMTokenManager,
           new RMContainerTokenSecretManager(conf),
           new NMTokenSecretManagerInRM(conf),
-          clientToAMTokenManager);
+          clientToAMTokenManager,
+          writer);
 
     store = mock(RMStateStore.class);
     ((RMContextImpl) rmContext).setStateStore(store);
@@ -377,6 +382,7 @@ private void testAppAttemptKilledState(Container amContainer,
     assertEquals(0, applicationAttempt.getRanNodes().size());
     assertNull(applicationAttempt.getFinalApplicationStatus());
     verifyTokenCount(applicationAttempt.getAppAttemptId(), 1);
+    verify(writer).applicationAttemptFinished(any(RMAppAttempt.class));
     verifyAttemptFinalStateSaved();
     assertFalse(transferStateFromPreviousAttempt);
   }
@@ -452,6 +458,7 @@ private void testAppAttemptFailedState(Container container,
     // Check events
     verify(application, times(1)).handle(any(RMAppFailedAttemptEvent.class));
     verifyTokenCount(applicationAttempt.getAppAttemptId(), 1);
+    verify(writer).applicationAttemptFinished(any(RMAppAttempt.class));
     verifyAttemptFinalStateSaved();
   }
 
@@ -487,6 +494,7 @@ private void testAppAttemptRunningState(Container container,
       assertEquals(getProxyUrl(applicationAttempt),
           applicationAttempt.getTrackingUrl());
     }
+    verify(writer).applicationAttemptStarted(any(RMAppAttempt.class));
     // TODO - need to add more checks relevant to this state
   }
 
@@ -780,6 +788,7 @@ public void testAMCrashAtAllocated() {
     assertEquals(RMAppAttemptState.FAILED,
         applicationAttempt.getAppAttemptState());
     verifyTokenCount(applicationAttempt.getAppAttemptId(), 1);
+    verify(writer).applicationAttemptFinished(any(RMAppAttempt.class));
   }
 
   @Test
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java
index d1262d88eaf..80fe9136586 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java
@@ -19,14 +19,19 @@
 package org.apache.hadoop.yarn.server.resourcemanager.rmcontainer;
 
 import static org.junit.Assert.assertEquals;
+import static org.mockito.Mockito.any;
 import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
 import static org.mockito.Mockito.reset;
 import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
 
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
 import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerState;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.Priority;
@@ -34,6 +39,8 @@
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.DrainDispatcher;
 import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWriter;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEventType;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptContainerFinishedEvent;
@@ -50,7 +57,6 @@ public class TestRMContainerImpl {
   public void testReleaseWhileRunning() {
 
     DrainDispatcher drainDispatcher = new DrainDispatcher();
-    EventHandler eventHandler = drainDispatcher.getEventHandler();
     EventHandler appAttemptEventHandler = mock(EventHandler.class);
     EventHandler generic = mock(EventHandler.class);
     drainDispatcher.register(RMAppAttemptEventType.class,
@@ -71,16 +77,24 @@ public void testReleaseWhileRunning() {
     Container container = BuilderUtils.newContainer(containerId, nodeId,
         "host:3465", resource, priority, null);
 
+    RMApplicationHistoryWriter writer = mock(RMApplicationHistoryWriter.class);
+    RMContext rmContext = mock(RMContext.class);
+    when(rmContext.getDispatcher()).thenReturn(drainDispatcher);
+    when(rmContext.getContainerAllocationExpirer()).thenReturn(expirer);
+    when(rmContext.getRMApplicationHistoryWriter()).thenReturn(writer);
     RMContainer rmContainer = new RMContainerImpl(container, appAttemptId,
-        nodeId, eventHandler, expirer);
+        nodeId, "user", rmContext);
 
     assertEquals(RMContainerState.NEW, rmContainer.getState());
+    assertEquals(resource, rmContainer.getAllocatedResource());
+    assertEquals(nodeId, rmContainer.getAllocatedNode());
+    assertEquals(priority, rmContainer.getAllocatedPriority());
+    verify(writer).containerStarted(any(RMContainer.class));
 
     rmContainer.handle(new RMContainerEvent(containerId,
         RMContainerEventType.START));
     drainDispatcher.await();
     assertEquals(RMContainerState.ALLOCATED, rmContainer.getState());
-
     rmContainer.handle(new RMContainerEvent(containerId,
         RMContainerEventType.ACQUIRED));
     drainDispatcher.await();
@@ -90,6 +104,9 @@ public void testReleaseWhileRunning() {
         RMContainerEventType.LAUNCHED));
     drainDispatcher.await();
     assertEquals(RMContainerState.RUNNING, rmContainer.getState());
+    assertEquals(
+        "http://host:3465/logs/host:3425/container_1_0001_01_000001/container_1_0001_01_000001/user",
+        rmContainer.getLogURL());
 
     // In RUNNING state. Verify RELEASED and associated actions.
     reset(appAttemptEventHandler);
@@ -100,6 +117,12 @@ public void testReleaseWhileRunning() {
         containerStatus, RMContainerEventType.RELEASED));
     drainDispatcher.await();
     assertEquals(RMContainerState.RELEASED, rmContainer.getState());
+    assertEquals(SchedulerUtils.RELEASED_CONTAINER,
+        rmContainer.getDiagnosticsInfo());
+    assertEquals(ContainerExitStatus.ABORTED,
+        rmContainer.getContainerExitStatus());
+    assertEquals(ContainerState.COMPLETE, rmContainer.getContainerState());
+    verify(writer).containerFinished(any(RMContainer.class));
 
     ArgumentCaptor captor = ArgumentCaptor
         .forClass(RMAppAttemptContainerFinishedEvent.class);
@@ -120,7 +143,6 @@ public void testReleaseWhileRunning() {
   public void testExpireWhileRunning() {
 
     DrainDispatcher drainDispatcher = new DrainDispatcher();
-    EventHandler eventHandler = drainDispatcher.getEventHandler();
     EventHandler appAttemptEventHandler = mock(EventHandler.class);
     EventHandler generic = mock(EventHandler.class);
     drainDispatcher.register(RMAppAttemptEventType.class,
@@ -141,10 +163,19 @@ public void testExpireWhileRunning() {
     Container container = BuilderUtils.newContainer(containerId, nodeId,
         "host:3465", resource, priority, null);
 
+    RMApplicationHistoryWriter writer = mock(RMApplicationHistoryWriter.class);
+    RMContext rmContext = mock(RMContext.class);
+    when(rmContext.getDispatcher()).thenReturn(drainDispatcher);
+    when(rmContext.getContainerAllocationExpirer()).thenReturn(expirer);
+    when(rmContext.getRMApplicationHistoryWriter()).thenReturn(writer);
     RMContainer rmContainer = new RMContainerImpl(container, appAttemptId,
-        nodeId, eventHandler, expirer);
+        nodeId, "user", rmContext);
 
     assertEquals(RMContainerState.NEW, rmContainer.getState());
+    assertEquals(resource, rmContainer.getAllocatedResource());
+    assertEquals(nodeId, rmContainer.getAllocatedNode());
+    assertEquals(priority, rmContainer.getAllocatedPriority());
+    verify(writer).containerStarted(any(RMContainer.class));
 
     rmContainer.handle(new RMContainerEvent(containerId,
         RMContainerEventType.START));
@@ -160,6 +191,9 @@ public void testExpireWhileRunning() {
         RMContainerEventType.LAUNCHED));
     drainDispatcher.await();
     assertEquals(RMContainerState.RUNNING, rmContainer.getState());
+    assertEquals(
+        "http://host:3465/logs/host:3425/container_1_0001_01_000001/container_1_0001_01_000001/user",
+        rmContainer.getLogURL());
 
     // In RUNNING state. Verify EXPIRE and associated actions.
     reset(appAttemptEventHandler);
@@ -170,5 +204,6 @@ public void testExpireWhileRunning() {
         containerStatus, RMContainerEventType.EXPIRE));
     drainDispatcher.await();
     assertEquals(RMContainerState.RUNNING, rmContainer.getState());
+    verify(writer, never()).containerFinished(any(RMContainer.class));
   }
 }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
index 08efe29453e..ca60db3f04c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
@@ -348,7 +348,7 @@ public void testRefreshQueues() throws Exception {
     cs.reinitialize(conf, new RMContextImpl(null, null, null, null, null,
       null, new RMContainerTokenSecretManager(conf),
       new NMTokenSecretManagerInRM(conf),
-      new ClientToAMTokenSecretManagerInRM()));
+      new ClientToAMTokenSecretManagerInRM(), null));
     checkQueueCapacities(cs, A_CAPACITY, B_CAPACITY);
 
     conf.setCapacity(A, 80f);
@@ -447,7 +447,7 @@ public void testParseQueue() throws IOException {
     cs.reinitialize(conf, new RMContextImpl(null, null, null, null, null,
       null, new RMContainerTokenSecretManager(conf),
       new NMTokenSecretManagerInRM(conf),
-      new ClientToAMTokenSecretManagerInRM()));
+      new ClientToAMTokenSecretManagerInRM(), null));
   }
 
   @Test
@@ -460,7 +460,7 @@ public void testReconnectedNode() throws Exception {
     cs.reinitialize(csConf, new RMContextImpl(null, null, null, null, null,
       null, new RMContainerTokenSecretManager(csConf),
       new NMTokenSecretManagerInRM(csConf),
-      new ClientToAMTokenSecretManagerInRM()));
+      new ClientToAMTokenSecretManagerInRM(), null));
 
     RMNode n1 = MockNodes.newNodeInfo(0, MockNodes.newResource(4 * GB), 1);
     RMNode n2 = MockNodes.newNodeInfo(0, MockNodes.newResource(2 * GB), 2);
@@ -487,7 +487,7 @@ public void testRefreshQueuesWithNewQueue() throws Exception {
     cs.reinitialize(conf, new RMContextImpl(null, null, null, null, null,
       null, new RMContainerTokenSecretManager(conf),
       new NMTokenSecretManagerInRM(conf),
-      new ClientToAMTokenSecretManagerInRM()));
+      new ClientToAMTokenSecretManagerInRM(), null));
     checkQueueCapacities(cs, A_CAPACITY, B_CAPACITY);
 
     // Add a new queue b4
@@ -638,7 +638,7 @@ public void testAddAndRemoveAppFromCapacityScheduler() throws Exception {
     cs.reinitialize(conf, new RMContextImpl(rmDispatcher, null, null, null,
       null, null, new RMContainerTokenSecretManager(conf),
       new NMTokenSecretManagerInRM(conf),
-      new ClientToAMTokenSecretManagerInRM()));
+      new ClientToAMTokenSecretManagerInRM(), null));
 
     SchedulerApplication app =
         TestSchedulerUtils.verifyAppAddedAndRemovedFromScheduler(
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestChildQueueOrder.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestChildQueueOrder.java
index 3c55b42006f..d509771b4e7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestChildQueueOrder.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestChildQueueOrder.java
@@ -41,8 +41,8 @@
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.DrainDispatcher;
-import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWriter;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAllocationExpirer;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEventType;
@@ -248,14 +248,18 @@ public void testSortedQueues() throws Exception {
     ContainerAllocationExpirer expirer =
       mock(ContainerAllocationExpirer.class);
     DrainDispatcher drainDispatcher = new DrainDispatcher();
-    EventHandler eventHandler = drainDispatcher.getEventHandler();
+    RMApplicationHistoryWriter writer = mock(RMApplicationHistoryWriter.class);
+    RMContext rmContext = mock(RMContext.class);
+    when(rmContext.getContainerAllocationExpirer()).thenReturn(expirer);
+    when(rmContext.getDispatcher()).thenReturn(drainDispatcher);
+    when(rmContext.getRMApplicationHistoryWriter()).thenReturn(writer);
     ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(
         app_0.getApplicationId(), 1);
     ContainerId containerId = BuilderUtils.newContainerId(appAttemptId, 1);
     Container container=TestUtils.getMockContainer(containerId,
         node_0.getNodeID(), Resources.createResource(1*GB), priority);
     RMContainer rmContainer = new RMContainerImpl(container, appAttemptId,
-        node_0.getNodeID(), eventHandler, expirer);
+        node_0.getNodeID(), "user", rmContext);
 
     // Assign {1,2,3,4} 1GB containers respectively to queues
     stubQueueAllocation(a, clusterResource, node_0, 1*GB);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueParsing.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueParsing.java
index c86d6b3d232..21c446aa4d6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueParsing.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueParsing.java
@@ -47,7 +47,7 @@ public void testQueueParsing() throws Exception {
     capacityScheduler.reinitialize(conf, new RMContextImpl(null, null,
       null, null, null, null, new RMContainerTokenSecretManager(conf),
       new NMTokenSecretManagerInRM(conf),
-      new ClientToAMTokenSecretManagerInRM()));
+      new ClientToAMTokenSecretManagerInRM(), null));
 
     CSQueue a = capacityScheduler.getQueue("a");
     Assert.assertEquals(0.10, a.getAbsoluteCapacity(), DELTA);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java
index b974528a3cc..db28dcaa558 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java
@@ -41,6 +41,7 @@
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWriter;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAllocationExpirer;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
@@ -84,12 +85,13 @@ public EventHandler getEventHandler() {
         new ContainerAllocationExpirer(nullDispatcher);
 
     Configuration conf = new Configuration();
+    RMApplicationHistoryWriter writer = mock(RMApplicationHistoryWriter.class);
     RMContext rmContext =
         new RMContextImpl(nullDispatcher, cae, null, null, null,
           new AMRMTokenSecretManager(conf),
           new RMContainerTokenSecretManager(conf),
           new NMTokenSecretManagerInRM(conf),
-          new ClientToAMTokenSecretManagerInRM());
+          new ClientToAMTokenSecretManagerInRM(), writer);
     return rmContext;
   }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java
index af819d1787e..38a7995d7bb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java
@@ -21,6 +21,8 @@
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
+import static org.mockito.Mockito.mock;
+
 import java.io.IOException;
 import java.lang.reflect.Method;
 import java.util.ArrayList;
@@ -56,6 +58,7 @@
 import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import org.apache.hadoop.yarn.server.resourcemanager.Task;
+import org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWriter;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Queue;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
@@ -140,8 +143,9 @@ public void testFifoSchedulerCapacityWhenNoNMs() {
   @Test(timeout=5000)
   public void testAppAttemptMetrics() throws Exception {
     AsyncDispatcher dispatcher = new InlineDispatcher();
+    RMApplicationHistoryWriter writer = mock(RMApplicationHistoryWriter.class);
     RMContext rmContext = new RMContextImpl(dispatcher, null,
-        null, null, null, null, null, null, null);
+        null, null, null, null, null, null, null, writer);
 
     FifoScheduler schedular = new FifoScheduler();
     schedular.reinitialize(new Configuration(), rmContext);
@@ -177,8 +181,9 @@ public void testNodeLocalAssignment() throws Exception {
     NMTokenSecretManagerInRM nmTokenSecretManager =
         new NMTokenSecretManagerInRM(conf);
     nmTokenSecretManager.rollMasterKey();
+    RMApplicationHistoryWriter writer = mock(RMApplicationHistoryWriter.class);
     RMContext rmContext = new RMContextImpl(dispatcher, null, null, null, null,
-        null, containerTokenSecretManager, nmTokenSecretManager, null);
+        null, containerTokenSecretManager, nmTokenSecretManager, null, writer);
 
     FifoScheduler scheduler = new FifoScheduler();
     scheduler.reinitialize(new Configuration(), rmContext);
@@ -241,8 +246,9 @@ public void testUpdateResourceOnNode() throws Exception {
     NMTokenSecretManagerInRM nmTokenSecretManager =
         new NMTokenSecretManagerInRM(conf);
     nmTokenSecretManager.rollMasterKey();
+    RMApplicationHistoryWriter writer = mock(RMApplicationHistoryWriter.class);
     RMContext rmContext = new RMContextImpl(dispatcher, null, null, null, null,
-        null, containerTokenSecretManager, nmTokenSecretManager, null);
+        null, containerTokenSecretManager, nmTokenSecretManager, null, writer);
 
     FifoScheduler scheduler = new FifoScheduler(){
       @SuppressWarnings("unused")
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java
index 74dc95a6a70..2c2aae6f9fa 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java
@@ -163,7 +163,7 @@ public static RMContext mockRMContext(int numApps, int racks, int numNodes,
       deactivatedNodesMap.put(node.getHostName(), node);
     }
     return new RMContextImpl(null, null, null, null,
-        null, null, null, null, null) {
+        null, null, null, null, null, null) {
       @Override
       public ConcurrentMap getRMApps() {
         return applicationsMaps;
@@ -206,7 +206,7 @@ public static CapacityScheduler mockCapacityScheduler() throws IOException {
     cs.reinitialize(conf, new RMContextImpl(null, null, null, null, null, null,
       new RMContainerTokenSecretManager(conf),
       new NMTokenSecretManagerInRM(conf),
-      new ClientToAMTokenSecretManagerInRM()));
+      new ClientToAMTokenSecretManagerInRM(), null));
     return cs;
   }
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/pom.xml
index e924c46c228..b635d10fbb4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/pom.xml
@@ -40,5 +40,6 @@
     hadoop-yarn-server-web-proxy
     hadoop-yarn-server-resourcemanager
     hadoop-yarn-server-tests
+    hadoop-yarn-server-applicationhistoryservice
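
The test-side changes in this patch all follow one pattern: RMContextImpl gains a trailing RMApplicationHistoryWriter argument, and RMContainerImpl now takes a user name plus the full RMContext in place of the old EventHandler/expirer pair. A minimal sketch of the resulting mock wiring, assuming Mockito and the post-patch signatures shown in the hunks above (fixture names such as container, expirer and drainDispatcher stand in for what each test builds):

    RMApplicationHistoryWriter writer = mock(RMApplicationHistoryWriter.class);
    RMContext rmContext = mock(RMContext.class);
    when(rmContext.getDispatcher()).thenReturn(drainDispatcher);
    when(rmContext.getContainerAllocationExpirer()).thenReturn(expirer);
    when(rmContext.getRMApplicationHistoryWriter()).thenReturn(writer);

    // The container pulls its dispatcher, expirer and history writer from the
    // mocked context, so a test can assert each lifecycle callback directly.
    RMContainer rmContainer = new RMContainerImpl(container, appAttemptId,
        nodeId, "user", rmContext);
    verify(writer).containerStarted(any(RMContainer.class));
    verify(writer, never()).containerFinished(any(RMContainer.class));

Stubbing the context once, rather than threading each collaborator through the constructor, is what lets every scheduler and web-app test in this patch absorb the new writer with a one- or two-line change.
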
From df000e69e8fb4b051c1e5c455bb226e2c009fa4c Mon Sep 17 00:00:00 2001
From: Vinod Kumar Vavilapalli
Date: Sun, 26 Jan 2014 07:20:51 +0000
Subject: [PATCH 09/11] YARN-1625. Fixed RAT warnings after YARN-321 merge. Contributed by Shinichi Yamashita.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1561458 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-yarn-project/CHANGES.txt                            | 3 +++
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml | 1 +
 2 files changed, 4 insertions(+)

diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index a2dfdbb69cc..f6c8ff7cc06 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -106,6 +106,9 @@ Trunk - Unreleased
     YARN-1605. Fixed formatting issues in the new module on branch YARN-321.
     (Vinod Kumar Vavilapalli via zjshen)
 
+    YARN-1625. Fixed RAT warnings after YARN-321 merge. (Shinichi Yamashita via
+    vinodkv)
+
 Release 2.4.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
index 7a151b32228..ad8e96594a9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
@@ -199,6 +199,7 @@
             src/main/resources/webapps/mapreduce/.keep
             src/main/resources/webapps/jobhistory/.keep
            src/main/resources/webapps/yarn/.keep
+            src/main/resources/webapps/applicationhistory/.keep
             src/main/resources/webapps/cluster/.keep
             src/main/resources/webapps/test/.keep
             src/main/resources/webapps/proxy/.keep

From 9875656581989ceb616a12dd8eeb3af4dd3404d6 Mon Sep 17 00:00:00 2001
From: Vinod Kumar Vavilapalli
Date: Sun, 26 Jan 2014 07:53:32 +0000
Subject: [PATCH 10/11] YARN-1613. Fixed the typo with the configuration name YARN_HISTORY_SERVICE_ENABLED. Contributed by Akira Ajisaka.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1561461 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-yarn-project/CHANGES.txt                              | 3 +++
 .../java/org/apache/hadoop/yarn/conf/YarnConfiguration.java  | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)

diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index f6c8ff7cc06..c36f1ee67ae 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -109,6 +109,9 @@ Trunk - Unreleased
     YARN-1625. Fixed RAT warnings after YARN-321 merge. (Shinichi Yamashita via
     vinodkv)
 
+    YARN-1613. Fixed the typo with the configuration name
+    YARN_HISTORY_SERVICE_ENABLED. (Akira Ajisaka via vinodkv)
+
 Release 2.4.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 7fc8165813f..ea9b93aae15 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -950,7 +950,7 @@ public class YarnConfiguration extends Configuration {
   /** The setting that controls whether history-service is enabled or not..
    */
   public static final String YARN_HISTORY_SERVICE_ENABLED = AHS_PREFIX
-      + ".enabled";
+      + "enabled";
   public static final boolean DEFAULT_YARN_HISTORY_SERVICE_ENABLED = false;
 
   /** URI for FileSystemApplicationHistoryStore */

From 5e2d2dd56b27626594ba0d7017ac30d915b9502a Mon Sep 17 00:00:00 2001
From: Aaron Myers
Date: Mon, 27 Jan 2014 16:18:04 +0000
Subject: [PATCH 11/11] HADOOP-10203. Connection leak in Jets3tNativeFileSystemStore#retrieveMetadata. Contributed by Andrei Savu.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1561720 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-common-project/hadoop-common/CHANGES.txt          |  3 +++
 .../fs/s3native/Jets3tNativeFileSystemStore.java         | 16 +++++++++++-----
 2 files changed, 14 insertions(+), 5 deletions(-)

diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 7595563d4e0..fc610d324c5 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -536,6 +536,9 @@ Release 2.4.0 - UNRELEASED
     HADOOP-10252. HttpServer can't start if hostname is not specified.
     (Jimmy Xiang via atm)
 
+    HADOOP-10203. Connection leak in
+    Jets3tNativeFileSystemStore#retrieveMetadata. (Andrei Savu via atm)
+
 Release 2.3.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/Jets3tNativeFileSystemStore.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/Jets3tNativeFileSystemStore.java
index e05ed09f586..49266187057 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/Jets3tNativeFileSystemStore.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/Jets3tNativeFileSystemStore.java
@@ -110,23 +110,29 @@ public void storeEmptyFile(String key) throws IOException {
       handleS3ServiceException(e);
     }
   }
-  
+
   @Override
   public FileMetadata retrieveMetadata(String key) throws IOException {
+    StorageObject object = null;
     try {
       if(LOG.isDebugEnabled()) {
         LOG.debug("Getting metadata for key: " + key + " from bucket:" + bucket.getName());
       }
-      S3Object object = s3Service.getObject(bucket.getName(), key);
+      object = s3Service.getObjectDetails(bucket.getName(), key);
       return new FileMetadata(key, object.getContentLength(),
           object.getLastModifiedDate().getTime());
-    } catch (S3ServiceException e) {
+
+    } catch (ServiceException e) {
       // Following is brittle. Is there a better way?
-      if (e.getS3ErrorCode().matches("NoSuchKey")) {
+      if ("NoSuchKey".equals(e.getErrorCode())) {
         return null; //return null if key not found
       }
-      handleS3ServiceException(e);
+      handleServiceException(e);
       return null; //never returned - keep compiler happy
+    } finally {
+      if (object != null) {
+        object.closeDataInputStream();
+      }
     }
   }
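
The shape of this last fix is worth spelling out: retrieveMetadata() previously called getObject(), which opens the object's content stream, and that stream was never closed on the metadata-only path, so every call pinned an HTTP connection from the pool. The patched method switches to getObjectDetails(), a headers-only request, and routes every exit through a finally block. A condensed sketch of the resulting pattern, assuming the JetS3t members (s3Service, bucket, handleServiceException) used in the hunk above:

    StorageObject object = null;
    try {
      // Metadata-only request: no payload stream is opened here.
      object = s3Service.getObjectDetails(bucket.getName(), key);
      return new FileMetadata(key, object.getContentLength(),
          object.getLastModifiedDate().getTime());
    } catch (ServiceException e) {
      if ("NoSuchKey".equals(e.getErrorCode())) {
        return null; // key not found
      }
      handleServiceException(e);
      return null; // unreachable; keeps the compiler happy
    } finally {
      if (object != null) {
        // Effectively a no-op for a details-only object, but guarantees any
        // open data stream (and its pooled connection) is released.
        object.closeDataInputStream();
      }
    }

Because the close happens in finally, the connection is released on the success path, the not-found path, and the error path alike, which is what closes the leak.
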