From 40211d1f0a3e4546eab076e10be8937853490e5e Mon Sep 17 00:00:00 2001 From: Steve Loughran Date: Thu, 14 Apr 2016 10:35:04 +0100 Subject: [PATCH 01/26] HADOOP-12969 Mark IPC.Client and IPC.Server as @Public, @Evolving (Xiaobing Zhou via stevel) --- .../src/main/java/org/apache/hadoop/ipc/Client.java | 3 ++- .../src/main/java/org/apache/hadoop/ipc/Server.java | 5 +++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java index 489c35496fa..f206861c02f 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java @@ -61,6 +61,7 @@ import javax.security.sasl.Sasl; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.conf.Configuration; @@ -107,7 +108,7 @@ import com.google.protobuf.CodedOutputStream; * * @see Server */ -@InterfaceAudience.LimitedPrivate(value = { "Common", "HDFS", "MapReduce", "Yarn" }) +@Public @InterfaceStability.Evolving public class Client implements AutoCloseable { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java index eb28ad57d11..1cc9f1d3eeb 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java @@ -74,6 +74,7 @@ import org.apache.commons.logging.Log; import 
org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration.IntegerRanges; @@ -133,7 +134,7 @@ import com.google.protobuf.Message.Builder; * * @see Client */ -@InterfaceAudience.LimitedPrivate(value = { "Common", "HDFS", "MapReduce", "Yarn" }) +@Public @InterfaceStability.Evolving public abstract class Server { private final boolean authorize; @@ -439,7 +440,7 @@ public abstract class Server { /** * Checks if LogSlowRPC is set true. - * @return + * @return true, if LogSlowRPC is set true, false, otherwise. */ protected boolean isLogSlowRPC() { return logSlowRPC; From df18b6e9849c53c51a3d317f1254298edd8b17d1 Mon Sep 17 00:00:00 2001 From: Steve Loughran Date: Thu, 14 Apr 2016 12:44:55 +0100 Subject: [PATCH 02/26] HADOOP-12963 Allow using path style addressing for accessing the s3 endpoint. (Stephen Montgomery via stevel) --- .../src/main/resources/core-default.xml | 7 +++ .../org/apache/hadoop/fs/s3a/Constants.java | 10 ++-- .../apache/hadoop/fs/s3a/S3AFileSystem.java | 10 ++++ .../site/markdown/tools/hadoop-aws/index.md | 7 +++ .../hadoop/fs/s3a/TestS3AConfiguration.java | 47 +++++++++++++++++-- 5 files changed, 75 insertions(+), 6 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml index b3436da6702..96b108f0f37 100644 --- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml +++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml @@ -765,6 +765,13 @@ + + fs.s3a.path.style.access + Enable S3 path style access ie disabling the default virtual hosting behaviour. 
+ Useful for S3A-compliant storage providers as it removes the need to set up DNS for virtual hosting. + + + fs.s3a.proxy.host Hostname of the (optional) proxy server for S3 connections. diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java index f10f3db60dd..9d79623f719 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java @@ -28,13 +28,17 @@ public class Constants { // number of simultaneous connections to s3 public static final String MAXIMUM_CONNECTIONS = "fs.s3a.connection.maximum"; public static final int DEFAULT_MAXIMUM_CONNECTIONS = 15; - + // connect to s3 over ssl? public static final String SECURE_CONNECTIONS = "fs.s3a.connection.ssl.enabled"; public static final boolean DEFAULT_SECURE_CONNECTIONS = true; //use a custom endpoint? public static final String ENDPOINT = "fs.s3a.endpoint"; + + //Enable path style access? Overrides default virtual hosting + public static final String PATH_STYLE_ACCESS = "fs.s3a.path.style.access"; + //connect to s3 through a proxy server? 
public static final String PROXY_HOST = "fs.s3a.proxy.host"; public static final String PROXY_PORT = "fs.s3a.proxy.port"; @@ -50,7 +54,7 @@ public class Constants { // seconds until we give up trying to establish a connection to s3 public static final String ESTABLISH_TIMEOUT = "fs.s3a.connection.establish.timeout"; public static final int DEFAULT_ESTABLISH_TIMEOUT = 50000; - + // seconds until we give up on a connection to s3 public static final String SOCKET_TIMEOUT = "fs.s3a.connection.timeout"; public static final int DEFAULT_SOCKET_TIMEOUT = 200000; @@ -74,7 +78,7 @@ public class Constants { // size of each of or multipart pieces in bytes public static final String MULTIPART_SIZE = "fs.s3a.multipart.size"; public static final long DEFAULT_MULTIPART_SIZE = 104857600; // 100 MB - + // minimum size in bytes before we start a multipart uploads or copy public static final String MIN_MULTIPART_THRESHOLD = "fs.s3a.multipart.threshold"; public static final long DEFAULT_MIN_MULTIPART_THRESHOLD = Integer.MAX_VALUE; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java index fe705cef83c..97092ac03d9 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java @@ -38,6 +38,7 @@ import com.amazonaws.auth.AWSCredentialsProviderChain; import com.amazonaws.auth.InstanceProfileCredentialsProvider; import com.amazonaws.services.s3.AmazonS3Client; +import com.amazonaws.services.s3.S3ClientOptions; import com.amazonaws.services.s3.model.CannedAccessControlList; import com.amazonaws.services.s3.model.DeleteObjectRequest; import com.amazonaws.services.s3.model.DeleteObjectsRequest; @@ -244,6 +245,15 @@ public class S3AFileSystem extends FileSystem { throw new IllegalArgumentException(msg, e); } } + enablePathStyleAccessIfRequired(conf); + 
} + + private void enablePathStyleAccessIfRequired(Configuration conf) { + final boolean pathStyleAccess = conf.getBoolean(PATH_STYLE_ACCESS, false); + if (pathStyleAccess) { + LOG.debug("Enabling path style access!"); + s3.setS3ClientOptions(new S3ClientOptions().withPathStyleAccess(true)); + } } private void initTransferManager() { diff --git a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md index 7382029f3af..e87b8849ffc 100644 --- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md +++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md @@ -224,6 +224,13 @@ this capability. + + fs.s3a.path.style.access + Enable S3 path style access ie disabling the default virtual hosting behaviour. + Useful for S3A-compliant storage providers as it removes the need to set up DNS for virtual hosting. + + + fs.s3a.proxy.host Hostname of the (optional) proxy server for S3 connections. 
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AConfiguration.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AConfiguration.java index ae1539d4c8d..4a0bfbbfc07 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AConfiguration.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AConfiguration.java @@ -19,10 +19,14 @@ package org.apache.hadoop.fs.s3a; import com.amazonaws.services.s3.AmazonS3Client; +import com.amazonaws.services.s3.S3ClientOptions; +import com.amazonaws.services.s3.model.AmazonS3Exception; + import org.apache.commons.lang.StringUtils; import com.amazonaws.AmazonClientException; import org.apache.hadoop.conf.Configuration; - +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.contract.ContractTestUtils; import org.junit.Rule; import org.junit.Test; import org.junit.rules.Timeout; @@ -30,17 +34,19 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; import static org.junit.Assert.assertEquals; import static org.junit.Assert.fail; import java.io.File; import java.net.URI; -import java.io.IOException; +import java.lang.reflect.Field; import org.apache.hadoop.security.ProviderUtils; import org.apache.hadoop.security.alias.CredentialProvider; import org.apache.hadoop.security.alias.CredentialProviderFactory; - +import org.apache.http.HttpStatus; import org.junit.rules.TemporaryFolder; public class TestS3AConfiguration { @@ -354,4 +360,39 @@ public class TestS3AConfiguration { assertEquals("SecretKey incorrect.", "456", creds.getAccessSecret()); } + + @Test + public void shouldBeAbleToSwitchOnS3PathStyleAccessViaConfigProperty() throws Exception { + + conf = new Configuration(); + conf.set(Constants.PATH_STYLE_ACCESS, Boolean.toString(true)); + 
assertTrue(conf.getBoolean(Constants.PATH_STYLE_ACCESS, false)); + + try { + fs = S3ATestUtils.createTestFileSystem(conf); + final Object object = getClientOptionsField(fs.getAmazonS3Client(), "clientOptions"); + assertNotNull(object); + assertTrue("Unexpected type found for clientOptions!", object instanceof S3ClientOptions); + assertTrue("Expected to find path style access to be switched on!", ((S3ClientOptions) object).isPathStyleAccess()); + byte[] file = ContractTestUtils.toAsciiByteArray("test file"); + ContractTestUtils.writeAndRead(fs, new Path("/path/style/access/testFile"), file, file.length, conf.getInt(Constants.FS_S3A_BLOCK_SIZE, file.length), false, true); + } catch (final AmazonS3Exception e) { + LOG.error("Caught exception: ", e); + // Catch/pass standard path style access behaviour when live bucket + // isn't in the same region as the s3 client default. See + // http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html + assertEquals(e.getStatusCode(), HttpStatus.SC_MOVED_PERMANENTLY); + } + } + + private Object getClientOptionsField(AmazonS3Client s3client, String field) + throws NoSuchFieldException, IllegalAccessException { + final Field clientOptionsProps = s3client.getClass().getDeclaredField(field); + assertNotNull(clientOptionsProps); + if (!clientOptionsProps.isAccessible()) { + clientOptionsProps.setAccessible(true); + } + final Object object = clientOptionsProps.get(s3client); + return object; + } } From 0d1c1152f1ce2706f92109bfbdff0d62e98e6797 Mon Sep 17 00:00:00 2001 From: Kihwal Lee Date: Thu, 14 Apr 2016 07:58:24 -0500 Subject: [PATCH 03/26] HDFS-10282. The VolumeScanner should warn about replica files which are misplaced. Contributed by Colin Patrick McCabe. 
--- .../server/datanode/DirectoryScanner.java | 14 ++--- .../hdfs/server/datanode/VolumeScanner.java | 2 +- .../datanode/fsdataset/impl/FsVolumeImpl.java | 12 ++++ .../server/datanode/FsDatasetTestUtils.java | 7 +++ .../server/datanode/TestBlockScanner.java | 63 +++++++++++++++++++ .../impl/FsDatasetImplTestUtils.java | 21 +++++++ 6 files changed, 111 insertions(+), 8 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java index 0e51cecf2b6..1db445e4167 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java @@ -914,13 +914,13 @@ public class DirectoryScanner implements Runnable { */ private void verifyFileLocation(File actualBlockFile, File bpFinalizedDir, long blockId) { - File blockDir = DatanodeUtil.idToBlockDir(bpFinalizedDir, blockId); - if (actualBlockFile.getParentFile().compareTo(blockDir) != 0) { - File expBlockFile = new File(blockDir, actualBlockFile.getName()); - LOG.warn("Block: " + blockId - + " has to be upgraded to block ID-based layout. " - + "Actual block file path: " + actualBlockFile - + ", expected block file path: " + expBlockFile); + File expectedBlockDir = + DatanodeUtil.idToBlockDir(bpFinalizedDir, blockId); + File actualBlockDir = actualBlockFile.getParentFile(); + if (actualBlockDir.compareTo(expectedBlockDir) != 0) { + LOG.warn("Block: " + blockId + + " found in invalid directory. Expected directory: " + + expectedBlockDir + ". 
Actual directory: " + actualBlockDir); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java index d1f2d051760..d0dc9edbc82 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java @@ -414,7 +414,7 @@ public class VolumeScanner extends Thread { Block b = volume.getDataset().getStoredBlock( cblock.getBlockPoolId(), cblock.getBlockId()); if (b == null) { - LOG.info("FileNotFound while finding block {} on volume {}", + LOG.info("Replica {} was not found in the VolumeMap for volume {}", cblock, volume.getBasePath()); } else { block = new ExtendedBlock(cblock.getBlockPoolId(), b); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java index 0d060f9eae0..73514b6ecf4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java @@ -697,6 +697,18 @@ public class FsVolumeImpl implements FsVolumeSpi { } else { ExtendedBlock block = new ExtendedBlock(bpid, Block.filename2id(state.curEntry)); + File expectedBlockDir = DatanodeUtil.idToBlockDir( + new File("."), block.getBlockId()); + File actualBlockDir = Paths.get(".", + state.curFinalizedDir, state.curFinalizedSubDir).toFile(); + if (!expectedBlockDir.equals(actualBlockDir)) { + LOG.error("nextBlock({}, {}): block id {} found in invalid " + + "directory. Expected directory: {}. 
" + + "Actual directory: {}", storageID, bpid, + block.getBlockId(), expectedBlockDir.getPath(), + actualBlockDir.getPath()); + continue; + } LOG.trace("nextBlock({}, {}): advancing to {}", storageID, bpid, block); return block; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java index f5bf4e9f6a8..867d6c92155 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java @@ -135,6 +135,13 @@ public interface FsDatasetTestUtils { * @throws IOException I/O error. */ void truncateMeta(long newSize) throws IOException; + + /** + * Make the replica unreachable, perhaps by renaming it to an + * invalid file name. + * @throws IOException On I/O error. 
+ */ + void makeUnreachable() throws IOException; } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockScanner.java index 4628a4651bc..021361b2d8a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockScanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockScanner.java @@ -24,6 +24,7 @@ import static org.apache.hadoop.hdfs.server.datanode.BlockScanner.Conf.INTERNAL_ import static org.apache.hadoop.hdfs.server.datanode.BlockScanner.Conf.INTERNAL_DFS_BLOCK_SCANNER_CURSOR_SAVE_INTERVAL_MS; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; +import static org.junit.Assert.assertFalse; import java.io.Closeable; import java.io.File; @@ -38,6 +39,7 @@ import java.util.concurrent.Semaphore; import com.google.common.base.Supplier; import org.apache.hadoop.hdfs.MiniDFSNNTopology; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; +import org.apache.hadoop.hdfs.server.datanode.FsDatasetTestUtils.MaterializedReplica; import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; import org.apache.hadoop.hdfs.server.datanode.VolumeScanner.ScanResultHandler; import org.apache.hadoop.conf.Configuration; @@ -139,6 +141,11 @@ public class TestBlockScanner { throws Exception { return DFSTestUtil.getFirstBlock(dfs[nsIdx], getPath(fileIdx)); } + + public MaterializedReplica getMaterializedReplica(int nsIdx, int fileIdx) + throws Exception { + return cluster.getMaterializedReplica(0, getFileBlock(nsIdx, fileIdx)); + } } /** @@ -806,4 +813,60 @@ public class TestBlockScanner { info.blocksScanned = 0; } } + + /** + * Test that blocks which are in the wrong location are ignored. 
+ */ + @Test(timeout=120000) + public void testIgnoreMisplacedBlock() throws Exception { + Configuration conf = new Configuration(); + // Set a really long scan period. + conf.setLong(DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, 100L); + conf.set(INTERNAL_VOLUME_SCANNER_SCAN_RESULT_HANDLER, + TestScanResultHandler.class.getName()); + conf.setLong(INTERNAL_DFS_BLOCK_SCANNER_CURSOR_SAVE_INTERVAL_MS, 0L); + final TestContext ctx = new TestContext(conf, 1); + final int NUM_FILES = 4; + ctx.createFiles(0, NUM_FILES, 5); + MaterializedReplica unreachableReplica = ctx.getMaterializedReplica(0, 1); + ExtendedBlock unreachableBlock = ctx.getFileBlock(0, 1); + unreachableReplica.makeUnreachable(); + final TestScanResultHandler.Info info = + TestScanResultHandler.getInfo(ctx.volumes.get(0)); + String storageID = ctx.volumes.get(0).getStorageID(); + synchronized (info) { + info.sem = new Semaphore(NUM_FILES); + info.shouldRun = true; + info.notify(); + } + // Scan the first 4 blocks + LOG.info("Waiting for the blocks to be scanned."); + GenericTestUtils.waitFor(new Supplier() { + @Override + public Boolean get() { + synchronized (info) { + if (info.blocksScanned >= NUM_FILES - 1) { + LOG.info("info = {}. blockScanned has now reached " + + info.blocksScanned, info); + return true; + } else { + LOG.info("info = {}. 
Waiting for blockScanned to reach " + + (NUM_FILES - 1), info); + return false; + } + } + } + }, 50, 30000); + // We should have scanned 4 blocks + synchronized (info) { + assertFalse(info.goodBlocks.contains(unreachableBlock)); + assertFalse(info.badBlocks.contains(unreachableBlock)); + assertEquals("Expected 3 good blocks.", 3, info.goodBlocks.size()); + info.goodBlocks.clear(); + assertEquals("Expected 3 blocksScanned", 3, info.blocksScanned); + assertEquals("Did not expect bad blocks.", 0, info.badBlocks.size()); + info.blocksScanned = 0; + } + info.sem.release(1); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java index f3c740a3200..f780a14f2a8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java @@ -170,6 +170,27 @@ public class FsDatasetImplTestUtils implements FsDatasetTestUtils { truncate(metaFile, newSize); } + @Override + public void makeUnreachable() throws IOException { + long blockId = Block.getBlockId(blockFile.getAbsolutePath()); + File origDir = blockFile.getParentFile(); + File root = origDir.getParentFile().getParentFile(); + File newDir = null; + // Keep incrementing the block ID until the block and metadata + // files end up in a different directory. Actually, with the + // current replica file placement scheme, this should only ever + // require one increment, but this is a bit of defensive coding. 
+ do { + blockId++; + newDir = DatanodeUtil.idToBlockDir(root, blockId); + } while (origDir.equals(newDir)); + Files.createDirectories(newDir.toPath()); + Files.move(blockFile.toPath(), + new File(newDir, blockFile.getName()).toPath()); + Files.move(metaFile.toPath(), + new File(newDir, metaFile.getName()).toPath()); + } + @Override public String toString() { return String.format("MaterializedReplica: file=%s", blockFile); From 404f57f328b00a42ec8b952ad08cd7a80207c7f2 Mon Sep 17 00:00:00 2001 From: Jing Zhao Date: Thu, 14 Apr 2016 10:35:22 -0700 Subject: [PATCH 04/26] HDFS-10216. Distcp -diff throws exception when handling relative path. Contributed by Takashi Ohnishi. --- .../hadoop/tools/SimpleCopyListing.java | 2 +- .../apache/hadoop/tools/TestDistCpSync.java | 38 +++++++++++++++++++ 2 files changed, 39 insertions(+), 1 deletion(-) diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java index d2598a42d0e..cabb7e352aa 100644 --- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java +++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java @@ -191,7 +191,7 @@ public class SimpleCopyListing extends CopyListing { authority = fs.getUri().getAuthority(); } - return new Path(scheme, authority, path.toUri().getPath()); + return new Path(scheme, authority, makeQualified(path).toUri().getPath()); } /** diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpSync.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpSync.java index 04de8e4d6d9..90e6840f714 100644 --- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpSync.java +++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpSync.java @@ -674,4 +674,42 @@ public class TestDistCpSync { 
testAndVerify(numCreatedModified); } + + private void initData9(Path dir) throws Exception { + final Path foo = new Path(dir, "foo"); + final Path foo_f1 = new Path(foo, "f1"); + + DFSTestUtil.createFile(dfs, foo_f1, BLOCK_SIZE, DATA_NUM, 0L); + } + + private void changeData9(Path dir) throws Exception { + final Path foo = new Path(dir, "foo"); + final Path foo_f2 = new Path(foo, "f2"); + + DFSTestUtil.createFile(dfs, foo_f2, BLOCK_SIZE, DATA_NUM, 0L); + } + + /** + * Test a case where the source path is relative. + */ + @Test + public void testSync9() throws Exception { + + // use /user/$USER/source for source directory + Path sourcePath = new Path(dfs.getWorkingDirectory(), "source"); + initData9(sourcePath); + initData9(target); + dfs.allowSnapshot(sourcePath); + dfs.allowSnapshot(target); + dfs.createSnapshot(sourcePath, "s1"); + dfs.createSnapshot(target, "s1"); + changeData9(sourcePath); + dfs.createSnapshot(sourcePath, "s2"); + + String[] args = new String[]{"-update","-diff", "s1", "s2", + "source", target.toString()}; + new DistCp(conf, OptionsParser.parse(args)).execute(); + verifyCopy(dfs.getFileStatus(sourcePath), + dfs.getFileStatus(target), false); + } } From c970f1d00525e4273075cff7406dcbd71305abd5 Mon Sep 17 00:00:00 2001 From: Kihwal Lee Date: Thu, 14 Apr 2016 12:45:47 -0500 Subject: [PATCH 05/26] HDFS-10280. Document new dfsadmin command -evictWriters. Contributed by Wei-Chiu Chuang. 
--- .../src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java | 4 ++++ .../hadoop-hdfs/src/site/markdown/HDFSCommands.md | 2 ++ 2 files changed, 6 insertions(+) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java index a35246f38b5..08d3da5405c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java @@ -427,6 +427,7 @@ public class DFSAdmin extends FsShell { "\t[-allowSnapshot ]\n" + "\t[-disallowSnapshot ]\n" + "\t[-shutdownDatanode [upgrade]]\n" + + "\t[-evictWriters ]\n" + "\t[-getDatanodeInfo ]\n" + "\t[-metasave filename]\n" + "\t[-triggerBlockReport [-incremental] ]\n" + @@ -1829,6 +1830,9 @@ public class DFSAdmin extends FsShell { } else if ("-shutdownDatanode".equals(cmd)) { System.err.println("Usage: hdfs dfsadmin" + " [-shutdownDatanode [upgrade]]"); + } else if ("-evictWriters".equals(cmd)) { + System.err.println("Usage: hdfs dfsadmin" + + " [-evictWriters ]"); } else if ("-getDatanodeInfo".equals(cmd)) { System.err.println("Usage: hdfs dfsadmin" + " [-getDatanodeInfo ]"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md index 035abd63187..a6c8b4c4c9a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md +++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md @@ -385,6 +385,7 @@ Usage: hdfs dfsadmin [-allowSnapshot ] hdfs dfsadmin [-disallowSnapshot ] hdfs dfsadmin [-shutdownDatanode [upgrade]] + hdfs dfsadmin [-evictWriters ] hdfs dfsadmin [-getDatanodeInfo ] hdfs dfsadmin [-metasave filename] hdfs dfsadmin [-triggerBlockReport [-incremental] ] @@ -419,6 +420,7 @@ Usage: | `-allowSnapshot` \ | Allowing snapshots of a directory to be 
created. If the operation completes successfully, the directory becomes snapshottable. See the [HDFS Snapshot Documentation](./HdfsSnapshots.html) for more information. | | `-disallowSnapshot` \ | Disallowing snapshots of a directory to be created. All snapshots of the directory must be deleted before disallowing snapshots. See the [HDFS Snapshot Documentation](./HdfsSnapshots.html) for more information. | | `-shutdownDatanode` \ [upgrade] | Submit a shutdown request for the given datanode. See [Rolling Upgrade document](./HdfsRollingUpgrade.html#dfsadmin_-shutdownDatanode) for the detail. | +| `-evictWriters` \ | Make the datanode evict all clients that are writing a block. This is useful if decommissioning is hung due to slow writers. | | `-getDatanodeInfo` \ | Get the information about the given datanode. See [Rolling Upgrade document](./HdfsRollingUpgrade.html#dfsadmin_-getDatanodeInfo) for the detail. | | `-metasave` filename | Save Namenode's primary data structures to *filename* in the directory specified by hadoop.log.dir property. *filename* is overwritten if it exists. *filename* will contain one line for each of the following
1. Datanodes heart beating with Namenode
2. Blocks waiting to be replicated
3. Blocks currently being replicated
4. Blocks waiting to be deleted | | `-triggerBlockReport` `[-incremental]` \ | Trigger a block report for the given datanode. If 'incremental' is specified, it will be otherwise, it will be a full block report. | From 809226752dd109e16956038017dece16ada6ee0f Mon Sep 17 00:00:00 2001 From: Xiaoyu Yao Date: Thu, 14 Apr 2016 10:56:33 -0700 Subject: [PATCH 06/26] HDFS-10286. Fix TestDFSAdmin#testNameNodeGetReconfigurableProperties. Contributed by Xiaobing Zhou. --- .../test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java index 3ca7fec54dc..63bdf740704 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java @@ -234,7 +234,7 @@ public class TestDFSAdmin { final List outs = Lists.newArrayList(); final List errs = Lists.newArrayList(); getReconfigurableProperties("namenode", address, outs, errs); - assertEquals(4, outs.size()); + assertEquals(5, outs.size()); assertEquals(DFS_HEARTBEAT_INTERVAL_KEY, outs.get(1)); assertEquals(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, outs.get(2)); assertEquals(errs.size(), 0); From 8b2880c0b62102fc5c8b6962752f72cb2c416a01 Mon Sep 17 00:00:00 2001 From: Wangda Tan Date: Thu, 14 Apr 2016 11:00:53 -0700 Subject: [PATCH 07/26] MAPREDUCE-6513. MR job got hanged forever when one NM unstable for some time. 
(Varun Saxena via wangda) --- .../app/job/event/TaskAttemptKillEvent.java | 17 +++- .../job/event/TaskTAttemptKilledEvent.java | 40 +++++++++ .../mapreduce/v2/app/job/impl/JobImpl.java | 4 +- .../v2/app/job/impl/TaskAttemptImpl.java | 48 +++++++--- .../mapreduce/v2/app/job/impl/TaskImpl.java | 25 +++++- .../v2/app/rm/RMContainerAllocator.java | 4 +- .../hadoop/mapreduce/v2/app/TestMRApp.java | 51 ++++++++++- .../v2/app/job/impl/TestTaskAttempt.java | 87 +++++++++++++++++-- .../v2/app/job/impl/TestTaskImpl.java | 75 +++++++++++++--- 9 files changed, 312 insertions(+), 39 deletions(-) create mode 100644 hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskTAttemptKilledEvent.java diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskAttemptKillEvent.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskAttemptKillEvent.java index 9bcc838173e..767ef0d7a28 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskAttemptKillEvent.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskAttemptKillEvent.java @@ -24,14 +24,27 @@ import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId; public class TaskAttemptKillEvent extends TaskAttemptEvent { private final String message; + // Next map attempt will be rescheduled(i.e. 
updated in ask with higher + // priority equivalent to that of a fast fail map) + private final boolean rescheduleAttempt; + + public TaskAttemptKillEvent(TaskAttemptId attemptID, + String message, boolean rescheduleAttempt) { + super(attemptID, TaskAttemptEventType.TA_KILL); + this.message = message; + this.rescheduleAttempt = rescheduleAttempt; + } public TaskAttemptKillEvent(TaskAttemptId attemptID, String message) { - super(attemptID, TaskAttemptEventType.TA_KILL); - this.message = message; + this(attemptID, message, false); } public String getMessage() { return message; } + + public boolean getRescheduleAttempt() { + return rescheduleAttempt; + } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskTAttemptKilledEvent.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskTAttemptKilledEvent.java new file mode 100644 index 00000000000..897444d7dc8 --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskTAttemptKilledEvent.java @@ -0,0 +1,40 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+* See the License for the specific language governing permissions and +* limitations under the License. +*/ + +package org.apache.hadoop.mapreduce.v2.app.job.event; + +import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId; + +/** + * Task Attempt killed event. + */ +public class TaskTAttemptKilledEvent extends TaskTAttemptEvent { + + // Next map attempt will be rescheduled(i.e. updated in ask with + // higher priority equivalent to that of a fast fail map) + private final boolean rescheduleAttempt; + + public TaskTAttemptKilledEvent(TaskAttemptId id, boolean rescheduleAttempt) { + super(id, TaskEventType.T_ATTEMPT_KILLED); + this.rescheduleAttempt = rescheduleAttempt; + } + + public boolean getRescheduleAttempt() { + return rescheduleAttempt; + } +} diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java index c8c5ce90ca8..b7036a53639 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java @@ -1349,7 +1349,9 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job, if (TaskType.MAP == id.getTaskId().getTaskType()) { // reschedule only map tasks because their outputs maybe unusable LOG.info(mesg + ". AttemptId:" + id); - eventHandler.handle(new TaskAttemptKillEvent(id, mesg)); + // Kill the attempt and indicate that next map attempt should be + // rescheduled (i.e. considered as a fast fail map). 
+ eventHandler.handle(new TaskAttemptKillEvent(id, mesg, true)); } } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java index 5f0a622ec44..da6617e00e5 100755 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java @@ -98,6 +98,7 @@ import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptTooManyFetchFailureEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEventType; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskTAttemptEvent; +import org.apache.hadoop.mapreduce.v2.app.job.event.TaskTAttemptKilledEvent; import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncher; import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncherEvent; import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerRemoteLaunchEvent; @@ -184,6 +185,7 @@ public abstract class TaskAttemptImpl implements private int httpPort; private Locality locality; private Avataar avataar; + private boolean rescheduleNextAttempt = false; private static final CleanupContainerTransition CLEANUP_CONTAINER_TRANSITION = new CleanupContainerTransition(); @@ -1377,6 +1379,16 @@ public abstract class TaskAttemptImpl implements return container != null; } + //always called in write lock + private boolean getRescheduleNextAttempt() { + return rescheduleNextAttempt; + } + + //always called in write lock + private void setRescheduleNextAttempt(boolean 
reschedule) { + rescheduleNextAttempt = reschedule; + } + //always called in write lock private void setFinishTime() { //set the finish time only if launch time is set @@ -1745,9 +1757,8 @@ public abstract class TaskAttemptImpl implements TaskEventType.T_ATTEMPT_FAILED)); break; case KILLED: - taskAttempt.eventHandler.handle(new TaskTAttemptEvent( - taskAttempt.attemptId, - TaskEventType.T_ATTEMPT_KILLED)); + taskAttempt.eventHandler.handle(new TaskTAttemptKilledEvent( + taskAttempt.attemptId, false)); break; default: LOG.error("Task final state is not FAILED or KILLED: " + finalState); @@ -2014,8 +2025,13 @@ public abstract class TaskAttemptImpl implements taskAttempt, TaskAttemptStateInternal.KILLED); taskAttempt.eventHandler.handle(new JobHistoryEvent(taskAttempt.attemptId .getTaskId().getJobId(), tauce)); - taskAttempt.eventHandler.handle(new TaskTAttemptEvent( - taskAttempt.attemptId, TaskEventType.T_ATTEMPT_KILLED)); + boolean rescheduleNextTaskAttempt = false; + if (event instanceof TaskAttemptKillEvent) { + rescheduleNextTaskAttempt = + ((TaskAttemptKillEvent)event).getRescheduleAttempt(); + } + taskAttempt.eventHandler.handle(new TaskTAttemptKilledEvent( + taskAttempt.attemptId, rescheduleNextTaskAttempt)); return TaskAttemptStateInternal.KILLED; } } @@ -2044,6 +2060,12 @@ public abstract class TaskAttemptImpl implements taskAttempt.getID().toString()); return TaskAttemptStateInternal.SUCCESS_CONTAINER_CLEANUP; } else { + // Store reschedule flag so that after clean up is completed, new + // attempt is scheduled/rescheduled based on it. 
+ if (event instanceof TaskAttemptKillEvent) { + taskAttempt.setRescheduleNextAttempt( + ((TaskAttemptKillEvent)event).getRescheduleAttempt()); + } return TaskAttemptStateInternal.KILL_CONTAINER_CLEANUP; } } @@ -2075,9 +2097,8 @@ public abstract class TaskAttemptImpl implements ((TaskAttemptKillEvent) event).getMessage()); } - taskAttempt.eventHandler.handle(new TaskTAttemptEvent( - taskAttempt.attemptId, - TaskEventType.T_ATTEMPT_KILLED)); + taskAttempt.eventHandler.handle(new TaskTAttemptKilledEvent( + taskAttempt.attemptId, taskAttempt.getRescheduleNextAttempt())); } } @@ -2095,9 +2116,8 @@ public abstract class TaskAttemptImpl implements taskAttempt.getAssignedContainerID(), taskAttempt.getAssignedContainerMgrAddress(), taskAttempt.container.getContainerToken(), ContainerLauncher.EventType.CONTAINER_REMOTE_CLEANUP)); - taskAttempt.eventHandler.handle(new TaskTAttemptEvent( - taskAttempt.attemptId, - TaskEventType.T_ATTEMPT_KILLED)); + taskAttempt.eventHandler.handle(new TaskTAttemptKilledEvent( + taskAttempt.attemptId, false)); } } @@ -2137,6 +2157,12 @@ public abstract class TaskAttemptImpl implements // for it. finalizeProgress(taskAttempt); sendContainerCleanup(taskAttempt, event); + // Store reschedule flag so that after clean up is completed, new + // attempt is scheduled/rescheduled based on it. 
+ if (event instanceof TaskAttemptKillEvent) { + taskAttempt.setRescheduleNextAttempt( + ((TaskAttemptKillEvent)event).getRescheduleAttempt()); + } } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java index a392837eb1c..34d9f0ef13f 100755 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java @@ -76,6 +76,7 @@ import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEventType; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskRecoverEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskTAttemptEvent; +import org.apache.hadoop.mapreduce.v2.app.job.event.TaskTAttemptKilledEvent; import org.apache.hadoop.mapreduce.v2.app.metrics.MRAppMetrics; import org.apache.hadoop.mapreduce.v2.app.rm.ContainerFailedEvent; import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils; @@ -594,10 +595,15 @@ public abstract class TaskImpl implements Task, EventHandler { // This is always called in the Write Lock private void addAndScheduleAttempt(Avataar avataar) { + addAndScheduleAttempt(avataar, false); + } + + // This is always called in the Write Lock + private void addAndScheduleAttempt(Avataar avataar, boolean reschedule) { TaskAttempt attempt = addAttempt(avataar); inProgressAttempts.add(attempt.getID()); //schedule the nextAttemptNumber - if (failedAttempts.size() > 0) { + if (failedAttempts.size() > 0 || reschedule) { eventHandler.handle(new TaskAttemptEvent(attempt.getID(), 
TaskAttemptEventType.TA_RESCHEDULE)); } else { @@ -968,7 +974,12 @@ public abstract class TaskImpl implements Task, EventHandler { task.finishedAttempts.add(taskAttemptId); task.inProgressAttempts.remove(taskAttemptId); if (task.successfulAttempt == null) { - task.addAndScheduleAttempt(Avataar.VIRGIN); + boolean rescheduleNewAttempt = false; + if (event instanceof TaskTAttemptKilledEvent) { + rescheduleNewAttempt = + ((TaskTAttemptKilledEvent)event).getRescheduleAttempt(); + } + task.addAndScheduleAttempt(Avataar.VIRGIN, rescheduleNewAttempt); } if ((task.commitAttempt != null) && (task.commitAttempt == taskAttemptId)) { task.commitAttempt = null; @@ -1187,7 +1198,15 @@ public abstract class TaskImpl implements Task, EventHandler { // from the map splitInfo. So the bad node might be sent as a location // to the RM. But the RM would ignore that just like it would ignore // currently pending container requests affinitized to bad nodes. - task.addAndScheduleAttempt(Avataar.VIRGIN); + boolean rescheduleNextTaskAttempt = false; + if (event instanceof TaskTAttemptKilledEvent) { + // Decide whether to reschedule next task attempt. If true, this + // typically indicates that a successful map attempt was killed on an + // unusable node being reported. 
+ rescheduleNextTaskAttempt = + ((TaskTAttemptKilledEvent)event).getRescheduleAttempt(); + } + task.addAndScheduleAttempt(Avataar.VIRGIN, rescheduleNextTaskAttempt); return TaskStateInternal.SCHEDULED; } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java index 73745d358e2..0f4b59bd3f2 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java @@ -925,9 +925,11 @@ public class RMContainerAllocator extends RMContainerRequestor LOG.info("Killing taskAttempt:" + tid + " because it is running on unusable node:" + taskAttemptNodeId); + // If map, reschedule next task attempt. + boolean rescheduleNextAttempt = (i == 0) ? 
true : false; eventHandler.handle(new TaskAttemptKillEvent(tid, "TaskAttempt killed because it ran on unusable node" - + taskAttemptNodeId)); + + taskAttemptNodeId, rescheduleNextAttempt)); } } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRApp.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRApp.java index eb6b93292b8..eaf107050d7 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRApp.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRApp.java @@ -26,6 +26,7 @@ import java.util.ArrayList; import java.util.Collection; import java.util.Iterator; import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicInteger; import com.google.common.base.Supplier; import org.apache.hadoop.test.GenericTestUtils; @@ -56,13 +57,19 @@ import org.apache.hadoop.mapreduce.v2.app.job.impl.TaskAttemptImpl; import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncher; import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncherEvent; import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerRemoteLaunchEvent; +import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocator; +import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocatorEvent; +import org.apache.hadoop.mapreduce.v2.app.rm.ContainerRequestEvent; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.NodeReport; import org.apache.hadoop.yarn.api.records.NodeState; +import org.apache.hadoop.yarn.event.AsyncDispatcher; +import org.apache.hadoop.yarn.event.Dispatcher; import org.apache.hadoop.yarn.event.EventHandler; import 
org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.junit.Test; +import org.mockito.Mockito; /** * Tests the state machine of MR App. @@ -201,13 +208,18 @@ public class TestMRApp { @Test public void testUpdatedNodes() throws Exception { int runCount = 0; + Dispatcher disp = Mockito.spy(new AsyncDispatcher()); MRApp app = new MRAppWithHistory(2, 2, false, this.getClass().getName(), - true, ++runCount); + true, ++runCount, disp); Configuration conf = new Configuration(); // after half of the map completion, reduce will start conf.setFloat(MRJobConfig.COMPLETED_MAPS_FOR_REDUCE_SLOWSTART, 0.5f); // uberization forces full slowstart (1.0), so disable that conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false); + + ContainerAllocEventHandler handler = new ContainerAllocEventHandler(); + disp.register(ContainerAllocator.EventType.class, handler); + final Job job1 = app.submit(conf); app.waitForState(job1, JobState.RUNNING); Assert.assertEquals("Num tasks not correct", 4, job1.getTasks().size()); @@ -285,6 +297,12 @@ public class TestMRApp { events = job1.getTaskAttemptCompletionEvents(0, 100); Assert.assertEquals("Expecting 2 more completion events for killed", 4, events.length); + // 2 map task attempts which were killed above should be requested from + // container allocator with the previous map task marked as failed. If + // this happens allocator will request the container for this mapper from + // RM at a higher priority of 5(i.e. with a priority equivalent to that of + // a fail fast map). 
+ handler.waitForFailedMapContainerReqEvents(2); // all maps must be back to running app.waitForState(mapTask1, TaskState.RUNNING); @@ -324,7 +342,7 @@ public class TestMRApp { // rerun // in rerun the 1st map will be recovered from previous run app = new MRAppWithHistory(2, 2, false, this.getClass().getName(), false, - ++runCount); + ++runCount, (Dispatcher)new AsyncDispatcher()); conf = new Configuration(); conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE, true); conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false); @@ -420,6 +438,25 @@ public class TestMRApp { app.waitForState(job2, JobState.SUCCEEDED); } + private final class ContainerAllocEventHandler + implements EventHandler { + private AtomicInteger failedMapContainerReqEventCnt = new AtomicInteger(0); + @Override + public void handle(ContainerAllocatorEvent event) { + if (event.getType() == ContainerAllocator.EventType.CONTAINER_REQ && + ((ContainerRequestEvent)event).getEarlierAttemptFailed()) { + failedMapContainerReqEventCnt.incrementAndGet(); + } + } + public void waitForFailedMapContainerReqEvents(int count) + throws InterruptedException { + while(failedMapContainerReqEventCnt.get() != count) { + Thread.sleep(50); + } + failedMapContainerReqEventCnt.set(0); + } + } + private static void waitFor(Supplier predicate, int checkIntervalMillis, int checkTotalMillis) throws InterruptedException { try { @@ -590,9 +627,17 @@ public class TestMRApp { } private final class MRAppWithHistory extends MRApp { + private Dispatcher dispatcher; public MRAppWithHistory(int maps, int reduces, boolean autoComplete, - String testName, boolean cleanOnStart, int startCount) { + String testName, boolean cleanOnStart, int startCount, + Dispatcher disp) { super(maps, reduces, autoComplete, testName, cleanOnStart, startCount); + this.dispatcher = disp; + } + + @Override + protected Dispatcher createDispatcher() { + return dispatcher; } @Override diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java index 509f6af6129..98dffba4580 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java @@ -78,9 +78,13 @@ import org.apache.hadoop.mapreduce.v2.app.job.event.JobEventType; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerAssignedEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerLaunchedEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptDiagnosticsUpdateEvent; +import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptKillEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptTooManyFetchFailureEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType; +import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent; +import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEventType; +import org.apache.hadoop.mapreduce.v2.app.job.event.TaskTAttemptKilledEvent; import org.apache.hadoop.mapreduce.v2.app.rm.ContainerRequestEvent; import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils; import org.apache.hadoop.security.Credentials; @@ -982,7 +986,46 @@ public class TestTaskAttempt{ + " Task attempt finish time is not the same ", finishTime, Long.valueOf(taImpl.getFinishTime())); } - + + private void containerKillBeforeAssignment(boolean scheduleAttempt) + throws Exception { + MockEventHandler eventHandler = 
new MockEventHandler(); + ApplicationId appId = ApplicationId.newInstance(1, 2); + JobId jobId = MRBuilderUtils.newJobId(appId, 1); + TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP); + + TaskAttemptImpl taImpl = + new MapTaskAttemptImpl(taskId, 1, eventHandler, mock(Path.class), 1, + mock(TaskSplitMetaInfo.class), new JobConf(), + mock(TaskAttemptListener.class), mock(Token.class), + new Credentials(), SystemClock.getInstance(), + mock(AppContext.class)); + if (scheduleAttempt) { + taImpl.handle(new TaskAttemptEvent(taImpl.getID(), + TaskAttemptEventType.TA_SCHEDULE)); + } + taImpl.handle(new TaskAttemptKillEvent(taImpl.getID(),"", true)); + assertEquals("Task attempt is not in KILLED state", taImpl.getState(), + TaskAttemptState.KILLED); + assertEquals("Task attempt's internal state is not KILLED", + taImpl.getInternalState(), TaskAttemptStateInternal.KILLED); + assertFalse("InternalError occurred", eventHandler.internalError); + TaskEvent event = eventHandler.lastTaskEvent; + assertEquals(TaskEventType.T_ATTEMPT_KILLED, event.getType()); + // In NEW state, new map attempt should not be rescheduled. 
+ assertFalse(((TaskTAttemptKilledEvent)event).getRescheduleAttempt()); + } + + @Test + public void testContainerKillOnNew() throws Exception { + containerKillBeforeAssignment(false); + } + + @Test + public void testContainerKillOnUnassigned() throws Exception { + containerKillBeforeAssignment(true); + } + @Test public void testContainerKillAfterAssigned() throws Exception { ApplicationId appId = ApplicationId.newInstance(1, 2); @@ -1032,7 +1075,7 @@ public class TestTaskAttempt{ taImpl.getInternalState(), TaskAttemptStateInternal.ASSIGNED); taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_KILL)); - assertEquals("Task should be in KILLED state", + assertEquals("Task should be in KILL_CONTAINER_CLEANUP state", TaskAttemptStateInternal.KILL_CONTAINER_CLEANUP, taImpl.getInternalState()); } @@ -1089,7 +1132,7 @@ public class TestTaskAttempt{ TaskAttemptEventType.TA_KILL)); assertFalse("InternalError occurred trying to handle TA_KILL", eventHandler.internalError); - assertEquals("Task should be in KILLED state", + assertEquals("Task should be in KILL_CONTAINER_CLEANUP state", TaskAttemptStateInternal.KILL_CONTAINER_CLEANUP, taImpl.getInternalState()); } @@ -1150,12 +1193,11 @@ public class TestTaskAttempt{ TaskAttemptEventType.TA_KILL)); assertFalse("InternalError occurred trying to handle TA_KILL", eventHandler.internalError); - assertEquals("Task should be in KILLED state", + assertEquals("Task should be in KILL_CONTAINER_CLEANUP state", TaskAttemptStateInternal.KILL_CONTAINER_CLEANUP, taImpl.getInternalState()); } - @Test public void testKillMapTaskWhileSuccessFinishing() throws Exception { MockEventHandler eventHandler = new MockEventHandler(); @@ -1195,6 +1237,37 @@ public class TestTaskAttempt{ assertFalse("InternalError occurred", eventHandler.internalError); } + @Test + public void testKillMapTaskAfterSuccess() throws Exception { + MockEventHandler eventHandler = new MockEventHandler(); + TaskAttemptImpl taImpl = 
createTaskAttemptImpl(eventHandler); + + taImpl.handle(new TaskAttemptEvent(taImpl.getID(), + TaskAttemptEventType.TA_DONE)); + + assertEquals("Task attempt is not in SUCCEEDED state", taImpl.getState(), + TaskAttemptState.SUCCEEDED); + assertEquals("Task attempt's internal state is not " + + "SUCCESS_FINISHING_CONTAINER", taImpl.getInternalState(), + TaskAttemptStateInternal.SUCCESS_FINISHING_CONTAINER); + + taImpl.handle(new TaskAttemptEvent(taImpl.getID(), + TaskAttemptEventType.TA_CONTAINER_CLEANED)); + // Send a map task attempt kill event indicating next map attempt has to be + // reschedule + taImpl.handle(new TaskAttemptKillEvent(taImpl.getID(),"", true)); + assertEquals("Task attempt is not in KILLED state", taImpl.getState(), + TaskAttemptState.KILLED); + assertEquals("Task attempt's internal state is not KILLED", + taImpl.getInternalState(), TaskAttemptStateInternal.KILLED); + assertFalse("InternalError occurred", eventHandler.internalError); + TaskEvent event = eventHandler.lastTaskEvent; + assertEquals(TaskEventType.T_ATTEMPT_KILLED, event.getType()); + // Send an attempt killed event to TaskImpl forwarding the same reschedule + // flag we received in task attempt kill event. 
+ assertTrue(((TaskTAttemptKilledEvent)event).getRescheduleAttempt()); + } + @Test public void testKillMapTaskWhileFailFinishing() throws Exception { MockEventHandler eventHandler = new MockEventHandler(); @@ -1406,9 +1479,13 @@ public class TestTaskAttempt{ public static class MockEventHandler implements EventHandler { public boolean internalError; + public TaskEvent lastTaskEvent; @Override public void handle(Event event) { + if (event instanceof TaskEvent) { + lastTaskEvent = (TaskEvent)event; + } if (event instanceof JobEvent) { JobEvent je = ((JobEvent) event); if (JobEventType.INTERNAL_ERROR == je.getType()) { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskImpl.java index 84576712b44..4abdff871d8 100755 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskImpl.java @@ -55,6 +55,7 @@ import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEventType; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskTAttemptEvent; +import org.apache.hadoop.mapreduce.v2.app.job.event.TaskTAttemptKilledEvent; import org.apache.hadoop.mapreduce.v2.app.metrics.MRAppMetrics; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.token.Token; @@ -92,7 +93,8 @@ public class TestTaskImpl { private int taskCounter = 0; private final int partition = 1; - private InlineDispatcher dispatcher; + private 
InlineDispatcher dispatcher; + private MockTaskAttemptEventHandler taskAttemptEventHandler; private List taskAttempts; private class MockTaskImpl extends TaskImpl { @@ -257,7 +259,10 @@ public class TestTaskImpl { taskSplitMetaInfo = mock(TaskSplitMetaInfo.class); when(taskSplitMetaInfo.getLocations()).thenReturn(dataLocations); - taskAttempts = new ArrayList(); + taskAttempts = new ArrayList(); + + taskAttemptEventHandler = new MockTaskAttemptEventHandler(); + dispatcher.register(TaskAttemptEventType.class, taskAttemptEventHandler); } private MockTaskImpl createMockTask(TaskType taskType) { @@ -294,8 +299,12 @@ public class TestTaskImpl { } private void killScheduledTaskAttempt(TaskAttemptId attemptId) { - mockTask.handle(new TaskTAttemptEvent(attemptId, - TaskEventType.T_ATTEMPT_KILLED)); + killScheduledTaskAttempt(attemptId, false); + } + + private void killScheduledTaskAttempt(TaskAttemptId attemptId, + boolean reschedule) { + mockTask.handle(new TaskTAttemptKilledEvent(attemptId, reschedule)); assertTaskScheduledState(); } @@ -326,11 +335,15 @@ public class TestTaskImpl { } private void killRunningTaskAttempt(TaskAttemptId attemptId) { - mockTask.handle(new TaskTAttemptEvent(attemptId, - TaskEventType.T_ATTEMPT_KILLED)); + killRunningTaskAttempt(attemptId, false); + } + + private void killRunningTaskAttempt(TaskAttemptId attemptId, + boolean reschedule) { + mockTask.handle(new TaskTAttemptKilledEvent(attemptId, reschedule)); assertTaskRunningState(); } - + private void failRunningTaskAttempt(TaskAttemptId attemptId) { mockTask.handle(new TaskTAttemptEvent(attemptId, TaskEventType.T_ATTEMPT_FAILED)); @@ -423,10 +436,12 @@ public class TestTaskImpl { */ public void testKillScheduledTaskAttempt() { LOG.info("--- START: testKillScheduledTaskAttempt ---"); - mockTask = createMockTask(TaskType.MAP); + mockTask = createMockTask(TaskType.MAP); TaskId taskId = getNewTaskID(); scheduleTaskAttempt(taskId); - killScheduledTaskAttempt(getLastAttempt().getAttemptId()); + 
killScheduledTaskAttempt(getLastAttempt().getAttemptId(), true); + assertEquals(TaskAttemptEventType.TA_RESCHEDULE, + taskAttemptEventHandler.lastTaskAttemptEvent.getType()); } @Test @@ -449,11 +464,13 @@ public class TestTaskImpl { */ public void testKillRunningTaskAttempt() { LOG.info("--- START: testKillRunningTaskAttempt ---"); - mockTask = createMockTask(TaskType.MAP); + mockTask = createMockTask(TaskType.MAP); TaskId taskId = getNewTaskID(); scheduleTaskAttempt(taskId); launchTaskAttempt(getLastAttempt().getAttemptId()); - killRunningTaskAttempt(getLastAttempt().getAttemptId()); + killRunningTaskAttempt(getLastAttempt().getAttemptId(), true); + assertEquals(TaskAttemptEventType.TA_RESCHEDULE, + taskAttemptEventHandler.lastTaskAttemptEvent.getType()); } @Test @@ -471,6 +488,28 @@ public class TestTaskImpl { assertTaskSucceededState(); } + @Test + /** + * Kill map attempt for succeeded map task + * {@link TaskState#SUCCEEDED}->{@link TaskState#SCHEDULED} + */ + public void testKillAttemptForSuccessfulTask() { + LOG.info("--- START: testKillAttemptForSuccessfulTask ---"); + mockTask = createMockTask(TaskType.MAP); + TaskId taskId = getNewTaskID(); + scheduleTaskAttempt(taskId); + launchTaskAttempt(getLastAttempt().getAttemptId()); + commitTaskAttempt(getLastAttempt().getAttemptId()); + mockTask.handle(new TaskTAttemptEvent(getLastAttempt().getAttemptId(), + TaskEventType.T_ATTEMPT_SUCCEEDED)); + assertTaskSucceededState(); + mockTask.handle( + new TaskTAttemptKilledEvent(getLastAttempt().getAttemptId(), true)); + assertEquals(TaskAttemptEventType.TA_RESCHEDULE, + taskAttemptEventHandler.lastTaskAttemptEvent.getType()); + assertTaskScheduledState(); + } + @Test public void testTaskProgress() { LOG.info("--- START: testTaskProgress ---"); @@ -728,8 +767,8 @@ public class TestTaskImpl { assertEquals(TaskState.FAILED, mockTask.getState()); taskAttempt = taskAttempts.get(3); taskAttempt.setState(TaskAttemptState.KILLED); - mockTask.handle(new 
TaskTAttemptEvent(taskAttempt.getAttemptId(), - TaskEventType.T_ATTEMPT_KILLED)); + mockTask.handle(new TaskTAttemptKilledEvent(taskAttempt.getAttemptId(), + false)); assertEquals(TaskState.FAILED, mockTask.getState()); } @@ -840,4 +879,14 @@ public class TestTaskImpl { Counters taskCounters = mockTask.getCounters(); assertEquals("wrong counters for task", specAttemptCounters, taskCounters); } + + public static class MockTaskAttemptEventHandler implements EventHandler { + public TaskAttemptEvent lastTaskAttemptEvent; + @Override + public void handle(Event event) { + if (event instanceof TaskAttemptEvent) { + lastTaskAttemptEvent = (TaskAttemptEvent)event; + } + } + }; } From a74580a4d3039ff95e7744f1d7a386b2bc7a7484 Mon Sep 17 00:00:00 2001 From: Andrew Wang Date: Thu, 14 Apr 2016 11:36:12 -0700 Subject: [PATCH 08/26] HADOOP-12811. Change kms server port number which conflicts with HMaster port number. Contributed by Xiao Chen. --- .../key/kms/TestLoadBalancingKMSClientProvider.java | 8 ++++---- hadoop-common-project/hadoop-kms/src/main/conf/kms-env.sh | 2 +- .../hadoop-kms/src/main/libexec/kms-config.sh | 2 +- .../hadoop-kms/src/site/markdown/index.md.vm | 4 ++-- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java index 08a3d93d2fa..4e421da2219 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java @@ -60,14 +60,14 @@ public class TestLoadBalancingKMSClientProvider { providers[2].getKMSUrl())); kp = new KMSClientProvider.Factory().createProvider(new URI( - "kms://http@host1;host2;host3:16000/kms/foo"), 
conf); + "kms://http@host1;host2;host3:9600/kms/foo"), conf); assertTrue(kp instanceof LoadBalancingKMSClientProvider); providers = ((LoadBalancingKMSClientProvider) kp).getProviders(); assertEquals(3, providers.length); - assertEquals(Sets.newHashSet("http://host1:16000/kms/foo/v1/", - "http://host2:16000/kms/foo/v1/", - "http://host3:16000/kms/foo/v1/"), + assertEquals(Sets.newHashSet("http://host1:9600/kms/foo/v1/", + "http://host2:9600/kms/foo/v1/", + "http://host3:9600/kms/foo/v1/"), Sets.newHashSet(providers[0].getKMSUrl(), providers[1].getKMSUrl(), providers[2].getKMSUrl())); diff --git a/hadoop-common-project/hadoop-kms/src/main/conf/kms-env.sh b/hadoop-common-project/hadoop-kms/src/main/conf/kms-env.sh index 7044fa86704..729e63a1218 100644 --- a/hadoop-common-project/hadoop-kms/src/main/conf/kms-env.sh +++ b/hadoop-common-project/hadoop-kms/src/main/conf/kms-env.sh @@ -24,7 +24,7 @@ # The HTTP port used by KMS # -# export KMS_HTTP_PORT=16000 +# export KMS_HTTP_PORT=9600 # The Admin port used by KMS # diff --git a/hadoop-common-project/hadoop-kms/src/main/libexec/kms-config.sh b/hadoop-common-project/hadoop-kms/src/main/libexec/kms-config.sh index 5e1ffa40c9d..927b4af1fc4 100644 --- a/hadoop-common-project/hadoop-kms/src/main/libexec/kms-config.sh +++ b/hadoop-common-project/hadoop-kms/src/main/libexec/kms-config.sh @@ -37,7 +37,7 @@ function hadoop_subproject_init export HADOOP_CATALINA_CONFIG="${HADOOP_CONF_DIR}" export HADOOP_CATALINA_LOG="${HADOOP_LOG_DIR}" - export HADOOP_CATALINA_HTTP_PORT="${KMS_HTTP_PORT:-16000}" + export HADOOP_CATALINA_HTTP_PORT="${KMS_HTTP_PORT:-9600}" export HADOOP_CATALINA_ADMIN_PORT="${KMS_ADMIN_PORT:-$((HADOOP_CATALINA_HTTP_PORT+1))}" export HADOOP_CATALINA_MAX_THREADS="${KMS_MAX_THREADS:-1000}" export HADOOP_CATALINA_MAX_HTTP_HEADER_SIZE="${KMS_MAX_HTTP_HEADER_SIZE:-65536}" diff --git a/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm b/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm index 
65854cf1105..68663672b23 100644 --- a/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm +++ b/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm @@ -32,7 +32,7 @@ KMS is a Java web-application and it runs using a pre-configured Tomcat bundled KMS Client Configuration ------------------------ -The KMS client `KeyProvider` uses the **kms** scheme, and the embedded URL must be the URL of the KMS. For example, for a KMS running on `http://localhost:16000/kms`, the KeyProvider URI is `kms://http@localhost:16000/kms`. And, for a KMS running on `https://localhost:16000/kms`, the KeyProvider URI is `kms://https@localhost:16000/kms` +The KMS client `KeyProvider` uses the **kms** scheme, and the embedded URL must be the URL of the KMS. For example, for a KMS running on `http://localhost:9600/kms`, the KeyProvider URI is `kms://http@localhost:9600/kms`. And, for a KMS running on `https://localhost:9600/kms`, the KeyProvider URI is `kms://https@localhost:9600/kms` KMS --- @@ -178,7 +178,7 @@ $H3 Embedded Tomcat Configuration To configure the embedded Tomcat go to the `share/hadoop/kms/tomcat/conf`. -KMS pre-configures the HTTP and Admin ports in Tomcat's `server.xml` to 16000 and 16001. +KMS pre-configures the HTTP and Admin ports in Tomcat's `server.xml` to 9600 and 9601. Tomcat logs are also preconfigured to go to Hadoop's `logs/` directory. From 3150ae8108a1fc40a67926be6254824c1e37cb38 Mon Sep 17 00:00:00 2001 From: Jason Lowe Date: Thu, 14 Apr 2016 19:17:14 +0000 Subject: [PATCH 09/26] YARN-4924. NM recovery race can lead to container not cleaned up. 
Contributed by sandflee --- .../ContainerManagerImpl.java | 17 ---- .../recovery/NMLeveldbStateStoreService.java | 80 ++++++++++++------- .../recovery/NMNullStateStoreService.java | 4 - .../recovery/NMStateStoreService.java | 12 --- .../TestContainerManagerRecovery.java | 4 + .../recovery/NMMemoryStateStoreService.java | 10 --- .../TestNMLeveldbStateStoreService.java | 10 +-- 7 files changed, 54 insertions(+), 83 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java index 8d09aa75bcc..b8cca28e82d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java @@ -296,20 +296,8 @@ public class ContainerManagerImpl extends CompositeService implements if (LOG.isDebugEnabled()) { LOG.debug("Recovering container with state: " + rcs); } - recoverContainer(rcs); } - - String diagnostic = "Application marked finished during recovery"; - for (ApplicationId appId : appsState.getFinishedApplications()) { - - if (LOG.isDebugEnabled()) { - LOG.debug("Application marked finished during recovery: " + appId); - } - - dispatcher.getEventHandler().handle( - new ApplicationFinishEvent(appId, diagnostic)); - } } else { LOG.info("Not a recoverable state store. 
Nothing to recover."); } @@ -1332,11 +1320,6 @@ public class ContainerManagerImpl extends CompositeService implements } else if (appsFinishedEvent.getReason() == CMgrCompletedAppsEvent.Reason.BY_RESOURCEMANAGER) { diagnostic = "Application killed by ResourceManager"; } - try { - this.context.getNMStateStore().storeFinishedApplication(appID); - } catch (IOException e) { - LOG.error("Unable to update application state in store", e); - } this.dispatcher.getEventHandler().handle( new ApplicationFinishEvent(appID, diagnostic)); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java index 81d6c57de6e..26dea2daa0d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java @@ -84,6 +84,7 @@ public class NMLeveldbStateStoreService extends NMStateStoreService { private static final String APPLICATIONS_KEY_PREFIX = "ContainerManager/applications/"; + @Deprecated private static final String FINISHED_APPS_KEY_PREFIX = "ContainerManager/finishedApps/"; @@ -392,20 +393,6 @@ public class NMLeveldbStateStoreService extends NMStateStoreService { state.applications.add( ContainerManagerApplicationProto.parseFrom(entry.getValue())); } - - state.finishedApplications = new ArrayList(); - keyPrefix = FINISHED_APPS_KEY_PREFIX; - iter.seek(bytes(keyPrefix)); - while (iter.hasNext()) { - Entry entry = iter.next(); - String key = asString(entry.getKey()); - if 
(!key.startsWith(keyPrefix)) { - break; - } - ApplicationId appId = - ConverterUtils.toApplicationId(key.substring(keyPrefix.length())); - state.finishedApplications.add(appId); - } } catch (DBException e) { throw new IOException(e); } finally { @@ -414,6 +401,8 @@ public class NMLeveldbStateStoreService extends NMStateStoreService { } } + cleanupDeprecatedFinishedApps(); + return state; } @@ -433,21 +422,6 @@ public class NMLeveldbStateStoreService extends NMStateStoreService { } } - @Override - public void storeFinishedApplication(ApplicationId appId) - throws IOException { - if (LOG.isDebugEnabled()) { - LOG.debug("storeFinishedApplication.appId: " + appId); - } - - String key = FINISHED_APPS_KEY_PREFIX + appId; - try { - db.put(bytes(key), new byte[0]); - } catch (DBException e) { - throw new IOException(e); - } - } - @Override public void removeApplication(ApplicationId appId) throws IOException { @@ -460,8 +434,6 @@ public class NMLeveldbStateStoreService extends NMStateStoreService { try { String key = APPLICATIONS_KEY_PREFIX + appId; batch.delete(bytes(key)); - key = FINISHED_APPS_KEY_PREFIX + appId; - batch.delete(bytes(key)); db.write(batch); } finally { batch.close(); @@ -979,6 +951,52 @@ public class NMLeveldbStateStoreService extends NMStateStoreService { } } + @SuppressWarnings("deprecation") + private void cleanupDeprecatedFinishedApps() { + try { + cleanupKeysWithPrefix(FINISHED_APPS_KEY_PREFIX); + } catch (Exception e) { + LOG.warn("cleanup keys with prefix " + FINISHED_APPS_KEY_PREFIX + + " from leveldb failed", e); + } + } + + private void cleanupKeysWithPrefix(String prefix) throws IOException { + WriteBatch batch = null; + LeveldbIterator iter = null; + try { + iter = new LeveldbIterator(db); + try { + batch = db.createWriteBatch(); + iter.seek(bytes(prefix)); + while (iter.hasNext()) { + byte[] key = iter.next().getKey(); + String keyStr = asString(key); + if (!keyStr.startsWith(prefix)) { + break; + } + batch.delete(key); + if 
(LOG.isDebugEnabled()) { + LOG.debug("cleanup " + keyStr + " from leveldb"); + } + } + db.write(batch); + } catch (DBException e) { + throw new IOException(e); + } finally { + if (batch != null) { + batch.close(); + } + } + } catch (DBException e) { + throw new IOException(e); + } finally { + if (iter != null) { + iter.close(); + } + } + } + private String getLogDeleterKey(ApplicationId appId) { return LOG_DELETER_KEY_PREFIX + appId; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMNullStateStoreService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMNullStateStoreService.java index d5dce9bb2ee..a887e71e9e1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMNullStateStoreService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMNullStateStoreService.java @@ -58,10 +58,6 @@ public class NMNullStateStoreService extends NMStateStoreService { ContainerManagerApplicationProto p) throws IOException { } - @Override - public void storeFinishedApplication(ApplicationId appId) { - } - @Override public void removeApplication(ApplicationId appId) throws IOException { } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMStateStoreService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMStateStoreService.java index 84c5aa982a7..463815ec9c1 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMStateStoreService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMStateStoreService.java @@ -52,15 +52,11 @@ public abstract class NMStateStoreService extends AbstractService { public static class RecoveredApplicationsState { List applications; - List finishedApplications; public List getApplications() { return applications; } - public List getFinishedApplications() { - return finishedApplications; - } } public enum RecoveredContainerStatus { @@ -258,14 +254,6 @@ public abstract class NMStateStoreService extends AbstractService { public abstract void storeApplication(ApplicationId appId, ContainerManagerApplicationProto p) throws IOException; - /** - * Record that an application has finished - * @param appId the application ID - * @throws IOException - */ - public abstract void storeFinishedApplication(ApplicationId appId) - throws IOException; - /** * Remove records corresponding to an application * @param appId the application ID diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java index 2e014decbc8..9fa3fcc13c8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java @@ -259,6 +259,10 @@ public class TestContainerManagerRecovery extends BaseContainerManagerTest { assertEquals(1, context.getApplications().size()); app = context.getApplications().get(appId); assertNotNull(app); + // no longer saving FINISH_APP event in NM stateStore, + // simulate by resending FINISH_APP event + cm.handle(new CMgrCompletedAppsEvent(finishedApps, + CMgrCompletedAppsEvent.Reason.BY_RESOURCEMANAGER)); waitForAppState(app, ApplicationState.APPLICATION_RESOURCES_CLEANINGUP); assertTrue(context.getApplicationACLsManager().checkAccess( UserGroupInformation.createRemoteUser(modUser), diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMMemoryStateStoreService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMMemoryStateStoreService.java index a1c95ab03b9..12798963390 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMMemoryStateStoreService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMMemoryStateStoreService.java @@ -44,7 +44,6 @@ import org.apache.hadoop.yarn.server.api.records.impl.pb.MasterKeyPBImpl; public class NMMemoryStateStoreService extends NMStateStoreService { private Map apps; - private Set finishedApps; private Map containerStates; private Map trackerStates; private Map deleteTasks; @@ -59,7 +58,6 @@ public class NMMemoryStateStoreService extends NMStateStoreService { @Override protected void initStorage(Configuration 
conf) { apps = new HashMap(); - finishedApps = new HashSet(); containerStates = new HashMap(); nmTokenState = new RecoveredNMTokensState(); nmTokenState.applicationMasterKeys = @@ -86,7 +84,6 @@ public class NMMemoryStateStoreService extends NMStateStoreService { RecoveredApplicationsState state = new RecoveredApplicationsState(); state.applications = new ArrayList( apps.values()); - state.finishedApplications = new ArrayList(finishedApps); return state; } @@ -98,16 +95,10 @@ public class NMMemoryStateStoreService extends NMStateStoreService { apps.put(appId, protoCopy); } - @Override - public synchronized void storeFinishedApplication(ApplicationId appId) { - finishedApps.add(appId); - } - @Override public synchronized void removeApplication(ApplicationId appId) throws IOException { apps.remove(appId); - finishedApps.remove(appId); } @Override @@ -393,7 +384,6 @@ public class NMMemoryStateStoreService extends NMStateStoreService { logDeleterState.remove(appId); } - private static class TrackerState { Map inProgressMap = new HashMap(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java index 08b49e75383..47468d6c25e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java @@ -174,7 +174,6 @@ public class TestNMLeveldbStateStoreService { // test empty when no state RecoveredApplicationsState state = 
stateStore.loadApplicationsState(); assertTrue(state.getApplications().isEmpty()); - assertTrue(state.getFinishedApplications().isEmpty()); // store an application and verify recovered final ApplicationId appId1 = ApplicationId.newInstance(1234, 1); @@ -188,10 +187,8 @@ public class TestNMLeveldbStateStoreService { state = stateStore.loadApplicationsState(); assertEquals(1, state.getApplications().size()); assertEquals(appProto1, state.getApplications().get(0)); - assertTrue(state.getFinishedApplications().isEmpty()); - // finish an application and add a new one - stateStore.storeFinishedApplication(appId1); + // add a new app final ApplicationId appId2 = ApplicationId.newInstance(1234, 2); builder = ContainerManagerApplicationProto.newBuilder(); builder.setId(((ApplicationIdPBImpl) appId2).getProto()); @@ -203,18 +200,13 @@ public class TestNMLeveldbStateStoreService { assertEquals(2, state.getApplications().size()); assertTrue(state.getApplications().contains(appProto1)); assertTrue(state.getApplications().contains(appProto2)); - assertEquals(1, state.getFinishedApplications().size()); - assertEquals(appId1, state.getFinishedApplications().get(0)); // test removing an application - stateStore.storeFinishedApplication(appId2); stateStore.removeApplication(appId2); restartStateStore(); state = stateStore.loadApplicationsState(); assertEquals(1, state.getApplications().size()); assertEquals(appProto1, state.getApplications().get(0)); - assertEquals(1, state.getFinishedApplications().size()); - assertEquals(appId1, state.getFinishedApplications().get(0)); } @Test From 2c155afe2736a5571bbb3bdfb2fe6f9709227229 Mon Sep 17 00:00:00 2001 From: Kihwal Lee Date: Thu, 14 Apr 2016 14:25:11 -0500 Subject: [PATCH 10/26] HDFS-10292. Add block id when client got Unable to close file exception. Contributed by Brahma Reddy Battula. 
--- .../src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java index dc88e08991c..0f8279943d2 100755 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java @@ -811,7 +811,7 @@ public class DFSOutputStream extends FSOutputSummer try { if (retries == 0) { throw new IOException("Unable to close file because the last block" - + " does not have enough number of replicas."); + + last + " does not have enough number of replicas."); } retries--; Thread.sleep(sleeptime); From b9c9d03591a49be31f3fbc738d01a31700bfdbc4 Mon Sep 17 00:00:00 2001 From: Kihwal Lee Date: Thu, 14 Apr 2016 15:24:39 -0500 Subject: [PATCH 11/26] HDFS-10281. TestPendingCorruptDnMessages fails intermittently. Contributed by Mingliang Liu. 
--- .../ha/TestPendingCorruptDnMessages.java | 51 ++++++++++--------- 1 file changed, 28 insertions(+), 23 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPendingCorruptDnMessages.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPendingCorruptDnMessages.java index 5f116d95ccf..5063acdc8af 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPendingCorruptDnMessages.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPendingCorruptDnMessages.java @@ -18,12 +18,14 @@ package org.apache.hadoop.hdfs.server.namenode.ha; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; import java.io.IOException; import java.io.OutputStream; import java.net.URISyntaxException; import java.util.List; +import java.util.concurrent.TimeoutException; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -37,19 +39,22 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; -import org.apache.hadoop.util.ThreadUtil; +import org.apache.hadoop.test.GenericTestUtils; + +import com.google.common.base.Supplier; + import org.junit.Test; public class TestPendingCorruptDnMessages { private static final Path filePath = new Path("/foo.txt"); - @Test + @Test (timeout = 60000) public void testChangedStorageId() throws IOException, URISyntaxException, - InterruptedException { + InterruptedException, TimeoutException { HdfsConfiguration conf = new HdfsConfiguration(); conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1); - MiniDFSCluster cluster = new 
MiniDFSCluster.Builder(conf) + final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf) .numDataNodes(1) .nnTopology(MiniDFSNNTopology.simpleHATopology()) .build(); @@ -83,27 +88,27 @@ public class TestPendingCorruptDnMessages { // Wait until the standby NN queues up the corrupt block in the pending DN // message queue. - while (cluster.getNamesystem(1).getBlockManager() - .getPendingDataNodeMessageCount() < 1) { - ThreadUtil.sleepAtLeastIgnoreInterrupts(1000); - } - - assertEquals(1, cluster.getNamesystem(1).getBlockManager() - .getPendingDataNodeMessageCount()); - String oldStorageId = getRegisteredDatanodeUid(cluster, 1); + GenericTestUtils.waitFor(new Supplier() { + @Override + public Boolean get() { + return cluster.getNamesystem(1).getBlockManager() + .getPendingDataNodeMessageCount() == 1; + } + }, 1000, 30000); + + final String oldStorageId = getRegisteredDatanodeUid(cluster, 1); + assertNotNull(oldStorageId); // Reformat/restart the DN. assertTrue(wipeAndRestartDn(cluster, 0)); - // Give the DN time to start up and register, which will cause the - // DatanodeManager to dissociate the old storage ID from the DN xfer addr. 
- String newStorageId = ""; - do { - ThreadUtil.sleepAtLeastIgnoreInterrupts(1000); - newStorageId = getRegisteredDatanodeUid(cluster, 1); - System.out.println("====> oldStorageId: " + oldStorageId + - " newStorageId: " + newStorageId); - } while (newStorageId.equals(oldStorageId)); + GenericTestUtils.waitFor(new Supplier() { + @Override + public Boolean get() { + final String newStorageId = getRegisteredDatanodeUid(cluster, 1); + return newStorageId != null && !newStorageId.equals(oldStorageId); + } + }, 1000, 30000); assertEquals(0, cluster.getNamesystem(1).getBlockManager() .getPendingDataNodeMessageCount()); @@ -121,8 +126,8 @@ public class TestPendingCorruptDnMessages { List registeredDatanodes = cluster.getNamesystem(nnIndex) .getBlockManager().getDatanodeManager() .getDatanodeListForReport(DatanodeReportType.ALL); - assertEquals(1, registeredDatanodes.size()); - return registeredDatanodes.get(0).getDatanodeUuid(); + return registeredDatanodes.isEmpty() ? null : + registeredDatanodes.get(0).getDatanodeUuid(); } private static boolean wipeAndRestartDn(MiniDFSCluster cluster, int dnIndex) From 6e6b6dd5aaf93cfb373833cd175ee722d2cb708f Mon Sep 17 00:00:00 2001 From: Akira Ajisaka Date: Fri, 15 Apr 2016 14:14:36 +0900 Subject: [PATCH 12/26] HADOOP-12989. Some tests in org.apache.hadoop.fs.shell.find occasionally time out. Contributed by Takashi Ohnishi. 
--- .../apache/hadoop/fs/shell/find/TestAnd.java | 25 ++++++----- .../fs/shell/find/TestFilterExpression.java | 27 +++++++----- .../apache/hadoop/fs/shell/find/TestFind.java | 3 +- .../hadoop/fs/shell/find/TestIname.java | 17 +++++--- .../apache/hadoop/fs/shell/find/TestName.java | 17 +++++--- .../hadoop/fs/shell/find/TestPrint.java | 9 +++- .../hadoop/fs/shell/find/TestPrint0.java | 9 +++- .../hadoop/fs/shell/find/TestResult.java | 41 +++++++++++-------- 8 files changed, 92 insertions(+), 56 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestAnd.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestAnd.java index d82a25e07b6..bb5ca4ca1c5 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestAnd.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestAnd.java @@ -26,12 +26,17 @@ import java.util.Deque; import java.util.LinkedList; import org.apache.hadoop.fs.shell.PathData; +import org.junit.Rule; +import org.junit.rules.Timeout; import org.junit.Test; public class TestAnd { + @Rule + public Timeout globalTimeout = new Timeout(10000); + // test all expressions passing - @Test(timeout = 1000) + @Test public void testPass() throws IOException { And and = new And(); @@ -56,7 +61,7 @@ public class TestAnd { } // test the first expression failing - @Test(timeout = 1000) + @Test public void testFailFirst() throws IOException { And and = new And(); @@ -80,7 +85,7 @@ public class TestAnd { } // test the second expression failing - @Test(timeout = 1000) + @Test public void testFailSecond() throws IOException { And and = new And(); @@ -105,7 +110,7 @@ public class TestAnd { } // test both expressions failing - @Test(timeout = 1000) + @Test public void testFailBoth() throws IOException { And and = new And(); @@ -129,7 +134,7 @@ public class TestAnd { } // test the first expression stopping - 
@Test(timeout = 1000) + @Test public void testStopFirst() throws IOException { And and = new And(); @@ -154,7 +159,7 @@ public class TestAnd { } // test the second expression stopping - @Test(timeout = 1000) + @Test public void testStopSecond() throws IOException { And and = new And(); @@ -179,7 +184,7 @@ public class TestAnd { } // test first expression stopping and second failing - @Test(timeout = 1000) + @Test public void testStopFail() throws IOException { And and = new And(); @@ -204,7 +209,7 @@ public class TestAnd { } // test setOptions is called on child - @Test(timeout = 1000) + @Test public void testSetOptions() throws IOException { And and = new And(); Expression first = mock(Expression.class); @@ -224,7 +229,7 @@ public class TestAnd { } // test prepare is called on child - @Test(timeout = 1000) + @Test public void testPrepare() throws IOException { And and = new And(); Expression first = mock(Expression.class); @@ -243,7 +248,7 @@ public class TestAnd { } // test finish is called on child - @Test(timeout = 1000) + @Test public void testFinish() throws IOException { And and = new And(); Expression first = mock(Expression.class); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestFilterExpression.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestFilterExpression.java index 5986a06b23f..7ad0574e183 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestFilterExpression.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestFilterExpression.java @@ -26,12 +26,17 @@ import java.util.Deque; import org.apache.hadoop.fs.shell.PathData; import org.junit.Before; +import org.junit.Rule; +import org.junit.rules.Timeout; import org.junit.Test; public class TestFilterExpression { private Expression expr; private FilterExpression test; + @Rule + public Timeout globalTimeout = new 
Timeout(10000); + @Before public void setup() { expr = mock(Expression.class); @@ -40,13 +45,13 @@ public class TestFilterExpression { } // test that the child expression is correctly set - @Test(timeout = 1000) + @Test public void expression() throws IOException { assertEquals(expr, test.expression); } // test that setOptions method is called - @Test(timeout = 1000) + @Test public void setOptions() throws IOException { FindOptions options = mock(FindOptions.class); test.setOptions(options); @@ -55,7 +60,7 @@ public class TestFilterExpression { } // test the apply method is called and the result returned - @Test(timeout = 1000) + @Test public void apply() throws IOException { PathData item = mock(PathData.class); when(expr.apply(item, -1)).thenReturn(Result.PASS).thenReturn(Result.FAIL); @@ -66,7 +71,7 @@ public class TestFilterExpression { } // test that the finish method is called - @Test(timeout = 1000) + @Test public void finish() throws IOException { test.finish(); verify(expr).finish(); @@ -74,7 +79,7 @@ public class TestFilterExpression { } // test that the getUsage method is called - @Test(timeout = 1000) + @Test public void getUsage() { String[] usage = new String[] { "Usage 1", "Usage 2", "Usage 3" }; when(expr.getUsage()).thenReturn(usage); @@ -84,7 +89,7 @@ public class TestFilterExpression { } // test that the getHelp method is called - @Test(timeout = 1000) + @Test public void getHelp() { String[] help = new String[] { "Help 1", "Help 2", "Help 3" }; when(expr.getHelp()).thenReturn(help); @@ -94,7 +99,7 @@ public class TestFilterExpression { } // test that the isAction method is called - @Test(timeout = 1000) + @Test public void isAction() { when(expr.isAction()).thenReturn(true).thenReturn(false); assertTrue(test.isAction()); @@ -104,7 +109,7 @@ public class TestFilterExpression { } // test that the isOperator method is called - @Test(timeout = 1000) + @Test public void isOperator() { when(expr.isAction()).thenReturn(true).thenReturn(false); 
assertTrue(test.isAction()); @@ -114,7 +119,7 @@ public class TestFilterExpression { } // test that the getPrecedence method is called - @Test(timeout = 1000) + @Test public void getPrecedence() { int precedence = 12345; when(expr.getPrecedence()).thenReturn(precedence); @@ -124,7 +129,7 @@ public class TestFilterExpression { } // test that the addChildren method is called - @Test(timeout = 1000) + @Test public void addChildren() { @SuppressWarnings("unchecked") Deque expressions = mock(Deque.class); @@ -134,7 +139,7 @@ public class TestFilterExpression { } // test that the addArguments method is called - @Test(timeout = 1000) + @Test public void addArguments() { @SuppressWarnings("unchecked") Deque args = mock(Deque.class); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestFind.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestFind.java index 8bfcec66146..716230aa4c4 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestFind.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestFind.java @@ -39,11 +39,12 @@ import org.apache.hadoop.fs.shell.find.FindOptions; import org.apache.hadoop.fs.shell.find.Result; import org.junit.Before; import org.junit.Rule; -import org.junit.Test; import org.junit.rules.Timeout; +import org.junit.Test; import org.mockito.InOrder; public class TestFind { + @Rule public Timeout timeout = new Timeout(10000); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestIname.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestIname.java index 6e42fce58fe..c204322f1e9 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestIname.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestIname.java @@ -25,12 
+25,17 @@ import java.io.IOException; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.shell.PathData; import org.junit.Before; +import org.junit.Rule; +import org.junit.rules.Timeout; import org.junit.Test; public class TestIname { private FileSystem mockFs; private Name.Iname name; + @Rule + public Timeout globalTimeout = new Timeout(10000); + @Before public void resetMock() throws IOException { mockFs = MockFileSystem.setup(); @@ -44,7 +49,7 @@ public class TestIname { } // test a matching name (same case) - @Test(timeout = 1000) + @Test public void applyMatch() throws IOException { setup("name"); PathData item = new PathData("/directory/path/name", mockFs.getConf()); @@ -52,7 +57,7 @@ public class TestIname { } // test a non-matching name - @Test(timeout = 1000) + @Test public void applyNotMatch() throws IOException { setup("name"); PathData item = new PathData("/directory/path/notname", mockFs.getConf()); @@ -60,7 +65,7 @@ public class TestIname { } // test a matching name (different case) - @Test(timeout = 1000) + @Test public void applyMixedCase() throws IOException { setup("name"); PathData item = new PathData("/directory/path/NaMe", mockFs.getConf()); @@ -68,7 +73,7 @@ public class TestIname { } // test a matching glob pattern (same case) - @Test(timeout = 1000) + @Test public void applyGlob() throws IOException { setup("n*e"); PathData item = new PathData("/directory/path/name", mockFs.getConf()); @@ -76,7 +81,7 @@ public class TestIname { } // test a matching glob pattern (different case) - @Test(timeout = 1000) + @Test public void applyGlobMixedCase() throws IOException { setup("n*e"); PathData item = new PathData("/directory/path/NaMe", mockFs.getConf()); @@ -84,7 +89,7 @@ public class TestIname { } // test a non-matching glob pattern - @Test(timeout = 1000) + @Test public void applyGlobNotMatch() throws IOException { setup("n*e"); PathData item = new PathData("/directory/path/notmatch", mockFs.getConf()); diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestName.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestName.java index 2c77fe14b72..81a405f4cfd 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestName.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestName.java @@ -25,12 +25,17 @@ import java.io.IOException; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.shell.PathData; import org.junit.Before; +import org.junit.Rule; +import org.junit.rules.Timeout; import org.junit.Test; public class TestName { private FileSystem mockFs; private Name name; + @Rule + public Timeout globalTimeout = new Timeout(10000); + @Before public void resetMock() throws IOException { mockFs = MockFileSystem.setup(); @@ -44,7 +49,7 @@ public class TestName { } // test a matching name - @Test(timeout = 1000) + @Test public void applyMatch() throws IOException { setup("name"); PathData item = new PathData("/directory/path/name", mockFs.getConf()); @@ -52,7 +57,7 @@ public class TestName { } // test a non-matching name - @Test(timeout = 1000) + @Test public void applyNotMatch() throws IOException { setup("name"); PathData item = new PathData("/directory/path/notname", mockFs.getConf()); @@ -60,7 +65,7 @@ public class TestName { } // test a different case name - @Test(timeout = 1000) + @Test public void applyMixedCase() throws IOException { setup("name"); PathData item = new PathData("/directory/path/NaMe", mockFs.getConf()); @@ -68,7 +73,7 @@ public class TestName { } // test a matching glob pattern - @Test(timeout = 1000) + @Test public void applyGlob() throws IOException { setup("n*e"); PathData item = new PathData("/directory/path/name", mockFs.getConf()); @@ -76,7 +81,7 @@ public class TestName { } // test a glob pattern with different case - @Test(timeout = 1000) + @Test public void 
applyGlobMixedCase() throws IOException { setup("n*e"); PathData item = new PathData("/directory/path/NaMe", mockFs.getConf()); @@ -84,7 +89,7 @@ public class TestName { } // test a non-matching glob pattern - @Test(timeout = 1000) + @Test public void applyGlobNotMatch() throws IOException { setup("n*e"); PathData item = new PathData("/directory/path/notmatch", mockFs.getConf()); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestPrint.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestPrint.java index 2d276650b96..a5cacc7defb 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestPrint.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestPrint.java @@ -23,23 +23,28 @@ import static org.mockito.Mockito.*; import java.io.IOException; import org.apache.hadoop.fs.shell.PathData; -import org.junit.Test; import java.io.PrintStream; import org.apache.hadoop.fs.FileSystem; import org.junit.Before; +import org.junit.Rule; +import org.junit.rules.Timeout; +import org.junit.Test; public class TestPrint { private FileSystem mockFs; + @Rule + public Timeout globalTimeout = new Timeout(10000); + @Before public void resetMock() throws IOException { mockFs = MockFileSystem.setup(); } // test the full path is printed to stdout - @Test(timeout = 1000) + @Test public void testPrint() throws IOException { Print print = new Print(); PrintStream out = mock(PrintStream.class); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestPrint0.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestPrint0.java index 3b89438d308..20c9bd69470 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestPrint0.java +++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestPrint0.java @@ -23,23 +23,28 @@ import static org.mockito.Mockito.*; import java.io.IOException; import org.apache.hadoop.fs.shell.PathData; -import org.junit.Test; import java.io.PrintStream; import org.apache.hadoop.fs.FileSystem; import org.junit.Before; +import org.junit.Rule; +import org.junit.rules.Timeout; +import org.junit.Test; public class TestPrint0 { private FileSystem mockFs; + @Rule + public Timeout globalTimeout = new Timeout(10000); + @Before public void resetMock() throws IOException { mockFs = MockFileSystem.setup(); } // test the full path is printed to stdout with a '\0' - @Test(timeout = 1000) + @Test public void testPrint() throws IOException { Print.Print0 print = new Print.Print0(); PrintStream out = mock(PrintStream.class); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestResult.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestResult.java index 1139220b94d..999ff598d77 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestResult.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestResult.java @@ -19,12 +19,17 @@ package org.apache.hadoop.fs.shell.find; import static org.junit.Assert.*; +import org.junit.Rule; +import org.junit.rules.Timeout; import org.junit.Test; public class TestResult { + @Rule + public Timeout globalTimeout = new Timeout(10000); + // test the PASS value - @Test(timeout = 1000) + @Test public void testPass() { Result result = Result.PASS; assertTrue(result.isPass()); @@ -32,7 +37,7 @@ public class TestResult { } // test the FAIL value - @Test(timeout = 1000) + @Test public void testFail() { Result result = Result.FAIL; assertFalse(result.isPass()); @@ -40,7 +45,7 @@ public class TestResult { } // test the STOP value - @Test(timeout = 1000) + @Test public 
void testStop() { Result result = Result.STOP; assertTrue(result.isPass()); @@ -48,7 +53,7 @@ public class TestResult { } // test combine method with two PASSes - @Test(timeout = 1000) + @Test public void combinePassPass() { Result result = Result.PASS.combine(Result.PASS); assertTrue(result.isPass()); @@ -56,7 +61,7 @@ public class TestResult { } // test the combine method with a PASS and a FAIL - @Test(timeout = 1000) + @Test public void combinePassFail() { Result result = Result.PASS.combine(Result.FAIL); assertFalse(result.isPass()); @@ -64,7 +69,7 @@ public class TestResult { } // test the combine method with a FAIL and a PASS - @Test(timeout = 1000) + @Test public void combineFailPass() { Result result = Result.FAIL.combine(Result.PASS); assertFalse(result.isPass()); @@ -72,7 +77,7 @@ public class TestResult { } // test the combine method with two FAILs - @Test(timeout = 1000) + @Test public void combineFailFail() { Result result = Result.FAIL.combine(Result.FAIL); assertFalse(result.isPass()); @@ -80,7 +85,7 @@ public class TestResult { } // test the combine method with a PASS and STOP - @Test(timeout = 1000) + @Test public void combinePassStop() { Result result = Result.PASS.combine(Result.STOP); assertTrue(result.isPass()); @@ -88,7 +93,7 @@ public class TestResult { } // test the combine method with a STOP and FAIL - @Test(timeout = 1000) + @Test public void combineStopFail() { Result result = Result.STOP.combine(Result.FAIL); assertFalse(result.isPass()); @@ -96,7 +101,7 @@ public class TestResult { } // test the combine method with a STOP and a PASS - @Test(timeout = 1000) + @Test public void combineStopPass() { Result result = Result.STOP.combine(Result.PASS); assertTrue(result.isPass()); @@ -104,7 +109,7 @@ public class TestResult { } // test the combine method with a FAIL and a STOP - @Test(timeout = 1000) + @Test public void combineFailStop() { Result result = Result.FAIL.combine(Result.STOP); assertFalse(result.isPass()); @@ -112,7 +117,7 @@ public 
class TestResult { } // test the negation of PASS - @Test(timeout = 1000) + @Test public void negatePass() { Result result = Result.PASS.negate(); assertFalse(result.isPass()); @@ -120,7 +125,7 @@ public class TestResult { } // test the negation of FAIL - @Test(timeout = 1000) + @Test public void negateFail() { Result result = Result.FAIL.negate(); assertTrue(result.isPass()); @@ -128,7 +133,7 @@ public class TestResult { } // test the negation of STOP - @Test(timeout = 1000) + @Test public void negateStop() { Result result = Result.STOP.negate(); assertFalse(result.isPass()); @@ -136,7 +141,7 @@ public class TestResult { } // test equals with two PASSes - @Test(timeout = 1000) + @Test public void equalsPass() { Result one = Result.PASS; Result two = Result.PASS.combine(Result.PASS); @@ -144,7 +149,7 @@ public class TestResult { } // test equals with two FAILs - @Test(timeout = 1000) + @Test public void equalsFail() { Result one = Result.FAIL; Result two = Result.FAIL.combine(Result.FAIL); @@ -152,7 +157,7 @@ public class TestResult { } // test equals with two STOPS - @Test(timeout = 1000) + @Test public void equalsStop() { Result one = Result.STOP; Result two = Result.STOP.combine(Result.STOP); @@ -160,7 +165,7 @@ public class TestResult { } // test all combinations of not equals - @Test(timeout = 1000) + @Test public void notEquals() { assertFalse(Result.PASS.equals(Result.FAIL)); assertFalse(Result.PASS.equals(Result.STOP)); From 4feed9b2dbff7bc52871cde7e1ff31b862e4fe9a Mon Sep 17 00:00:00 2001 From: Steve Loughran Date: Fri, 15 Apr 2016 17:43:38 +0100 Subject: [PATCH 13/26] HADOOP-13026 Should not wrap IOExceptions into a AuthenticationException in KerberosAuthenticator. 
Xuan Gong via stevel --- .../authentication/client/KerberosAuthenticator.java | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java index 0f046ae9f3e..a69ee46bbae 100644 --- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java +++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java @@ -327,7 +327,11 @@ public class KerberosAuthenticator implements Authenticator { } }); } catch (PrivilegedActionException ex) { - throw new AuthenticationException(ex.getException()); + if (ex.getException() instanceof IOException) { + throw (IOException) ex.getException(); + } else { + throw new AuthenticationException(ex.getException()); + } } catch (LoginException ex) { throw new AuthenticationException(ex); } From 89a838769ff5b6c64565e6949b14d7fed05daf54 Mon Sep 17 00:00:00 2001 From: Jing Zhao Date: Fri, 15 Apr 2016 10:49:21 -0700 Subject: [PATCH 14/26] HDFS-10283. o.a.h.hdfs.server.namenode.TestFSImageWithSnapshot#testSaveLoadImageWithAppending fails intermittently. Contributed by Mingliang Liu. 
--- .../namenode/TestFSImageWithSnapshot.java | 43 ++++++++++--------- 1 file changed, 22 insertions(+), 21 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java index 1904bbc122e..6be39509c9a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java @@ -47,6 +47,7 @@ import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper; import org.apache.hadoop.hdfs.util.Canceler; import org.apache.hadoop.test.GenericTestUtils; import org.apache.log4j.Level; + import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -62,7 +63,7 @@ public class TestFSImageWithSnapshot { } static final long seed = 0; - static final short REPLICATION = 3; + static final short NUM_DATANODES = 3; static final int BLOCKSIZE = 1024; static final long txid = 1; @@ -78,7 +79,7 @@ public class TestFSImageWithSnapshot { @Before public void setUp() throws Exception { conf = new Configuration(); - cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION) + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATANODES) .build(); cluster.waitActive(); fsn = cluster.getNamesystem(); @@ -177,7 +178,7 @@ public class TestFSImageWithSnapshot { cluster.shutdown(); cluster = new MiniDFSCluster.Builder(conf).format(false) - .numDataNodes(REPLICATION).build(); + .numDataNodes(NUM_DATANODES).build(); cluster.waitActive(); fsn = cluster.getNamesystem(); hdfs = cluster.getFileSystem(); @@ -188,7 +189,7 @@ public class TestFSImageWithSnapshot { hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE); cluster.shutdown(); cluster = new MiniDFSCluster.Builder(conf).format(false) - 
.numDataNodes(REPLICATION).build(); + .numDataNodes(NUM_DATANODES).build(); cluster.waitActive(); fsn = cluster.getNamesystem(); hdfs = cluster.getFileSystem(); @@ -215,7 +216,7 @@ public class TestFSImageWithSnapshot { hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE); cluster.shutdown(); cluster = new MiniDFSCluster.Builder(conf).format(false) - .numDataNodes(REPLICATION).build(); + .numDataNodes(NUM_DATANODES).build(); cluster.waitActive(); fsn = cluster.getNamesystem(); hdfs = cluster.getFileSystem(); @@ -248,20 +249,20 @@ public class TestFSImageWithSnapshot { hdfs.createSnapshot(dir, "s" + ++s); Path sub1file1 = new Path(sub1, "sub1file1"); Path sub1file2 = new Path(sub1, "sub1file2"); - DFSTestUtil.createFile(hdfs, sub1file1, BLOCKSIZE, REPLICATION, seed); - DFSTestUtil.createFile(hdfs, sub1file2, BLOCKSIZE, REPLICATION, seed); + DFSTestUtil.createFile(hdfs, sub1file1, BLOCKSIZE, (short) 1, seed); + DFSTestUtil.createFile(hdfs, sub1file2, BLOCKSIZE, (short) 1, seed); checkImage(s); hdfs.createSnapshot(dir, "s" + ++s); Path sub2 = new Path(dir, "sub2"); Path sub2file1 = new Path(sub2, "sub2file1"); Path sub2file2 = new Path(sub2, "sub2file2"); - DFSTestUtil.createFile(hdfs, sub2file1, BLOCKSIZE, REPLICATION, seed); - DFSTestUtil.createFile(hdfs, sub2file2, BLOCKSIZE, REPLICATION, seed); + DFSTestUtil.createFile(hdfs, sub2file1, BLOCKSIZE, (short) 1, seed); + DFSTestUtil.createFile(hdfs, sub2file2, BLOCKSIZE, (short) 1, seed); checkImage(s); hdfs.createSnapshot(dir, "s" + ++s); - hdfs.setReplication(sub1file1, (short) (REPLICATION - 1)); + hdfs.setReplication(sub1file1, (short) 1); hdfs.delete(sub1file2, true); hdfs.setOwner(sub2, "dr.who", "unknown"); hdfs.delete(sub2file1, true); @@ -300,7 +301,7 @@ public class TestFSImageWithSnapshot { // restart the cluster, and format the cluster cluster = new MiniDFSCluster.Builder(conf).format(true) - .numDataNodes(REPLICATION).build(); + .numDataNodes(NUM_DATANODES).build(); cluster.waitActive(); fsn = 
cluster.getNamesystem(); hdfs = cluster.getFileSystem(); @@ -338,8 +339,8 @@ public class TestFSImageWithSnapshot { Path sub1 = new Path(dir, "sub1"); Path sub1file1 = new Path(sub1, "sub1file1"); Path sub1file2 = new Path(sub1, "sub1file2"); - DFSTestUtil.createFile(hdfs, sub1file1, BLOCKSIZE, REPLICATION, seed); - DFSTestUtil.createFile(hdfs, sub1file2, BLOCKSIZE, REPLICATION, seed); + DFSTestUtil.createFile(hdfs, sub1file1, BLOCKSIZE, (short) 1, seed); + DFSTestUtil.createFile(hdfs, sub1file2, BLOCKSIZE, (short) 1, seed); // 1. create snapshot s0 hdfs.allowSnapshot(dir); @@ -372,7 +373,7 @@ public class TestFSImageWithSnapshot { out.close(); cluster.shutdown(); cluster = new MiniDFSCluster.Builder(conf).format(true) - .numDataNodes(REPLICATION).build(); + .numDataNodes(NUM_DATANODES).build(); cluster.waitActive(); fsn = cluster.getNamesystem(); hdfs = cluster.getFileSystem(); @@ -394,8 +395,8 @@ public class TestFSImageWithSnapshot { Path sub1 = new Path(dir, "sub1"); Path sub1file1 = new Path(sub1, "sub1file1"); Path sub1file2 = new Path(sub1, "sub1file2"); - DFSTestUtil.createFile(hdfs, sub1file1, BLOCKSIZE, REPLICATION, seed); - DFSTestUtil.createFile(hdfs, sub1file2, BLOCKSIZE, REPLICATION, seed); + DFSTestUtil.createFile(hdfs, sub1file1, BLOCKSIZE, (short) 1, seed); + DFSTestUtil.createFile(hdfs, sub1file2, BLOCKSIZE, (short) 1, seed); hdfs.allowSnapshot(dir); hdfs.createSnapshot(dir, "s0"); @@ -410,7 +411,7 @@ public class TestFSImageWithSnapshot { cluster.shutdown(); cluster = new MiniDFSCluster.Builder(conf).format(false) - .numDataNodes(REPLICATION).build(); + .numDataNodes(NUM_DATANODES).build(); cluster.waitActive(); fsn = cluster.getNamesystem(); hdfs = cluster.getFileSystem(); @@ -440,7 +441,7 @@ public class TestFSImageWithSnapshot { // restart cluster cluster.shutdown(); cluster = new MiniDFSCluster.Builder(conf).format(false) - .numDataNodes(REPLICATION).build(); + .numDataNodes(NUM_DATANODES).build(); cluster.waitActive(); hdfs = 
cluster.getFileSystem(); @@ -478,7 +479,7 @@ public class TestFSImageWithSnapshot { Path newDir = new Path(subsubDir, "newdir"); Path newFile = new Path(newDir, "newfile"); hdfs.mkdirs(newDir); - DFSTestUtil.createFile(hdfs, newFile, BLOCKSIZE, REPLICATION, seed); + DFSTestUtil.createFile(hdfs, newFile, BLOCKSIZE, (short) 1, seed); // create another snapshot SnapshotTestHelper.createSnapshot(hdfs, dir, "s2"); @@ -491,7 +492,7 @@ public class TestFSImageWithSnapshot { // restart cluster cluster.shutdown(); - cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION) + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATANODES) .format(false).build(); cluster.waitActive(); fsn = cluster.getNamesystem(); @@ -504,7 +505,7 @@ public class TestFSImageWithSnapshot { cluster.shutdown(); cluster = new MiniDFSCluster.Builder(conf).format(false) - .numDataNodes(REPLICATION).build(); + .numDataNodes(NUM_DATANODES).build(); cluster.waitActive(); fsn = cluster.getNamesystem(); hdfs = cluster.getFileSystem(); From 55e19b7f0c1243090dff2d08ed785cefd420b009 Mon Sep 17 00:00:00 2001 From: Jing Zhao Date: Fri, 15 Apr 2016 10:53:40 -0700 Subject: [PATCH 15/26] HDFS-10293. StripedFileTestUtil#readAll flaky. Contributed by Mingliang Liu. 
--- .../hadoop/hdfs/StripedFileTestUtil.java | 21 ++++++------------- 1 file changed, 6 insertions(+), 15 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java index 0f0221c1197..6d0dfa86c82 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock; import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager; import org.apache.hadoop.hdfs.util.StripedBlockUtil; import org.apache.hadoop.hdfs.web.WebHdfsFileSystem.WebHdfsInputStream; +import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.erasurecode.CodecUtil; import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureEncoder; import org.junit.Assert; @@ -85,16 +86,6 @@ public class StripedFileTestUtil { return (byte) (pos % mod + 1); } - static int readAll(FSDataInputStream in, byte[] buf) throws IOException { - int readLen = 0; - int ret; - while ((ret = in.read(buf, readLen, buf.length - readLen)) >= 0 && - readLen <= buf.length) { - readLen += ret; - } - return readLen; - } - static void verifyLength(FileSystem fs, Path srcPath, int fileLength) throws IOException { FileStatus status = fs.getFileStatus(srcPath); @@ -214,11 +205,11 @@ public class StripedFileTestUtil { static void assertSeekAndRead(FSDataInputStream fsdis, int pos, int writeBytes) throws IOException { fsdis.seek(pos); - byte[] buf = new byte[writeBytes]; - int readLen = StripedFileTestUtil.readAll(fsdis, buf); - assertEquals(readLen, writeBytes - pos); - for (int i = 0; i < readLen; i++) { - assertEquals("Byte at " + i + " should be the same", StripedFileTestUtil.getByte(pos + i), buf[i]); + byte[] buf = new byte[writeBytes - pos]; + 
IOUtils.readFully(fsdis, buf, 0, buf.length); + for (int i = 0; i < buf.length; i++) { + assertEquals("Byte at " + i + " should be the same", + StripedFileTestUtil.getByte(pos + i), buf[i]); } } From fdbafbc9e59314d9f9f75e615de9d2dfdced017b Mon Sep 17 00:00:00 2001 From: Naganarasimha Date: Fri, 15 Apr 2016 23:37:05 +0530 Subject: [PATCH 16/26] YARN-4909. Fix intermittent failures of TestRMWebServices And TestRMWithCSRFFilter. Contributed by Bibin A Chundatt --- .../hadoop/yarn/webapp/JerseyTestBase.java | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/JerseyTestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/JerseyTestBase.java index 7a225a3999e..d537fa748f9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/JerseyTestBase.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/JerseyTestBase.java @@ -19,9 +19,10 @@ package org.apache.hadoop.yarn.webapp; import java.io.IOException; +import java.util.Random; import org.apache.hadoop.net.ServerSocketUtil; -import org.junit.Before; + import com.sun.jersey.test.framework.JerseyTest; import com.sun.jersey.test.framework.WebAppDescriptor; @@ -30,9 +31,16 @@ public abstract class JerseyTestBase extends JerseyTest { super(appDescriptor); } - @Before - public void initializeJerseyPort() throws IOException { - int jerseyPort = ServerSocketUtil.getPort(9998, 10); - System.setProperty("jersey.test.port", Integer.toString(jerseyPort)); + @Override + protected int getPort(int port) { + Random rand = new Random(); + int jerseyPort = port + rand.nextInt(1000); + try { + jerseyPort = ServerSocketUtil.getPort(jerseyPort, 10); + } catch (IOException e) { + // Ignore exception even after 10 times free port is + // not received. 
+ } + return super.getPort(jerseyPort); } } From 69f3d428d5c3ab0c79cacffc22b1f59408622ae7 Mon Sep 17 00:00:00 2001 From: Jason Lowe Date: Fri, 15 Apr 2016 20:36:45 +0000 Subject: [PATCH 17/26] YARN-4940. yarn node -list -all failed if RM start with decommissioned node. Contributed by sandflee --- .../resourcemanager/NodesListManager.java | 36 ++------------ .../resourcemanager/rmnode/RMNodeImpl.java | 4 +- .../resourcemanager/TestClientRMService.java | 49 ++++++++++++++++++- .../TestRMNodeTransitions.java | 4 +- 4 files changed, 55 insertions(+), 38 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NodesListManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NodesListManager.java index ec2708ebb3c..121c418fc3a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NodesListManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NodesListManager.java @@ -163,7 +163,7 @@ public class NodesListManager extends CompositeService implements private void setDecomissionedNMs() { Set excludeList = hostsReader.getExcludedHosts(); for (final String host : excludeList) { - UnknownNodeId nodeId = new UnknownNodeId(host); + NodeId nodeId = createUnknownNodeId(host); RMNodeImpl rmNode = new RMNodeImpl(nodeId, rmContext, host, -1, -1, new UnknownNode(host), null, null); rmContext.getInactiveRMNodes().put(nodeId, rmNode); @@ -430,38 +430,8 @@ public class NodesListManager extends CompositeService implements * A NodeId instance needed upon startup for populating inactive nodes Map. 
* It only knows the hostname/ip and marks the port to -1 or invalid. */ - public static class UnknownNodeId extends NodeId { - - private String host; - - public UnknownNodeId(String host) { - this.host = host; - } - - @Override - public String getHost() { - return this.host; - } - - @Override - protected void setHost(String hst) { - - } - - @Override - public int getPort() { - return -1; - } - - @Override - protected void setPort(int port) { - - } - - @Override - protected void build() { - - } + public static NodeId createUnknownNodeId(String host) { + return NodeId.newInstance(host, -1); } /** diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java index 5f8317e890a..9b807164e7e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java @@ -786,8 +786,8 @@ public class RMNodeImpl implements RMNode, EventHandler { if (previousRMNode != null) { rmNode.updateMetricsForRejoinedNode(previousRMNode.getState()); } else { - NodesListManager.UnknownNodeId unknownNodeId = - new NodesListManager.UnknownNodeId(nodeId.getHost()); + NodeId unknownNodeId = + NodesListManager.createUnknownNodeId(nodeId.getHost()); previousRMNode = rmNode.context.getInactiveRMNodes().remove(unknownNodeId); if (previousRMNode != null) { diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java index 331f3acde65..bb31f6e35f3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java @@ -28,6 +28,8 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.when; +import java.io.File; +import java.io.FileOutputStream; import java.io.IOException; import java.net.InetSocketAddress; import java.util.ArrayList; @@ -249,7 +251,7 @@ public class TestClientRMService { Assert.assertTrue(report.getNodeLabels() != null && report.getNodeLabels().isEmpty()); } - + rpc.stopProxy(client, conf); rm.close(); } @@ -1566,4 +1568,49 @@ public class TestClientRMService { Assert.assertEquals("Incorrect priority has been returned", expected, updateApplicationPriority.getApplicationPriority().getPriority()); } + + private void createExcludeFile(String filename) throws IOException { + File file = new File(filename); + if (file.exists()) { + file.delete(); + } + + FileOutputStream out = new FileOutputStream(file); + out.write("decommisssionedHost".getBytes()); + out.close(); + } + + @Test + public void testRMStartWithDecommissionedNode() throws Exception { + String excludeFile = "excludeFile"; + createExcludeFile(excludeFile); + YarnConfiguration conf = new YarnConfiguration(); + conf.set(YarnConfiguration.RM_NODES_EXCLUDE_FILE_PATH, + excludeFile); + MockRM rm = new MockRM(conf) { + 
protected ClientRMService createClientRMService() { + return new ClientRMService(this.rmContext, scheduler, + this.rmAppManager, this.applicationACLsManager, this.queueACLsManager, + this.getRMContext().getRMDelegationTokenSecretManager()); + }; + }; + rm.start(); + + YarnRPC rpc = YarnRPC.create(conf); + InetSocketAddress rmAddress = rm.getClientRMService().getBindAddress(); + LOG.info("Connecting to ResourceManager at " + rmAddress); + ApplicationClientProtocol client = + (ApplicationClientProtocol) rpc + .getProxy(ApplicationClientProtocol.class, rmAddress, conf); + + // Make call + GetClusterNodesRequest request = + GetClusterNodesRequest.newInstance(EnumSet.allOf(NodeState.class)); + List nodeReports = client.getClusterNodes(request).getNodeReports(); + Assert.assertEquals(1, nodeReports.size()); + + rm.stop(); + rpc.stopProxy(client, conf); + new File(excludeFile).delete(); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java index 6ba360bda4f..7c03574cbcd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java @@ -690,8 +690,8 @@ public class TestRMNodeTransitions { @Test public void testUnknownNodeId() { - NodesListManager.UnknownNodeId nodeId = - new NodesListManager.UnknownNodeId("host1"); + NodeId nodeId = + NodesListManager.createUnknownNodeId("host1"); RMNodeImpl node = new RMNodeImpl(nodeId, rmContext, null, 0, 0, null, null, null); 
rmContext.getInactiveRMNodes().putIfAbsent(nodeId,node); From cab9cbaa0a6d92dd6473545da0ea1e6a22fd09e1 Mon Sep 17 00:00:00 2001 From: Arun Suresh Date: Fri, 15 Apr 2016 16:58:49 -0700 Subject: [PATCH 18/26] YARN-4468. Document the general ReservationSystem functionality, and the REST API. (subru and carlo via asuresh) --- hadoop-project/src/site/site.xml | 1 + .../src/site/markdown/ReservationSystem.md | 65 +++ .../src/site/markdown/ResourceManagerRest.md | 447 +++++++++++++++++- .../src/site/markdown/YARN.md | 2 + .../images/yarn_reservation_system.png | Bin 0 -> 85449 bytes 5 files changed, 513 insertions(+), 2 deletions(-) create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ReservationSystem.md create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/resources/images/yarn_reservation_system.png diff --git a/hadoop-project/src/site/site.xml b/hadoop-project/src/site/site.xml index 8f020736686..f9f4726f849 100644 --- a/hadoop-project/src/site/site.xml +++ b/hadoop-project/src/site/site.xml @@ -133,6 +133,7 @@ + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ReservationSystem.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ReservationSystem.md new file mode 100644 index 00000000000..eda8d4d806e --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ReservationSystem.md @@ -0,0 +1,65 @@ + + +Reservation System +================== + + +* [Purpose](#Purpose) +* [Overview](#Overview) +* [Flow of a Reservation](#Flow_of_a_Reservation) +* [Configuring the Reservation System](#Configuring_the_Reservation_System) + +Purpose +------- + +This document provides a brief overview of the `YARN ReservationSystem`. + +Overview +-------- + +The `ReservationSystem` of YARN provides the user the ability to reserve resources over (and ahead of) time, to ensure that important production jobs will be run very predictably. 
The ReservationSystem performs careful admission control and provides guarantees over absolute amounts of resources (instead of % of cluster size). Reservations can be either malleable or have gang semantics, and can have time-varying resource requirements. The ReservationSystem is a component of the YARN ResourceManager. + + +Flow of a Reservation +---------------------- + +![YARN Reservation System | width=600px](./images/yarn_reservation_system.png) + +With reference to the figure above, a typical reservation proceeds as follows: + + * **Step 1** The user (or an automated tool on its behalf) submits a reservation request specified by the Reservation Definition Language (RDL). This describes the user's need for resources over-time (e.g., a skyline of resources) and temporal constraints (e.g., deadline). This can be done both programmatically through the usual Client-to-RM protocols or via the REST api of the RM. + + * **Step 2** The ReservationSystem leverages a ReservationAgent (GREE in the figure) to find a plausible allocation for the reservation in the Plan, a data structure tracking all reservations currently accepted and the available resources in the system. + + * **Step 3** The SharingPolicy provides a way to enforce invariants on the reservation being accepted, potentially rejecting reservations. For example, the CapacityOvertimePolicy allows enforcement of both instantaneous max-capacity a user can request across all of his/her reservations and a limit on the integral of resources over a period of time, e.g., the user can reserve up to 50% of the cluster capacity instantaneously, but in any 24h period of time he/she cannot exceed 10% average. + + * **Step 4** Upon a successful validation the ReservationSystem returns to the user a ReservationId (think of it as an airline ticket). + + * **Step 5** When the time comes, a new component called the PlanFollower publishes the state of the plan to the scheduler, by dynamically creating/tweaking/destroying queues. 
+ + * **Step 6** The user can then submit one (or more) jobs to the reservable queue, by simply including the ReservationId as part of the ApplicationSubmissionContext. + + * **Step 7** The Scheduler will then provide containers from a special queue created to ensure resources reservation is respected. Within the limits of the reservation, the user has guaranteed access to the resources, above that resource sharing proceeds with standard Capacity/Fairness sharing. + + * **Step 8** The system includes mechanisms to adapt to a drop in cluster capacity. This consists of replanning by "moving" the reservation if possible, or rejecting the smallest amount of previously accepted reservations (to ensure that other reservations will receive their full amount). + + + + + +Configuring the Reservation System +---------------------------------- + +Configuring the `ReservationSystem` is simple. Currently we have added support for *reservations* in both `CapacityScheduler` and `FairScheduler`. You can mark any **leaf queue** in the **capacity-scheduler.xml** or **fair-scheduler.xml** as available for "reservations" (see [CapacityScheduler](http://hadoop.apache.org/docs/current/hadoop-yarn/hadoop-yarn-site/CapacityScheduler.html#Configuring_ReservationSystem_with_CapacityScheduler) and the [FairScheduler](http://hadoop.apache.org/docs/current/hadoop-yarn/hadoop-yarn-site/FairScheduler.html) for details). Then the capacity/fair share within that queue can be used for making reservations. Jobs can still be submitted to the *reservable queue* without a reservation, in which case they will be run in best-effort mode in whatever capacity is left over by the jobs running within active reservations. 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md index c72b7f408a7..dd6ac0448fd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md @@ -34,6 +34,9 @@ ResourceManager REST API's. * [Cluster Application Priority API](#Cluster_Application_Priority_API) * [Cluster Delegation Tokens API](#Cluster_Delegation_Tokens_API) * [Cluster Reservation API List](#Cluster_Reservation_API_List) +* [Cluster Reservation API Submit](#Cluster_Reservation_API_Submit) +* [Cluster Reservation API Update](#Cluster_Reservation_API_Update) +* [Cluster Reservation API Delete](#Cluster_Reservation_API_Delete) Overview -------- @@ -3223,8 +3226,8 @@ The Cluster Reservation API can be used to list reservations. When listing reser | Item | Data Type | Description | |:---- |:---- |:---- | | arrival | long | The UTC time representation of the earliest time this reservation can be allocated from. | -| deadline | long | The UTC time representation of the latest time within which this reservatino can be allocated. | -| reservation-name | string | A mnemonic name of the reservaiton (not a valid identifier). | +| deadline | long | The UTC time representation of the latest time within which this reservation can be allocated. | +| reservation-name | string | A mnemonic name of the reservation (not a valid identifier). 
| | reservation-requests | object | A list of "stages" or phases of this reservation, each describing resource requirements and duration | ### Elements of the *reservation-requests* object @@ -3381,3 +3384,443 @@ Response Body: ``` + +Cluster Reservation API Submit +------------------------------ + +The Cluster Reservation API can be used to submit reservations.When submitting a reservation the user specify the constraints in terms of resources, and time that are required, the resulting page returns a reservation-id that the user can use to get access to the resources by specifying it as part of [Cluster Submit Applications API](#Cluster_Applications_APISubmit_Application). + +### URI + + * http:///ws/v1/cluster/reservation/submit + +### HTTP Operations Supported + + * POST + +### POST Response Examples + +POST requests can be used to submit reservations to the ResourceManager. As mentioned above, a reservation-id is returned upon success (in the body of the answer). Successful submissions result in a 200 response. Please note that in order to submit a reservation, you must have an authentication filter setup for the HTTP interface. The functionality requires that a username is set in the HttpServletRequest. If no filter is setup, the response will be an "UNAUTHORIZED" response. + +Please note that this feature is currently in the alpha stage and may change in the future. + +#### Elements of the POST request object + +| Item | Data Type | Description | +|:---- |:---- |:---- | +| queue | string | The (reservable) queue you are submitting to| +| reservation-definition | object | A set of constraints representing the need for resources over time of a user. | + +Elements of the *reservation-definition* object + +| Item | Data Type | Description | +|:---- |:---- |:---- | +|arrival | long | The UTC time representation of the earliest time this reservation can be allocated from. 
| +| deadline | long | The UTC time representation of the latest time within which this reservation can be allocated. | +| reservation-name | string | A mnemonic name of the reservation (not a valid identifier). | +| reservation-requests | object | A list of "stages" or phases of this reservation, each describing resource requirements and duration | + +Elements of the *reservation-requests* object + +| Item | Data Type | Description | +|:---- |:---- |:---- | +| reservation-request-interpreter | int | A numeric choice of how to interpret the set of ReservationRequest: 0 is an ANY, 1 for ALL, 2 for ORDER, 3 for ORDER\_NO\_GAP | +| reservation-request | object | The description of the resource and time capabilities for a phase/stage of this reservation | + +Elements of the *reservation-request* object + +| Item | Data Type | Description | +|:---- |:---- |:---- | +| duration | long | The duration of a ReservationRequest in milliseconds (amount of consecutive milliseconds a satisfiable allocation for this portion of the reservation should exist for). | +| num-containers | int | The number of containers required in this phase of the reservation (capture the maximum parallelism of the job(s) in this phase). | +| min-concurrency | int | The minimum number of containers that must be concurrently allocated to satisfy this allocation (capture min-parallelism, useful to express gang semantics). 
| +| capability | object | Allows to specify the size of each container (memory, vCores).| + +Elements of the *capability* object + +| Item | Data Type | Description | +|:---- |:---- |:---- | +| memory | int | the number of MB of memory for this container | +| vCores | int | the number of virtual cores for this container | + + +**JSON response** + +This examples contains a reservation composed of two stages (alternative to each other as the *reservation-request-interpreter* is set to 0), so that the first is shorter and "taller" and "gang" +with exactly 220 containers for 60 seconds, while the second alternative is longer with 120 seconds duration and less tall with 110 containers (and a min-concurrency of 1 container, thus no gang semantics). + +HTTP Request: + +```json +POST http://rmdns:8088/ws/v1/cluster/reservation/submit +Content-Type: application/json +{ + "queue" : "dedicated", + "reservation-definition" : { + "arrival" : 1765541532000, + "deadline" : 1765542252000, + "reservation-name" : "res_1", + "reservation-requests" : { + "reservation-request-interpreter" : 0, + "reservation-request" : [ + { + "duration" : 60000, + "num-containers" : 220, + "min-concurrency" : 220, + "capability" : { + "memory" : 1024, + "vCores" : 1 + } + }, + { + "duration" : 120000, + "num-containers" : 110, + "min-concurrency" : 1, + "capability" : { + "memory" : 1024, + "vCores" : 1 + } + } + ] + } + } +} +``` + +Response Header: + +200 OK +Cache-Control: no-cache +Expires: Thu, 17 Dec 2015 23:36:34 GMT, Thu, 17 Dec 2015 23:36:34 GMT +Date: Thu, 17 Dec 2015 23:36:34 GMT, Thu, 17 Dec 2015 23:36:34 GMT +Pragma: no-cache, no-cache +Content-Type: application/xml +Content-Encoding: gzip +Content-Length: 137 +Server: Jetty(6.1.26) + +Response Body: + +```json +{"reservation-id":"reservation_1448064217915_0009"} +``` + +**XML response** + +HTTP Request: + +```xml +POST http://rmdns:8088/ws/v1/cluster/reservation/submit +Accept: application/xml +Content-Type: application/xml + + 
dedicated + + 1765541532000 + 1765542252000 + res_1 + + 0 + + 60000 + 220 + 220 + + 1024 + 1 + + + + 120000 + 110 + 1 + + 1024 + 1 + + + + + +``` + +Response Header: + +200 OK +Cache-Control: no-cache +Expires: Thu, 17 Dec 2015 23:49:21 GMT, Thu, 17 Dec 2015 23:49:21 GMT +Date: Thu, 17 Dec 2015 23:49:21 GMT, Thu, 17 Dec 2015 23:49:21 GMT +Pragma: no-cache, no-cache +Content-Type: application/xml +Content-Encoding: gzip +Content-Length: 137 +Server: Jetty(6.1.26) + +Response Body: + +```xml + + + reservation_1448064217915_0010 + +``` + + +Cluster Reservation API Update +------------------------------ + +The Cluster Reservation API Update can be used to update existing reservations.Update of a Reservation works similarly to submit described above, but the user submits the reservation-id of an existing reservation to be updated. The semantics is a try-and-swap, successful operation will modify the existing reservation based on the requested update parameter, while a failed execution will leave the existing reservation unchanged. + +### URI + + * http:///ws/v1/cluster/reservation/update + +### HTTP Operations Supported + + * POST + +### POST Response Examples + +POST requests can be used to update reservations to the ResourceManager. Successful submissions result in a 200 response, indicate in-place update of the existing reservation (id does not change). Please note that in order to update a reservation, you must have an authentication filter setup for the HTTP interface. The functionality requires that a username is set in the HttpServletRequest. If no filter is setup, the response will be an "UNAUTHORIZED" response. + +Please note that this feature is currently in the alpha stage and may change in the future. 
+ +#### Elements of the POST request object + +| Item | Data Type | Description | +|:---- |:---- |:---- | +| reservation-id | string | The id of the reservation to be updated (the system automatically looks up the right queue from this)| +| reservation-definition | object | A set of constraints representing the need for resources over time of a user. | + +Elements of the *reservation-definition* object + +| Item | Data Type | Description | +|:---- |:---- |:---- | +|arrival | long | The UTC time representation of the earliest time this reservation can be allocated from. | +| deadline | long | The UTC time representation of the latest time within which this reservation can be allocated. | +| reservation-name | string | A mnemonic name of the reservation (not a valid identifier). | +| reservation-requests | object | A list of "stages" or phases of this reservation, each describing resource requirements and duration | + +Elements of the *reservation-requests* object + +| Item | Data Type | Description | +|:---- |:---- |:---- | +| reservation-request-interpreter | int | A numeric choice of how to interpret the set of ReservationRequest: 0 is an ANY, 1 for ALL, 2 for ORDER, 3 for ORDER\_NO\_GAP | +| reservation-request | object | The description of the resource and time capabilities for a phase/stage of this reservation | + +Elements of the *reservation-request* object + +| Item | Data Type | Description | +|:---- |:---- |:---- | +| duration | long | The duration of a ReservationRequeust in milliseconds (amount of consecutive milliseconds a satisfiable allocation for this portion of the reservation should exist for). | +| num-containers | int | The number of containers required in this phase of the reservation (capture the maximum parallelism of the job(s) in this phase). | +| min-concurrency | int | The minimum number of containers that must be concurrently allocated to satisfy this allocation (capture min-parallelism, useful to express gang semantics). 
| +| capability | object | Allows to specify the size of each container (memory, vCores).| + +Elements of the *capability* object + +| Item | Data Type | Description | +|:---- |:---- |:---- | +| memory | int | the number of MB of memory for this container | +| vCores | int | the number of virtual cores for this container | + + +**JSON response** + +This examples updates an existing reservation identified by *reservation_1449259268893_0005* with two stages (in order as the *reservation-request-interpreter* is set to 2), with the first stage being a "gang" of 10 containers for 5 minutes (min-concurrency of 10 containers) followed by a 50 containers for 10 minutes(min-concurrency of 1 container, thus no gang semantics). + +HTTP Request: + +```json +POST http://rmdns:8088/ws/v1/cluster/reservation/update +Accept: application/json +Content-Type: application/json +{ + "reservation-id" : "reservation_1449259268893_0005", + "reservation-definition" : { + "arrival" : 1765541532000, + "deadline" : 1765542252000, + "reservation-name" : "res_1", + "reservation-requests" : { + "reservation-request-interpreter" : 2, + "reservation-request" : [ + { + "duration" : 300000, + "num-containers" : 10, + "min-concurrency" : 10, + "capability" : { + "memory" : 1024, + "vCores" : 1 + } + }, + { + "duration" : 60000, + "num-containers" : 50, + "min-concurrency" : 1, + "capability" : { + "memory" : 1024, + "vCores" : 1 + } + } + ] + } + } +} +``` + +Response Header: + +200 OK +Cache-Control: no-cache +Expires: Thu, 17 Dec 2015 23:36:34 GMT, Thu, 17 Dec 2015 23:36:34 GMT +Date: Thu, 17 Dec 2015 23:36:34 GMT, Thu, 17 Dec 2015 23:36:34 GMT +Pragma: no-cache, no-cache +Content-Type: application/json +Content-Encoding: gzip +Content-Length: 137 +Server: Jetty(6.1.26) + +Response Body: + + No response body + +**XML response** + +HTTP Request: + +```xml +POST http://rmdns:8088/ws/v1/cluster/reservation/update +Accept: application/xml +Content-Type: application/xml + + 
reservation_1449259268893_0005 + + 1765541532000 + 1765542252000 + res_1 + + 2 + + 300000 + 10 + 10 + + 1024 + 1 + + + + 60000 + 50 + 1 + + 1024 + 1 + + + + + +``` + +Response Header: + +200 OK +Cache-Control: no-cache +Expires: Thu, 17 Dec 2015 23:49:21 GMT, Thu, 17 Dec 2015 23:49:21 GMT +Date: Thu, 17 Dec 2015 23:49:21 GMT, Thu, 17 Dec 2015 23:49:21 GMT +Pragma: no-cache, no-cache +Content-Type: application/xml +Content-Encoding: gzip +Content-Length: 137 +Server: Jetty(6.1.26) + +Response Body: + + No response body + +Cluster Reservation API Delete +------------------------------ + +The Cluster Reservation API Delete can be used to delete existing reservations.Delete works similar to update. The requests contains the reservation-id, and if successful the reservation is cancelled, otherwise the reservation remains in the system. + +### URI + + * http:///ws/v1/cluster/reservation/delete + +### HTTP Operations Supported + + * POST + +### POST Response Examples + +POST requests can be used to delete reservations to the ResourceManager. Successful submissions result in a 200 response, indicating that the delete succeeded. Please note that in order to delete a reservation, you must have an authentication filter setup for the HTTP interface. The functionality requires that a username is set in the HttpServletRequest. If no filter is setup, the response will be an "UNAUTHORIZED" response. + +Please note that this feature is currently in the alpha stage and may change in the future. 
+ +#### Elements of the POST request object + +| Item | Data Type | Description | +|:---- |:---- |:---- | +| reservation-id | string | The id of the reservation to be deleted (the system automatically looks up the right queue from this)| + + +**JSON response** + +This examples deletes an existing reservation identified by *reservation_1449259268893_0006* + +HTTP Request: + +```json +POST http://10.200.91.98:8088/ws/v1/cluster/reservation/delete +Accept: application/json +Content-Type: application/json +{ + "reservation-id" : "reservation_1449259268893_0006" +} +``` + +Response Header: + +200 OK +Cache-Control: no-cache +Expires: Fri, 18 Dec 2015 01:31:05 GMT, Fri, 18 Dec 2015 01:31:05 GMT +Date: Fri, 18 Dec 2015 01:31:05 GMT, Fri, 18 Dec 2015 01:31:05 GMT +Pragma: no-cache, no-cache +Content-Type: application/json +Content-Encoding: gzip +Transfer-Encoding: chunked +Server: Jetty(6.1.26) + +Response Body: + + No response body + +**XML response** + +HTTP Request: + +```xml +POST http://10.200.91.98:8088/ws/v1/cluster/reservation/delete +Accept: application/xml +Content-Type: application/xml + +reservation_1449259268893_0006 + +``` + +Response Header: + +200 OK +Cache-Control: no-cache +Expires: Fri, 18 Dec 2015 01:33:23 GMT, Fri, 18 Dec 2015 01:33:23 GMT +Date: Fri, 18 Dec 2015 01:33:23 GMT, Fri, 18 Dec 2015 01:33:23 GMT +Pragma: no-cache, no-cache +Content-Type: application/xml +Content-Encoding: gzip +Content-Length: 101 +Server: Jetty(6.1.26) + +Response Body: + + No response body diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YARN.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YARN.md index f8e8154774c..974f41dfdcd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YARN.md +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YARN.md @@ -32,3 +32,5 @@ The Scheduler has a pluggable policy which is responsible for partitioning the c The ApplicationsManager is 
responsible for accepting job-submissions, negotiating the first container for executing the application specific ApplicationMaster and provides the service for restarting the ApplicationMaster container on failure. The per-application ApplicationMaster has the responsibility of negotiating appropriate resource containers from the Scheduler, tracking their status and monitoring for progress. MapReduce in hadoop-2.x maintains **API compatibility** with previous stable release (hadoop-1.x). This means that all MapReduce jobs should still run unchanged on top of YARN with just a recompile. + +YARN also supports the notion of **resource reservation** via the [ReservationSystem](http://hadoop.apache.org/docs/current/hadoop-yarn/hadoop-yarn-site/ReservationSystem.html), a component that allows users to specify a profile of resources over-time and temporal constraints (e.g., deadlines), and reserve resources to ensure the predictable execution of important jobs. The *ReservationSystem* tracks resources over-time, performs admission control for reservations, and dynamically instructs the underlying scheduler to ensure that the reservation is fulfilled. 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/resources/images/yarn_reservation_system.png b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/resources/images/yarn_reservation_system.png new file mode 100644 index 0000000000000000000000000000000000000000..cbe197510c2404ecb61eb64795150c1fdc2234a3 GIT binary patch literal 85449 zcmeFZXIRtQvo;(HqM)ME6hUlA32iGV0uls~CY=xnO;n@_MClztV2e@&1B%iKB_W}O z7D@y}1Vn0r(h+G1y^|2~{DOO*|2fzDK3|^ChYK;JtTk(9)~tK(nZ-*(ea&MW0vsR^ z=$N+F9b*vaAPxjN5Ow$v@XYZvPA=fTeICY|Y9QQq!CBy!gATX#Zi7H2ahyALY{2hF z5Ly-og@HNnZCQO4liSf}jW_TgvF1MzBc)!c5$ zr>dJ{+&&zPePwc!^wjglkA~y=cM8AAc^6BxmpqHQ+w7hBV(V)?Nq~O8-|d1~EE4Xe z;rig_A&Z2FV%7aGd}Xyx9C>uY{O}3BH(A?E75WDkule%P_o*$z?^8Fa9~bw{E6*v< zEqn_M?zdV+9|q(Ez8ueof&V;`JJrDZ=aIn6G1l6Fub4Xs)_Opo(7P9__U}DXy>!xQ z-`=B)H-`bWu)eyq4#NICa=iS%!~L)B_I%-g9p!&_#s5=85gYsblAA}vi1jVcOM1kL zEj;&ymc*m8#DH&f%>V7F(1J%o@l09~(?35}7&3x+*b;k;WXmAg?hVBn&16CFXDWnl zpXyNKu~fxQQ_`MI6D54ycIL`DHz=AAnh|m6W;ti= zj$u0^^G9l8&8ipVb6Xe?*=2c455cJYMRCEaH$tw_)@_;VHg%hIi}R#_k<~OCYww@- zFV|g&8!rFaVBJ2Ivu4%aXSeX#bRT+@UN=gOuwaRL8W^Z3gh9=Q)ls1Tyjufm6|y24 zq6Qosp%cP8R`(|tihS7bA!~eWL-svmCvnQu%ED`X|N3KJ4I`2E)Q2G(W$7_#A-mf) z)ozMI!fzN5$r_I>$-_9fe(JFruG>jH8rHlk_3rT5{bd}qF3VLJIMk@|Ne5A@gjouM zrc^Q4)ZKyKoNS=)`gT3om7ybJ6kS`Iu`)j^6xpA!y3jpeIhs}zyc90{*V2&BK+*>SW!s9VTq$nJZMqw4^^pnuuQh+sZK@WR|&yh{4a@B!YR%%Msd z2%#Wq#3BSo=_8xp{E`dm0ye)a#;2>j7gF+mw<={~zd4#{pklGTJ6P-IA@fL}nF}{` zyDI`3%sz*^q%8&B+Yp9QKCz7Voq@Td1vLSO6F7^9u;CJ$eT$NnM^9OmfAnfdMy(xg zznJDv5%&m&_ALz8O!%~F3e(UsDc0gUU;Rg2XWnR@`)k@l!@fGBvagznuh#&6@_gm- z>b$l2ZdK9}-k*i-OY{A8#3K!A+Re!Oq;(-tzOxiHhTz#dpYN|6#>Ian{Af4!{~d7B zS?WPYQ8Zz5B!D$f+z@^W2Ayp;y(1*`&}vpQ<8Q^yQO4$|Fi}^!Bz-6 zX~pY$B$w<>-a+SQ(G8?Tu-j0hvp3vckJ`Ue`c!=&txf*6cO@I4xQ)#WttlMzb`MnK zzf)@2oq+64j0g`wAv@z-0h9YBcw1JMq0U{I&qD5-RYMf1absvX;9a5Q3wt0eC7d;894Br^ zG_fi?!)^Jf`>N^ms>H2=_&+*M0(8v99EYr>b>Vsv+o8C@{g1!?1^6p_$P#;<=s0Fb zIL_K(ej({Lk~-DoMIS#`|L9f*3ov#m{#l5}2>>6CIl&R@!;8U{kkN}?-5UHF?MYIv 
zyyt$L>^%8SGUy7n6OFNSs}?zTEmam|rzjP7Qkxy4De>xElZUr6#UT3g93N z#lS5@k&WLZ(uRer;o2dYMvjvdcF5N4#r5k4VT2`N4XICk&qO|wc`J`TFfKg^%T~+l z)mQ5H_+5sjuiS8@3U1(&iW+u_!dBJa+pQ6kie9GGvFA4De)fW6s1%vqUo!sdc_Fks zKuYc~u#tin*}~zNUJ<;`-%FWK*2WCMTc1RhFdS718Ok~T`d>}xCQHYVi)=?RSj)-{ zN&~y_90`bRKV>%Wqp~&QHlkTJ*9mr%$2K4@ud{hJ^TIwq1Wnkp=~3c`nLYy!Rt7?A zEywb_G=QVvlP6-&ysCMgB2}fiomaVr6${KP?7n7L;@46lC##1IS55xTb!r-{9a831 ztzyI@i68B#R~ds+m>OQW7zh?4c~|d-v#`Y9PbMV343$11aJz@^8$EDOTEA?Qx; z*ZfKQ;*)wqgpo|d0hl)a-mWhhkH$}uulH1xR%FwMW>yuQEor^g&+tWS=f`6Ov`Xh^vdRClt9W9zc_}?NNRa ztNqtGl0{~(ojCMl;QB7n+M*X+STX{^t_5oY0logwFbqD8pGU`EJxacdR55W%_h>B* z^tWZ;CXe9O>Me+iPJ_@wr_W66x-C2Cp3G)I2nh<)V6KMjcG|d()KKFA;FIam{)#bxx)qZBVrfg;n7Uh_ofqyT2u4|qmJ)1Qm0C{E9 zD>APRK2f~k+PzMyFyPR@yRI&Lz;9351%1KCG@g)|y+(=4UQ*xbw~A;$s$s6CYl0t@ zd}%+M@BF^7^U=um7OGP>p*CH_CB&iusdCmX{~ao!ewtiG_R(eoy>naGOk=s3xL+99 z+7xYtV4c@Q+lAIEOa_s;2}Fcrsb$oQDm;hS{n`leQUcQ3_G}{d(;qyI++#j8=g$@f zy~EoU2Dz}YTj(xwM_wGr)j04mCtf5jXH4RW_QP_HqwbuZnFY)gjBJ%enGjU^@~XuI zZ^05hk*DjoIQWDFcjPv&bdd_TeSPwH{VA8)oW*0H`-JLII#g`~R?1dH)@hTdINW<9FZT))1ppA2d`~X{r7VL~sK8)@>F4`Rop}PaIWw-(Qz7$^a zw^yd1%}1K2kznj1`vdY$<9+eBcwDS>AeM6tKgmP&*6wKG0$mo=L;?ot*~qLg8SpNV zUg4E{(Y|0lXg#517W&YYQ***{P}ylmG9e6nWVAivoW255zE?2|zWY7~)PlajriucrDw8vDE@uAOU#Tb`#8EJ{${iY%rWgj0S%w zdCTdp-oprPvviFM;QXijpUgn=5`lxm0k1 z3cvmvryg^9inSQ_KnCOGQ*Uo5m|T3tOZo8;xpWbrtls zhQD%4-z1mj?|9+W%8alprT4gx!<>{(nD0~FteODr#T^2l)ohl$xI*bjllv{?XO06y z8YmJ$9aVg{J#+ILMr&*AQFSp_6sGjFymgPtJLb*JRX>%(2ZEh*A6LBV+C0e&;FN%wTbV8PnW)<&B0 zb^~(#rk;z&%!}R^?SNqRjQRth+*eA+S*N}t%B2Ab1|y24m0u+#!Ahko;f6E&E!$D5 z;Q6dkf`Tybu7ZHfN3wg#rEvt5FFUuaqB);W?!oXzGLOn*-tR?@+nHK6W@2yGs+TKG zwr;LBaWlPOZfSf$-{tBmN_f9rhAJ4ie3V?dmv`Ujc})GK0dMl=Cy5Z@Sr6{x!X$~d zZhB-dZE$2DU8CLMSSKwA2zvZJf-I2Zsq-NmW0d(kw?vH#W~X4Pi4Y;PxhEN$XWDaE z?zfnj`pcPS)Rg!-1@~2j2Yh0w(*5N4PPAFlq`_6^x<=m(<_39Sk=;EEAM?o!&CGn1YCoIDPADA6^07PgKVrQYO)T;pINc4Pu&ZZd7 zpQ{0`E$%1ws^RfHEA(-sy#BfDTw!_T*8JJ%1t5?fu5`Cy%}eggutMGb-E(Tb=%jI7S@P^V^X 
zPgUP)x9xW#KxNxi@cW-j2C<`#Oy>#QaH+$`e3X6BON3Gcq@Z*p^60vES(fMv&s6XJ5ozyEPJ>Fi4_vCjI zHqgu!mTA-;0^t+93jyWp^M`?{t=8l!aC8(&Nwa6Wn-B?B@%AI=gba zMFBhJt*l{5E||cD{d8XUM?Bg1s{r4j>t1CW*_J9tHj!Vr=#Ctf;gD}ghyf-p&nSJ_ zs9pRYcT=mtGvp}O9tX8FEy=uGf4>|Krh33DWUe2XE&zNak>$pjB{sp^la5p}c9{su z5U@UsD|Eunmg|oz5D*)do-YcWOx={={ETck=PaXeNr!sDY%AW&(!8D9#ww}_3h$IN z)8#`J&8XlTE3)X#Mz2y4?}XLN3!T~yejwK)=;no9hm$WHm1NlAcKl-fJL4;!D%8uz z$m{bt_401lnvcJM==DrrbUmwOEK3b}T>7FhGpDmcMiBR30(R?vR5-~L2APJBImeUG z7i&>n4(pbakbQ1_5uOQnvYIykXm(g~4nli%HOATERFA_K=QG(Wyu(r2X-^MGiU7L(b$zPLe#1U{fD-5(6q#m4_RSX8M}GUk{w)V64CAylc+J^NuF+I zxz*Le>(5C{KiQhhGmiEgo{dM&U%jqfml0m$X`kv zZ@k?$*OcR$W$%@4(ajWf{eZBv%zRF6wl#?097rcC=0=EFnA7hJeoe;9n3Cz16Wgo! zsXVV$$TheRwyZqJ>R+)3lF5i5zMY`=8>vPS=_NUfRb*I`Rp{hK(17>h1H(C!UDVxa z(?KQiqnwaDvH~d7?ODq{09LE!ov=Dqwm+f}b^pBS<6vB4s5U=Io4?)yT!7T>i3obL zg8Uf;|BerRVj8h8{Bn?VX_D|A9=jlE{5=(Fg&~aW&>_&7?f|7D~%K! zS6-d7l**P5R_Oe*B@65mH*+Bf*T%pnd4Atq=55DB;P?NbK|N+Gp}#3iM$RIIb~`@U5#&L zylqZSQnTItUVS541nuIGoXr;%%$o?6)p-4K+a)_Vl(+= zlBM^OQOI{>&GeX@_yK33y!m)0C13Q@hm>C&oR-RR{;GammC>yNn+wijYBudMZ4%G+Ny1p32RaIs_obqOAXImR&Pn=MRs(E5L8 zH2(8C2;}?U2nx@h<%yQ;TLBwu$2|>Hwjb?C{2A!--p29Y8js2zbSKu?O4t4=Qti#w zrTyW$tH5F0Lfz)b2$dxZNOg~zP;BMGnksqE&D7}w8UTUh*jbP&@t^rFv9Q9QN?2Fm z-uVBF3Gg_o4%NanwWzU+ae3*%10N0GlR;dXz1yqZ0Zn)R3?(-tH69Fl)#sF# z%4hwwnYT{3-3V7bCE#%`n^IWzol>Wnc0zZ8zN&harAAQ3_Rvj$W(n1=o=mc&K|<|y zS9Qu9yDxeWl;tV6KCX5`h>#8qerp~g2j`z<2QCPH0EEeVA(hCp@<_ZqY$eYFNnUA7 zzbjQ{Ye*g`#fWWBPl^5gf=fn!aqp>-3%{@{aa+Kelp2Q8q2N zT}LXvG4!5PbHa-tr|R>EuK$n4d_Sx%8lkJPwW`H|Ym^mVG}+}z*y_cw-g`mlf2_ix z(58;SoX9giVe|q5WMw)(r)RLN~3A6?QktgN$b;*r9As-8aoV?Eb&snyl5~CxvDg5mn>L}SuJdu_f zO*zHsk}fLa3L5SqbEp1?^!kRvCJRvp(98FX5#yCPIuu!JgZ0%>T}YvZqhg2(`6^XD zL=ddi^rv+CUj{s^o>6O;(0Gig9pYSv>>Kz0Apv%8nbZG~(nf%GEwbxo3fyJ67qA`v zS;>oH`viK!S!jy%+-2*1Nt{ZOyITU4VlMC1p=&6T{2!-Xt@d~Vh?jaI(m+EO3s6fL zD=h|uRM}TX3@35Ka+GD|sRySUNOJ_j0so)qHejoCZ1anEU?j*puijSpL34wc7NW@- z7OOJG&;=sQ#;-Xr?jn?T!`2%*PXvgZFCLObFX@Xw&1Gum6%Sn|K>jHcV6!FDT>Mx} 
zCWLw`x(~ldy!elsE?HjTl^Q;$fL`@r{7vhzhm6);`1X#k=;p=}M_$JSgs$?B{&X6DDGnzHRek=?gVQeMiPv#FFJ6 zY5HAhaV`iK?LQvDp9S!@?^mVa_M7SYu|D$y_5e9QTb8kC{~lBt%gA#b{mjVolF}J8 z9~?;W>F(L>bx=!>$pUEamtW=~dNp2DGk}CM$cZKMZJtXVpA)HpgZXKz$H(G6Op zg_@%McekfqeugM&is?)X0`zy$&BZKe^X_1og8}~O#9fbF4c%!y`;F1@rl_dzqiy7= zjt^(6ndkgxGQZ3dSsbZahBjhvSLLSdXWz`u>zNPWh`5Vn@~9De8o_MP-71=q)Wx}b zBhJ;(J@x!}7~;6WeP=`i6uXcD;{2b_!bksIw#|>L;+@Y;AU(l( za}p77Z64X}2|ixwGTtPon?neU2Hz7Jm~3Kji!@jXlqRfpoNt7Ig??ib-$4JDu#10NY#fU5I}uFjO~F2shYX=UP^87M~W>k zRL-|gn7a_DH`s63S~ogj&1G?UBcxK~l z@vx)piMXQ_8pp`%M0=^FMX)SXii*#eX|4^9q7t;E%9s=mr}jA_K@C0+8BT%T0ahTD z`>JQ&gH_;w@8?CC$peqf-ZS!4awyIvz(Pp8J(NJ)jH@3|kja{s@mheSav6jvwOpy% zFT1{TLQKk2XCl|&gJ#koz0^?kS7!^w(;7nFRj{Y^G!{x9g3+jX(z)$#_3yNMKSM9+ z@V{lniYe%owG-Gx+RNf8Tf8Ur?)zY{9Cpf%wP}PtR59f`uj2IyT_wPaea2-k=>|kg z`^JR|Q_jKpR-JV}qb8?f=yAv;Pq_{YbPH$A&x0e=OrIbv3&{!M}-2|Ei+V#W(t z#~L#fq6ibOVfOJuxi`~k`eneFJa?1leQSYjz1n#dDCo8QyRYaCCI#zI;5P0Vq=mihpeQ3 zY5G}FcUyQ$p734qPdXew(G*?I@PE$Pe+EtwKEyl!?pC|W5nku5iQlujx|K3CDXghe zr5q({KsY(}C+0Q+>BgX~cX!+v{a7P6>{Gv*I`S}Evt;B_yQejun+yP#KFR$%9KFN@ z7;iX4!mh{FuX4nn+7(bO48_RRe_6J4sumz|!G7$&2e32eoS?sxTAefzNSq^LGI=U9 zulMqWsy3M+8wmgf5N4e){#UWQ9MI;y)ASvjcH0Bujm|RIUbR#gDIefy%DIR-q1&#k z^CQ{pjSgk4uKh5s2YS+~fLv&icp9iuO!DtZrkWZnc=MF&&r+0+O0`pO?#%XddR&w# zP|gJo>349uccysc8weMY_Kss4Te_08A1sBDw|zy@0lOD8^B+2Cg~CWw8Dp($DmM8E zCmNu^;~U06V^N>>bo1^_)czdBs=ag1MAt5UzX(iYd)3e)NT-0;RKW)B6ewkz#Al`w zVM*RuF(bzp>H;E)u)(w&67^jphHx0onXR*?t9)c_^b zHXzGb0iNJRB{x6($b!P4J-7$D2LfKsYCyUj!RMMp>{&F0Rl*BRDNt_>XXml z0pxnZ!U26+xBGzlbhpx1intg@{);h*R9?A9$Ih1Oe~|xX7@t56NrG~u?8vqy^H|N! 
zM%jT9O|@6`;i3@=+RxK$1w^HM%W{v9>W*dwCE(K;JsW-A9fYENAk|eCZ)cbdoV3RT z9ff_zp0lv>Y=YWLNxN|QREa08iNNqo^KoAb-RH2c9JoGbG2Q%$WL_H!>f0NhCL*p5 zE<=XH?HSz7%7Z!+a5}338S2eK{TX+6Z=0bG6rHd_fd#B)FE^&>^R}I`vbbO}(sOki z{Vq@E`Mo4roR*Uc-`)_0SG3mls&N1Lv<3pHXT0^7#1q4I5f?(10WTiD;b=3CeL@hD z5|3Y@!xQj8qWrtlvf=G+swbyiRjpIzn~IpE-tHsGv=2JYb&p!F+N8E;xH`q;?UwAg zW^OwxiFUwP1u0dfy@g+5p2tv(5xgT;s(ltme7`yZri{42Ap?*+9Dk{0hk+R(3PaA5 zzsuZSa*isAR~hFg-dS{tkR7j0tgt+twq3YxYB4wb=r$gK*Kh z>(RW2p=Ye1B}X|j8^tU;)~HhkN$>>zkRehVW`Na7kla7P7CW#Xs(% zUF3e=;qMM?x@gG;B>Yv(1HELCsvfLw_M>6GaRs!Yb^CoRk+dD8)nKNO%BuDJH$#!i z$IxQn&tEN_8xr~SA02ZMOwAArHBv=7Yy&07afKww-brgwQL5)Z0}XSec+)fE@;LT` zx@`6w^ef~m`Y^7vmHX&6YL-VbA~nyl^pK?DU)ofNi|I)#XxUMXX|R1Ru+tLv3=p#u zUKl+laTe^DulVHasmD293-ifejH>L}tPa9RlIq>tI`5tKfj&KA?PEihp)OXnec=#5 zA3t0A;tW~pF@en}2wVV#DFi1W#M@aC@*Z<6)#s1R<{*_HB`--zQD0aBWemr?I;rz- z;YWZbf0|#=AlWtLXMFaLPF2I;^M2SLNiKc;u=T37b>2>Nq}xIzQB7X29>e3s zD29-a`bYq+f0{kx98fOXtu6EQQ)i5*QeZ-|ySnp}F&5Lg6>SK#I! zCCB$YlWKd($jQ~&R zSrNmdvSxZi9vDzzNa|*Um(kj~Jic_1H&WMPznE^) zEahknPzK^fdz|6ndF?9d)!p{AMrH7gv>(}B6T(cFfRr5CPuus$5Nee>i(<6a?F~6?)Yune5^X-{YV=5P zSK5_=k(Ce2pPyf(R?fvuyga~MWIDv9n?eh1drIY=hr|QtoBhLx)6l467Gb9_>PtDs zE%QLUz7c67T7~vL)8oc0)0tQ?1)6*Zbk8&ZNbg6D2BIHlUFR{Q2y_>{sqr-tf&|yX z+#G5^uTEf;jw`#cb7xbMAv~R*R`V)_J9*1Tm6>a|HmaP~ePp7ugo&p*&znus z3d-NeFXYoUQh}E>$OgO>Nj=2XL-)}VWEt!v`SM9C-hU(uPskbU-7&t8BCDD~VB?90 zkKms8<(l}~M;O^T4Y~|vwI2k52tael%fq;_Ue~8Z0cAq>a!55HGHK5pR*we^8KKCU znYKGp0Yi9WD%KE5%?s@i@G+geUT^qj-9!~-=BmP==Z(0GsK-|Ta`fS&Y4x(opBB9qUSy@m z^v9*9<-yK&YiD88mXP5u4&5}yYWw_bVGpf9>7s9~5 zr}h`-2*u6jE@|EVoZo}>$<9Y6%Db9dxXJg@EqWvEBW&w)aijIZ1HF;*v+E}5W_AeF za*7taBBW}d+QVUSUHTx5CIoXxf%7xXZr=as`QS{X8333-0HwpR+yrJ2IY51Cx~YVk zt~LUxSiG@ilaQC;d7;XGuB7D<*_wKo$KETjNWkj@Nm-6{wE2n(!vZ-ig|h6b%5vA(x(G;gBd8!1|;qDN;nwHZTMEjzz&4!F*(3%GkdVd zh!e6NV!!T+YU{~64Fb*Zg@JkQA|kHT%$2rhm36Arm|FXeM|3|w!2auI78uA*wlk4B zaev)q7xeopy23p=t7ueEqp~nIiv-S|p@-n*3C#-@(rkEiL ztKn}ef6X4j5u(O8u;q5cPueBGPtX{?jt@b$ar&o8Fr=E~@Gk!$C?(Wds0`ySxgTKU 
zWTRPml$skLPpMzaeMb~w%47LcK{DR4xkc zyx!^E{CoiZ0uXTdGuH~6nZTwAIBSM1Usk#8L~Dxr^}cz{y$&}nU@*a?ux;!V_5?Wk z12{hVpCMCs+rUMagF&(Yn-Jez3OTp?QPB7LxgVzh!Pp{nC@yUraaMKfuf^bglXF#e zs>tMAqU-$Jj+`x(VdYf4mIjE&yO&~!-AOIww({|g`{p}cwC^UT62)<=^qBmxY!{US$;6``j zI_B~srhi^|Lz$nx)17jb%2}vJ>C|Ds0mwUP4}U#zNWDp!Hqt)EbxSBSiphu1Mcp6jsO! zWbp_WNY7Y|j93I8tQQCpe3g|)=Bw|ov!u@cIw^XuJ#(@o$;3dakEPCn z5|8Qc&pq0d7Hh8ohvJ@d86U>w+QPyyBDzUyYJXo(4oiA^q3U9lT63SKX^#mbN%HS3 zVWHQW-)3?l(OH|F_k3E<0$btn)VTA=keL5;r$#q)5!ix5r{kL3i(Bf`@Yk5;bib)!~`{*6G##OP=|dG1iGRVs+cWV*y%W&U?-8EvS`HbnlOfeImb`$=8}ee=t;=kD|aP0v4zx<|8LzbDc_a+gU3 zI05#Ar{}9uhDQM9-|6W+0JAw07ykxsWHD}<)PvWsuKR2gER+l+5L5u3`Q?wkE*e!1 zNPtk5hb(k1BugK?UN}(^`6JHuWmdXB{r>q(GjlytZG`->a{B!cr5V+$$6Yya|DRyU zuuMCG>pDDm<7Z1AM!KW_2wfENgx%Orr;?8^6L2*+b$QtR%cXsw&?a z(JKsm@8f>q^25jDGzlHjox haa;WY3S@TKMA*y@~M)h9WY7*9S5NN@dn=bJE8<< z>vG4DrI(0d)F8(;`02z+D?eWMD0vt1XUjYr=C%R5Iwt+2uttjjZ7wqA?2Y4sj$UKD ztXbSFx#A$sdLJv-)x%lQ*THoL|6cu7T7~3|L{TG06&vXy58Dyg9YLm!o%Q10Jhm`s;2+*LRH- zz1+h-nI}CPDVr@bI-(VRVvkDqiy`-;FFV=6y3Jr&i7SXyd>Vly3<`Dfygxd(Y}udg zS7bNpy)iexacnXC{TRp6tgid8^L=EP*t!=g@wf8J^57FZuf_XHr3DImq+ZZ;uL?wr zwPDg#YAUTJjvDthI!~)#vmJDp;1_5EvIYW=70`_4cYj#e@An(u!&mef;K-H~pUyb; zsf@hC@bN^Q$`eiEH+BYYxaKd*ab_#&@67u+}AjVvx7fJP^@)T0MpS^5q zG{bTpC|hV&PIW+rclX!y@LN<0DnAQRHe&Ps9yqH)f7vZn`ZjWu$?&}>x32*Sdx|Lc zU#Iv|KP~@B*0>-$OP>hBDuD{;!d@!UCdB-EhsGyf$7=eC+|riS-Fi!=ppOb=liYKN zZg5-d4G_o`bzWNv6FqdsG{=)sAUbnnY)c!#Hp4z<0-lq>fQL)-gkLI}I55u7L|vGV z(h~h8kawU@oSr_yrx9iIx2@xOTGfigahw^>>WsUP0+2V8P-8j4U^hXb*Pr#jq$FKYu87I*`=oYE z>WVKrQ2rz61=-{b3*?I)iKXAUp__oSWMB188@xz-y56c^Ia*v?Jt!tVWcqStpDr7xGp^oYyuoE-Yrc_b}))6Qyf2znVG0V##;ux$n_{wo)#(o0(|CC{)2~6`Goz*?7$AR-4Mwb6Aw#>N|QHhFIYM~l^j3|6gqasQ>svO!g7(S6S8J3-2G4u z&`Ni*W!w(FJ``qUy!+;7di{XP5tuKuj~k-pCgjan@y(f4MIep!j^l!t6GooJfU9VY z-<}RIUyIc`OYCKojdv`h{MvU)a>azWu@6xcsiHVdiB^KOVdY&QnwV1BB?3SFZOifY z3-9UKOsQ;mP(jhmUqftnSaV~KkdyW8%y<*h5=^gDCE(2-nDp5xH9&RbZ&uODo9iY8 
zV2E|4{^KF~8`trR?*Gge5mMFNx_sG9$-u=GHQ@+tObkg4Jpk&GQ{RcaA3LXqcqsMgCJ|CvCPptMgU&8bMqvB!Sy2+Fq?jxUWqqKm_C*q3UlDA%nRHZNk zfz+~cNd`rbvJ>pP=-2nuSSsn2%Gb*;y#||kcHXdqWJUTWb$ve?ODVfoq)!xjVhdC* zVkcb81q|#gj0nN{l*YT*QfE~}gj-kQnL?Q)#gFYxxBgnng{0^e7Wwzja6g4X55cn5 zZdnPJi(xMQ{oy=I{m&5Pfs?KFm<;JUxw_7qfJ){8Kd}n-L~K=MT5?lzi3_oc@68;c zlTu-^UexB@x@A=m=*mn*c~grRbmR*!;YxWdHe08^M1y(%Et$C@g_(GRUY*H~189du zpxf(Tgw7jDlvu6 z>ee&j@a;4kckw?eLHTcW&3TKN5bN-j!F!lg+qX8K8YiT9M4!6ZjnseVRTE^54<1)E zAcR{2H`Wv)T143_{KIU`>~VSwV_|cR*|^(g{MU6Q7RsJQe*eHFq{veT+zML;llCg} zryVa7Vp~3Y$usJVI}XrJ8Qvt=V2nowX)^Xqqp9LP=`6fuIcQ6;rLtmG|bh0wDW~r zGqZ8pP`%2z%v*aElP|g<&|@F&Q(gvYC_N^%Rxv#zi+h5VDIB=t!UDUQzVw^RpS)?P zkbtYNw`%w`y6c6%91s9*>O@{-!Qr}*TOmunnddP&isV@`gsL#3(?o*(R(NrHILi;{ zZ~|Sqtjvd5J71#gDOes`g3h6S$BWXFg3yZ&kiTm62|(vF*kKQFFPFM;7nWTbzz4jh za^QZ4zl!X?W^k=Ha(fwUw3g14i{9=1Chup`bofFQI`irw7|a?XXeBQ{Hs_B(iQaJ@ zEco?Zq-UnC3ag8KjGQ?G+RD!&j<;Oq7ccC34{EuX`*D;2d7L^FQGX!w5(` zL;#8AA$80Rxh?n_u4VyfGx1&=84jr9Y#@1b-s)HCa;aPY@gTzdT`>}NTv!IJAdS{o z$xkxVI2UakNd{ZNun>TVD_yD`f7qK0Yd+wiTpSh?(JIWceuRg|ZUS z#x3*8BV<iA1Yjd-+JQFC8q!y#{ErRcPu5o&JlU@vC8$;o1FBZKH8zmPU@=f zM9htiMu`Zs-D%qf^2%g8XH=DLlH|*&@2R@Jz}=4Z=Cg<5bkP@6OZhX&-A?@l(DI6B zg}ZVPAmZFFt6l}M>~PFE0}l%Qb?PhD~|K`F56dw zbnB41hem5?M$6p!+~}19VA6YspA&lOlZSD~zXzteZ;u+S%_~jt!*geW7Io`Dmv{9r zKY%Wa9z)+QeE2)SBdv;3^;l>Toqx6E9;Q&`KJC4Z2O_rZ302pgQjtvgy*cwA7S ztA?#RDC97}I!u`aahZtqLs^MPWJ6JJQI!yx;C-fH)s%i zHwoZcGBgHGS#6^j3Dah3VVFZozW29sjFEY$1t2#T2IS3ZxkoLkg?9?_v(r9OUaaGL z9|wEXbrRiB*5%Gn&ly9SaKjH4HseRsAzh8>2$}AMs zZ{-3eepi?{s=@?y+1s_uDcX=)6)WKKSsKsmD8zQ#(NY81bKE{dtl)QAsB8l+LIOM& zsrZ1azj7f@j2t?Kol22!#*iOC3#D^#&$z~3-4f0wft7<+^VltLqEpdPpB?EhGhK0&3 z`?Iy7n(bg~emWK7ywB&`UKAD@;|&_}F-ts(E6lqSbR!^LQ13Q^w0yD38Mtu=^95gM zfO5_#oIIyw>cwEIkR~4~rBswJA0~Be8jtcqJ{fa@htF-XFwn5g>7@Eg!CgR$G>EZJ zL|J9FHNdMBr`bzE`!sW#{Wbwd_E$ejw^0nHSUm%7pmx9H7X1aoOvd&DTuh()&KLJJ zH&_W}0>b<9-tNNwQ(=bTR{rc7H=(pp}xGm8$}+?fmVLS}$;vDm_3Y)>@Hv{#h%3m{WZ&#oIHCEU;8 z3G>_C4Zin-f%|WtR1sdl?d}$J^ki%=pyAj0Tb{Z)#=}b`aYsWeR#YfTMcRj8l%?;M 
z{SVzgK6FdY?f=CU>4r`@pbOR)fjeo7q($cL;-<^?%48jFc$6`0 z!}t)g>P}my5_hR}#t@~2RR);20tRjs_tpBX>Ib{)B&B8LzS`~#TJSg^3iR9_^9E?3 zQ6z;T^6<#>XWc7~)rFHJ|Cxf94?P*{{U1gn@Wy$I1dY?6u6+&V@I@}k#0)ZwT#nYk zmZD4NN*mTwtFKD|fB8V@z`tD&?LyfA+iWx5d)Cw|?VCQ(bTMi@?jMXWQZBZhnLtly z2*a@R6FQaJ;-)~Ggth0~k0&i7yGJ5{&^X?=`Ps7bv`Sh&&!PE|lW_`m@YtMqz4oHK zx!5-fMi~F=e zltmYf@NgA2Q<*C!7l7nKp_~k zSpG_X`6JG200no0K_UTXHLmLNIrDIBHkdoa0U_jNJc=Jwyb2ULdFmEA!k>RmX2O(Z zFEQS&%WxDTUO2nyIU!I72GVW?WF_19iwC2I8;~`;hN35(cDRoMC05-b;6UQmC-$XS za4FqX87y^^yk1ylX;jm*;#@775k2^1UG_k~@kx+uM4?K0bj73K&WMfm+(wLKp_4WA zc96v^SvAC3cjOq5UCr{R%}{hr`)wbIp0u)vyXrpc8une(UGdD_9ah2NVavq6GqWv) zA-C^G0P)y`BOH9|;wYp`;$1=o8;pz%&EBG7flzdE-4FFp+c$7oH$-lG;G07+4y3zE z+DUZd^ki}M`l0n)a#ZSDJt+68xql+(pBdbJsqzSi%c|q7%y6ivJ4jW2vgsB zG)YaHKYP(r9Wj~`1#Mn2Kn0s-QOQXrQJjYwkopuXH!W%8%FN&m4_6|!drSBxOT4ai z^^@prR&Z|WJkAbt#`fR&aKrtaLf44T=(i(HuPYA1oGSOn{wk^jyeoqIO|T}xt-}Ka zqMCpoGvl*Fd6F_`D~DB-h{f_x&Ee8gqX#?EA)`AZhCCX2y0cp`9Ol?~zBGQWZ`v2) zz3K{#h0UCLa-q672}ujtsx)*0VlkaxBTK@V&nWhL>Nm-O$hF2l%aF+Wte^x!6 z_(b5gDt{N7Tv_=2+3&k68=2?z1rTe6-34HFg@fn7}wUyLpVa#4WSe%lRa5yMQodDqd@>HQ8G} zBjR4%0<*8~kmVfS=Egl_dw#ls7d{KL4=*$AbGWTA*%rgmh9W`Y3{j;mfZ|@(;;G`|fq?@XVsdm!2GC$w@s29Gfa+JgzL4P9TsY z*Cb=ywkTgQ$M=)s0_EVDk9oNs@m+7Ue8fmMAOG?q#agX+ANIa--$jO-hi-K0eZoE& zbzQMX3?4Jo^~Yr8;@{8XiKWSysKPd`hGp>@} zj(LUgm`l9C^=`0U-gB#FU|JV9R8IuIkCVK99x(3pWe0daJ!01LBSn3L7uwATyxi#c zo&CRS6L;PY#5jF2KU|JA7~f95K8Ua1D{jrsMFQ(2W9)Gg5($UW`lS;4ovh%(a=~kmWJ?`-^@@ zf`<8#W1gAiDQPH_9ntxo!xTCKDo4gj`9`B&E~>Ye`G`~zmp-A;)*D*smGAH9j~Vu( z{O10-k=#WxZ)qrcbY%fS$~U3Mc10IYTt@l(q*?u?AJ3LWe+EdFVRv>A>3Soo=D3{j}2%9G5RRfArTbnGs^|Q63 zyY-r?*P@sBpPG;qkH3ruox{c!%gxk+Hoy`Pz5pf(}C%Q^$$5*~J=@+*5? z`1Ur&m;DWjkh})AQ?aYR@5P#!7z}H(tn7#L(V$kzX}|E zt1a{J;wZItpJKYuj=%#!m_uemucUI;^ZMV6R-H*9Ik1eOIAXi4g_=Hhg#e_G12!Ql zq}Euo6cXK$_s^&ASpGy_Kb{+yxSD7F-OkYOu5lCX9u07CAY6NH9t0? 
zJzSR^&U9)l;B$CQYxT2??FINNbzp{F+T(uBa>2Me1Y}V#mu?bd=cV{4zrxxotmV** za@`57D-Lr+HT5*{^{7Z(qq~uJ;;WzOH_D|Z@1N;m+zp8|0pw-w@0jA+LG3(BAnteg zVtxXNL~of63&S(^`61s~KW__WD2afHGt09xDZuIWa|#AyZ%<}7Q%|MU_ajnHdQq?X zdBcc0;Q|lt2DKjvH}0VFS2j#*Mi<2$E%c}?>~*}ffm^1i&C64_R?jui#O)Kxri?IF ztx-5EyV_h}6fD7QI+X8kP|fVPD|@*Q_p0Q$)R6`4yyOqgD$LRFhNs)Lh~~>_?N{SZ z{wkrm=-U#U&vdOAoO9b>o4HJE)DZh_zP3zlm@2palFUc0pik`+W7-&vMhY-+o1{x)IRraZZ!>?)ZFZ1 z(wY2u7hz+`U$r|dj@@w|N}*+6?6^1)Bu&{hwra_C2}I*uht;PI{GAJY0?R&x@xS}T zSJ#V!6J3&zP?^9UVkRdJ_c%6Zl zUodVYx|tl0Ir~_pB_>iOZx{->6k*F~yn~QskzCMy z=k#2`k)AVEU-hk3_q;ZPk|hf`4r4+n9`sOu7@X?}yBuAI$F^*l*qian#FV%4ebnT$ z^k~|zXBgYsUP7oIw(e6n9tNb+dmQVlW zP5FK~Kspw;jaKn_DznKx8^tg*utr-`3)b$&a2adPchk*YBG`kM6!-G$y zVQnX})=QO?{x1GGmp1-RyTz$b=)@1Zjp)anS;7V%U+|zmiHn{4xxJ`dk~qY0GdR}o zE|x3(TlsV^b#VQMw7N(|!HsADD6BagMWp>8+iz*6cV2z*$L%@taH3_usMx0GrLi+b zZBUcpT{{OY#SC4eSTt{#r*wSfl8TLfB=kHKZL_%0z4Y0sjKD5xZ|&Q*6sa`f<@Ip= z@jwGJ%ng(!Z@v3Q6M?d}xW#+i#z}6#Hb3nB>y7570O=*6_dt$Bkdm*3>ddLEx!1sC zHW2l{(&H7}KXEu`2mP@Gd3B)u!RkPQ>|vNke@2kul$}4Y5xr8-cX@Kj{jh3*mDfzG z*bVzNhiFv1G{E(o0eo^1sK-#O?;}HV#bQ88#WTUGjQfZ3n=kYVQjktN@2hSA%0OY| zH}`vI4SL^ou~Kpl0b^y4VW;Bl7Y^h*gp&fuXFWz}YFz*Z!)X>OsCb=s_Wj5t;wK7p z)3<$*V%}(P*_vUsLnDv!F&eM+@5k?iGzFL<*8<`FaCXGzLoaKa1D~`<_w%A!Y+L>F zHdErQB#CMR-MsCsV7gtQ*kBMY2ecdo zZ+CywBiLL@hrGp5D!pCi40U#5G3nO7_x!-nauf9d#(dH`AlJh88S`jWP_C3#GGe<9 zB{rlmowr+Pl$fBLKQJ=26JAT(t6g+$lO;YTOW@A!u}U3AMNSs`luXA>H)+iHrSx41 z-lON*ww`CPZj(73E3_1ZrNg-yQBoGCCmV2S$^04adyk{0Vq8) z2?CTPV)ojZu1Eb2nXpguQ1S^|LxG~r!u2>YPE!h zXJ>D=Q=sVxB-WnwlXeA2w`#;1F7?32V(C!XxZR40ZmR~` z!u6V~ZfPsan<+A1dWUQtxsWq{>$$Q8IHOkWS^i{@_7}EKCV*@CHkmrL{Fby1WF8{SD*8Un<>6TWJ*|d6g=m{(7J9J+6`UY8#4KX zN`Q{{=g*=N?R#AD7{h;tpB`atD(IvB_G-bq$VAe6-@>tscJt&Boa5({#vU0GIfwc1 zEt6MVNv*EImJxgo|tc|DGaHd}?+HkTtSEmBrdNYrGx1rCGspOAp(q{Kac%SyXQ=du=i- z(dWj4%6%68ews8=MuWxgNoxCVEkevor7SV1mhE);=vnv*Xi5zxHU|GXb#))xXnak$i!p z)9^oY7#?taWnlf!A;vW(CfK+-y$H8Q#@X9)^dRqdmib$hgG{3NJK?7i++8^DM7BQM zZ$)U|v4=+I6U}+$DR}ej8uVfbt6wT)PZfxTx)akw^UY6cmOxv?R>~im!jeyGt}4g` 
z{+qA-pZf#Rw${!~7*=!!t;^1>Vp2`ikeqt*t2^=a(R)9Q70&G_vE5}*VB{U>p zK9HP&Wa7XYiM{UJQz#GQkGx`}V+gb%q@MQkVw?Vi+D4m=Xg7Z+cj^}1{&q`bfjux9 zJ*cejzAkM2(d291i9Ut)bYG+7-_969#&LOTJ=IlT`6UtOHrfgk^kJ2UBGv{wV6_xTwkvr zdG${?xEQ|O;v4juNFzc0cVWFo_bSWv=pAlVoAQLTmZO2{HofLG8_^DbjacdPcCX&h zLjkk>0^P-p%Wea=K5u-OrHICYerSAb^oq}SbHGv*elQqyhMvFApS%4YOB-wIb=p)F zDjsTmAYTgDy8qwHZ?~Xp|0l4B{+*b!-#w3xY8$MHRTxiwSSZsJk}yy3aXGfrVx@MN zIK2CoScKu^%+n4S*8YD=o~;+<7JXI!X7zQIphL|QO^4K9NvP4O zH5vm?#Y>_~PtVHO1W3=B2iSjb^dUNpUL+^J8lF7wDxb0@BfzDbdw)X0?lzXif8QM}(dc zOPqoIPmaQCdBs&YT+{+msf;|!npOh9afre}Z%y6kpP5!X&+&At%{9h(=O!gwdpIm=X!)UgH|YhBbENas^#QTFDGh|@WC6tCZ@7U zOP2R|v`kVnr0CdT@s0ktLr$xLdDMdH3bK#kK-gnL>sH-ko7MJ~%;s7~C zjSJ}1WzOO}JZ&dOV|Q!lh+J+8M-!;g`YqD`H2>lGTY4T%&$t3DV@NBaSFg|L*TUJK zSrYq<_WtkNZl)A7Y7`UDGS+4koS!{APlI~K>6F9A9>{I(yUBF42Bbk##Gf0+4^_p( zikkZR4*_%4KB_Q=D`my1NKW;u5gSdmm4#F5u0en~EO{w059X^My&9)`egnO27!f@r zYeYr_<(8A3k!Dys^>n9&{9}CNr^)xKzn1Cr*xm+iR{M*3C`TB zQxfrGKm`bh821;_gK}?5Y2E~HBL)GHfk9j?ol(3)?3{Pwxrd?`TmuWonB%49z`}!* zLJ-{WcHE6`(>v)d!}W?9qwjH_i33B(Y+W@$NPTw18#TPoj2+ftX;pLo+)m0WIc^C&Ok73*@s7kU;F#{LPOd;X_y&zUTA9u zLHTxGOt~qNJq_or2W4;sUSD_payp2i3AQ?UJG?-(Gex1N0bfrc_X>x;Z`!H;JYHjA zY8X{DL-T1H;$Td}oIM-I*|U$cWXBKt_Y`7F`XY<53w4T1va+518eW1<~Zkc z42uy;nkTVB^A^}tvGtbJ;)rcyEojZAhVS1`qzYLL6FAaGL9TJz_- zz!-oeM#j=}94p4cpWfC48Q(sr-l!`Ny*$I{N(fSB(OUc#EQ`I}JbSPm8jc;5X-lrL z%i?vXU3!auvS3E&!tE0vq!Idf?SunOHljZ$Pvq6`;we@;LJgWLmg=Hj)aQUqy9S8Z zrYt@RT%4CaKB@G7o+C*I+HBpd zJbR*MOx~sNurHncraL0Ii?_BZ@SSWLBFf8mEL_%dw+tpbrK3|fHVe)_AG&&u2ap|L zQxStaA7=J>X}i(MgK!wTK8ilEOW*-%s{a8CJryUFcDjtPYBzGLV#3sl<$?XcX#pSr z;QVLNw1Bwe(q);nZpN{5%EPK?O%Jts;#f{>r%=ganE%}|W5;yAQ@+#qpWEwJSn^Gd zxPj)i%<;d)6>?risCYT!&D*SocqzT!Yg0_m@%`lQyu4UXM!H)DJ$+O;VS7fr_ z)Dj5#O-MA6i&5J+!+FkO$1^<4lVd)Ue{nXj&kM~PC*L1Yq=o6T9Bzim)&NY10M$j~bND_C)BMoU-2$LW7c9{)j9_af(#KK-Shh_HSrhodB1_?de= z{PL#E%9;`<-?MG)UH-zVn4`yk#IcB7jJE+Va2(cC!*`2q?Pk`dfT6w+=e!T4g_1ly z*J&lpjaD96qBeNw2U{)V)+V51{d=pO>{@+BqdWZe2Vf@0&m`eyGTj%hV(uyyPnHq* zFUcXSUudD5SOq 
z4LsD$<2TU|%cKs|9}hrNe(QQNaGqK2W_eq2YlYrVoc{$k{}cbn(`5u%J?xi>_R%(W zf0UK=brZYRC%$#&BJ{8<9sI-AX)E5HvL&?U*0_nb`G z2lR|4(0+*D!1LC8xyb%)*iZghU)SN>3i%F;oz&Bl@xgbQ&Mo*NQcb^hm3;K2$%TC2 z6k6#S@F1z{uIfOPzBkn6i!*4=_NDI` zp@(4sxt_yZ6ZcONGCmg7BJ;+Xy|uOPLl@t^);A>#Hsb}`|DLN#32xEjw0s!~O}gw# zoIL1%x0{xs=7pnm%JXbe6E!JWq)vy7td@0JIaZ?CLwtUM)~*Yib@bmFKO~6&)Yrd$ zo#Hvp+yAbW>$sMIu!w)E7=_sQQ1i5P{tSeFA zX9czx=X36}3K&_yQ$)oN7^BL#lEOK7TO~|5kx6y8nfGU#qKJw=ML+A3)m2*iW*8xk z?+`po#Ss1M2aq1ed!7=0#7{`*P4_H%)eKAf>3e+N16XKIHZMrL%g)cr8rG)PT(axA zwOs8|-+OC$fp4eFscWz#qxv20gH#3oq5R#gG3i__Zw_h3Bck}S-#tGi9VLM%KmJB9 zuM&ilMqXfmGV-i40@@BRq4SWZeiz!6SpTC>&dXJ`P0O$9UU&W%aZD;CGbJD|)dZ?c z1oEXHa)7psn=H4H(M!$;elv5M*pu@7ElLUbYiBZB&1|EEIU065GY%_{=O)z|4Lkq` z2Ge)(v&rU96#!^-=BMEo@KLZ8By>&lwQUM>e&JC|;R`t0l#8-9W=yv&8L`%^JK2oKYZW_? zjY4hQV|(<8f*WPY3o<1m^+ofp3$>@yy992!66h>!u{a&4F+vzV7mwtofT-SR=KdKb zV;;|57H8k41yh%Un~o(%HM76|0;z-S0BmF&pq*tCul;+we|AK>aiizUG%6+wRoGiD zM~)w^2sDxUgLf*&712!|CT}CB<-+1z7F+rI^T%eT-HN(Ru2 zj}c_lr_j!Ae_cmk*|RpIuS~1a<=zj68tU+Aozx8v=n{(&c&yEAyL!%@V-L*H7lA6B z?G7tI%ypkC6GkaFL(Vlkn5)IW~Ec< zc?|Klr?HIjh?H|jt^WDc22{9b!8m$DKvc*5LV4mQ$hJBJX;TC7EIED-CLe%+oMl4@hu zZPZEE%3gISOPsxgNhKa#IR}23h1sb(0qhRdxf`@@uDmKo%7a0L;aqMCC58}e%CAPA_O4UJB z^C?u7HTws7h?qp*aFRa{bFN^(CxCo>Ho3#%D!^?^h4o@@^M{Jpx;8TINf76&bWxvS<2OMOKrqGdp)%tVp!j z;|qlOJtShK0`)}LPtNHyCax6jv|%Y;c9mc0pRkm-=-)QBt@=&d2S_4;zMxfDw;xnj42Je|kUPYk0toceMj zS|SycA3gp)UnB!A%?u~Yo!QsdrZ;wlwGUd<2Q z1&|GANAn`)N5|{=vT-8n-Miav`B^~1o?|P$|IOMwQSkmP+pxcM@Shs11vKOfW>(a# z6?cskr^lOH^mz=p?jo*HE)GFqcx=>Rp<$KZqsTLzhL&$rUwy{@Y^}~}QSTwgGE!qF z+WOc`(D_Hah5Ux|1RB}q3(nf(ik3ty^8|aC>Zy?5T>Yxcc3{iB9qMAx}$VTk=k}jn>CbOImg{ndT-T>^>kp=Rp1H=bYZhmPuwEW9!QVn!jspWbSuo z?{^?$ccS7(wOd&zPVj+jYxAU53!`f8WSq9I>aqO!OWD7brt;lZhp6IqX0>M>mJdS+ zhfV0~Rpn}f1+S$?)#imK*X^E-Kf@CcHC7^8xj}omCgkVi6gM99tdf_jKi$PMK2$n- ziqY2AUnA)DR?kQVCoMC%_~c+23Zn%1eL;T>b$a$sN)*7|bZmCjz_W(OgwBZkYjoOf zCplx^l;z0=$j`I%&zgTuT-vDSB$($^Zsa|jf+8#qAUy>O^(qZ}t;IU1Bh$ftvrUy> 
z!Dfa?IFMgB@N^GjXw1)Ln(anlRD0CE{qx4;?650^-|9m~U%BymcRMPNK$Qr+Ty|*$*P>V@*Z{oiUU5%M4{Qk3wYu2}Y+2W=omz9VIhZmY{A7oHnJG54@Tn z=?twq@Sr8Z`Up_S9)nMPX}7&^Lba4K^GK&FQ1vMPSktbojtJS%ksc$l`Z1m zVvllG0;w)Yrc$3a|_*1ffPrPQa_y!+6g)zjG zx<(6%2l3noRQ8k;81+zpoAiuc?(Nr(J?2&W9)A!3bJzuv)}{o-Gbc)fwz ztbIf9SyEKZ*L{RkS*Ij84>^W(miqg7g5QVD1RK)Q5L<3^EM~jl@$+Jt4UsCl&X4yA z_KAkxu)FAr1#t3n5rDq-=B_B4T|za8T$hPNQDMo2A*td_XVo-!vn`Z)5RtR?2SQ}L&A!ZX=_|Ezys!j~k}mByaldrG zFVy0J)6DMQN4e1n`EwTkytnO4bn9_+j(H_)v9ciP9z}PNr-^sJZQbG6{O>nEL%-f1 zAdNmjU3Jd}=(tyKH8nY=lWnj9nn0B*jG=*TqY2?v8~6-rNhco~8N9y>WY4!BZD<`! zAs&P4dN^f-HZ7de`B7PM22p> z6|3bN^ARz^aMrJn={peM=Nl*8Q|~anphckG`-8mkQkpS)Rz8I~uA9y?ZuUX%ruJ#Y z`pKLV!IYueMbH=%(Xb~LSBDw@%y*Er^0t6b98yrdbARyf7ri|FpoUs9uf8$9qjZg) zq3PmNYt9gtg+6A?&9(8z7hW+QO4aawqG2$BGT9d6SbVX*wHQ@PD|m#T;)2pQya$`s zEr2)NhYl0)gOfu1&(h+wR&DC8x36wSh9^F=HA>y0gD<0XKFGn9Rf6ADG9jN#!|PvZ zbL_qElOe9?7^_cO9d1ENG%iN2g!@|!`G~IPSRExJc{eii>l^6Sv5{)XyAaqo`vdR9 zL16`&oyMOJP$QrGS;`J`h`$_8(i8d};IRjM9$%>MYIwmT6!);2_Y*0_P$n;vbUGyx zgi)Yk6#eVjCHn=D=+{N>oC(-~7_3>=@9=aPC@cWMM8nYdWAo#Uv>RMYk%QW03CcIr z0c}_c_$O(xBFS8|=k#P=)8JFZWBJ;Cr{ge^_E9JM-Tc*)REPCP?`X{qST*dhc;Tl$ znLl$_cQW5+%yeJwSBJM!kEOXUD2H|zLa`DMrrxujV%_sQEIuh9pu57oAKTb`cAtN@ z7cA(9@CY^1`X`pZlwnbX0(tkKZ5F~UBmFf!5wA5ntI2zkYIevGqrE|NxlCBY@V&N7 zRrl;CPH6{%KLM*!>_|xD-LQ`>@-@-KKz6Uujl|oj4Z4x?`Sal&j6-j!XogdUC7GW2|ef z4oL86T1AzpHVS$KEqVY?*pNopv(N_Q;>S`YGog9*Bj1T>c`>K*W1t0qc}D{Vr?s-b zMm@FO!+qSD+rUhsbWKa44CMnQNt>hKqe!ZyPsA3)vH#S>djXuNlI2$8{ph*ZXxucy zv)1ye%tz0t`z;eD=jwD|0(IvP_~ox8iY>{9gf-M%8oH~x=3tT6YhSC#VF!3z*~nGT zBOVl_A!v;Sb4MctuwedMd(`El6qd8xYL#OYxskXKwSC>gg|^72m!jS1EICTQ!!V&@ z@<&bW>h^ndblOqglpO(5F`7md6_bV zn*6&}@Y(z(vnDoWSr}7B`1?2QL3qzr{Vc)t)*>U#131r3Yvqt6fZArS3ApPb4fB^eZ;nt}5+e@=3lH2!b(}9LAa$-B9VDr&ue*o+kzVp=*3E&_Ke_oU(v6WQned*jq9m#uH?L*K8 zy157GdVo3&t*8{X?A{A&0D`};mZ{=4n7CsxIoko`)ZRUyrEri2KouOJ!5Y;n< zlveC^y)>)%F220kw!&Yp22^<-YPC#bslJcI7!T5q;J-0ev5WK8Zd&IqdI36)3J)MF zM|sqrWzqGeK+NpZY%+7}_!GaV%@oNX2P1GTla$hdugT4&>-zialnnOf?}Yj;MF4#r 
zQch&-wsIyg3Y*hki;ey%Z=_*zNTL_bhGo|P3|FS1sb?C_WhsJzo!0+)ShD~!c#eKn z7tkOQ*2YXlpu#N|7;}2L{mj!1`vcJv-4-%g5tmse>Ba8eJ}6A+D>aRif3za$hTg-u zx<=bA4!i)k#{+_%p>eoly{6uxZX&QOWSdzFdxEs3uzbdxi&ih{8q$&q?(d@Y@dJF>fZ`x^heGQDX_1~LB(zn{a@GsOPx zFI#?hW*;(>vkhEI3zTc)KmYj5dZ{L1SVU&CSW#p^Bs}Tf_Xj?`l)%fc#YX|8QwAD*dNL@7qpup zFSQEY+7TZYF5M{K2ZBQ&GzlW~Cji`DDv*G8j2AkYihaj;n;DR3NQ|?GT6%g`ubZ<_ z34NVDx7Za3u`%T+LEIu8Bkv)w+4IStKNV~J z$dDl?j6DvHAJg@i=8|$8H~C7IP0!6*5`eKh8N^4iVllE3I->AyIrJo*c!1U2CMVu6 zS@iJDV^GvA<%oqhCvwPZu9g|<2?}kF>Lod%Y1!#%4^zI-ZJ#lammS;j=bIIdZ^0w1 ziqVYVc>VMb)2^%ye73RMQrgWZtsR5_S$;OH&5-tOPrTizNz69WFBBfMvg_n|f&D)x zt(FScmLg#P)`B1^>lW(=M2_>x-B6#{P}WAVEb`6pI*^Z;WM%_|2(35R4ognhJizrp zoxPym$F{nTjA=W?au3^RT?_P4A`G4A`PS?D^h?sJ{2Di3fs7n-7Q*+SIIQ1|5A~z# zCYaX@b<_3njFu*T#qP%CLdFD{gLZ{vKq2|GFPqudx6JrkLTx)+dtDF5&nOKJRiC_W zk~T!N1VWdp^x|VO!r9#i0(e|Q2NJYD33RkzBO ziS;|V<~ioA$Q{n-cpi_2(%bB=f?MB$ppB;GeA(;rf14J45v~o#P9>#XSFxv6{R&vt zew-t7b6|qo@*j7iq<#cfM>Q#DY^C1u8Dq}3i7HPQ=9YHuoKq3>S~2X zcy!^1jg0`-f-fp!X2;d01l3wk1fL8I#q~f}=bR~5CmdK2$?Xl*V6eQX9Oj8!D5bJ9 z$B|i8`B~(+>9$brxOC1xHVuWUtGUMcFaEUk%Rt-;4ML#qyBHdBv+m;=31!#*^XHB} zR6uk>?%#skco->Hi|Q5JPC$`O(u1ltHC{oUsudA3_C$o+z-6asrf`x` z^(_(Pd92PU{O3AX4LeI%RRNzb^M1mWXo#9(5cY_`S zYv^V>cyGJ?`Ta$*7(xl^2s@6Q`tik^z5sRcC!R12Ylb9z3e&pvmwWnIznp!uqp6ffl4!8Mi1c^qHCCOm_i6jJ@t9 zD28Hm^;726zuM=x4OMdR;Nzvu6{|u4`WN@aICIfY4$)%abh`7>9yq`!U;7 zq=l#5|(oUY+aE(Vv2LVs! zT35#GO~G^`rAi!s4j2BQb+8I7Cx~$=c2{YQmjpj9q{YX;Z2My!Veii|pZ1m_O*GaJ zE+)qxXo7a(?>;7m8cVU|jk=Kv9~Z763}>zyvqDdMw&{mFWcD_2pEn0?Mw{oJdJu2* zWw&X>CWxr(pPPH)V`!FrBxppmo=ifFjSyRm$$NDfBq?9NaLgA|#2zGfBiVzUdcmc(1pXr%Cp#nN+Q{I1x0)RgCB|Ur48!`t!N6w7x%gL@Ms;0wU?KxW0L8 zXxy|BeO`cfH?kXANoPoDr-N%pHb#^VrVwGJo;KqF7NY%#3k7al4LG@^KZT!-QOwyu zQLB@q*4c<7#FI;UZOmCnPfrO-`PlT}2Y_C%jtlI)byEATu@ANr#2t=BVT$fM&6@rd zR>|E6Da?OkOmkp@$`z@<*Pj$CFEaLYw%}P}BB7V#f5k-{Ran3)0 z>-<}7|>$3WIPl8E#kdWm)A;fh0IV-TcpP@NW_ z$@U(HUVfduo4B0X1N$A8W_mX0KQw%v18VivMdYLq%URQw_>?nuz;2D%B%-Qc}8m7b!A@1#h#W6$@c+WT? 
zwji`Fw22tAp-`!zq7!dJ3SuSSzDNM8oiwS%*!o9l;P!+3V2Q22Sx3Ff3BGIBz-o*4 z7}Ew)OR%EM0UL=&{iGnE{XZKl7)^es1`arm;KUeMA*hS4x{sI!`5v&`?rQ|Kpzhm< zFT984d%bPx;PnpIL!w1#9$3)JjhZLb%&4>Onl8wg+bqVe8Q6mh3lvT)0~8oS5_OLd?4%8W@gwk4Ae%z#SR<$F=dxV7HT7Go{XIN~f^U z3J;EROxyKE;Q*v(XRL849yaUdaU28x=6S)?*iv*S38JqW zL?D`K4*22-_uw(2jvQy6WWK{3v!3=35CXt%MG5{0EPWUP!YIwJGhl*4T6=x74p>z_ zCAWVCaPxg1FawL6>wE{{zrSC+V2EAAiSVzyT|}I8evu~yEl5SPr7Ca{q2q1aKNo@1 z&^~{fA;$$s{q1`spLv)J^lmXxlCqm5qvL|*b5n?do_xmJBkB=59$NY2SX06X#7r6U z+`nhvBAz_!P0~HK7b<0f5sv&^dBOKJzy3!&oWOXZ7@pPPlQH<*%=Y@thV&hw76u4f zz7}QOzT6z>0i~BF@O4U+zVjp3$sBx+?EB2&Zb6$uv75v-*dL5-UDNu$#w)n=-mB-h zB6*T?!TLu#Ph5`|S%AorD*TW(e@91jL#LX?cz}UuZ+mg=d&ay}!mBQA&qvqgUT(K+ zMEBg%lkm4;93wLperf035F!Q}vxIvhTs}$_u@|R)!tA;R%YqR?PH58uml|9aooQuk z$;=p=PL6+F0!B4Br=Vtq_FREcGlhKkKiPWi4{G1%mdL&eZZ7Sx{Tku*vDr2!C6R^B zrD+OC%l$)orS6W{PkokTEyCIx&(-cBIo0J|14F8NeO@+5%;|6166p}}Si0#^m)Q4O z6Jd$dDA`cb_irKk2Lk2U@hEP~R&y}KnxamjxYHx$XX9<6pMYIT?JHKerBa_YgJ?Y= zcBqd|@#5#-gR)tF+ZuWB$LT(z>4l5dA8)a3GU8>#F0~g2o1mB((51%3bx!%1k**&S z3YBE6iu1i*79&E4Xs-Rx#tXkiLZ~ss^YeztPk*w=ec)CX|KB9mKvmQNQ6c|tNaLir zq&x_7y{F~@EPiFZ?%_SopL@#WE4vL|X2)4+*yB(s^Y*t;s-*=r$txTgJd4_NflQN&pHOgY@nsWFj>))NpHD!5QZ4Owp_?cD;4SnhK~YA=mg< z5kS?W*Tw-FVdqqSB?q?vv-rb;%ypZkJ}@;iTUqNW_VJj#NS0*boPTD1i$db4Ne&x1 zD8u)eU5%!mp{e=kEF528t}G9Qc4V??Vp7_DmYk3a(~8D4t6IvKTDyMoCNfv@W)ZzY zm0#$NwU$B=qElL*vLoDmv_6`^`gEF;X`Dk0+)o*Tx0kkPgND8xI3QX1K4MA`-Az^q&Z?iC3;Mw2Xg%p@StkXv6Ie$ z`d4|BtuV@yJzkzX!Doyv^p>4~UGe0onT9a`omJM-mr(ySd=w478LXe3R2?*jA&7CTSOt|l3+OE)J z+Ykw^Tc{>#qC+GS9+4aZ{!f!N8M(Zm+VTyE5@Q|^LrD<2CYEc9$)J^{{3j?X8_LGl zvyw#=HQJgm^{A$>8;rVt!)f8>OvG<2pg09Kk4T6y;IFm|9(!$#tepR!>?>9S37Vys zK)rEO`#Ew-JTs%duOS{Ry7-X0ck&?4WRR1!oP7#Wh;&V-^kBCOVkyaWrf`Qvo6le= zlp)}0TdJQTzF)yF;?tkE8tF(}foNgU^v*C{$MS$RSbMovN7()}+LbVCgXDGbd8l^e zvbVPNsM)~Ym3ri&A>&nRn{xVhr!UwX0e@tN^!g`*(jLfQ9q;TQP}!7{#m+Ui`3ClC z!?Ruqeft=V3Iof0l1+$wA8c)RHQL&4<$PZwld*QNogtUtxURt^5Mwgo+&iC~EMQLA z-j4e3;{zoi=jB>}Ns{M)vD|+5*3&ge77+ME4^v~X{3+Pi5&ZGedutj4{{fO<9RwNJ 
z-q$i+I4*kx)eDr{Y{EF7Mnoaollcr{6_M|GX7o>Tpauk`_B!NJZb>k&z3*`x#)6+# zzUsm|wx^AYOSCC}^QEStl1>~mBsVKBTAP6pp|W5m1Ob|vBob*+v()%>Uw_10OeRju z$q^K*beQ_FFWfx-GjH7gn1W5j4ZT#+b1z=^`WbWje&WIiAdiRg!D|_z`qFC?Km{sZ!fMOa*V8@@x2#QY_X1H&R_|Ce|$x9QYq4VFc zRW`f%x?_+Ub~DGa)g64q=A^F!{qu5DmAa6|Xsp9CU2JQleq_n;YZKEC7iaP~`Rrc+ z^fROmfBmbQc zB1l>+3rhLX^+UhrZf8Z4%`dz~@g}#fFe3IO*wsG$`E~MZsrTJeb=O(9rn`4*(qrLK#OaV82SHhG_6ChUCf9{pwltJ)YIHTvaAX2 z``*8_Wj+fj_hc#u?ejYs^LalvhBwR8JdTl<+1qe}`;Mm7ONmV+FX*g60`{1f`X??j z^hNwh!)GcMhHg8cUGpAoU`gxJZ}MpJ^P6 z1F{vbUHOs~73Y60TSdXSTPa(Nwwvo^1p+tB^iM2w{JhzS%w8r{JoNpb?cK`s_)_=V z=PI_-j4zM{WdSzd!EZqE&$xn&B>!JUQi)+^z zAhVDEI9p+HEF=w%V-%KIVqh}x3N=k0g$=lHmh+{$mD%pF4Hf!G?)R>*h!AE-JV;0Tg62LK)-hc@Yfj2J%L?dyX!L zxIC!17f5ugomY_(eEb<0{UgI8T zlTk`;2iMfO?2@5<2u`EHh;`;zlcsnG;s8j+M1uvwhfhJmcE`R;UA*wD{Ie_0IfgRi z79uL3&qryUz25x5@|aw6VIsGgy-fTJ6gK~ske#3>mL{k997XNg7%~xMIk`gs9l|S; zlQtmN$=1rY=wr0!eQ$U!<-`KvmRF(5dWa{~oHTz@*J!z`rvGJO{~ditTy_b-<=YNR zCh~UZ-fVv(I<)=a6cqQQcoA$c+cG>rnph{ZXh3c|1=im*|m0kIvr*Jc-{Q)RwreJ*GgX;7#F} zJ7edqg{42tRS1%abtxvQrDjkFLKlFkn9U&HF)sH=*ZCW)tmj+3)*>nKEzF`X>gq^zL)Yk!1GAd+&b@rpnOQ z4EP}SNWQY?SYxBsQ@wMf*Fs$53Kyz)W8`#2OqK_=MKyTuXFgH6+4x@MX@L)^xjg;k zjq1X^Qe@&gX?`!E;|2C_l-z2(737lqwwklP0T;UxWAk7+m(2;(!O4vm6wXzS)!@*N?OKT$)!#) z74+HTXr?`gD9H;c*jg7L(?fJKDP*qVF&`Fm1 z4g?xX_fBpXMuMboL!cootV2+cI$?Q6oo4+}EJGl+)Yn_Jz;fGJ1Ufo-cO!b(j3st# z^A@v#Cjs$tCcjUUHU@46eRDK<{>pt!Ogro_$9LoJ1-=^1JLDU|-2&byt~B93O* z=CQ1DnUb)}GT+MQG^Yi*>hd58u6@b;Qif`F3h*tF#Fo!IGm_#)wZGexZuA*dau z@uXII`^ENE@@9%>v*@d2-chHJWE)2T^pkh=J8>MLV3^s&3iBH z;gJ{X_YlgD=Lls6>_Pz{jyLF{*18+{VxY-=xc2|C_1*DQxBver387&{sAL`@S=kh_ z=Q)RCSF*A)GO|P2o5{(L^)$K&_M{dn}p?Yyt| z^}fbyJVzoK8qn$EY|)YLF}KlWmbW$TQ;w_>@9pDhgJC%JqR+J$RAp^{Q$P@#$~;=C z6@stn1U5!NAAsTo2}#stz>k7O0<#1~H7c8geSIKc6_X*l7RQiKNDimEUwuW56sm7c zSw0keCmSP(p?MBk@36C%ot*K6wyT5V&dLxw;~>|kEyNOfn7?Yt&e+{-Be^R-p1hZx zJ|MSl;u6+aW^ChkW;Jbk>gC5#FVB89%Ja|qx1hBF&^PS~EdHDI0(8>yy%s9gk*6x( zLZL9p{7QtL-Jwfk_4?;j4@9UsY{tYGG-biAzusN#y_n5Y$1=Lo&EmAJ_t^0u4|J-y 
zU0R^5{&>AWh95tL_QEqYrsJ+Cm4vvG9bo>)J=L(3iqz8(c-Czx`8+)sqW0db zf7~4AAmT6qxiV6@DCop+?-I~QxP!;l=W{vxR<;&Zrx^Ii3Ie+=sNQ{s@-=X5Q;>Zn zLj3=l)15Sw_-~eM8^%{UF=B?53Q#YxN^>Q>t2f*K&yJ%&X<-pmxE9Y+su!V zI>iEv8cy6*Z~2UyFl#5Fg$v@#=0eL-?)rN@AQ#PVP0kZ6Z>8-MRGYYM2&DI{0vE_i z*SD*;H#*$JB5HzgpDF|nSc(CcJIO)ah0EU98es9K9vG2>DCQ*6Uh_pYzaTGK;HDfA%3}cB7;aE}1z>)tepwd<8JhXvQa0 z=?UOfR%_%PvxS*i#b|^-#)Ldl-Iv7*N-%XrT}1%0-`|&jz07}mJ+i>^bNV!u zrlDF;7+G9_8yv4Y zfsV*8=Yy**{#53BQ>g@jLR%OztMbYJ`SSBcIXBK-HZ%r=iG)_J%;$gTO^vR-R<{ao zWqW4wdV0nlGuK6jOdz{8){(qWbR3gz17e`UZQY!?6nJ->sK37Oqv#52&&_inVrWUe z;phg3;U%+EIPKosVe1{=)X-E4@Ceg_S9I!)Zd2^5nV1WDC>{g0^NQ!%OA4}HVowva z9{I5aI90pan9u~oWn-&QGFpep#(4_3U5k2F*Co?=K6I7!$YI-Z#2Glw-9MBx|I>=9 zfWzJaRhlSxxmv3!m$=RCLBhu9gDWk`9u7ZZY1>Z)J^rUNnVik=C#u0ipdc~Ih23sb z4Jf!Jr}R^`w+i3rml-YnymG+z#QHJ0M!r(c+iFv_1TuLz6u9^vs3!9m>U$+iTs3r% zak_9Xv6`BiK}w2`M86+BvV>TP0Kqw3vMrt}vJ z$sspSUviido3t>8)D2Y@t4#S9*{Na>204i}G^OD9TlO0Y7K==|Q<9uzF`i{$KUt4mQoY~WT70U^mM1w+XyNrmr}?qKS!t}l~F4TL!D8=!hCSv4JsS@q`l## z8LE^kg0b?G(GT_B<_W8Oi(nnw&YrJ59DEf|G-Hg*4+sy6;^4NMbot7F`!VJJZKHu| zG5>d~>*2iCLoF2lI7=6>>z!HN&f6qk!GYBzSUMSMZe*`kIM5aCi$~YCyQPhm%&M17 zEY>`ENwJVbF9+@o)+bzQ4rq_htl#6+_qRsd@OMfkt_RWos(nT5j{wEu;)LL|Y7l!I zR0F;Sxp_L#%&kKfUsm-UZUa2ch77h_6^eiaKXFIvlor6?RMkTW6F!$xC9jNGR$vSV z;$^>?vrnZr?>o|EkMpdYp?m4gaw9dSP}Y-J$$e2f&r`Em28_j2*U4(E2&aj8&w> zyi0FyD!z2CuVeb|1Fiq9L&Qv#}xbA%MWP!f}Zy$PLe&|b3>PM%yutP#g{Jl^AOKU03Zkj ziU6d;MQEVCCbT?bwTDih^!n!L$D-4=Ws!Z8Wy)QxB@Lln?|Mi(^Qo!arK=%XcPO~x z-JhR4VIk}ulSf}dexD^Tx#a25<20r8r!PCh@4IDkP_j|&*-~JW39_8wJC)REQ*4P; zYI(u|&A)}B(k+lX7BQLcZG*<|r@N%pdL}^lt8o4PwRncBATVSZGn3&a*J;@9Z&GD; zsvm3H+dpis;8xOMem?QF%_`QZ)*!O1Mi=ra1e z3O(5w{=;5B^DD|s5!Z+QeP01cY@Nhr3?x@+fV_`=$$V|`(>dU1POiN&(?yqsU^D$( z1UpBez(1w#b-X(l&+^0JcD5d>Fc}e>yllyS0%swH(ve6XAS^4Q}eoJif636tp^HNX4yQpY2_ z4W4%N0L14N%(C<hE=wOc`N-_{NGNk!T5pECZS7%stjDnSTF-!3 zp>roSAEH~l$;>{i+Mlq$w2pxUbU@RV|5)l#Za~0{TjHTBAplUUS8ZiR@A%$Nyr**ilhj*hapqx30FXOxf3&EM?Wfd{S0?Y(`xk!y(VkChH@51P6THuNTqARgZbg)JriAV>?Rat`k6IdZ2O% 
zV|S@M-~J6KO*QB4!qe<1I}Av*hLN-CwQKc}(%TZyqzoM|>74!mp-9$WLgB148CU-_5A#n1O&G~y6#kjY7=W^mOVg3V);WeXaf_Ac`tz` zr8Rrcrapup^1M@@>h_4f17MrAgWG%u`fvp? zv;#3!lE7Q|lOn5*2(fhFWqZq72?B2jbuk5YccWhCO|8bkfAnky+n4Vw@;tuzil`3f zEZat#0qU-Aq*;yFjy=_w<>D(g<>%<0Nw`cy^d@g#G(01X)Aq{7fa;JCUAXW#`d=gQ;exFL^ID4 z&Yjo*ThBD5(g_=X36$6MYr41o$+h|ALp800bNjwlh3UY$QLgSdZ5J_sgh&$yymjRc z!)Mwgn$rLNzu3x_7YtXe?a)vl>h(WUGIfVkc$Ikc5{qz$WtK{rWj0XJFS=L+u+cL= zs0&ia$!ED-}xQVnr3kkn+*W~ z&uotdnN7M5gHTaN$(ZT8)iYp;iHK_B`tRM}9JT40Y3J{qnQ6D{egQ7Xe`fGQ_1|Ry z=q**jl+bu#Zt5lr<{LMXgWnXu4AlSw7b&ng23Wb#N*Bqwaa&e){#0`+pBtW28EhTl zgtIZnqN}{dMcucLU1XrVd^d&`d+9DGs?SA@_l-N@4qnX5ol*nO0~hw+FZQ8Y+GX~` zz@5UrLe!6D9pXnKs#I||+CD49@f)|MmC7+VDiDY-rVo89B#Y2ax1=Oeqf4Cbrm0|)hcz#BJ~xUaDYwq3uI8sZs1 zN>f&1AeXcMR-nLFGMXS%vYkN2+l1v6R&THtIF|;Zhfo%(FQW3Q$J&T6mp!z%!|O6@ z%);fHs2k+sR5XO$vEx{(i+;oOrlAu`6Nv1Jg8s+6CCb@yMgHdt_v|f@ehdG+E`Yz3 z&i{8NwSyF=*Smy5#nfs)bCFZ2%{dsC{U%H>0!mFNy_&noBdtp{3zao@_2c44q`EC| z1-n{^0JZVQ{1#c3AT$4G0VM>ZsvWHaL-;3oU@vW_9P^8c|MRstjKI-z2FcJiRY80)sDAZc zyvY?R(=5}HAcS}KUf=+w7Gr#pwKrO~i!Qb7twHvjUUqmv|Am&(ty2CvpjjTd-w`cS zNR8n)b?X%)c<+lIm(HLZK$K@+zjjjP0mban+(^IBTT~ViTbB#^YW#4L`N5=+a~r2< zAujhF9OsRO_ga$0CY@!IZ@f{N=zZ6kUd^ZygP}3}zr&cg{^a-LcgRioyEuPp`EW)* zT-&l?zFfAg5OD1QH;7@kU-m!Y5|qwt60cUUg0im-JwRZc=wdLPyO zh?IvIOScK0=yu8?gT0N@8qHzF3@cAoov_V*lLzLGA~hpDuA}8k`4^lo+qgi*Kb~DI z8hZ7WSB)UFSGJ0;@k{^r4GrB7V$%CqvI?C=zQJ0Y>>hygi%AecPt4v3-u6dCyP_=r z{bjE*MeATx<`l>j+|bL{sIySqrO@bNKjY$ z--UNrCRc1orB$g;Es@Zgs<$aLbjKIitV(UshTAO5$V4+lJ)(rJ`~-#XBa;c56V3LLZ?b-YgoZ>f*0s0qF_y;^YG zV5J>DTe@^m*lUoRz>0h-nDe~+QwjpQw;`bOdl>Cc1u^J;7O;l@c( z{^&)6*&kthYj`1@A;F*{$!`by6Y{3G%KzP=pij>x@88H&^QUE$2#n0#<04n)H~WLX zKJ8qecRc0*yIM#ULeo3LqO(?V{!_2X@#v~+e(=>2NZmz9CkW5wpA1(bYDr70`bP6b-*_@~u(c{vZVp`;6H0RG z)tIYQ-!NeV&8Mzrz`}f@H*db1+JVLNug!D?$lCc|8$)OuZ|_ZDOH+VOFN3b&Kc~40 z9H?PkeFw0!OMta%z6C%e9TkkJxKGN5B|4>{}ShlHb+P zs~?mk<`hpAP1R5dn+QQw9`phyp5@V`*{&w!QTEUck0)6Ybx$i8{NynaPb&^gH3XvZ zO59lMWc&>Oxt__!))d3sf^GK+`E%8!ASC|J@iSOBTS{x)2Ein4n9zkroED(EUgnLv 
zZ2D>%xHE~BR>!DqIL#ESG7PDiPk(4ne>MT&PX66hf}%Hd7=EU3Px2Q(jL#5X7Qdsq zSp#qkF3?X0d$ou4DF#RA(CZO;Wz5hqF}dj;DVy3XBzSJjzP9rK;gq6S&U1>3pmT zGp;-%&Z1h|YfV#78N&mPE?pc6f0B)qV`PEJZNgJUhe2y4X|Zc{8zC8b!`#ssJay{n z;arZBZK%ECAM`2L4bDdD?#)seC4`@nx z#%gN{lq61Fa*TADB77@P_%k7P?p^Eri0_}aDaqWjIW=&S)0bEAYuq<{Bt`B%p7qT} zV3JWem2!2YdRE;^hb6%D*bfx+fYQ{YZv!LL{}}e20xC7VU(2>nu-;qyhU^83dByF3 zHZ?$pHUtJf;-~UNP9~Lm)u^!O1VTkc96f}@+gNb+Y&jBsDH!5997E-F7^vPM+z~Lf z7_M~mw0otX|y7brhG!&`o#OXhHiMq~jAFa@zKLwosuGGcJM zv=m@(tW-qXdytcufpvA2F7s+QFi9Ck@d0rvuU1{2AMMoA68)^E$$s)39__zsksI}I z)%_6l-p1dOsGC#-V|}}j}t}S-;c8|*)ZSYJrFA3??^$sbRCi81p*x6PfPs6{rawG>|VavZ)JH%J)iq0s5sdIAbvgkOUg zU!A`0T(}y{u5Letih;7ct1wnJhr77q&vQ zP~MirKYuzh-kF^ZK7z?Luqol%@w=OQR*9ds-3o_fRP`V?1H3`0;F#8#Zma(H_Dm^Y z3u^r5Z71AYGu){y5q@}+nH8h`T&Kvt*YA=-33G^l&006M7?!Acftnm{x1oe z#Z&IYN(d-rFsOj`bt;&XPHl8ib0dF5ioi>v9%EZcqsLK6Rm<37MMkFl04V^)&~nIK zahz$E1yD7UYG)3#5b0IwlfdfaErrt&u#)5?1{tW?T9a@DkfEDlPSYj=?PdYWcJqT; zd+uET&mai|3;KW4tk3jUn|zzugh$OH7nBFboMV7+=J!xiq$)@#0iDQd>Bije4nXxF zBT4w1jsBZ@oqO*`c;fPiTpiW~4&j_)r)~R>%uP^HBSXr$1Etf|_Mm^C_?s z;7dC)5PE$<whBM2r0(>r7Ua+XY6(P6x?d#jmqgny<%7CgHs~hLU@qF0S*Bc9% zwHHrl2Z5puE>E$$^vN*5p^B9k``|uNWusFK*e|xg{{pZtMkU_5%e#W zTAUNjWHTQ-Uw-&nWFAoTRNh=rA&PEtmP~K#f4!#k}@d+485>q+!ZXGayUjbC`Q*dB$f01{4=XF%lX4Rkm=3D%Jv3>v)cD`^(7#P_d zl}%%>5@4+w)93rENl@802_}A4q%3S4{k{FaqL`B&{xT1f4<(5yPeBhI)MRL-#<)b zbCtjXV%k+Vvl1_PiUbNuz}$|Y7Ec$`J38ett;x5I*tg;g=`64CQmb*nXc? 
z*aX7tvDD>6=E)xh5;NqC$gZy@Cu|7@H*6%OKm37BK%m;6jnZ$rsuc|JBYf|( zV3KMUSZ^;jo+eENDliUk%+CS=IxO%f+Nr5^>K}2v_@6)2wVAgn&W9N(8|5aKYEs~# zy7+hNuU1+R)q+43a=(IJ2_fL|2+BfHW!O_ZggFE zPETS#>u&>Auj|2|@UXTwJXAZBIVlD(=Iy?0jbGJM{N!dHr`2(wcsqQ}BWUJxRg-jBo+xFx_itMNZg2CX^GwCxa{XC$?Li9}?sPe+_wWwnwx2Vr%(B;ZQd$5X=2g2LPijR~LyQR#crV zrcE{rvZCcH2@+rC>V_Ocpq}GLgWW*Z2}}ghHn_Ao`7I4NMS#V^N+I<%)Xgb%Bknr=V{>~!7tkbDE&}QN2k{XHqLSt6X2a5p?xvX&Y0q&}521>h`V4tp zU}EjJr5XU%iu7v6+ z1DJ|Wx+S8!M92S2mUiI>6h8`1i>|5(!T9moCv5+JCGzN#>h#~{B7jutaSjld#m>BW zHa#R5=#*t`Kkm#Wz+yV}v|I456;%I3YcY<9Ukw2mRPtby&l24oA`RCOPH?@aLw_2W zVpn&8P-DM0tYj;FbESo(2=0NY>l(6G|CS<#F6CE=MBQHaG7+9vyx#>TWt;NT4G=BT zBkCQW5-S-;tF>iAK;cpT{-75$98{bCdCto6tDs0m!4>DR45tP@cCs8i5k?1Su`2;N z#e*KRG$Gft9sNv=YtM!aA&=nqp!qy#K?1QbH1S-j zEsEhG0jHFbL)}2cEPpuNO_p;{rzLsVbpuNSH`sA63EV8H0bo~Ckd04hm| z)5_L~##1<^!*SXR=-shKQ5`Lgn-xM1O9jiID_PSc6}v93?a1l)tSRfGh39TF(djI} ztZiG-_kt<3gAPqppq&;B`8(Jv^8RmT`71yfwn)ojswOdquS6cnYuQBIzZ)0R2inB6!7^EW!>b-))?VEt^m$4bNz&lBEChCnya{ z71+A|na+Uz=3Y=d6cKa7A8V#oNFw+OcWsrxK)Z(?gw7gVAhW#IP7H=$49VrIhmPOR-cyV`_NS_N?w|HuefWK#!^l<) zM=NjL?^8Fi(0G2n#*L2OsfzgT;acVGY^I^P$qREeZgo-P+AK4R&JI}eOq7e?0BFL@ z^Hw(i%Fe-r?y-&rghY*~cWUnll=D9X`L$W~yP*?yYTZxwZvenCkWDXWCax4Gh?=yW1;e4e9x}USn}8Njgfi zSxoK>D__rLAZ9Kfs)rL#w*g~u*oD8ChiVvby`}WuXam~@pcI=4Z@jJ!$Y{})H~4_* z0;!>%!*%)2-x}Xuk=_X8@Lf8>?IplQ zZ0Ut+OKiY9#+2amp@OiNg(N*3LebRLhk;#m1|utwXjY+fQIq)mX^Y7^|37mTZN<8s z*iTMxT9W0pzI+!*5)BA2iACsCvjPOpJ^{PY1v!||#D-J7BO&HkmgB%y*xdawJ)7Vk zWgTCs9QBrz{VM&YMJ3-WaF(JnqSXM~B5eg{EoELyI%p8KDy~_i64?Vf!s3qO^u&fG z`_N76U`RIE70kVjwo6#|TTEfxN2_l6!90;1$QO#bozbZy4mK@wmd;0q+|v9$z}tAJaTLp@Rf zW&W9D7r%8x?s-1Bh~5P{INr4qXs*aOU*KPTYpuz5-pe;pNH0hP(a zCZ8{h2) z(m56ZIOjc(*yPAVNa{h$$w3@!f_; znotCt0i92erGebs#HgC1w*($FI!%MFda6^m3z0OUc>6#6J=GUeJR1WU>l=CCDBK ze#X=?2Ty^Jd{oYZ0-m?tg=ySyuG;+1b5cRnLi2E=httu!gTa&``R@}FmTO!Hu!hwt zaLfS9?~vcRB%Od{xD-L`m;i`5-aHL&tJd1e zp{q4>cbF}OaOoOLTeeK2v$gt$rlEnuY}6nQrLo|J^h z1U1W(%Oxs8{oy@5UM~imN=JmofDyYn#kL1gt?}Z(6JY(6xC>L1mmECmX~jYZ==`=C 
zGE&3vGmP?<8wZ{I0PZn!3}ZgMcNjBCSnsfqfDVaMc0ewgEDpniN zc2%gqM&&)xbc`AUNb6R!(x_uN1R`Zgq00rP$DyY84Y>Ee@SPi4eW8W4RM$CoF4YY0 z1{fwcazM#x9F)!GDI}@<5?Z;lXTGami*N>pg9IzJpqDcjDmidJC=DjPtX$?Qbo7h! zJ=}w0aCd%G+|}>C6>tSw*LbwNVn!JY?${_7*bo zfm~D(mS$vJ9SuaP=t!V|{kM3gk4M63k{OikW!MokyS^$Q{S8%Ohcp6I!Pn;e^riTN z)d6*n?iDadHZaJd`-?w8YZ7*rN~G1qX3LrUn!n}u+LWhtPxIxq%4H)g^8X zn^1$JecL+hO)8he`6{OiLsEYPYpdTJN!97{qdz6jq^pCa7LOYa)jyKiY7i2v8#3GCVEfq(9|&3F+-TzHFU^{BTo7>VFEGz778;$s06%Uq%8kPSCP;)-UrO%*Z{Dj#%YR0 z(6sI?wy!i&s2R|F$?RP%XMSd0-w!z;Qn$9dz8*ap4KF`J1>lK3H3yVBMVU^DxOcl> zZL*`a-$-5{wvQ)>(;ThD0Xg@#nv%Yr9~s*y>FZLNqEl$4Kn)~Kbp~hEtK}K)^9fU) zxQg;4Q0^K0paR?th1Bik%0{RFan04^Rp+yZt0w%&k~(L-79v3^%lYYib`ZETK4 zrL;sB4PmD|N8EpT^!7{e*CkZ1Ug_&kC7v7&K&t$@Yj3*PAUTuifCW4m=v~!1naMEf zULe=me~9T@Lmxe{ zl$=*3mP!|Yjb`L^8Jh7cwkbcQhJ1{(&91o z`h>jlkHBNaxZL4Y*hel>at2c?<-OX#!$fQ)^-qq|DMXEd*Uzv`n>})F{yMwv{aZa1 zzWbXc`?z1u$VKM#uDV~YEcl9 zH2Hl$A@`PjrZ~V>>;+7O6v(edO`S_bhF^#Od+U|AbJI6N?NO$CQ487YyaI)B`^Z~HXZos^`3lJ51@*&10;E}1go7E zxii64-vlYIBWt5tnai5+ZDJ_;Z~k=cA?S`u()KFQ$BBIoDEp*%cdlO{*}kx`Sg z+MC6Gb!MZu5KI6woJ7lJH`b>KD-Ch_(X@r{oY0nh%bUk?RLc3Tn!tvqdv2bo`r5)| z3)SN$^WgHrsVLM=kSjS!Rt+mWoz@HFc-t{@`%5j}_tV+Eh=~o+X6Pt|L@eiudH3^s zcl>W?{T@cZEsRF-W|)N!gfLv$_;{=ax(&rKwcWsIGv>)*>_4metGMyL&70?zq+Wq? zINArMq}8F$Wo)whT^hsZW?Uas>`oidxr<~=E`D&Ai_UA<3O*J20q@aNu^NHoLJsi? 
zpsUD*RsWsV>cP69=wwFd*xfVchu@j#Vl0NpeuC7wuZFZZ*#s9Yo)qJ^97J(Ir7d^oXcfw+Dvg@ws;B!3R)G)yas0 zY;!hTkpBLX^Gt`pSN9Lw__5vE)5RC&*Y~fzw|yLX4$`~xHr~)9Ki)k3TW%4J%7Mf( zv*_k&sc1|zm~Thxlrt$i`PZ<_FyW4Whb%f(H)MzfrkOd2gi{S^(SeeRa;0S+Ap#ESuxKtv+ z!M1pqfXgdAP}H%G4=Uu+-1znI(GxpC!KE8GbMQ@iF0HtTUc^_-%-}P9G%KDB#k*3( zsj~%3Rk@|I89j+7AIlB-&kKF38-M@Id2QUGVzudDYV8-B=NGGxhY6w#>(#^B-TQQo z6v7_v0XZCX`MxA*iNZ-N<)oM6(GmpRA%-rA9I!20{a8n~L?)6a{tJ6+bcru9Y-(kh zgCuT{VNQK9nFS8CLX3{5JWtjle|z${Qgm>_YYV0oR-$|KPrg9UE67XQlDW@-5c^1j zd|xKlX$;h2}cy| z3JZqQ?ka@NW~2O8%fYwe^k7qwSjwB-DmMoz3r_|uZllYwNBh%&GkmaCLrvVzNn+(A z%??_8jzwZc+=_l7<9YcpQuc416V0W8sD70r2_w0zqKrro>HP}d zy+3DCdc*~L*x72cUc`8h@|DSc0!xkLiw-;r@{XvU3q%p7CV-%nGbp4G;1eeww2K)& zU7a8*r7j@>9k`BXGIQ4xkLc>UWjDni-JmZ|`nK4XX0r2e@N8KnooRv{Lv%Sm1Z^gE z6Y&fp<^S4g;BuMcKTk=GLV1%C&y4&i)JU~RvfX=+Q%SX*4Y+qhJ}|^sga<0;g6*wM z+jpD%oGIL5XwA8FxX4(aX~X)J8?vicSc1k?1n;~9MfK@szk~e>TsWo*tVa@(gO)mb z4)Cv8K9TYbO~~?6e(W!2OdJtJ30SZ=>BIH5aJhVCT&jTa(EINZkjf0sK$Q#VONC;2 zrDe`09zK@wIJ04)>gyt3AJr5tZXol!bUa>^#NU*61-dg^GbMBmZnr=ooMb1&byQBc zJcp`w#@|;w6rW_E{rQ1Pdz)(qTdM3gE&P>tww?!{ZpiO==^Q zak;;3MfL5smX?EbBzGO<)NVO+bk$^h_!aEwwx8R^?BUGiXac#JaQlDluIqeVXaAZh~g1%m$K?{h3if z$gO(&-d4AS%SA=!Tk~?o$5`PBJ$X{^JemJ4$G@k&$~%D2%-wz|+N%D!f)NMv8@)6m z`dvN0;!wQwctB2nuC&aK;WG}ce}@FE7-nF<-q~3yh1TXQy?#o)-SC14Ca+&@PMb+I zk1s?@+elQHIH&)dc){02htwBBK6S2)q7K#s?&-W57FD`kFRB!&Tr&|aQC<#|*}9OU z*TSz1NrMV!l~KvT0<|UgJ%bfdL>-*zWA>6`*#Gq@w!HNcMhh!OwIyW(i7mhf?jhg1&P`$hu8=pp)u`m zMl$-&NbJt9r0XdJ^Tvy%VPUzyFW-fw+70wd$XGnUO z|9)2Qr4r`|8ZCx0sd*i|Mf8EOk?DxGiLXp~_4I3G3|V!Id;08+;U&?h>W>W`DcP~1 z&-!sYyY8rE@uz`xyCSWVb`~5FfT!_ysa~ZMY$*RSFg#mY`M5V-F>`@3WL1%Sjwov1 zg2}iC3?n_UhJh*PwJyDJzo9=Z1y4X;+%E}Wu}ii=s3n#=&zHD8x~zHnf9EeQJi!K) zs$PbA3-XwsZaxiHt3%YX*qN8t+vqx~UwHohyjk>cf&R6g^|l1Z5)i|p6+$d^r)VV%la3*(e#1ZjY)S5m=%=WaQ{ldsCKkeaT(Ow~u z0u?m_noT5wb{D1Uxoo_&#Z17d$CUqfTq@m-KxUm|q?b!0CCaq+K4oo|Lh-7#4`J<$k@o{ zjh2@Nv6tm6(7xoiDgJ(H_ujf8-cW$E+&Z@BVTQ-$)!|xys$OyZm}8zUO2ALL_4i)H zgJ#0x3H)*hV6S|N`@H>~3G1~U5RDrWAJ7!#?<Y&r 
zvsyd6=3{gpHt>+*Q<(dcKNlYTMuKB1;2~KAqlpmYvblr&n9y4l{d{fe>b9O1aB|yn zvhMOUp9DRJ@u#rbO}iuJYmPiyKF(hP3?ZD5?9E$vuqrM!0cww>n3zT|aV{Gilel{#S2lmCu>8(d49yn_Ts%R=d zHiT-mHAWy+j@CcWT@;okE`jkt9POX>ar3o`nSj|;boD7T|7+81$)d6M-T{?O@26Eb zREELr%c@Z9u%lf5FBdxeHKG zyYhqGD^*%-Nn6LGDDS7x#6hhjGa-y4N#?a)KI@;F!HkBnn!IW%NnJYG{6>%5mz$&p z>9EUpX54&_wbB@fSBaKNdxHPxA>PvJ;W7=D%))|M5V=W*+JW(`fW*iw?oqWzItbBf z<0*ZC?BOE=5;U#-$D|eO2$dgS!~w_1jBz)e-}7L#c9+<2Y8wEu&{%W4ZSvwQ8MZ1@A`)-qxQXnTFk+}qO7 za9mH@WX(j$M&?w*1Jh*{87&cT138)kyPc-|SUDfv-A6KZ<3{GKPaKSZwx=rr9yaK% z)Fc$r6+0p#DyW4w`+8J81<1k_4%UjdPo6GktgRrMA>_%KL=^t=Bbefc! z((U)t$BTI0nEJE?-Py7B-yVGMOC(Qpa*vHCCEKU?g%aLD5q)wNqkiT5FyJgbypHd< z;-)FSVNb7-dXw#=T6{}2(5X`Vny!=<9;2+ov(=HO{A~N*(`t5bjv^&Jr64@Jl;2buxe4u?Rc{+93 zaO}ZK&6oFK^7wbngK}E?*3sJ)_T+zO=G2sz!zw?@hGm{eYD{LCFYs}|8!X>e=54`z z`XYvLq31sUVts5^QVW#7W6sM6&@#i@I)85eezgG4_E(OB*yBD(Y7W=&-+q8PM;yZ` z;hrl|5F$4+ zJcl`#@g!6qeV#8>RCEeKNB}Vs>@)LF>&pI{o|~h~dd0L8&0SFo7wswZA8O^-rpZbA z3YUj@@4lbO^kZUSEB3tDTX9p4zmsX3^QR=pD13e%Q1^Q%AJIFfv1f=oBH|2w0rK0i zldMrjj4L^H-DaP3RU}bt{0KtArf6_#J5p=>a{9FU2x-3Uq{Z9zZ}m2`X^)iiu}N2Z zHqsX8PcA~S?BZXUpb~UfXJgo~aT|}4El_&rG`weMe)*BO9aWxOVr`u7i-d=-q2VCp zpH0JfJvZK@ULl>cXTDwD6?OGR3TGmJFI~SGE}-iA+8r+O%mOuBUBb$2;-Zkum{8dY zma^)ef6%!b-8_-Vud$JsWoG!>KpaIk3+!o~wf)nN~B0vSa8t{&w z`}}No>e;U6ugR|g7`Za74sa&u7M1ht>}d}aXj|a5y{q3A_`9a3L@<_*ETI~=D;|}% zeEGZl_Y_|(zT(M}EY@Ig#DHsBz`R58b152Ln6X>Bq+*dhO0SR%Wf+C9)^mN9NsJfQ zHr8ba%U`%$DokazsQ#PeFnMGTkKO9xKI@rA!pA?i6Jnh8x7PEwt|Yg$O+Ijx0vE44 z>uVSbI>|E_cR@vmBB`F2o&BLIRaCtoTyWZ#GY)0Nw8n2da z*e0bU6E#|QECge-&f8{lV2XX&S-6n>_Pr2@#=FuBX2NV>xwX`-FpvFmHW_MM56GGZ z-%xp&JXZQ=ASG8wx6$^y?|i}gQ8WIga+s&z?yRVqrJ0;eP@q%3p1K_cFN0Q-$&)#C zTlwk%3wOcf3ylo9lKc#a`z|sCO>z1xq+qUZT$cWk1qj zWc>=MbQ4wb#8%+%GbILx_wy_qMI+SmUq{$RxN>dXpIv$tB1Knx`lO^_&s4?Y{RnUH z`9?au^mHk8Bh#(k_{U}xV8cYAXcHv(mn}M;JmeOu05^ia;-x&jhs$@C!^bJOGhnp6 z9pmmGs$Aj@ARL9V<){div~h$o7LLdqsnL7+zYWjBGN0xRCS(;_;1PB2=w(UP?K+*AS(we$oIls%l4ud7_x&3#xcXc~5Jp)(h zZ4{^D96bIcdTtF?=#6tE{at5bsUW&+d^T=ZM6(ih!@w^69a74kGp*h4qUDQ~sA$IF 
zJDR7o7)Fv556VT8bq%O;S^f?s&Dp$3m?!Cx0OnPs{Kf2Wm1ISpx8iJbRP7Id5bRf> z*FylJ8^=dSZeWI2BGK&WJ-GC>dULo!+!}dSTYc~nV#RAbg7J|~|LT|2zJ7Ea5nK`CZEJr$D+C!`JcuKTZ-u6 zLglI4D}M*|W>?TSjY-{cPtQQ-b4<+f$U|<{J7>_X#3fPu;#{fOd(Ulf3(3`ej*Nl` zeki@MRyP;J=^gm^0*%DcNY_z@frM-aU+4Mh#_7jY=eLVwV@}$|w#}jX+m15P@|JKE1H^ zCHo=LJGX$xLE^PTxlvtNc!9NW*7HW}1$R=1;alZ7^dd-be7>IQ{9GLo1>r&>Y&||F zaNWf+s^6d+IJ<@Kp;Q(4N%Bm`2AzCOR8(37ElTDv>4CluP(uO!2?>nMYVar&F-PjG zP7p4)yl5z)ExSFUC>y6OPuP0RBFISXAx98W-k#{w@dy~_EmDtGAn;03bG`*p2u+A9 z`PbqD+a`!wWJdpWit^y69$Iugx6#$wg6>o&!;2%P5kS}Ujhck>V}$qIO3j2*ci7^A zs&vtH??4roISjRQi*cDFR8-kzv}2!j^!^r}1*;uh0BKKl@~Hq-p=)#Mj?YJX+$ z=k47SqumokF^<6Vn;`f&Bxa7G!b`d4*t#1zftYJzhqE^Cgn9t0!IDJN7&dKYZ2aoX9Y_>JW(EpMdPb(!8Wir^vUKJPP&c zA*c!7jeO-GI>41`TFILqRBa(0z{--A9vsIj~fqe;6d$!I20 z{^H!3=u*eq%~np{Hg9#p?;jUiNF)7%oP4KauL#M`Bk{uZ-K$jbJ6&H-F_(bNJU+tK0+tHCo?*%QE)q-&5 z-UMH#ev5nN`sL+3HR%LuapZu(FYiv0tu93{ydv>M`RgxTqqd-;i z8j3pd6)vc9?lrVL_X1;$4*HImgS#7$wqtsfKlxMgQC^Ac%HLB^D;sth{=MENBKrC= zF@YR2|9bo(DNeEGXOh=3H({wupD9|EGjnv>{>Jeo9hpI5L~=6dq!%kpO$-p&micxm zoF>@%V!X_UynG9XA==;BMNdSQT-)O9pu538FA%SeMHJ@LRi?l39V`7-$q;Hr!Ew!90G2T(-j^X58MXWY}Hs5e#-7Qyv;VI|}n)IJq&9I`}BaU5)$qsB?Um zRdr>(4`Q~rdvphu_*W&?)WXfPC2jVru3J9v{P`swgFzB|>pSy2e+N7Lr(I%)769cH zR8Uq+3#25>MrIZX?>b@)=zO_K)M$1F8C*F_ny*aZ6AxUv2+a|(0~y%E$7tHqp-;v+CgkJw4YN{P{+a6 z?oqOAlM!dLJLk~~94mETGW>(_p`I7l;5fEfC)&UAbi=QLrgKsOqjn&9J%eE!pxKbf?|g6ze&DjZ}P2HC-%;oN35gU&vzZPY;(o^ zuS%&KCU*A69#-gl>|3Nu)AOdzk4-p-mS7vjH@O8wHSH~JoPqKuyZu(B)wF(r%MoXq zqhh2%+ilwf?ASCNZ)G11U)ALv4VT->eNsxQ6j^vMv*SxG{h7mQRh2;BY4ZQLd+)F& zx2+ElyQrujB2|>$kq|&SDn%d>Lhq>b5)cKE8WlyQN(og!2)#u*BE?GYogi?K-i&kz z1ZKa{bHDqYduQgqnP(=?d5*>;``vBz-&$*UUOsD0Dh_5Mdr5lCbw9l43-Ksh=l-_h zK5viW-h|@mHK@03yaK5!+%^(*x7*!OU23bpzh*|BNZsO+09uKB;FBVG1h`?t3-N_z zm=(+9HL`;#_oJNc)J4V@2b)nuzvbcuiJeO${F2^_X}G9ApC;L;Nz;z!3SVhLw!qy+Zx zRr9%{f|lFOM^Xm(c(9iBl~8A&FEFln4N!mB3)6B(FBzRho;AX)qK0JJqupgiv9TOD z1b%0uFP)ts=oBAIUk2uREeoc->}{9s{_fl;==Qz%bk{96@=j*f9V??UI>aIm&Zn!ZFSP_^K7(5sf2Xap->2N{q7Z 
zPRBh9sfM)sBP&4M*#y*BZ<9*o}$JJXm+}`VDNuhD}P;Vp*?{ zX1Ihad&D?*X~F0~9Z7+#kRjtWv7j|C>^BtbeWrS23ODdEK^b^)A}qXmXj0GLHv5!T z0@CJ2d$bfsfQ@C!Ky^lbO%}vPk=`gMB1^SG zZx<6acKXll^)eXArqYJOj-nlL=^aA#d}_jJ>`eN$)=gjay77r;3b5R|zkt%j;+)mt z$ZTy2*oG~0Jr=>}x}uDWuzpSO(DsS^f#k$SBB9!WsU)!N%v`wg3RQ5}#hFBAX>Wr12q+E` z;lSPInDDk!*Q|TBW|^>;c=or^!;nTRPPzrx$qX8Li$+_AH5&;nGQxv$P7p@heN48p z7#(x(8kZLM73MP;^1{-Dw0bx;ZCnMQK+zNGTv}^15a4kRjE2m#T%0P+iAVWXPO`C{OvYdV^pJKN|Z}~&cJgd%`UH|mnQ{ClW z#Eh*jOMw9}>l`y5t)87l##QTKgCVt%+h*6!1G+gM=LKTK7mwim-qpGzuef{ zC_OQ`mne}cko;dG%eo(edF04^i%IpVI)tT<`$F7o86E&=ME zf+=GqSfupK=(oc=Y2I&$wbLYp+9qzmQ;wU0I_utdb+vFFo(ae6`>yoB3auv_qxnCK z7BnPYsah8L%%a#;?%g&&#fQ9WMm<~}l%uyXsB_2c8ek?*`)_CJC%YZ#1s}-f5Tvm|&s5iZ{99?V0=1JM4p!M+ONy!FSBIH`kKph%k+TrEVv$oop6hRvxTB zUwRtQ|W3>UVD5d?E+hxWpyr{nU`f&s{L(W(V1z4X8f6d&j|g`{UnfE9DB zv@+~hxyS`KiWgSTrFT7^yE`2?<{}m%sQ5#G$k?J?8ea9O@~Hq6+CvRd0{Y`ZL3 zRsIz|#7&~Pb$NK6r|*}O<@xwq9!!QR!wm5n1WDopev(KTJHNytFs*YaOAD4_lW?AA z-RZU<7BMGxuDsX0+oK_47noG1YOuW;6~|)~RLE#3gNHXg0@@%6!Y||T38qx`GoBDq z4x|ZUq#i4E&pMMHHJ$TxF1nIF-m1J{13k%P!tXD0$B}jW0h7de0}nzuvaOs$SACe^ zgWDLd=yL}ojDIA&1ENdsSR407^mIX-LWW8ulRaZ5*III56SNJC#aE{)f+W`X? 
zlZG7OW^AZns7x{M@dWoTse|+y{mw<}v30vsH>4i(AlakduM5_#2o8pYb znYR+Di*nU)=9~BhvT^v)?!8jWM1MmrbO~ca+zUjm<^HWw8VsNl-GCGzz$$3x$wp-lZha7<`)yG`BUF>|lRA-Kmj7?!>?Gug3w zJas#w4+WUw7+5yP-a19=JO2DGcju|8wV4CFU#N2J>GPTOK(f>t;+)VC;5wX5bK~Z` zhV$B`mkzzEk=4Zisbq6W-tQrBGX(Gl1pXN6$2!g*XJJy*xOE;6TS~ZnI7cK2v$s8@ zC!?qVX^q}m!*H4p)ctPWy4tv()&@1>G_H)*6x&I$?=+yRoZ(8k4YEd8v~Eod^Ax}T z60dM}GRFju@D~^SBMaAYyotl@wrqXR*qB3>jt=4(WOOsja&%=S-rh4o^ z7h2VQ?wG_~2Qz@8a5hoj92*+551O5OiI?{s_cE9GFei>a2Asek>Z!Am!MnemC>=W8 zg}mkX!@ep3UE(}cyqEW_@w?+hf-3SeA(Urg;7rNJe7B}(+g|_rg&+yxAB6+-u1MzR zQYRpF9&oMo40?9Q0QW;V5n7{@%UTzEef&!!LiiQ2+VvsjC~1KXg0yAesJLE?>q(pA z+a0Dud#i-{*wp=DA0D*mIdx)=oGas8jlFxEqTP9Gb1G;>fPD~d$fyO27bK;Zy-e$p z^ImG?3}^Kf)eoUB@U-6fE7NB0HIu_apo2MFoVTXSVL1&YZfo7|;EiDUy_Q$*@Tgb> zr?}1T!kj(#dd{ek2KR%8a08wAWP9t_*i({B$T@{m*qMs;X>Ea;3-Jb@W37TzZN0vT zMzXmAaKN84SjFdEV3x=GyU7j&kb0Z9%znm?T8+=p0)tw1>YmH|_Je0ibw0gDa|;dt*l-z8mAl?cM{z2bn=! z6Dqb?wdp7T$>S^yRb?A`7^J&iv5{PJ5vdv7gkhLq_F`SK zP__jAEgg{BR-++;=7}ZkGEP@iG=mr{!aHS@CDE$5QXXTRMvzomM0AGlsCP zXF84gZd~m^c-zo`WdNl}^q<(63bFR*KT7VhdGXyhQY0I8kU*L@vpqrs>ZcQcP@>fiu^NI`I>Q_fe#&p$*QzpPmUyN|evgd%t$dA?T) zLEPmwD`+eu-gU-S#RwqPp6b{)jscklTh0ABj-d*^RXU1wU)1C^edotK+w>ykDzAbC zld-3sWzW&mk3r;V`cq{j|Lv2unkpypGvfmyAP?@kn+PXjYMv1ieS5N<7AhIA6Ppuo#nY@x(Kx*I(p+l> z08~u5a%9!lt9jQb$|xwPhBEJ{K5*qBP=Om`U*eWg_UxA2^4DvNH*wI(+~i|HMirXt z9ZSfx*S(;1b^KYrg_6&&llTk5?+P6r6I49}t`C^6BZlp{M*9oe-h_owJl+b7hGL9{ zo$&nRyJpuM;JaG&Sapmh^OJ9ULUAYee&!F-M~=SRuiu(hK|GZZSS)+d(7@w&M;~*8 z5z>~sR3|jX%0eHD&8c1!(?7rdX`}ZVQus~(OTK+~X<=r?f;lQ}McXjPuV!5yf)7dr z&b1<19pchGetX*$yayl4=ZF&dADaV(Js}W^mTFV8cyJFP0am#6JhRvN0xuiynY~G^ z1;C(?Cwp}d#j;*t59Sir3@-ih6}l3VOopUUi6fn=YU;3aaQ%B`pr)aq6QQR9z>*aP z2LQZc1am$i#4^!Q@68-)k8rErn$}<@T_!7|{|F!&@Ic5yV}{}{*SJf6F@xvIdlTNO z8mXv8sM~gzp2CeN&aaD%VQ#$5I;6TISu^qVP|q<+w7cOh@J{Wm`C-{S(wE!=@U$of zUK1x!%u9sXIPk9aj?NG454E~Ry#z^_e%)KrHup}CmUfCsddwRccC8ne^}y#H>1}9E zWApfNgz~~;2YRXhV?x&2K-uOU-1fy+{3so~vJ&~p<7cr2F;^n0AYgrR;+U$TLyC&- zLGIAwJ39}~gfu=Sd<3Ay5?Go;~;!@Cv`}em)>!qCRlx%(v)M 
z9Hv6U=?{9d8O=q%Wn%L%>zhKI{$W5nb*@v~DN#%WIWm``F7xW%7~qBt-+s6?m*j|7 zy@T7N*TQN0+zH-b2g(*F&7d)GQ3Hp+9*``5&W|Zkw-je9~}YWksH zFxR1$S~Vu)nBASt^EbTsmNgWkvLKD6V-M?<8DUs*-Qx%7Txm%JbydypA#$u&3QWEc ze1f0MS0a?nj|p#C27;QLn0q75Z_C>YJ)$zh?ot9M`ap@!yaR=s^=tvB1lL~}9yt%N zk=}JBimBnP>Bct$v8^Ji#z-1S$RH;`qi2bL1%V_$-$Q&1@-|vyLjX`%f)emo#=4M} zRJ+n0OmMB-Oy-hLRl1)pK(4KR?(W((e8>^kG`9PVmut>ow!V<*GQ(B-9$6vl8=-Pv zA0{I;i>>s`JJ+r;aZ5f&b$cGhpAEjq+<;G5R$w7IZcLGSrD=&?ZWqQOB=iTR>k z(r8;_FQ0;62lH}z7uv*CW2}bhg-T{BmNP)lcEXk1?`o5>9S{D?_Y(|ge#c+Hd?cIP z)Y=_wApcTnz^@;D@}F1Q<{@!3)a}SfJ%g6zLk;|9;)`#AJzO&5FAq7HFPDCde+pP^ zrj6MUpTJ0TtVAIFNn6J1Ii#yV&Cl>qlZfSv$-Gm9_K;$f)|0$L$oSq`F25O@&TXgY z1O*&1qjC0WS3Ne;H69^(Ll#XE>`KIGFvYSHf@L!zea% zFSz-&`%{ipjQD0d`PF!lXlPqD zN7HU3b!L~@=GS1kFKcr_ZlJVjA<$}v=9H0Ez{gg%47zAJE;J1x(=ykv-$GvIE?jdk zNmj_}&|*pSHkh7szsSU5I~~fb-tQqqZ73U+7NIBE-0t3pXYJd`nb4Xe_Yy^x*jDly z)-2X~j$LSqjo>QbQz=@?D%XJ7*<5L+(*I712&j7KrJ6yYaZPhDmCsiSj8!$8+kd#k z9eP_x{Ggl8e_R$2I@ErgteUMdSVpxYu@mJk+%2N3sdaI8VUvBUE_2V%3XS({ARQWp z(Y&Jt`X3!2@rIA3CLlLPGO|1r@ujj`LNC6m%iy!_kvb8Bcv@AH;pTF@%zPV0l9&_d zamUFC%Z8rn(mlFv9B-+e1n`O%;fbgH+=y9wDN{AE`Ci4FJp*fj>r?FOr~Qa3H8=d1 z-+r2EyIy9up?s4`ql!Ni0ZSy!(-fAP_slqdjhR;;?M2mO#)$=J4&U0jbC41_zNQas zCkF}$2n*1PkG;}0>#;J#P!_3;i$04*&C`gexInjzZ}s}qR{;?m3_iki8rO-eMqBn( z>ZIAG=*!(Y7nO@$(t)H~Su<{6(CtbQMb zK>J|w^?4grx=da9Hdg%)_RDWkx0z`qPax01Ih_d$#dpkTtN?=lW}oFVRrR8@IEc60 zrtp@dK--><5MeT0eH!WPNAzGetfH-0F!soO8!6y-OKQXBwe)S>lH6P8;Q`1LL+Kvt z?UkvaTJT9yU zx%FN#?E_dqzEEhtQ1CqV8wozF4is>p(nxM8oXzF9xouX~iU6RB~QzXd3tk$$SW_~sA`T0(oK zJ#$%LbjShm54m!$w6`Mom~$lNZB2>8dcwf}IV3@?foQwk zv&pfV+ep>5JxtF%2m1aI>kfGhdQOE3n$quQh57l}-#S0{ja}}xw}W4fdMl42Ss!s% z0ePR*2138au>rIhbDRN>*AWj2BuP*80EAgwrN|~=Gg!9|dSKnZ6fWdB;S;UQcY;N# z_&!iqpsl(1pX3H*awVX^Z3@YBcc2kk($pHvEckqZQBj&@ z_jT(k2NqpgmEjzP!5^cMAu@jZFiAZr_fc=hH*}hb7x{{AX-F^b-2K{T4Y@^HPnW&V zanmnLVR}wcl$hi-0EUBV`j3p#XcQ`r(8F;BD7lG=!0^@723%-Qp!}cS6M>~i4{mob zh&jr#s81ow<%VviIN<9fLXd_bS_pfcC>f2jdnT1`Yh%U)yL?M_snppdeLv)sFw!cz=~IG1-#cO_i{a-? 
z``X#g3!4_zD}>H$=I#qAb8IUqXBgiG8J3y;0EEbH#u=Q-(YrTvjz&KW8LRJiC7^8a z_!kIseO8cE(Rxm*fnwoO<`?1c{9mW`+zp$?h1A_@79l85!!V`z#=`_qx#*Icph-ZB zEAM&KL)q~8R~z`Uaz=9rh5o?wdTgzwX%B*w6Lo8$W^#YW-`#^KXmn#4emsv?sln?%xdyoT<{ zzUlL+EC=PqU`hS++^Y+qBTsVEaTO>}2|`)FEt*}90rR~QPH{F1#$Cv#0f{N6$}}B4 z;YnKR)eJ3`0B$3hq$PFM1_i_4ho?0+M^I zHT`Rho9;!Pi-Tfkp?i4_+)I#3IDN$F#+5=ID5;QgZ#lc{1);q+tgrykoP2S&54A8j(CHZ3Ud%41Skii@zgrowjh5&PdnnV}E~xit36LxV|r#3;|Nsh&V#@m%5%k0VN8(l{XkZ zN=4Okp9S6_o}HvyQcd|#sm zS|!z0XzRc$;Sjh5RF9#92d}r%|MB7E=L9OMPL+TA`ea&z$vnp)DtBtIO7MC-4H*CX z*-TYJk(Js;^kWXynln_L0O$;TNO{2I-Fj@i;RjD3REH}>KMcZh*YJD&`yj3Bwgq%6 z!OI?Cu3rbY3Pd3U)fX((RW4kldfP^s3>6g|Y-c0HvZ{_llcl0kdH@c?tTjJ!QMFy9 z9C^k%=n_>cK>9+TQMG|AxBJlfiU1&#D10+8x3|GSchfQT zp9qyY&1A^@9PBSxC)bt2OBYN#n(+?Ljlmtg1@367#3>@EzCRjB!{QBrgX<_e)yWEd z6KudN2f!~uzonX2QjPGZPdW-MBwo(~TpLR26L_W<1zpsA4vKj1_mTcJgFowwo`4#Z zn24_jvbfBG9l`_n_TT4l7wW$|8gNEo@WbY)k5ku|Gm3J{_wLKfDQjI1X=YceDu*z$ zu?iPS5fJEjb_BCe9#_cKaC!{@SZ+TKYY%A6Tv)PVog+c|j3wz2VuFOMGBDM}r$iEE z@F}&M>i86w!}GQNRon50KlqbIaejJO^SW8ptP?$z{-ChQJQy5V?N3Tx_Z#pa>xE09 z3Gn6h9@)}v|X<;240A6?U#YcZVxyL?{9k!6QQPIAT zwd>q{Y%x29d6UG?zEO{b&v`}zUP49n*T+heMxcZ-aB+Fe0(paTmF=51^ug(ph(#{P_;h}|Y zRGVqe2_WaQ|;ef$@2w{9|Mi8hsHD|pisrTY8iN8 z(_5mH0A3{2hwQF^^76PjaEH}>1h+EZS;;e2iXW?7vKz4t8K~s+?fa=Z)t3q;x)PjS zt^N$!?JIze8Pdi{C>@;_^~mp8>w=Z;!|00jO&zv)9%zB}FMix-1v?B6wW9pNZOytC zyjtpiWB|W9k=A8`iubVpzL1spIJ;UVJu%!ZZhAe@x+ccKq7K~K%}pJeP>m3IPR&iG_40h&wr6*06UkMBRtIf*0^=Txy2V+)!eMqu=Y3sjx2NANs>A2jqpmSxWnZ}`N9B*eLl?4E zdiTOxN()o^u}l}tH;KSca_4>2&6)Rh9m(MIa1qKLiR@7pib>(BYgjg zeOica|63EJrqTrKrRxP5IsTUgeCR@DP0X_QkX9zE=>uAdT>W<}kc62jNKiu8yjhL9 znwu)h@%x>9eiBwn0K*z8i{(;LchK9bO!)eJ^&Yty1)5rZWnFID-B(hxH_Fm|EX~j_QtlVp8xqik2YYz0_Fqo!W1S1aX)Y&DGvqw3~;Ic zuYXwNNgPE0?=K@%%`+a#>_UuIR!t{3!JzV4f0iu)Py$fPmUxIvo+N|Xeh46L+Sfjx z6g~Bu(G>hc#BoeGf;{W^*2=2&e$!(l0K!X{1NDTUL=W)FW@;(wkXvmXt=ny>vj*)^ zYq4{H6E{IsMV~->db{N|K%sJCq->vZ%XVxGvY<$?&A1_DoWCDI2KqkZ=rEPlFt|lQi$1s>o-W1>aZ$T_Z zq$gS42L@_mP<=ELd*#MW?olN7D6x}g)x^o*={A{XeE&Lhh&+@+HqHc}D7LNfVpez$ 
z)sUB5U7thBi$+j$wrQ7tFg;d`MrEg6cWv8&kKh@R2Ael19pg~3LEPx7U8VQ)XAash zPtOjiI=@tL6UEe{T}qTwNl6Jz^Si(=fOApV5!TVGhtn^~YIV(IrP`0F{ND|$8+GxE z?C5eBg6KOVD^9N1-xxbef|6J5WOf!y#FN059wL*+_Wd&Z@(k@@JtrVSlL=)q1TvC& z_J!i^olN%KzmCN(&T)4`7{$5VyuxPpA)8nl_vLRF>_|{nIKuX}I`h0x%|4mfQ3_In zo{a8{ZkAui?9E^-)KN?xf&^Ev&`dC0uOJ0QuzV36J=Kw*4O6gCXcv;xI<&y@!QP-M z(#Q<_0#;xblCox3q2+_U=`FCY0}G7`*AxLFv}O> zo#pdDg^Js6wfyFF<7?#(G?>xP01uO$djWf1voy0;@)z&`0rzW;`W?! zRkZ_UVvxEvC~+elv4m~DqdU+o0Fvy+?r zFV!yBvmC2hDEt7E-x#!tU%aCcDSw+9I+L=0+K7mV@M> z4u6I}ljsB9b#cF+4^mhfK^V;% zg)y=W(|-is)YH~I2bP+iND6DizBI?Iy<<<{ZW(WZJYiG?z-0OT3(_Qw;O|RFlYTUY z$XHNP+TNqZS$eruOOfo(v*4Zq(rc_?Q7tmIzpw1H@!kuImQeII&5oE+@f*_iOiITj zMy{MaTDL?)KwPDhvXAen{(gWrMZ41m2^5gTxPH8k6Oty6`Nw6IqvXrJ${_cZ&xw~o z={QZT!lDZ=X({_Q|1|B*;EIjwEn|_S1ieV|TW9I_@%lD2f)r6XeZti!{_mX3jENOx)S^7OEbMkcK?t!^ylF3 zd3m5>IDNqTsiBwvEf7S21Ss@^17iLCHy}{CC|8p5w(};1R4IRd8}Y|n0`HVKDbhFl zB}V7+^_=Nfv%6;_sou&fdoW(k5vF`ik86I57S?vIYx^?S0VSW8^H;9x05~2XzV}@7 zZ!jxSDW@F9YW)r^0Tyr*;$^hC<_>SR84(lWLE|so6H8h7McXrGuhJ$cquxw?dmml@ z?re68x@V=`<8ay<#M?XP$MDG z=F*EgAdIV3(C@#^l@8yBHpX7QpGj5ytw$P)&OXVt4{Q!Nk&I=!Z+Zz-8V+=`Rs%nn zCQeAG{NoYnQnFM?E>XLP?(**g@ukAy^JaQdIGK>w^xrg@bs1byrY>$WOXaK(lQ#(` zGA`MU%Agi*b1ytj0j4qn(%O_DvdV-CK@dRy9wpFIqkRnsuFHBR3{na)LbXDpzNbvg zOx}cEm3lFQm%H-z4`-RKXj4I+n?v6BgIJOdy9b8fhRLIN! 
zGpkJxVRQ?wDo8t7$R(}(`wwBq7U5es@E2LfmOP4OThJi$g^#-QR&V6TGycBtFA5tMTVEUM5{pZ-Qp*lX ztAu3~Jz8nv-2|x;ETfAzy`(_4$(1vcQ$mrhEqH)l&M!<~=gqas%y!|>Q$d-j^h@18)}Toyg_VI2lk(sANm8ouc)= zzL7vhxn7Y{g%`ahBmDWP+Wul0+p52$umB?aC5JlUt?2-`A~ODGzAgaOfADc;DtiNc z3WdcSF`Ahf2z=Mx%@Fz>gnpF9_BJX?P8)!{9{}SM{FWO&;x48q>eIo18UeLBT)zdK zHwL8#I+a^1NlRmUOF|aH?S5varY`6YWt((P=GR!3#ool0+%#aRDAPb7`*#;cS$$Uh=u1k{^^K&e9dJQgVz*2as5=-JJv_jN81M>J2-63_-9@c z?gtaIg)6*K<-(91^teBQ=gA)$rUU+XrVzo|6JUDW-EUNy|1sB_F9^dwdLtQLL_?)) z5%V{ZhL(OF{hu_z_u|jzv3%u^)Jo2|1prw3QMLo7gdOKk0u3&CVKpTM&FZ z_Fqo|Z+a;5h3}(G6_5{hD-UMTBkWAN-R7Pn5}gnbHa`G*h{GcQNXzBwbl)d$+Ia&r z=|baZ3EH)4E_2jpbTv62IEta2B@gVAEKB|$I*4~+?aA6JAR=+v`b!j=&0UPH8H>n)Q}F7Ds&G8Rvqz_b<o}(|d8K(ncE+)OLN|i~g>Q+ z{E;7Q<`RXsWaiR8Eoaw(|!rMRzF;7p1gqB{dC6vHN}!oPi@K66s>W8eXljZ zjT=ioPdAaf4V@{;uq`0nM3lX zt?K(Q+oJp2!G=z_f{~bf;fiuUfRurh3aN9HtD91hE*)&We$C^Chq8yw#Sp={5b^fe zI@#a9#@-JDelqgf=dOnnj)2Vt>^m{{WqWXffx{t21NiU2vEubS zmpbys5vWY8_)xhw$N2E*MtS}luPcSV`J%?f>2)gL8Ep+Ad1G6lV~s!h2=-1D+)gb8 z=&>TJ1|@x$hd^_H+bw%(Cj1YNB)FL6&_v&{!Nhvag2?fz*uPx_3TcmB1^n{gNP8MU zi0Nkb``i3z^#RD+xeISswHzC5-OQB}q=4%%kp{^)l4Nv4`hNKJhRGDzWJ|)a-}{*E zl;35rQBmkas}In4so{cT%o|>#oG@di$hxjv??clq%oRT+ARb5O(B7>-(Y$YNdwA*U zlC4E8iUDmIy)(Fc6L%AQ$<+by?0@ZLe}U;Si1fvWkYvc^^CbGQ(PyzU^;o(~JEj6v z?~&&k<@7QcwAQD*9oz@_CX{B4c9*^R@02OGTlDO&@(F<6CG7!EK-m2o@IjIKo;Yuj z3o1sdlr}~UYge!o50VckpwreIe{4d7Z@i*TE%b^Ii(&3L6}ODdp!w)j(p#&b=69qY zo4W3YGAGt~k_?Bc295%XT+g#if+&(yi~K{8YZL7ISAXBke`l%?1^D`lIUYt2J=7)N zuwW6ww$TO=EEmq~I(gY!s(9%Q)n?HAcUg8iBribyO`Uqh*DD+vmvq?Z&k%llcN1o@m)e51m-@h z6NFUanq20|ZTl)0%;7f~rvZ{@@2IpWZ4li}tptQBr!1Vgh4(4UkV6%XEABb0t!^Hq zXp3g&P8tpA-PE2o?92QComrourTlB*ZWrC7HO{CF%>E{(Bc1UP=-rio71yfQm-&3f zwfXtorgEn9hF((Qg!%^i($Edp;aN*dUK)&0roF6X1*cW6^N7<)TeF&ZeS9-SIIfK* zKm_hUx;lu`mcDAN{4H;DS#mjZ1pyPHPKvE5l)M^_s>ixomZ5w1W1DEBqXs1%?Rsie zQq+;W{M>E0G~ay0ST|2=rA|9-*%ZSV$`q6YqM~{Ln8iK1nW!7w%BZ_Bu}aK)1NCW% zcT?i##^9e;sdMOR_aSg+L&F6cvM*Y_1rP_FC8eDqymARv1#umEgYu{L_8mG3L zes~>7ePR1rd#ovHR8E=mP;iJKyP*nC#JHODHcP@I7_8vEiJ)qx 
zuNi;SMHLI<+V=FFm%omITch8XEddc=z%e24M;mI* z14MiJY?&P?q$XwhfWjD)%~o)Aaqc8@l|^~MWDbFso=i3P-%elu&F$^4{7D48XeE-R zNXz%u1)uUIGCp>1pwYh9P$QZDlCO@*QOxNszBeFY&*SXQ#q{Htl8AZ}l$k!$2-iwT zTJCa;p=aLc;+Ew)ZY~%NVh&ZH(2cX%+^%2ZoAL~=CELC(Bu59|%Fe)Y$mepboc^gR zC<#kPiYf9(2!RT<60hl)n`DrRBlG7{w!U6M#i>nbY(=msNDFFQYKcy~JFKZ1ap&xK zIRD6~Ma8CSQ%b;|3n$o}4?Qm-ZF`1`uP!#Vpb8wq+xF^G%%RsMs&&=0xx zC)9=>{HG%A>KcBGiYkd4d9Xm7qV#8oFd8bP_Jr1OKT&yQ0S0sSY0`$-;^+2pzZ#-l zT=C&J*p3Saqv5e4;``N*bqZkw;5~7F1-Apa1N9YI zwQcQAb}carO$!$mC2~&f#mVih=L-0oMgqV%i0VK7AJw?(wi&e|mqp$Dt2AZ2^ZSd# z7O)k%FLJx01#=r4RW*hxHXW%BziBMzxZ>x|%=ZFE7bVDJR(MUD#ylzgg%Es{zYiu~ zpyzI7{=ZE^b%wpb7(37&BRqiH=vJ5*zU6}y2p~R?+cRnjsaz2#QNE%3Tc-vL`0t%4 z1lhJWQ&CE2)96{O@Ia%WCUsnmg>Bo>x|%&=(Le_1`9!&W9KJ$?<6mSZ1n@$Zc09%t zxwA$ye+2bYx3aTi41{uU<*@fdtBV&UnzVFVzqk^c1Tmxk?SY>~cXKR{2h-K$N_F^O zb8yw(_KP;YO299_{yDIpo3kK&A_?F8y4iV5`M25tKf$gR)R6Y5(EGuiG2A4LKwUVD zr@)eGkT?)7Q`!}w!dEWbKD;X^|Mz(Wsb6pub;K`ufOBZ5uJ$NZB_B^+upZX9sF+)h z7|vfUDz8RZ_Uiq%@oq*bEu5kl%Q*m``D=KGWB*?A6^fZaC86G0e^6T*d2K4_41yaQKa)fY_i% zAmq2n?v()AQUIEGp8NHjXp&d@D;o_N5_dTK0D|TyW)bHy>u2mBjRrFL(uVv^o6gYS z{g52KLTlAIqMWapiWG+pxG@zvCZ2vt{8a}8VcmqIe{WQCL&XaVhYxeZyC}|TKbAM; zMWmlgHE9Z;D(W)n$9N~Pob}rl2^24((g6oBmpjpiO?U-7+JK;*2AD++g$SMd3|Ks| zZ1#e71dnG|=p$>GGN^pZsw_3XH=x62n&>($M$w`57%UYLxE~r~xXN27XH(XNF}}iX znR8^vizCl6*6RnD$flV;&oh~U9PkrW$_XX6&8DKJ&t_(uuc7_pSuYbAyA>`At6_FF z2BLp-R5*`yPj?v{{14KyH{%~)sXM6Q)nw|(=OD}N9?5h$GmXWL`>w*W-a)&mtp~q) zlGiMr)h!#wlR)8&5`~C2FAksmvvpnTn<-Jv08%F!+$G?K?PszK>yr&O9p=gXpyHuw zL)z3c7=|PoFlZSKzdb5xqAyF#Nllj=m^rSn+4V4@$R^$Isb~Yi5T{lSHq!6G0 zU)Zi)@DB|swb8R08NfH*#255K*F&jjE?f}5XD^Fcjn>%VONbog0ameQx-`JE+jsA# zW{<2XKF!c!!Ed%q9(T-IB^}0I-gTQoMepDA#?Ky$kfbXHu={Aw; zNTvgoP*O=hT7K;?>X(mhddL?@0r(b$R{{ij#%l4ye8lMZ^80}F;vnr|;_z+tC6_+d zYr{JmQf=b9KDbWlbg$m*Rr~)Wgogko2lAu;c7bFFf+QHle`yVW$OSm!kpR(L1(K+L zv*)Ljs@wmI2mix80layl6rVNy>;E%%3y364AECJP4|W5`8pW@u2mDk#kUk7aU&@{R zRQ=oB{Xh3_K!Ch|@*T*X1Q5jTV0_5(fe?OR^1#RbZ!`zeYofs21N``iIY*l@1i*uQ 
z=@f1H4>SLFUl!#CoNqn(|EJB#YucKYsv82W1y=*p2jpzZyEYSD9&As=ZjL>#0^?d*fFe*Ej9u*ipdMWyv)F}t)PBxJGs3KMcAeDO`ri_d3e$!?mu~Z!b$j8Rp z#pRzVqoWkDE;!Qh^~h-<z>C#iwRX0pS1()(pz^b}omBv3A00I1H_!=`CM*4>e1=H1MpPD_5mMBaY=sB`9SpDCJV_r8uLRN~6iKu66 z1xfOMbn`z{20uB`JQi&jFEX1&P1?WVX`kpeWmU82eoYbC*&r>|GI9`rLwjekMZD)E zwBpfgV^^IzbHF4Mp{#X7fKT-Y6Tk~7H zZhY$fo>mRxV=T{`yZ6vOvRP?FwZXF2YnW8>Ldk}A5}1?D_Z>?@%h#7|vFXIIJ;nJC zdP7x5gAVQdQ1mV%UOE2pNA9$^%r{2csQKad)Dspo%K_BY)C04cCBJVH&C6#5RRSb z(6lj1!C{uxQA-YMY}>^vzjl6+pDPxkY@kfH|E+j}ZOvIVxO*ET$i%GTy^5dwyw`qX zr!>M`rTN3qdP6=rR6tAJ5%n22(O;X^;qAbVYwq%7@H6Ok<9jc>%Eo_s_hE#4tJkX> zI+fw}NL5Thx{O1jTgF$D;yN|%2~slJ^n-M9ok!A2r$SnpUiF&)=$_vxzK5#c=CWmE z9Y)0IQyk>H=H$m>5G!m$Z8jJEO~>&i>2(W0Lx#8~I7A&;E5E?>3I6L#+3387?K9`R z&?;=wPseUGT+PL>_wgbL6^t_bY4_JgNJQsI;?0>io_@O?oeu>%EDdwA*%=|wFV*9J zW6%lD-E5>{A6n&EXC7zE^D|(p`_*LhY%C?61r zxrZ9Z^#jxQhc_=fU}T8LF`Rf9_Db$4^1|kLxb*c`uA^Q#LUrt9MHqg!?L+OCM(gaT za*)}Hp|A6r^{GIgKlgdBzby3EPS1L2VmcoWm-$3ApG!AmpTBQ)_Tq80)9}rSUinm_ zu&9Ec-}3OJpPGNloL7d_HLn@0%fx=n8W|*4QskCHf?Gy8hhxg>FLW5m#BX%3-AQnF zufC0T(QjCFKi7=y0~TBU2_>BtR;x0*UVR@*`ra`Ns^y%zq-e4dv3E*;EHOKgT;*4EelXrAvLz^AOSwZLrd^M!Xb7x#Iu z+7U4BLVc5N_+Az29tnh&{n}^oc{j@E_LNL9bS}nOzR@C*!yyebM zd4s^%TEjlHpJOSH-%#58Fw$ueQuqQ^W_8p@$=ftof2o+L2X+(dz|H50bk>`DcMX6~OP$kbk@5Si2 zyKGpDww0xBt~RTmd&3t2^X+Cz3fB6$g5VgBDs;!_w+l*e==gSuD{W_ss~F_$U6Vx%CDnZhe1674^t5yM)SZmWA0^gK#aGjOb#i)z9H88sX}gVV}s_7Y^M9 zz||2Zzn3ty-j*j7I5vs9VQ(vyXV1Bs<+?fb4*DD302F$H-k zpBiW5D4?TJ7`3C>b~zm1Azo5HL*qq$ic$-^v?%YO$FkTVs*+MR!BpYeWw$yS{43DX z+hxO>=@4RYBeH>Acc>WDf%_+W1~q5~a1j^xru%udHWJ{c33x3o-2Gf0bNzB;*Shxg z`3@;jx2WdV6*Y=PzGdX{D+kBF^o5;t2+rV#J(i!zp81Sda_esgHS05t(g!zpIc?OI zi+)aN$VPLnF{a)-mPM?q)i%oiQt4{XZ287DEn_ht4eHlKA{D`Hd>QXLR!1 zN`$OQ#aMw1$EUisrb$t%5p%IB=$RMwSsLf$_br%K$~T)*H(bY%5zW!qi$pk7DZsLK zY27_Ho?T)x%O*PX+K6)YsS#kurIcm`HUA&QooQH;*S5#4dQ@ti0Ko~ch>$Rff>@M6 zOAwME21GTgV9 z->hDI6sJx_f?Ode%If!w_&qjw&vD+=$@UtxKx2MrPS2GO_z}*HE;73YW@QJTcCpMS zrLOrrW}N>H?0S|Ga`TWo{KQNZiuXDlBimhIoErJ3I6mz|O<=cCVht>Hv<(s2J3Ukd 
zmcw*2Ge|*=^-uzTIP-3~wKn2K0K%+A<5gKqzprT>;~F)#W*Q_2p!Jxvx~rzd_NXhi z%x~R}hi;lC*wa&&KrIaDT?Lopmgt!Mw$a0LjCfzEU(x^szmGshr&oI4cqSCn^Dwx8 zLFi-#m3mmrglT_aALR!!$L=-F*iX_LZVtM`%fl3Kq=O~bs^PcPvK~GWImM#S+t4=j zAM-wl<#{LhK4z$fd{6L<-lHheq?`6{YMApWGUzupUt!>;D9)LT>?n&L;k1%F6d5cQ z_e>GadmEyQck6*&r#Ok&M|W>vfB~81F&L`2r;5nHcVU_vg|D6{Dn)5qm%_~={~e8% zO+ouMc$)4)fQ&|eueIT$tH{>L`G;Jcz7fSSW_oEx`oTGr)aUPBbwFz-I~QWcv;@Ci zA(0ofhhP5t{kXD>pMIehWVpT`(vh;J@pclUadDTkQ1P|8WAv;@v;^j|71sHw+jbMl ziP0cedvTwV03%|agMr--FW;^i%H6Oy>?hpnHsvH>2z3b!R%drE4!WL_=4?=C#Qain(?Ruepx#;Wrda2&(^9wHJotlHQSZ9uR_ZZr zHBZ}1&9GUD2eNUk0>PT(8t>XoMt`?JP7lbqf9(UA8675?igEUQM5RfaHxX4r8VU4MArXn3;(oiJzFqAEkGiYlJ(~U9sNVCvhk~n_ zj$KB%i#H}vbFFn?j|Iv3X#7xr}-3=x~Ib@AAd*)MIlnGN_5P`W zf($cz!$uv_n)ngemhwz*l3B66o}xp;^$&sYVhJ$frCn4{Q3{$5cUrKFjxcMmv&50e zi%w~VD=y`N#Iyy^Z@|!8cq69Bse!96F>OJ5NfYOJa|z6&U%s7XgZqBMpLpy2_qI)z zfQN#C92pW=^as3k)Bo%WDa4iCXIMHNN$fI$W5vu%YkDu0T=~HrBoQt6kJtN$c#ghwZjarW%XdrFk(B(@ct$3YVBp6;`T}31o%tR zdNyf7XF4Y?_3V5hIk>Jk9ON*0Wo&WpQiOVVt46|1-3;cKftGAZwR1FrH;yXENE;R@ zeZ*c@N|_%C{6oC4enMVZ=Tb-)A#-A3gQtDuzo0yL-NVF96(IqYty-xJ3anq!>u6Su zY4ARQv#p6goSj*MU=A0cu4*$zz^Sa*w0&I0+c27&vV}WmNYmqwX!oln0jKxr9fwMi zTNmMgiFWmdQWHO2s{9!cKA8b{gVB!wt*X} zk3C^#mTj`vyh`TsbAUyzJNvH09Gjx-Z9mV=(^G%a?GBylK0TI^aW*2+?_u9p{y`6t zy+%yE>1Z}~!8nCJE)u9me5r@!xMTUUPH{+5(-&#*P9z()>`5vLti4+MFT?dys{(%LG^987H;TS5$4HEIY zQQsln)$GC+aC>^EdRRqa@G3gBZlo$RKDhNDFXz?6EEs!1!`a8pV)AGs%Mknf&viL|AEP6dHXzW}MtHCat|IV01TF4@ z?5n}IuTY$`GY>VsEsd6awdd)AwBbcgBIj1;PYqi{;s&pej7A%mKE-`;Vg}_TFVK7W zZ1;zaS$?@OK$84gqUFzXXz@8@+u_CV0IqDpRfq;>t@b_H^mzaft+2Stx^^()bX`h9 zHOP~0q|vwgr@3BmQ|+Q0IWYUCI-PhtdU4dCAwY)Rr{aiMhgID`Jz&=*w4ynlk5(s`l;bZkO~?JKFVB zh~BOUPJ*#%j&F=H+?Se1Dea3+PBjOr<=wof9pzpcFHdJp|LWg^`^k6lw7$b~{0`~c z7~<)AcQi)Tv4}D+rTL1E|BENiW%Y^??D`fCi?_9vp~ffE5}$((&wY+H+uI}tN;Vr| zCC2dJ1{ZP_pyaXc1CcHgn(B1H%6iz|@~N(AJp&{~pqKT@;g%>IGe9bWb}U>VEo#Eq zEhl~Dz)&M#MM3b8)@M<)J#RR@VxTCVUn6OzfVA7FN4dpo{LMzEiV(vwqcB?!s}U8# zP1M=V&~k7!eAGia?78~fi_!qZv&gz-u@Y@7?JJ0qx_$UTx3Vt5_^L!@zfb=?06Kzh 
zBUf308;K%o)aGEZtoGujalpkb!<7s-i*?Yboq-O#MPF;^QnL|Im|9#P4RnoW+IN{l zdj+(*S?|J3ygAqYVwrVw1udfY4(ug>3ioLHxqi#_#i007&I--Fl7#M*nR|_$=|CEXbu8O*X+z~*IuXVt^HW0uIDLk<20kb9WxL^5a zoM<5^vUx|E$ZJQRS1{zW-6)ofRoloA z@U$jC&;b!NRx48G^GPzD-?9yyZ2kl9C_o*Hk~epL8!Mlt3jMti+zIJRRct?DA|V*_ zb5IWMAkfbqs{fRn8uWDu)^3D&z>CS`)8-SwVb2n#NDCtiZwxO)Tsa?8TBQE2{$Hh_ zFWzN-U06xO%=7iSTSF1Q=XrWv(10ir#qqo^g-CuBy~4Zu4@lqo=2tfyP5*R z@KunUEXLj6#U|B!E}6tnZe6d(dx3_Oi|hGRy=f|%G;xwd>w_9v^@71n9fIGXLACc~ zX-Z@<@@^TFguMhocb$r=8bH33&nl>ACIos9I&KtT-^EgD*#WQq?I7ppPR@WesmSl2 z3*Xu@$lLvR;u7FA*%fjXA^rEW4&kha2hOuYXS8=UUc8%-Clv_L(Sj5eP}ZPp?}!mc zz>x~)l^OgTVvhOkTS!N0ArKl3N(7k7M;Y$7b#JtYP<=X>gN_Wq3guCx=`d0R4iIQ- zmL*c?Cfq*p%8s&*3TAM5Sb0?7aWaZ*ZvDWvc!sa3# zp!kOJtqo3zLpQ*05%BOhA*#A;6)BZSs=f>gBH$00tX9uFO~tB<0=oXJ_<#fzBz2>t zNPkdU2Z8Buo)?}lIs3IijZ>p%<>~{0&I%0#goFB~$3_j3)=S^mTQ6Wf!OVp9k?m(4 zl1xeb`ZZQ3XIX(nje0=*Y1J-?txhFv@$UU`L)WV>=Jze~Aho|_X4rbKXAM2S=47_( z48@@(Ane>ptkcTTW>DxJ);R!f_HY^5e`xh8wmzWyZvH@~0TB8to21|fWf3&?cy>jI zBtha#O)F?d;3liRze_USTe1D8oRy1s>sD?j0kql%bF-!v)TKO44WV~JW+4H{rsPHY zfNmKNOq~-ggme#_0)mV)B=LmcaI}ZH+PkgYE}2F0RMU0%e~%~Ya#PVI)DqBn@sRgy zr%X*ka{l);g-!U>S{PL|q#2g3|} z08Nf^qH#>%3k{Jnur)%4m6E3M4GknonHM70I@rY0VU$Hr=AE9bbrf8I3M_Ti z5|s?Nx+Nd*6*WhO9c{_!E>jWCU{QK@1A-h!T^r{XB7fT~axH%cnyL>>UjnFQvbO_! 
zP6*^qTw=C;1cH|Nd+TF10k;EL1!Nrbu9D0|{LM3#+g?%2O+IrJE17ms@Z-!_M z0JsXN%#yqav;x*MUg87vHdxcp4PW;&hATZ;%QCiGFlm>gyXr5-U6d7%kuo$waz+8Y ze1i;(xqcC2PLo__x0iv9U0|A~A)y8vBf7(Vc0&ZTI4kDM$;#j0@6OoY`DPW?#~CQ% z+ydIcgVRp6_f$Fcr)JNjLlH9KO5r$AvL1fmUIzhw+Z0t+21J!p-h;a1uMb5iXEFvE z8rAXV3g!&6U74u#_$Nrm76&B|6f0p(kmf^oI8XF;l2BQ)s8jkIil9YZ3=3@4YAv8t zsFxQp=u|XWl?XzMdN>Fz9#Aw`HWN(xOkwrYcRPE)?hz5lLj+q@9$)2ci{gx>WXb}E z74_K~jDfm`->lMi$*u;faG-B{m#aW^rxC+*RJIm@j5GRhf)yWv;48H|ev`iCycbAH zA!%c5T~6U>w$mtfwA-<^qpqWYIV@04%GElJB)UnL7Y}LqxgWK2kg~AYwdisTvxhs` zLS2c>e>Fs7vFDJ{YJiy-rvN=np7Cl{AyQ0#F$Tm0+6p4+;^uG?AJEMlclD?!YX8Q0 zweG#~pC#a6CLS3BCE8m^iI%pZtFl8Lh;3RSHIjxhzNzxQg+{`KT6&`w) z<5`yAgq!ymwllA2Vp3eJ()A;Ln31tbPG@eSK7#Jr5?QI2|3>?==dIl*uug8Bj+Gr% z9dDR1<%#8~fwtrxqfqgruTQl$qcWAhR`IqxbuqM}!TGB=l-`B0MZioYv_21ppnNAJ z;c*mVQ~zg$a(A3<5=CMbPcpU4%<`*p{z`expX~6S7I6M}aqPiA`KqFq+J=y?TAvee z4Rp+l02&#A;`I9Sq@HM!bS!X+w59W#%u}4r$z{k* zFOnNNmYC<04_SNs<~^~-A^)$x2jpPEpWB84eWLIOihvLme%>W$=`H;HRYk7;EyjXO YMY@dHqk85A=>HlcP8npLxbVyW0!NBm761SM literal 0 HcmV?d00001 From cc8b83a8e85bfc65974cf5e86337855cd4724c1d Mon Sep 17 00:00:00 2001 From: Eric Payne Date: Sat, 16 Apr 2016 22:07:27 +0000 Subject: [PATCH 19/26] MAPREDUCE-6649. getFailureInfo not returning any failure info. 
Contributed by Eric Badger --- .../hadoop/mapreduce/v2/hs/CompletedJob.java | 9 ++++++- .../v2/hs/TestJobHistoryEntities.java | 27 +++++++++++++++++++ 2 files changed, 35 insertions(+), 1 deletion(-) diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java index ad4e6bc67c9..4deb9ae2f0d 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java @@ -142,6 +142,7 @@ public class CompletedJob implements org.apache.hadoop.mapreduce.v2.app.job.Job report.setFinishTime(jobInfo.getFinishTime()); report.setJobName(jobInfo.getJobname()); report.setUser(jobInfo.getUsername()); + report.setDiagnostics(jobInfo.getErrorInfo()); if ( getTotalMaps() == 0 ) { report.setMapProgress(1.0f); @@ -335,6 +336,12 @@ public class CompletedJob implements org.apache.hadoop.mapreduce.v2.app.job.Job } } + protected JobHistoryParser createJobHistoryParser(Path historyFileAbsolute) + throws IOException { + return new JobHistoryParser(historyFileAbsolute.getFileSystem(conf), + historyFileAbsolute); + } + //History data is leisurely loaded when task level data is requested protected synchronized void loadFullHistoryData(boolean loadTasks, Path historyFileAbsolute) throws IOException { @@ -347,7 +354,7 @@ public class CompletedJob implements org.apache.hadoop.mapreduce.v2.app.job.Job JobHistoryParser parser = null; try { final FileSystem fs = historyFileAbsolute.getFileSystem(conf); - parser = new JobHistoryParser(fs, historyFileAbsolute); + parser = createJobHistoryParser(historyFileAbsolute); final Path jobConfPath = 
new Path(historyFileAbsolute.getParent(), JobHistoryUtils.getIntermediateConfFileName(jobId)); final Configuration conf = new Configuration(); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryEntities.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryEntities.java index 9608fc8a3ba..c6ddae52ecb 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryEntities.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryEntities.java @@ -19,14 +19,18 @@ package org.apache.hadoop.mapreduce.v2.hs; import static org.junit.Assert.assertEquals; +import java.io.IOException; import java.util.ArrayList; import java.util.Collection; +import java.util.Collections; import java.util.List; import java.util.Map; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.mapred.JobACLsManager; +import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser; +import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.JobInfo; import org.apache.hadoop.mapreduce.v2.api.records.JobId; import org.apache.hadoop.mapreduce.v2.api.records.JobReport; import org.apache.hadoop.mapreduce.v2.api.records.JobState; @@ -236,4 +240,27 @@ public class TestJobHistoryEntities { } + @Test (timeout=30000) + public void testCompletedJobWithDiagnostics() throws Exception { + final String jobError = "Job Diagnostics"; + JobInfo jobInfo = spy(new JobInfo()); + when(jobInfo.getErrorInfo()).thenReturn(jobError); + when(jobInfo.getJobStatus()).thenReturn(JobState.FAILED.toString()); + when(jobInfo.getAMInfos()).thenReturn(Collections.emptyList()); + final JobHistoryParser mockParser = 
mock(JobHistoryParser.class); + when(mockParser.parse()).thenReturn(jobInfo); + HistoryFileInfo info = mock(HistoryFileInfo.class); + when(info.getConfFile()).thenReturn(fullConfPath); + when(info.getHistoryFile()).thenReturn(fullHistoryPath); + CompletedJob job = + new CompletedJob(conf, jobId, fullHistoryPath, loadTasks, "user", + info, jobAclsManager) { + @Override + protected JobHistoryParser createJobHistoryParser( + Path historyFileAbsolute) throws IOException { + return mockParser; + } + }; + assertEquals(jobError, job.getReport().getDiagnostics()); + } } From e6c0742012ffeacad2bcaf712d86a7e5d1420b26 Mon Sep 17 00:00:00 2001 From: Xuan Date: Sat, 16 Apr 2016 19:39:18 -0700 Subject: [PATCH 20/26] YARN-4965. Distributed shell AM failed due to ClientHandlerException thrown by jersey. Contributed by Junping Du --- .../jobhistory/JobHistoryEventHandler.java | 9 ++-- .../pom.xml | 6 +++ .../distributedshell/ApplicationMaster.java | 10 ++-- .../TestDistributedShell.java | 50 +++++++++++++++++++ .../client/api/impl/TestTimelineClient.java | 2 +- 5 files changed, 67 insertions(+), 10 deletions(-) diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java index 56907431d16..47d23892545 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java @@ -77,6 +77,8 @@ import org.codehaus.jackson.node.JsonNodeFactory; import org.codehaus.jackson.node.ObjectNode; import com.google.common.annotations.VisibleForTesting; +import 
com.sun.jersey.api.client.ClientHandlerException; + /** * The job history events get routed to this class. This class writes the Job * history events to the DFS directly into a staging dir and then moved to a @@ -1032,12 +1034,9 @@ public class JobHistoryEventHandler extends AbstractService + error.getErrorCode()); } } - } catch (IOException ex) { + } catch (YarnException | IOException | ClientHandlerException ex) { LOG.error("Error putting entity " + tEntity.getEntityId() + " to Timeline" - + "Server", ex); - } catch (YarnException ex) { - LOG.error("Error putting entity " + tEntity.getEntityId() + " to Timeline" - + "Server", ex); + + "Server", ex); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml index c118603d5e5..dba8fc0a4d1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml @@ -131,6 +131,12 @@ test-jar test + + org.apache.hadoop + hadoop-yarn-common + test-jar + test + org.apache.hadoop hadoop-hdfs diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java index cbe03480550..2b85ba8dc81 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java @@ -104,6 +104,7 @@ import org.apache.hadoop.yarn.util.timeline.TimelineUtils; import org.apache.log4j.LogManager; import com.google.common.annotations.VisibleForTesting; +import com.sun.jersey.api.client.ClientHandlerException; /** * An ApplicationMaster for executing shell commands on a set of launched @@ -1149,13 +1150,14 @@ public class ApplicationMaster { putContainerEntity(timelineClient, container.getId().getApplicationAttemptId(), entity)); - } catch (YarnException | IOException e) { + } catch (YarnException | IOException | ClientHandlerException e) { LOG.error("Container start event could not be published for " + container.getId().toString(), e); } } - private void publishContainerEndEvent( + @VisibleForTesting + void publishContainerEndEvent( final TimelineClient timelineClient, ContainerStatus container, String domainId, UserGroupInformation ugi) { final TimelineEntity entity = new TimelineEntity(); @@ -1177,7 +1179,7 @@ public class ApplicationMaster { putContainerEntity(timelineClient, container.getContainerId().getApplicationAttemptId(), entity)); - } catch (YarnException | IOException e) { + } catch (YarnException | IOException | ClientHandlerException e) { LOG.error("Container end event could not be published for " + container.getContainerId().toString(), e); } @@ -1212,7 +1214,7 @@ public class ApplicationMaster { try { TimelinePutResponse response = timelineClient.putEntities(entity); processTimelineResponseErrors(response); - } catch (YarnException | IOException e) { + } catch (YarnException | IOException | ClientHandlerException e) { LOG.error("App Attempt " + (appEvent.equals(DSEvent.DS_APP_ATTEMPT_START) ? 
"start" : "end") + " event could not be published for " diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java index 65360508caa..2b46fca4b45 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java @@ -18,6 +18,10 @@ package org.apache.hadoop.yarn.applications.distributedshell; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.when; + import java.io.BufferedReader; import java.io.ByteArrayOutputStream; import java.io.File; @@ -27,6 +31,7 @@ import java.io.IOException; import java.io.OutputStream; import java.io.PrintWriter; import java.net.InetAddress; +import java.net.URI; import java.net.URL; import java.util.ArrayList; import java.util.Arrays; @@ -46,14 +51,24 @@ import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.net.ServerSocketUtil; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.JarFinder; import org.apache.hadoop.util.Shell; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationReport; +import org.apache.hadoop.yarn.api.records.ContainerState; +import org.apache.hadoop.yarn.api.records.ContainerStatus; import 
org.apache.hadoop.yarn.api.records.YarnApplicationState; import org.apache.hadoop.yarn.api.records.timeline.TimelineDomain; import org.apache.hadoop.yarn.api.records.timeline.TimelineEntities; +import org.apache.hadoop.yarn.applications.distributedshell.ApplicationMaster; +import org.apache.hadoop.yarn.client.api.impl.DirectTimelineWriter; +import org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl; +import org.apache.hadoop.yarn.client.api.impl.TimelineWriter; +import org.apache.hadoop.yarn.client.api.impl.TestTimelineClient; +import org.apache.hadoop.yarn.client.api.TimelineClient; import org.apache.hadoop.yarn.client.api.YarnClient; + import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.MiniYARNCluster; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler; @@ -61,6 +76,7 @@ import org.apache.hadoop.yarn.server.timeline.PluginStoreTestUtils; import org.apache.hadoop.yarn.server.timeline.NameValuePair; import org.apache.hadoop.yarn.server.timeline.TimelineVersion; import org.apache.hadoop.yarn.server.timeline.TimelineVersionWatcher; +import org.apache.hadoop.yarn.server.utils.BuilderUtils; import org.apache.hadoop.yarn.util.ConverterUtils; import org.junit.After; import org.junit.Assert; @@ -69,6 +85,8 @@ import org.junit.Rule; import org.junit.Test; import org.junit.rules.Timeout; +import com.sun.jersey.api.client.ClientHandlerException; + public class TestDistributedShell { private static final Log LOG = @@ -77,6 +95,7 @@ public class TestDistributedShell { protected MiniYARNCluster yarnCluster = null; protected MiniDFSCluster hdfsCluster = null; private FileSystem fs = null; + private TimelineWriter spyTimelineWriter; protected YarnConfiguration conf = null; private static final int NUM_NMS = 1; private static final float DEFAULT_TIMELINE_VERSION = 1.0f; @@ -865,6 +884,37 @@ public class TestDistributedShell { } } + @Test + public void testDSTimelineClientWithConnectionRefuse() 
throws Exception { + ApplicationMaster am = new ApplicationMaster(); + + TimelineClientImpl client = new TimelineClientImpl() { + @Override + protected TimelineWriter createTimelineWriter(Configuration conf, + UserGroupInformation authUgi, com.sun.jersey.api.client.Client client, + URI resURI) throws IOException { + TimelineWriter timelineWriter = + new DirectTimelineWriter(authUgi, client, resURI); + spyTimelineWriter = spy(timelineWriter); + return spyTimelineWriter; + } + }; + client.init(conf); + client.start(); + TestTimelineClient.mockEntityClientResponse(spyTimelineWriter, null, + false, true); + try { + UserGroupInformation ugi = mock(UserGroupInformation.class); + when(ugi.getShortUserName()).thenReturn("user1"); + // verify no ClientHandlerException get thrown out. + am.publishContainerEndEvent(client, ContainerStatus.newInstance( + BuilderUtils.newContainerId(1, 1, 1, 1), ContainerState.COMPLETE, "", + 1), "domainId", ugi); + } finally { + client.stop(); + } + } + protected void waitForNMsToRegister() throws Exception { int sec = 60; while (sec >= 0) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineClient.java index 39fc8deb3ae..d5e186c9b6a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineClient.java @@ -298,7 +298,7 @@ public class TestTimelineClient { client.connectionRetry.getRetired()); } - private static ClientResponse mockEntityClientResponse( + public static ClientResponse mockEntityClientResponse( TimelineWriter spyTimelineWriter, ClientResponse.Status status, boolean hasError, boolean hasRuntimeError) { ClientResponse response 
= mock(ClientResponse.class); From fdc46bfb37776d8c41b68f6c33a2379d0f329994 Mon Sep 17 00:00:00 2001 From: Wangda Tan Date: Sat, 16 Apr 2016 22:47:41 -0700 Subject: [PATCH 21/26] YARN-4934. Reserved Resource for QueueMetrics needs to be handled correctly in few cases. (Sunil G via wangda) --- .../scheduler/capacity/LeafQueue.java | 7 - .../common/fica/FiCaSchedulerApp.java | 2 + .../capacity/TestContainerAllocation.java | 188 +++++++++++++++++- 3 files changed, 189 insertions(+), 8 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java index aabdf9c286b..fbcb91c453b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java @@ -1348,13 +1348,6 @@ public class LeafQueue extends AbstractCSQueue { // Book-keeping if (removed) { - // track reserved resource for metrics, for normal container - // getReservedResource will be null. 
- Resource reservedRes = rmContainer.getReservedResource(); - if (reservedRes != null && !reservedRes.equals(Resources.none())) { - decReservedResource(node.getPartition(), reservedRes); - } - // Inform the ordering policy orderingPolicy.containerReleased(application, rmContainer); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java index f474aad2d0d..35329d27f38 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java @@ -246,6 +246,8 @@ public class FiCaSchedulerApp extends SchedulerApplicationAttempt { // Update reserved metrics queue.getMetrics().unreserveResource(getUser(), rmContainer.getReservedResource()); + queue.decReservedResource(node.getPartition(), + rmContainer.getReservedResource()); return true; } return false; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java index 84eba109611..f94c963ec4d 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java @@ -28,6 +28,7 @@ import org.apache.hadoop.security.SecurityUtilTestHelper; import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.ContainerStatus; import org.apache.hadoop.yarn.api.records.LogAggregationContext; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.Priority; @@ -37,6 +38,8 @@ import org.apache.hadoop.yarn.api.records.Token; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.security.ContainerTokenIdentifier; import org.apache.hadoop.yarn.server.api.ContainerType; +import org.apache.hadoop.yarn.server.api.records.NodeHealthStatus; +import org.apache.hadoop.yarn.server.api.records.NodeStatus; import org.apache.hadoop.yarn.server.resourcemanager.MockAM; import org.apache.hadoop.yarn.server.resourcemanager.MockNM; import org.apache.hadoop.yarn.server.resourcemanager.MockRM; @@ -50,8 +53,13 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptS import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerState; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; +import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEvent; +import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEventType; +import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeImpl; +import 
org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeStatusEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeRemovedSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager; import org.apache.hadoop.yarn.server.utils.BuilderUtils; @@ -417,5 +425,183 @@ public class TestContainerAllocation { rm1.close(); } - + + @Test(timeout = 60000) + public void testAllocationForReservedContainer() throws Exception { + /** + * Test case: Submit two application (app1/app2) to a queue. And there's one + * node with 8G resource in the cluster. App1 allocates a 6G container, Then + * app2 asks for a 4G container. App2's request will be reserved on the + * node. + * + * Before next node heartbeat, app1 container is completed/killed. So app1 + * container which was reserved will be allocated. 
+ */ + // inject node label manager + MockRM rm1 = new MockRM(); + + rm1.getRMContext().setNodeLabelManager(mgr); + rm1.start(); + MockNM nm1 = rm1.registerNode("h1:1234", 8 * GB); + MockNM nm2 = rm1.registerNode("h2:1234", 8 * GB); + + // launch an app to queue, AM container should be launched in nm1 + RMApp app1 = rm1.submitApp(1 * GB, "app", "user", null, "default"); + MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1); + + // launch another app to queue, AM container should be launched in nm1 + RMApp app2 = rm1.submitApp(1 * GB, "app", "user", null, "default"); + MockAM am2 = MockRM.launchAndRegisterAM(app2, rm1, nm1); + + am1.allocate("*", 4 * GB, 1, new ArrayList()); + am2.allocate("*", 4 * GB, 1, new ArrayList()); + + CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler(); + RMNode rmNode1 = rm1.getRMContext().getRMNodes().get(nm1.getNodeId()); + LeafQueue leafQueue = (LeafQueue) cs.getQueue("default"); + + // Do node heartbeats 2 times + // First time will allocate container for app1, second time will reserve + // container for app2 + cs.handle(new NodeUpdateSchedulerEvent(rmNode1)); + cs.handle(new NodeUpdateSchedulerEvent(rmNode1)); + + // App2 will get preference to be allocated on node1, and node1 will be all + // used by App2. 
+ FiCaSchedulerApp schedulerApp1 = + cs.getApplicationAttempt(am1.getApplicationAttemptId()); + FiCaSchedulerApp schedulerApp2 = + cs.getApplicationAttempt(am2.getApplicationAttemptId()); + + // Check if a 4G container allocated for app1, and nothing allocated for app2 + Assert.assertEquals(2, schedulerApp1.getLiveContainers().size()); + Assert.assertEquals(1, schedulerApp2.getLiveContainers().size()); + Assert.assertTrue(schedulerApp2.getReservedContainers().size() > 0); + + // NM1 has available resource = 2G (8G - 2 * 1G - 4G) + Assert.assertEquals(2 * GB, cs.getNode(nm1.getNodeId()) + .getUnallocatedResource().getMemory()); + Assert.assertNotNull(cs.getNode(nm1.getNodeId()).getReservedContainer()); + // Usage of queue = 4G + 2 * 1G + 4G (reserved) + Assert.assertEquals(10 * GB, cs.getRootQueue().getQueueResourceUsage() + .getUsed().getMemory()); + Assert.assertEquals(4 * GB, cs.getRootQueue().getQueueResourceUsage() + .getReserved().getMemory()); + Assert.assertEquals(4 * GB, leafQueue.getQueueResourceUsage().getReserved() + .getMemory()); + + // Mark one app1 container as killed/completed and re-kick RM + for (RMContainer container : schedulerApp1.getLiveContainers()) { + if (container.isAMContainer()) { + continue; + } + cs.markContainerForKillable(container); + } + // Cancel asks of app1 and re-kick RM + am1.allocate("*", 4 * GB, 0, new ArrayList()); + cs.handle(new NodeUpdateSchedulerEvent(rmNode1)); + + // Check 4G container cancelled for app1, and one container allocated for + // app2 + Assert.assertEquals(1, schedulerApp1.getLiveContainers().size()); + Assert.assertEquals(2, schedulerApp2.getLiveContainers().size()); + Assert.assertFalse(schedulerApp2.getReservedContainers().size() > 0); + + // NM1 has available resource = 2G (8G - 2 * 1G - 4G) + Assert.assertEquals(2 * GB, cs.getNode(nm1.getNodeId()) + .getUnallocatedResource().getMemory()); + Assert.assertNull(cs.getNode(nm1.getNodeId()).getReservedContainer()); + // Usage of queue = 4G + 2 * 1G + 
Assert.assertEquals(6 * GB, cs.getRootQueue().getQueueResourceUsage() + .getUsed().getMemory()); + Assert.assertEquals(0 * GB, cs.getRootQueue().getQueueResourceUsage() + .getReserved().getMemory()); + Assert.assertEquals(0 * GB, leafQueue.getQueueResourceUsage().getReserved() + .getMemory()); + + rm1.close(); + } + + @Test(timeout = 60000) + public void testReservedContainerMetricsOnDecommisionedNode() throws Exception { + /** + * Test case: Submit two application (app1/app2) to a queue. And there's one + * node with 8G resource in the cluster. App1 allocates a 6G container, Then + * app2 asks for a 4G container. App2's request will be reserved on the + * node. + * + * Before next node heartbeat, app1 container is completed/killed. So app1 + * container which was reserved will be allocated. + */ + // inject node label manager + MockRM rm1 = new MockRM(); + + rm1.getRMContext().setNodeLabelManager(mgr); + rm1.start(); + MockNM nm1 = rm1.registerNode("h1:1234", 8 * GB); + MockNM nm2 = rm1.registerNode("h2:1234", 8 * GB); + + // launch an app to queue, AM container should be launched in nm1 + RMApp app1 = rm1.submitApp(1 * GB, "app", "user", null, "default"); + MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1); + + // launch another app to queue, AM container should be launched in nm1 + RMApp app2 = rm1.submitApp(1 * GB, "app", "user", null, "default"); + MockAM am2 = MockRM.launchAndRegisterAM(app2, rm1, nm1); + + am1.allocate("*", 4 * GB, 1, new ArrayList()); + am2.allocate("*", 4 * GB, 1, new ArrayList()); + + CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler(); + RMNode rmNode1 = rm1.getRMContext().getRMNodes().get(nm1.getNodeId()); + LeafQueue leafQueue = (LeafQueue) cs.getQueue("default"); + + // Do node heartbeats 2 times + // First time will allocate container for app1, second time will reserve + // container for app2 + cs.handle(new NodeUpdateSchedulerEvent(rmNode1)); + cs.handle(new NodeUpdateSchedulerEvent(rmNode1)); + + // App2 
will get preference to be allocated on node1, and node1 will be all + // used by App2. + FiCaSchedulerApp schedulerApp1 = + cs.getApplicationAttempt(am1.getApplicationAttemptId()); + FiCaSchedulerApp schedulerApp2 = + cs.getApplicationAttempt(am2.getApplicationAttemptId()); + + // Check if a 4G container allocated for app1, and nothing allocated for app2 + Assert.assertEquals(2, schedulerApp1.getLiveContainers().size()); + Assert.assertEquals(1, schedulerApp2.getLiveContainers().size()); + Assert.assertTrue(schedulerApp2.getReservedContainers().size() > 0); + + // NM1 has available resource = 2G (8G - 2 * 1G - 4G) + Assert.assertEquals(2 * GB, cs.getNode(nm1.getNodeId()) + .getUnallocatedResource().getMemory()); + Assert.assertNotNull(cs.getNode(nm1.getNodeId()).getReservedContainer()); + // Usage of queue = 4G + 2 * 1G + 4G (reserved) + Assert.assertEquals(10 * GB, cs.getRootQueue().getQueueResourceUsage() + .getUsed().getMemory()); + Assert.assertEquals(4 * GB, cs.getRootQueue().getQueueResourceUsage() + .getReserved().getMemory()); + Assert.assertEquals(4 * GB, leafQueue.getQueueResourceUsage().getReserved() + .getMemory()); + + // Remove the node + cs.handle(new NodeRemovedSchedulerEvent(rmNode1)); + + // Check all container cancelled for app1 and app2 + Assert.assertEquals(0, schedulerApp1.getLiveContainers().size()); + Assert.assertEquals(0, schedulerApp2.getLiveContainers().size()); + Assert.assertFalse(schedulerApp2.getReservedContainers().size() > 0); + + // Usage and Reserved capacity of queue is 0 + Assert.assertEquals(0 * GB, cs.getRootQueue().getQueueResourceUsage() + .getUsed().getMemory()); + Assert.assertEquals(0 * GB, cs.getRootQueue().getQueueResourceUsage() + .getReserved().getMemory()); + Assert.assertEquals(0 * GB, leafQueue.getQueueResourceUsage().getReserved() + .getMemory()); + + rm1.close(); + } } From 67523ffcf491f4f2db5335899c00a174d0caaa9b Mon Sep 17 00:00:00 2001 From: Walter Su Date: Mon, 18 Apr 2016 09:28:02 +0800 Subject: [PATCH 
22/26] HDFS-9412. getBlocks occupies FSLock and takes too long to complete. Contributed by He Tianyi. --- .../server/blockmanagement/BlockManager.java | 17 +++++++++++++++++ .../org/apache/hadoop/hdfs/TestGetBlocks.java | 8 ++++++-- 2 files changed, 23 insertions(+), 2 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java index 104d72379a4..8b50ef884f6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java @@ -305,6 +305,14 @@ public class BlockManager implements BlockStatsMXBean { * processed again after aquiring lock again. */ private int numBlocksPerIteration; + + /** + * Minimum size that a block can be sent to Balancer through getBlocks. + * And after HDFS-8824, the small blocks are unused anyway, so there's no + * point to send them to balancer. + */ + private long getBlocksMinBlockSize = -1; + /** * Progress of the Reconstruction queues initialisation. */ @@ -414,6 +422,9 @@ public class BlockManager implements BlockStatsMXBean { this.numBlocksPerIteration = conf.getInt( DFSConfigKeys.DFS_BLOCK_MISREPLICATION_PROCESSING_LIMIT, DFSConfigKeys.DFS_BLOCK_MISREPLICATION_PROCESSING_LIMIT_DEFAULT); + this.getBlocksMinBlockSize = conf.getLongBytes( + DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, + DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_DEFAULT); this.blockReportLeaseManager = new BlockReportLeaseManager(conf); bmSafeMode = new BlockManagerSafeMode(this, namesystem, haEnabled, conf); @@ -1179,6 +1190,9 @@ public class BlockManager implements BlockStatsMXBean { while(totalSize Date: Mon, 18 Apr 2016 20:29:29 +0800 Subject: [PATCH 23/26] HDFS-10275. 
TestDataNodeMetrics failing intermittently due to TotalWriteTime counted incorrectly. Contributed by Lin Yiqun. --- .../hadoop/hdfs/server/datanode/TestDataNodeMetrics.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java index 5f9b60267c5..355f7a1e753 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java @@ -258,10 +258,9 @@ public class TestDataNodeMetrics { * and reading causes totalReadTime to move. * @throws Exception */ - @Test(timeout=60000) + @Test(timeout=120000) public void testDataNodeTimeSpend() throws Exception { Configuration conf = new HdfsConfiguration(); - SimulatedFSDataset.setFactory(conf); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build(); try { final FileSystem fs = cluster.getFileSystem(); @@ -284,6 +283,7 @@ public class TestDataNodeMetrics { DFSTestUtil.createFile(fs, new Path("/time.txt." + x.get()), LONG_FILE_LEN, (short) 1, Time.monotonicNow()); DFSTestUtil.readFile(fs, new Path("/time.txt." + x.get())); + fs.delete(new Path("/time.txt." + x.get()), true); } catch (IOException ioe) { LOG.error("Caught IOException while ingesting DN metrics", ioe); return false; @@ -294,7 +294,7 @@ public class TestDataNodeMetrics { return endWriteValue > startWriteValue && endReadValue > startReadValue; } - }, 30, 30000); + }, 30, 60000); } finally { if (cluster != null) { cluster.shutdown(); From d8b729e16fb253e6c84f414d419b5663d9219a43 Mon Sep 17 00:00:00 2001 From: Kihwal Lee Date: Mon, 18 Apr 2016 07:58:55 -0500 Subject: [PATCH 24/26] HDFS-10302. 
BlockPlacementPolicyDefault should use default replication considerload value. Contributed by Lin Yiqun. --- .../server/blockmanagement/BlockPlacementPolicyDefault.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java index f20f5fb9325..474a5e7799a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java @@ -79,7 +79,8 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy { NetworkTopology clusterMap, Host2NodesMap host2datanodeMap) { this.considerLoad = conf.getBoolean( - DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, true); + DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, + DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_DEFAULT); this.considerLoadFactor = conf.getDouble( DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_FACTOR, DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_FACTOR_DEFAULT); From 477003730e6a7c7eff11892f5cedf74073ca867b Mon Sep 17 00:00:00 2001 From: Vinod Kumar Vavilapalli Date: Mon, 18 Apr 2016 11:47:06 -0700 Subject: [PATCH 25/26] Fixed TimelineClient to retry SocketTimeoutException too. Contributed by Xuan Gong. 
--- .../client/api/impl/TimelineClientImpl.java | 74 +++++++++++++------ .../client/api/impl/TestTimelineClient.java | 41 ++++++++++ 2 files changed, 93 insertions(+), 22 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java index ef4622972f3..8c600416954 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java @@ -24,6 +24,7 @@ import java.lang.reflect.UndeclaredThrowableException; import java.net.ConnectException; import java.net.HttpURLConnection; import java.net.InetSocketAddress; +import java.net.SocketTimeoutException; import java.net.URI; import java.net.URL; import java.net.URLConnection; @@ -116,7 +117,9 @@ public class TimelineClientImpl extends TimelineClient { TimelineClientConnectionRetry connectionRetry; // Abstract class for an operation that should be retried by timeline client - private static abstract class TimelineClientRetryOp { + @Private + @VisibleForTesting + public static abstract class TimelineClientRetryOp { // The operation that should be retried public abstract Object run() throws IOException; // The method to indicate if we should retry given the incoming exception @@ -449,27 +452,8 @@ public class TimelineClientImpl extends TimelineClient { final PrivilegedExceptionAction action) throws IOException, YarnException { // Set up the retry operation - TimelineClientRetryOp tokenRetryOp = new TimelineClientRetryOp() { - - @Override - public Object run() throws IOException { - // Try pass the request, if fail, keep retrying - authUgi.checkTGTAndReloginFromKeytab(); - try { - return authUgi.doAs(action); 
- } catch (UndeclaredThrowableException e) { - throw new IOException(e.getCause()); - } catch (InterruptedException e) { - throw new IOException(e); - } - } - - @Override - public boolean shouldRetryOn(Exception e) { - // Only retry on connection exceptions - return (e instanceof ConnectException); - } - }; + TimelineClientRetryOp tokenRetryOp = + createTimelineClientRetryOpForOperateDelegationToken(action); return connectionRetry.retryOn(tokenRetryOp); } @@ -680,4 +664,50 @@ public class TimelineClientImpl extends TimelineClient { public void setTimelineWriter(TimelineWriter writer) { this.timelineWriter = writer; } + + @Private + @VisibleForTesting + public TimelineClientRetryOp + createTimelineClientRetryOpForOperateDelegationToken( + final PrivilegedExceptionAction action) throws IOException { + return new TimelineClientRetryOpForOperateDelegationToken( + this.authUgi, action); + } + + @Private + @VisibleForTesting + public class TimelineClientRetryOpForOperateDelegationToken + extends TimelineClientRetryOp { + + private final UserGroupInformation authUgi; + private final PrivilegedExceptionAction action; + + public TimelineClientRetryOpForOperateDelegationToken( + UserGroupInformation authUgi, PrivilegedExceptionAction action) { + this.authUgi = authUgi; + this.action = action; + } + + @Override + public Object run() throws IOException { + // Try pass the request, if fail, keep retrying + authUgi.checkTGTAndReloginFromKeytab(); + try { + return authUgi.doAs(action); + } catch (UndeclaredThrowableException e) { + throw new IOException(e.getCause()); + } catch (InterruptedException e) { + throw new IOException(e); + } + } + + @Override + public boolean shouldRetryOn(Exception e) { + // retry on connection exceptions + // and SocketTimeoutException + return (e instanceof ConnectException + || e instanceof SocketTimeoutException); + } + } + } diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineClient.java index d5e186c9b6a..41b788dcbac 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineClient.java @@ -27,7 +27,9 @@ import static org.mockito.Mockito.when; import java.io.IOException; import java.net.ConnectException; +import java.net.SocketTimeoutException; import java.net.URI; +import java.security.PrivilegedExceptionAction; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; @@ -234,6 +236,8 @@ public class TestTimelineClient { UserGroupInformation.setConfiguration(conf); TimelineClientImpl client = createTimelineClient(conf); + TimelineClientImpl clientFake = + createTimelineClientFakeTimelineClientRetryOp(conf); TestTimlineDelegationTokenSecretManager dtManager = new TestTimlineDelegationTokenSecretManager(); try { @@ -278,8 +282,24 @@ public class TestTimelineClient { } catch (RuntimeException ce) { assertException(client, ce); } + + // Test DelegationTokenOperationsRetry on SocketTimeoutException + try { + TimelineDelegationTokenIdentifier timelineDT = + new TimelineDelegationTokenIdentifier( + new Text("tester"), new Text("tester"), new Text("tester")); + clientFake.cancelDelegationToken( + new Token(timelineDT.getBytes(), + dtManager.createPassword(timelineDT), + timelineDT.getKind(), + new Text("0.0.0.0:8188"))); + assertFail(); + } catch (RuntimeException ce) { + assertException(clientFake, ce); + } } finally { client.stop(); + clientFake.stop(); dtManager.stopThreads(); } } @@ -393,6 +413,27 @@ public class TestTimelineClient { return client; 
} + private TimelineClientImpl createTimelineClientFakeTimelineClientRetryOp( + YarnConfiguration conf) { + TimelineClientImpl client = new TimelineClientImpl() { + + @Override + public TimelineClientRetryOp + createTimelineClientRetryOpForOperateDelegationToken( + final PrivilegedExceptionAction action) throws IOException { + TimelineClientRetryOpForOperateDelegationToken op = + spy(new TimelineClientRetryOpForOperateDelegationToken( + UserGroupInformation.getCurrentUser(), action)); + doThrow(new SocketTimeoutException("Test socketTimeoutException")) + .when(op).run(); + return op; + } + }; + client.init(conf); + client.start(); + return client; + } + private static class TestTimlineDelegationTokenSecretManager extends AbstractDelegationTokenSecretManager { From cb3ca460efb97be8c031bdb14bb7705cc25f2117 Mon Sep 17 00:00:00 2001 From: Colin Patrick Mccabe Date: Mon, 18 Apr 2016 11:45:18 -0700 Subject: [PATCH 26/26] HDFS-10265. OEV tool fails to read edit xml file if OP_UPDATE_BLOCKS has no BLOCK tag (Wan Chang via cmccabe) --- .../hadoop/hdfs/server/namenode/FSEditLogOp.java | 3 ++- .../java/org/apache/hadoop/hdfs/DFSTestUtil.java | 12 ++++++++++++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java index c4e1a78b5ad..a3285a948a9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java @@ -1096,7 +1096,8 @@ public abstract class FSEditLogOp { @Override void fromXml(Stanza st) throws InvalidXmlException { this.path = st.getValue("PATH"); - List blocks = st.getChildren("BLOCK"); + List blocks = st.hasChildren("BLOCK") ? 
+ st.getChildren("BLOCK") : new ArrayList(); this.blocks = new Block[blocks.size()]; for (int i = 0; i < blocks.size(); i++) { this.blocks[i] = FSEditLogOp.blockFromXml(blocks.get(i)); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java index 8a52bbb1e08..d159fc55d0e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java @@ -1272,6 +1272,18 @@ public class DFSTestUtil { // OP_APPEND 47 FSDataOutputStream s2 = filesystem.append(pathFileCreate, 4096, null); s2.close(); + + // OP_UPDATE_BLOCKS 25 + final String updateBlockFile = "/update_blocks"; + FSDataOutputStream fout = filesystem.create(new Path(updateBlockFile), true, 4096, (short)1, 4096L); + fout.write(1); + fout.hflush(); + long fileId = ((DFSOutputStream)fout.getWrappedStream()).getFileId(); + DFSClient dfsclient = DFSClientAdapter.getDFSClient(filesystem); + LocatedBlocks blocks = dfsclient.getNamenode().getBlockLocations(updateBlockFile, 0, Integer.MAX_VALUE); + dfsclient.getNamenode().abandonBlock(blocks.get(0).getBlock(), fileId, updateBlockFile, dfsclient.clientName); + fout.close(); + // OP_SET_STORAGE_POLICY 45 filesystem.setStoragePolicy(pathFileCreate, HdfsConstants.HOT_STORAGE_POLICY_NAME);